text stringlengths 38 1.54M |
|---|
import pandas
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
from tqdm import tqdm
import math
# Prompt for the input/output workbook names; ".xlsx" is appended automatically.
input_filename=input("Enter input filename(without ext.):")
input_filename=input_filename+".xlsx"
output_filename=input("Enter output filename(without ext.):")
output_filename=output_filename+".xlsx"
def correct(df, order=None):
    """Fill in missing latitude/longitude values by geocoding addresses.

    For every row whose ``latitude`` is NaN, progressively less specific
    address strings are tried (address+block+district, block+district,
    district, all suffixed ", Punjab, India") until Nominatim resolves
    one.  Successful rows get ``final_address``, ``latitude`` and
    ``longitude`` written back in place.

    :param df: DataFrame with columns ``latitude``, ``patient_address``,
        ``patient_block`` and ``patient_district_name``; assumed to use a
        0..n-1 RangeIndex (rows are fetched with ``df.loc[row_num]``).
    :param order: unused; kept for backward compatibility with callers
        (now defaults to None so callers may omit it).
    :return: the same DataFrame, updated in place.
    """
    locator = Nominatim(user_agent="myGeocoder")
    # BUG FIX: the original built this rate-limited wrapper but then
    # called locator.geocode directly, bypassing it; route all queries
    # through the wrapper (Nominatim's usage policy requires throttling).
    geocode = RateLimiter(locator.geocode, min_delay_seconds=0)
    # BUG FIX: `exceptions` was an undefined global in the original and
    # raised NameError whenever every candidate address failed to resolve.
    exceptions = []
    for row_num in tqdm(range(len(df.index))):
        row = df.loc[row_num]
        # only rows with a missing latitude need geocoding
        if not math.isnan(row["latitude"]):
            continue
        block = str(row["patient_block"])
        district = str(row["patient_district_name"])
        # most-specific address first, then coarser fallbacks
        candidates = [
            str(row["patient_address"]) + ", " + block + ", " + district + ", Punjab, India",
            block + ", " + district + ", Punjab, India",
            district + ", Punjab, India",
        ]
        location = None
        for address in candidates:
            location = geocode(address)
            if location is not None:
                break
        if location is None:
            exceptions.append(row_num)
            continue
        point = tuple(location.point)
        df.at[row_num, "final_address"] = address
        df.at[row_num, "latitude"] = point[0]
        df.at[row_num, "longitude"] = point[1]
    return df
# Load the spreadsheet, geocode missing coordinates, and save the result.
df = pandas.read_excel(input_filename)
# BUG FIX: the original passed an undefined name `order` (NameError on
# every run) and aliased df1 to df; pass None (the argument is unused)
# and work on an explicit copy so the raw input stays intact.
df1 = correct(df.copy(), None)
df1.to_excel(output_filename)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Huoty, All rights reserved
# Author: Huoty <sudohuoty@163.com>
# CreateTime: 2018-02-12 17:40:18
import os
import sys
import logging
from inspect import isgenerator
from pprint import pprint
from kmailbox import Message, MailBox, string_types
try:
from unittest import mock
except ImportError:
import mock
# Sample HTML body used by the send tests below; the cid:N references are
# resolved against inline image attachments declared as "cid0:..."/"cid1:...".
html_content = '''\
<body>
<p><img src="cid:0"></p>
<p>Hello! I am <em>Huoty</em>.</p>
<p>How are you?</p>
<p>Give you a picture:</p>
<p><img src="cid:1"></p>
</body>
'''
class TestMessage(object):
    """Unit tests for kmailbox.Message that require no network access."""

    def test_property(self):
        # sender starts unset and behaves as a plain read/write property
        msg = Message()
        assert msg.sender is None
        msg.sender = "hello@email.com"
        assert msg.sender == "hello@email.com"
        # to_addrs aggregates both the To and Cc recipients
        msg.recipient = "to@email.com"
        msg.cc_recipient = "cc@email.com"
        assert len(msg.to_addrs) == 2

    def test_as_string(self):
        # a populated message must serialize to a non-empty string
        msg = Message()
        msg.sender = "Test<test@email.com>"
        msg.recipient = "to@email.com"
        msg.reply_recipient = "reply@email.com"
        msg.content = "This is Test"
        msg_str = msg.as_string()
        print(msg_str)
        assert msg_str
class TestMailBox(object):
    """Integration tests for kmailbox.MailBox.

    These talk to real IMAP/SMTP servers: hosts and credentials come from
    the KMAILBOX_* environment variables, so the suite needs network
    access and a configured mail account to run.
    """

    def setup_class(cls):
        # verbose kmailbox logging to stdout for easier debugging
        logger = logging.getLogger("kmailbox")
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter(
            "%(levelname)s - %(asctime)s - %(message)s"
        ))
        logger.addHandler(handler)
        logger.propagate = False
        # one shared mailbox connection for the whole test class
        cls.mailbox = MailBox(
            imap_host=os.getenv("KMAILBOX_IMAP_HOST"),
            smtp_host=os.getenv("KMAILBOX_SMTP_HOST"),
            use_ssl=True,
            logger=logger,
        )
        cls.mailuser = os.environ["KMAILBOX_USERNAME"]
        cls.mailbox.username = cls.mailuser
        cls.mailbox.password = os.environ["KMAILBOX_PASSWORD"]
        cls.sender = "Tester<{}>".format(cls.mailuser)
        cls.recipient = "huayongkuang@foxmail.com"
        cls.cc_recipient = ["测试<{}>".format(cls.mailuser)]

    def teardown_class(cls):
        # release the shared connection
        cls.mailbox.close()

    def create_message(self):
        """Build a Message pre-filled with the class-level addresses."""
        msg = Message()
        msg.sender = self.sender
        msg.recipient = self.recipient
        msg.cc_recipient = self.cc_recipient
        return msg

    def test_sendmail(self):
        # plain-text send
        msg = self.create_message()
        msg.subject = "kmailbox 测试"
        msg.content = "This is test"
        print(msg.as_string())
        self.mailbox.send(msg)

    def test_send_html_mail(self):
        # HTML send with two inline (cid) images
        msg = self.create_message()
        msg.subject = "kmailbox test send html mail"
        msg.content = html_content
        msg.is_html = True
        msg.attachments = ["cid0:imgs/mailbox-icon.png",
                           "cid1:imgs/20171005170550.jpg"]
        # presumably switches this send to TLS instead of SSL -- confirm
        # against kmailbox's use_tls semantics
        with mock.patch.object(self.mailbox, "use_tls", True):
            self.mailbox.send(msg)

    def test_send_attachments(self):
        # HTML send with inline images plus regular file attachments
        msg = self.create_message()
        msg.subject = "kmailbox test send attachments"
        msg.content = html_content
        msg.is_html = True
        msg.attachments = ["cid0:imgs/mailbox-icon.png",
                           "cid1:imgs/20171005170550.jpg",
                           "kmailbox.py", "README.md"]
        self.mailbox.send(msg)

    def test_receive_mails(self):
        # list mails in the default folder and check content access
        print(self.mailbox.folders)
        self.mailbox.select()
        # print(self.mailbox.select("垃圾邮件"))
        mails = self.mailbox.all(mark_seen=False)
        # mails = self.mailbox.unread(mark_seen=False)
        # mails = self.mailbox.new(mark_seen=False)
        # mails = self.mailbox.from_criteria("test@mail.net", mark_seen=False)
        print(mails)
        pprint([{
            "uid": mail.uid,
            "sender": mail.sender,
            "to_addrs": mail.to_addrs,
            "subject": mail.subject,
            "date": str(mail.date),
            "flags": mail.flags,
            "attachments": [att.filename for att in mail.attachments],
        } for mail in mails])
        assert isinstance(mails[0].content, string_types)
        # gen=True must return a lazy generator instead of a list
        assert isgenerator(self.mailbox.all(mark_seen=False, gen=True))

    def test_flag(self):
        # toggle the Seen flag on a fixed message uid
        self.mailbox.select()
        print(self.mailbox.mark_as_unseen("1384335828"))
        print(self.mailbox.mark_as_seen("1384335828"))

    def test_delete_mail(self):
        # mark two uids deleted, then expunge them
        self.mailbox.select()
        self.mailbox.mark_as_delete("1384335845, 1384335844")
        self.mailbox.expunge()

    def test_download_attachment(self):
        # download every attachment of every mail to ~/Temp
        self.mailbox.select()
        for mail in self.mailbox.all(mark_seen=False, gen=True):
            if not mail.attachments:
                continue
            for att in mail.attachments:
                att.download(os.path.expanduser("~/Temp"))
                print("download attachment '%s'" % att.filename)

    def test_move_mails(self):
        # move mails addressed to test@mail.com into the 'test' folder
        print("Folders:", self.mailbox.folders)
        self.mailbox.select()
        self.mailbox.move("test", criterions='TO "test@mail.com"')
|
# -*- coding: utf-8 -*-
# MySQL database configuration.
# SECURITY NOTE(review): credentials are hard-coded in source; prefer
# loading them from environment variables or an untracked config file.
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PASS = 'Zx3620382.'
|
"""
Keras main file. Preprosessing and data generator + training entrance.
Biyi Fang
"""
from __future__ import print_function
import os
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import time
import numpy as np
import ReNet
import scipy
from numpy.random import shuffle
from sklearn.externals import joblib
# Training hyper-parameters.
batch_size = 40
num_classes = 10  # CIFAR-10
epochs = 20
data_augmentation = True
# Checkpoint location; the timestamp in the name keeps runs distinct.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'VGG16-E30p-1fc512-epoch_{}.h5'.format(time.time())
# def data_generator(data_gen):
# while True:
# x, y = data_gen.next()
# yield x, [y] * num_classifiers
def prepro_fn(img):
    """VGG-style preprocessing: subtract the per-channel mean pixel.

    There is NO scaling/normalization beyond the mean shift.

    :param img: H x W x 3 image (anything np.array accepts)
    :return: new float32 array with channel means removed (input untouched)
    """
    img = np.array(img).astype('float32')
    # BGR channel means used by the original VGG models
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        img[..., channel] -= mean
    return img
def resize(gen):
    """Wrap a Keras data generator: upscale images to 224x224, one-hot labels.

    :param gen: generator yielding (images, integer_labels) batches of
        exactly ``batch_size`` samples
    :yields: (resized_images, one_hot_labels) tuples
    """
    while True:
        # BUG FIX: gen.next() is Python-2-only; the builtin next() works
        # on both Python 2 and 3 generators/iterators.
        imgs, labels = next(gen)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3;
        # migrate to PIL.Image.resize or skimage when upgrading SciPy.
        img = np.array([scipy.misc.imresize(imgs[i, ...], (224, 224))
                        for i in range(batch_size)])
        # integer labels -> one-hot rows
        y = np.zeros((batch_size, num_classes))
        y[np.arange(batch_size), np.squeeze(labels)] = 1
        yield (img, y)
def main():
    """Train a partial VGG16 on CIFAR-10 with real-time augmentation."""
    # The data, shuffled and split between train and test sets:
    x_train_num = 50000
    x_test_num = 10000
    # model = ReNet.VGG_full(weights='VGG16-100p-9336-1fc512-11epoch.h5').get_model()
    # model = ReNet.VGG_50p(weights='VGG16-S75p-1fc512-0epoch.h5').get_model()
    model = ReNet.VGG_03p(weights='VGG16-S03p-1fc512-0epoch.h5').get_model()
    # keep only the best checkpoint by validation accuracy
    auto_save_callback = keras.callbacks.ModelCheckpoint(os.path.join('./models', model_name), monitor='val_acc',
                                                         mode='max', save_best_only=True)
    tensorboard = keras.callbacks.TensorBoard(log_dir='./logs')
    print('Using real-time data augmentation.')
    # pre-pickled CIFAR-10 arrays -- NOTE(review): sklearn.externals.joblib
    # was removed in scikit-learn 0.23; import joblib directly on upgrade
    x_train, y_train, _ = joblib.load('cifar10_train.pkl')
    x_test, y_test, _ = joblib.load('cifar10_test.pkl')
    # augmentation for training only; validation gets mean subtraction only
    train_datagen = ImageDataGenerator(
        preprocessing_function=prepro_fn,
        shear_range=0.1,
        horizontal_flip=True,
        rotation_range=30.,
        width_shift_range=0.1,
        height_shift_range=0.1)
    # train_generator = train_datagen.flow_from_directory(
    #     './dataset/cifar10/train/',
    #     target_size=(224, 224),
    #     batch_size=batch_size,
    #     class_mode='categorical')
    #
    # validation_generator = test_datagen.flow_from_directory(
    #     './dataset/cifar10/test/',
    #     target_size=(224, 224),
    #     batch_size=batch_size,
    #     class_mode='categorical')
    train_generator = resize(train_datagen.flow(x_train, y_train,
                                                batch_size=batch_size))
    test_datagen = ImageDataGenerator(preprocessing_function=prepro_fn)
    validation_generator = resize(test_datagen.flow(x_test, y_test,
                                                    batch_size=batch_size))
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=x_train_num // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=x_test_num // batch_size,
                        callbacks=[auto_save_callback, tensorboard],
                        max_queue_size=50)
    # Evaluate model with test data set and share sample prediction results
    evaluation = model.evaluate_generator(validation_generator,
                                          steps=x_test_num // batch_size)
    print('Model Accuracy = %.4f' % (evaluation[1]))

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# V0.1: source: www.pythoncentral.io/python-snippets-how-to-generate-random-string/
# V0.2: adapted/enhanced by M. Grießer, 16.01.2018: added allchar2 (without punctuation) and password2, a readable date, and the length of each generated password, all printed to the console
vers = "V0.2"
import string
import datetime
from random import choice, randint

# Password length is picked uniformly from this range (inclusive).
min_char = 8
max_char = 12
# Full alphabet (with punctuation) and a punctuation-free variant.
allchar = string.ascii_letters + string.punctuation + string.digits
allchar2 = string.ascii_letters + string.digits
# NOTE(review): the `random` module is not cryptographically secure; use
# the `secrets` module if these passwords protect anything real.
password = "".join(choice(allchar) for x in range(randint(min_char, max_char)))
password2 = "".join(choice(allchar2) for x in range(randint(min_char, max_char)))
readabledate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# BUG FIX: the original used Python-2 print statements, which are a
# SyntaxError on Python 3; these calls print the same fields.
print("Generation Date:", readabledate + " Version: ", vers)
print("This is your password:", password + "the length is: ", len(password))
print("This is your password2:", password2 + "the length is: ", len(password2))
|
#import scipy
from scipy.fftpack import rfft, irfft, rfftfreq
import numpy as np
from numpy import linalg as LA
from obspy import Trace, Stream
from obspy.core import UTCDateTime
from obspy.geodetics import locations2degrees, degrees2kilometers
from sqlalchemy.orm import *
from sqlalchemy import create_engine
from sqlalchemy import or_
# database format import
from . import table_nsta24 as table_nsta24
from . import table_tt_curve as table_tt_curve
from . import tables3D as tables3D
from .table_nsta24 import *
from .table_tt_curve import *
from .tables3D import *
# import 3d travel-time calculator
#from .pbr import pbr
import os
import sys
import struct
import csv
from datetime import *
import math
import configparser
from shutil import copyfile
# put all pre-processing presure in one
class Pre_processing():
    """Bundle all waveform pre-processing for one binary A-file.

    Reads the A-file, drops unusable stations (not in the nsta24 database,
    on a block/checklist, outside the velocity-model grid, GPS-unlocked),
    rotates OBS stations, de-means traces while ignoring gap samples, and
    hands back streams for picking, intensity and magnitude computation.
    """

    def __init__(self, A_File = None, DB = None, config = None, EEW_Mode = False):
        # Validate mandatory inputs early; exit hard (like the rest of the
        # pipeline) on configuration errors.
        if not A_File:
            sys.exit("A_File not given ---- class Pre_processing")
        elif not os.path.isfile(A_File):
            sys.exit("A_File not exist ---- class Pre_processing")
        else:
            self.A_File = A_File
        if not DB:
            sys.exit("Database not given ---- class Pre_processing")
        else:
            self.DB = DB
        if not config:
            sys.exit("Config not given ---- class Pre_processing")
        else:
            self.config = config
        # EEW (earthquake-early-warning) mode skips the OBS rotation step.
        self.EEW_Mode = EEW_Mode

    def pre_picking_processing(self):
        """Run the full pre-picking pipeline.

        :return: (st, st_intensity, stmag, FirstStn, fileHeader)
        """
        # read Afile
        self.st, self.FirstStn, self.fileHeader = self.unpackAfile(self.A_File)
        # remove stations not present in the nsta24 database
        self.checkNonDBsta(self.st, self.DB.db_nsta)
        # rotate OBS stations (skipped in EEW mode)
        if not self.EEW_Mode:
            OBS_rotate(self.st, Config_File = self.config)
        # remove stations flagged "don't use"
        self.remove_sta_not_use(self.st, checklist = self.config['Misc']['CheckList_sta'])
        # remove channels on the add-on block list
        self.remove_addon_block_list(self.st, blocklist = self.config['Misc']['Addon_BlockList'])
        # remove stations outside the velocity-model grid
        self.Remove_Stn_Outside_Grid(self.st, nsta_db = self.DB.db_nsta)
        # de-mean without counting zero (gap) samples
        self.demean_wo_zero()
        # keep a copy of the accelerometer channels for intensity use
        st_intensity_tmp = Stream()
        st_intensity_tmp += self.st.select(channel = 'Ch1')
        st_intensity_tmp += self.st.select(channel = 'Ch2')
        st_intensity_tmp += self.st.select(channel = 'Ch3')
        self.st_intensity = st_intensity_tmp.copy()
        # copy streams for calculating magnitude
        self.stmag = self.st.copy()
        # drop traces that contain no data at all
        self.checkZeroGap_new(self.st)
        return self.st, self.st_intensity, self.stmag, self.FirstStn, self.fileHeader

    def unpackAfile(self, infile):
        """Parse a binary A-file (12/24-bit format) into an obspy Stream.

        :param infile: path to the A-file
        :return: (afst, FirstStn, fileHeader) -- Stream with one Trace per
            channel, first-triggered station name, raw header tuple.
        """
        # read the whole file at once
        # BUG FIX: use a context manager so the handle is always closed.
        b = os.path.getsize(infile)
        with open(infile, 'rb') as FH:
            line = FH.read(b)
        # 24-byte file header: magic, year, msec, length, date/time parts,
        # sampling rate, port count, first station name
        fileHeader = struct.unpack("<4s3h6bh6s", line[0:24])
        fileLength = fileHeader[3]      # record length in seconds
        port = fileHeader[10]           # number of station ports
        FirstStn = fileHeader[11][0:4].decode('ASCII').rstrip()
        print(fileHeader)
        # ---- per-port headers: 32 bytes each, starting at byte 24 ----
        portHeader = []
        for i in range(24, port * 32, 32):
            port_data = struct.unpack("<4s4s3sbh2b4s12b", line[i:i + 32])
            portHeader.append(port_data)
        # ---- waveform data: 3 components x ports x (seconds * 100) ints ----
        dataStartByte = 24 + int(port) * 32
        dataPoint = 3 * int(port) * int(fileLength) * 100
        data = struct.unpack("<%di" % dataPoint,
                             line[dataStartByte:dataStartByte + dataPoint * 4])
        portHeader = np.array(portHeader)
        data = np.array(data)
        # Fortran order: fastest axis is time, then port, then component
        idata = data.reshape((3, port, fileLength * 100), order='F')
        # ---- build the obspy Stream ----
        sttime = UTCDateTime(fileHeader[1], fileHeader[4], fileHeader[5],
                             fileHeader[6], fileHeader[7], fileHeader[8],
                             fileHeader[2])
        npts = fileHeader[3] * fileHeader[9]
        samp = fileHeader[9]            # sampling rate
        # afst = Afile's Stream
        afst = Stream()
        for stc in range(fileHeader[10]):
            stn = portHeader[stc][0].decode('ASCII').rstrip()
            instrument = portHeader[stc][1].decode('ASCII').rstrip()
            loc = '0' + str(portHeader[stc][6].decode('ASCII'))
            net = str(portHeader[stc][7].decode('ASCII')).rstrip()
            GPS = int(portHeader[stc][3])
            # keep only stations with a usable GPS clock (status 1 or 2)
            if ( GPS == 1 or GPS == 2 ):
                # instrument type selects the channel numbering base
                chc = 0
                if instrument == 'FBA':
                    chc = 1
                elif instrument == 'SP':
                    chc = 4
                elif instrument == 'BB':
                    chc = 7
                # one Trace per component in this port
                for ch in range(3):
                    chn = 'Ch' + str(chc + ch)
                    stats = {'network': net, 'station': stn, 'location': loc,
                             'channel': chn, 'npts': npts, 'sampling_rate': samp,
                             'starttime': sttime}
                    tr_data = np.array(idata[ch][stc], dtype=float)
                    afst += Stream([Trace(data=tr_data, header=stats)])
        return afst, FirstStn, fileHeader

    def checkNonDBsta(self, InputStream, db_nsta):
        """Remove traces whose station is not in the nsta24 database."""
        # BUG FIX: iterate over a snapshot -- removing from the Stream
        # while iterating it skips the element after each removal.
        for tr in list(InputStream):
            sta = tr.stats['station']
            stadb = db_nsta.query(NSTATable.id).filter(NSTATable.sta == sta).first()
            if stadb is None:
                InputStream.remove(tr)

    def remove_sta_not_use(self, InputStream, checklist = None):
        """Remove traces for stations marked '0' in the checklist file."""
        if not checklist:
            sys.exit("checklist not given ---- remove_sta_not_use")
        not_use_sta = set()
        # BUG FIX: close the checklist file deterministically.
        with open(checklist, 'r') as csvfile:
            for row in csv.reader(csvfile, delimiter=' '):
                if row[-1] == '0':
                    not_use_sta.add(row[0])
        # BUG FIX: iterate over a snapshot (see checkNonDBsta).
        for tr in list(InputStream):
            if tr.stats['station'] in not_use_sta:
                InputStream.remove(tr)

    def remove_addon_block_list(self, InputStream, blocklist = None):
        """Remove all channels listed in the add-on block-list file.

        Each line encodes station/instrument/network/location in fixed
        columns; an instrument type maps to a triple of channels.
        """
        if not blocklist:
            sys.exit("blocklist not given ---- remove_addon_block_list")
        # instrument type -> its three channel names
        chn_map = {'FBA': ('Ch1', 'Ch2', 'Ch3'),
                   'SP': ('Ch4', 'Ch5', 'Ch6'),
                   'BB': ('Ch7', 'Ch8', 'Ch9')}
        # BUG FIX: close the block-list file deterministically.
        with open(blocklist, 'r') as BL_DAT:
            for line in BL_DAT:
                # read block channel from file (fixed columns)
                STN = line[0:4].rstrip()
                CHN = line[5:9].rstrip()
                NET = line[10:14].rstrip()
                LOC = '0' + line[15:].rstrip()
                if CHN not in chn_map:
                    print('input error in remove_addon_block_list')
                    exit()
                for CH in chn_map[CHN]:
                    # select() returns a new Stream, so removing from
                    # InputStream while iterating it is safe here
                    st = InputStream.select(station=STN, channel=CH, network=NET, location=LOC)
                    for tr in st:
                        InputStream.remove(tr)

    def Remove_Stn_Outside_Grid(self, InputStream, nsta_db = None, min_lon = 115, max_lon = 130, min_lat = 15, max_lat = 30):
        """Remove stations outside the velocity-model lat/lon grid."""
        # BUG FIX: iterate over a snapshot (see checkNonDBsta).
        for tr in list(InputStream):
            stn = tr.stats['station']
            tr_lon, tr_lat = nsta_db.query(NSTATable.longitude, NSTATable.latitude).filter(NSTATable.sta == stn).first()
            if tr_lon > max_lon or tr_lon < min_lon or tr_lat > max_lat or tr_lat < min_lat:
                InputStream.remove(tr)

    def checkZeroGap_new(self, InputStream):
        """Drop all-zero traces and de-mean the rest, ignoring gap samples."""
        # BUG FIXES: iterate over a snapshot, and `continue` (not `break`)
        # after removing an empty trace so the remaining traces are still
        # processed (the original stopped at the first all-zero trace).
        for tr in list(InputStream):
            idata = tr.data
            if np.count_nonzero(idata) == 0:
                InputStream.remove(tr)
                continue
            # mask gap (zero) samples so they do not bias the mean
            tmp_data = np.ma.masked_where(idata == 0, idata)
            tmp_data = tmp_data - tmp_data.mean()
            tr.data = tmp_data.filled(0)

    def demean_wo_zero(self):
        """De-mean every trace in self.st, treating zero samples as gaps.

        All-zero traces are removed entirely.
        """
        # BUG FIX: iterate over a snapshot (see checkNonDBsta). Note the
        # original variable name was misleading: np.count_nonzero returns
        # the number of NON-zero samples.
        for tr in list(self.st):
            idata = tr.data
            if np.count_nonzero(idata) == 0:
                self.st.remove(tr)
                continue
            # mask gap (zero) samples so they do not bias the mean
            tmp_data = np.ma.masked_where(idata == 0, idata)
            tmp_data = tmp_data - tmp_data.mean()
            tr.data = tmp_data.filled(0)
def check_pick_in_gap(inputStream, pick, time_window=0.2):
    """Return True if the pick falls inside a data gap (zero samples).

    :param inputStream: waveform to inspect. NOTE(review): the sliced
        object is accessed via ``.data``, which exists on a Trace but not
        on a Stream -- confirm callers pass a single Trace here.
    :param pick: pick time (UTCDateTime-like)
    :param time_window: half-window in seconds around the pick
    :return: True when more than 4 samples in the window are zero
    """
    result = False
    # examine +/- time_window seconds around the pick
    tiny_st = inputStream.slice(pick-time_window, pick+time_window)
    if np.count_nonzero(tiny_st.data==0) > 4:
        #if np.count_nonzero(tiny_st.data) == 0:
        result = True
    return result
#
def RemoveStnFarAway(InputStream, db_nsta, FirstStn, MaxDeg):
    """Remove traces farther than MaxDeg degrees from the first station.

    :param InputStream: obspy Stream, modified in place
    :param db_nsta: nsta24 session used to look up station coordinates
    :param FirstStn: first-triggered station name (distance origin)
    :param MaxDeg: maximum allowed epicentral distance in degrees
    """
    fs_lon, fs_lat = db_nsta.query(NSTATable.longitude, NSTATable.latitude).filter(NSTATable.sta == FirstStn).first()
    # BUG FIX: iterate over a snapshot -- removing from the Stream while
    # iterating it skips the element that follows each removal.
    for tr in list(InputStream):
        stn = tr.stats['station']
        tr_lon, tr_lat = db_nsta.query(NSTATable.longitude, NSTATable.latitude).filter(NSTATable.sta == stn).first()
        EpicDist = locations2degrees(fs_lat, fs_lon, tr_lat, tr_lon)
        if EpicDist > MaxDeg:
            InputStream.remove(tr)
    return
class Auto_Picking_Initializing():
    """Set up every SQLite database the auto-picker needs.

    Reads database paths from a config file, rebuilds the travel-time
    curve and nsta24 station databases from their flat-file sources, and
    opens a fresh associator session.
    """
    def __init__(self, Config_File='PyAP.ini', use_af_db=False):
        self.config = configparser.ConfigParser()
        self.config.read(Config_File)
        # Database path:
        self.db_assoc_path = self.config['DataBase']['Assoc_DB'] # associator database
        self.db_nsta_path = self.config['DataBase']['NSTA_DB'] # nsta24 info database
        #self.db_AF_path = self.config['DataBase']['AFile_DB'] # A_File database
        self.db_curve_path = self.config['DataBase']['TT_CURVE_DB'] # Travel-time Curve database
        # Our SQLite databases are:
        self.sqlite_assoc = 'sqlite:///'+self.db_assoc_path # associator database
        self.sqlite_nsta = 'sqlite:///'+self.db_nsta_path # nsta24 info database
        #self.sqlite_AF = 'sqlite:///'+self.db_AF_path # A_File database
        self.sqlite_tt_curve = 'sqlite:///'+self.db_curve_path # Travel_time Curve database
        # Always start from a clean associator database.
        if os.path.exists(self.db_assoc_path):
            os.remove(self.db_assoc_path)
        # The curve and nsta databases are rebuilt on every run: both
        # branches call the builder; the else just deletes the stale file.
        if not os.path.exists(self.db_curve_path):
            self.db_tt_curve = self.read_tt_curve()
        else:
            os.remove(self.db_curve_path)
            self.db_tt_curve = self.read_tt_curve()
        if not os.path.exists(self.db_nsta_path):
            self.db_nsta = self.read_nsta24_gain()
        else:
            os.remove(self.db_nsta_path)
            self.db_nsta = self.read_nsta24_gain()
        # Connect to our databases
        # Associator
        engine_assoc = create_engine(self.sqlite_assoc, echo=False)
        tables3D.Base_Assoc.metadata.create_all(engine_assoc)
        Session1=sessionmaker(bind=engine_assoc)
        self.db_assoc = Session1()
        # AFile database
        # NOTE(review): self.sqlite_AF is never assigned (its assignment
        # above is commented out), so use_af_db=True raises AttributeError
        # here -- confirm whether the A-file DB feature is still wanted.
        if use_af_db:
            engine_AFdb=create_engine(self.sqlite_AF, echo=False)
            Session2=sessionmaker(bind=engine_AFdb)
            self.db_AF = Session2()

    def read_nsta24_gain(self):
        """Build the nsta24 station database from its fixed-column dat file.

        :return: an open SQLAlchemy session on the populated database
        """
        engine_nsta=create_engine(self.sqlite_nsta, echo=False)
        table_nsta24.NSTA24.metadata.create_all(engine_nsta)
        Session=sessionmaker(bind=engine_nsta)
        session_nsta=Session()
        f = open(self.config['Misc']['NSTA24_dat'],'r')
        for i in f:
            # column 30 flags whether the station is active
            alive = int(i[30])
            if alive == 1:
                sta = i[0:4].rstrip()
                net = i[39:44].rstrip()
                loc = '0'+i[32]
                instrument = i[45:49].rstrip()
                # NOTE(review): the longitude/latitude column ranges overlap
                # at index 13 -- confirm against the nsta24 format spec.
                longitude = float(i[5:14])
                latitude = float(i[13:22])
                elevation = float(i[22:30])
                # three per-channel gain values
                gain = []
                gain.append(float(i[49:60]))
                gain.append(float(i[60:71]))
                gain.append(float(i[71:82]))
                # instrument type selects the channel numbering base
                if instrument == 'FBA':
                    chc = 1
                elif instrument == 'SP':
                    chc = 4
                elif instrument == 'BB':
                    chc = 7
                # for each channel in port
                for ch in range(3):
                    chn = 'Ch'+str(chc+ch)
                    station = table_nsta24.NSTATable(sta,chn,net,loc,gain[ch],latitude,longitude,elevation)
                    session_nsta.add(station)
                session_nsta.commit()
        f.close()
        return session_nsta

    def read_tt_curve(self):
        """Build the travel-time-curve database from its dat file.

        :return: an open SQLAlchemy session on the populated database
        """
        # create engine
        engine_tt_curve=create_engine(self.sqlite_tt_curve, echo=False)
        # Create the tables for travel-time curve
        table_tt_curve.BASE_TT_CURVE.metadata.create_all(engine_tt_curve)
        Session=sessionmaker(bind=engine_tt_curve)
        session_tt_curve=Session()
        f = open(self.config['Misc']['TT_CURVE_DAT'],'r')
        for line in f:
            # fixed columns: station, then the a/b curve coefficients
            STN = line[0:4].rstrip()
            A_VALUE = float(line[4:12])
            B_VALUE = float(line[12:20])
            stn_table = table_tt_curve.TT_CURVE(STN,A_VALUE,B_VALUE)
            session_tt_curve.add(stn_table)
        f.close()
        session_tt_curve.commit()
        return session_tt_curve

    def Begin_Small_Events(self):
        """Reset the associator database for a fresh (small-event) run."""
        # remove old assoc db
        os.remove(self.db_assoc_path)
        # re-create new assoc db
        # Associator
        engine_assoc = create_engine(self.sqlite_assoc, echo=False)
        tables3D.Base_Assoc.metadata.create_all(engine_assoc)
        Session1=sessionmaker(bind=engine_assoc)
        self.db_assoc = Session1()
        return
def run_rehypo(tpfilename, method='3D', config=None):
    """Re-run hypocenter location on a pick (tp) file.

    Rewrites the first line of *tpfilename* with the initial depth for the
    chosen method (columns 34-40, format %6.2f), then invokes the external
    locator binary configured in the Misc section.

    :param tpfilename: pick file, rewritten in place
    :param method: '1D' (fixed 20.0 km start depth) or '3D' (keep the
        depth already in the file)
    :param config: mapping with config['Misc']['HypoRun_Path'] and
        config['Misc']['Hypo3D_Path'] entries
    :raises ValueError: for an unknown method (BUG FIX: the original left
        `depth` unbound and crashed later with NameError)
    """
    if method not in ('1D', '3D'):
        raise ValueError("unknown method %r ---- run_rehypo" % (method,))
    # work from a scratch copy so the original can be rewritten in place
    copyfile(tpfilename, 'hypo.tmp')
    # BUG FIX: context managers guarantee both handles are closed even if
    # parsing the depth field raises.
    with open('hypo.tmp', 'r') as tmp_file, open(tpfilename, 'w') as tp:
        f_line = True
        for line in tmp_file:
            if f_line:
                # the first line carries the initial depth in columns 34-40
                if method == '1D':
                    depth = 20.0
                else:  # '3D'
                    depth = float(line[34:40])
                tp.write("%s%6.2f%s" % (line[0:34], depth, line[40:]))
                f_line = False
            else:
                tp.write("%s" % (line))
    # NOTE(review): os.system with concatenated paths is shell-injection
    # prone; prefer subprocess.run([...]) if these paths can be untrusted.
    if method == '1D':
        os.system(config['Misc']['HypoRun_Path'] + ' -m 1 -p ' + tpfilename)
    elif method == '3D':
        os.system(config['Misc']['Hypo3D_Path'] + ' ' + tpfilename)
    return
class OBS_rotate():
    """Rotate OBS/EOS station traces into standard orientation, in place.

    Construction does all the work: per-station roll/pitch/azimuth are
    read from the orientation file for the stream's date, then every
    matching station's three-component data are rotated.
    """
    def __init__(self, st, Config_File = None):
        # get config information
        self.config = Config_File
        if not Config_File:
            sys.exit("Config_File not given ---- class OBS_rotate")
        # reference time used to select the valid orientation period
        self.st_time = st[0].stats.starttime
        # get station info from file
        self.get_station_info()
        # rotate only if one of the listed stations appears in this A-file
        EOS_TMP = Stream()
        for STN in self.STNs:
            EOS_TMP += st.select(station=STN)
        if len(EOS_TMP) > 0:
            self.rotate_EOS_station(st)

    def rotate_EOS_station(self, st):
        """Rotate each station's Ch1-3 and Ch4-6 component triples."""
        CHAN_PAIR = (['Ch1','Ch2','Ch3'], ['Ch4','Ch5','Ch6'])
        for STN in self.STNs:
            for PAIR in CHAN_PAIR:
                EOS_st = Stream()
                for CHN in PAIR:
                    EOS_st += st.select(station=STN).select(channel=CHN)
                if len(EOS_st) > 0:
                    PHI = np.deg2rad(self.STNs_Roll[STN])
                    PSI = np.deg2rad(self.STNs_Pitch[STN])
                    # presumably converts compass azimuth to the math
                    # angle convention -- confirm against the data sheet
                    azimuth = self.STNs_Az[STN] - 90
                    self.rotate_seis(EOS_st, PHI, PSI, azimuth)

    def make_YAW_PITCH_ROLL(self, x_vector, y_vector, z_vector):
        """Derive roll (PHI) and pitch (PSI) angles, in radians, from a vector.

        NOTE(review): ROLL/PITCH (degree values) are computed but never
        used or returned -- likely debugging leftovers.
        """
        YZ_len = LA.norm(np.array([y_vector, z_vector]))
        PHI = np.arctan2(y_vector, z_vector)
        PSI = np.arctan2(-x_vector, YZ_len)
        ROLL = np.rad2deg(PHI)
        PITCH = np.rad2deg(PSI)
        return PHI, PSI

    def make_rotate_matrix(self, PHI, PSI, azimuth):
        """Compose the 3x3 rotation: roll (x), then pitch (y), then azimuth (z)."""
        COS_PHI = np.cos(-PHI)
        SIN_PHI = np.sin(-PHI)
        # rotation about the x axis (roll)
        ROTATEM_PHI = np.array([[1.0, 0.0, 0.0], \
                                [0.0, COS_PHI, SIN_PHI], \
                                [0.0, -SIN_PHI, COS_PHI]])
        COS_PSI = np.cos(-PSI)
        SIN_PSI = np.sin(-PSI)
        # rotation about the y axis (pitch)
        ROTATEM_PSI = np.array([[ COS_PSI, 0.0, -SIN_PSI], \
                                [ 0.0, 1.0, 0.0], \
                                [ SIN_PSI, 0.0, COS_PSI]])
        COS_AZI = np.cos(np.deg2rad(azimuth))
        SIN_AZI = np.sin(np.deg2rad(azimuth))
        # rotation about the z axis (azimuth)
        ROTATEM_azimuth = np.array([[ COS_AZI, SIN_AZI, 0.0], \
                                    [-SIN_AZI, COS_AZI, 0.0], \
                                    [ 0.0, 0.0, 1.0]])
        ROTATE_M_TMP = np.matmul(ROTATEM_PSI, ROTATEM_PHI)
        ROTATE_M = np.matmul(ROTATEM_azimuth, ROTATE_M_TMP)
        return ROTATE_M

    def rotate_seis(self, st, PHI, PSI, azimuth):
        """Apply the rotation to a 3-component Stream, overwriting its data.

        Components are stacked as (st[2], st[1], st[0]) before rotation
        and written back in reverse order.
        """
        tmp = np.append([st[2].data.data, st[1].data.data], [st[0].data.data], axis=0)
        SEIS_3D = tmp.reshape(3, st[2].stats.npts)
        ROTM_3D = self.make_rotate_matrix(PHI, PSI, azimuth)
        SEIS_NEW = np.matmul(ROTM_3D, SEIS_3D)
        st[2].data = SEIS_NEW[0]
        st[1].data = SEIS_NEW[1]
        st[0].data = SEIS_NEW[2]
        return

    def get_station_info(self):
        """Load roll/pitch/azimuth for stations valid at the stream's date."""
        ORI_DAT = open(self.config['Misc']['OBS_ORI_DAT'], 'r')
        # stream date as YYYYMMDD for validity-period comparison
        TIME = self.st_time.year*10000+self.st_time.month*100+self.st_time.day
        self.STNs = []
        self.STNs_Roll = {}
        self.STNs_Pitch = {}
        self.STNs_Az = {}
        for line in ORI_DAT:
            # fixed columns: station, roll, pitch, azimuth, valid period
            STN = line[0:5].strip()
            period_start = int(line[37:45])
            period_stop = int(line[46:54])
            if STN not in self.STNs:
                if TIME >= period_start and TIME <= period_stop:
                    self.STNs.append(STN)
                    self.STNs_Roll[STN] = float(line[8:18])
                    self.STNs_Pitch[STN] = float(line[19:29])
                    self.STNs_Az[STN] = float(line[30:36])
        ORI_DAT.close()
        return
class Pfile():
    """Collects the inputs needed to build a P-file (pick/report file).

    Holds the magnitude and intensity streams, the associator and nsta24
    database sessions, and Wood-Anderson response dictionaries used when
    simulating local-magnitude traces.
    """
    def __init__(self, stmag, st_inten, db_assoc, db_nsta, Config_File = None, polarity_mode='False', Vector_Mode=False):
        # Define travel time and associator database
        self.config = Config_File
        if not Config_File:
            sys.exit("Config_File not given ---- class Pfile")
        self.Vector_Mode = Vector_Mode
        self.st = stmag              # stream for magnitude computation
        self.st_inten = st_inten     # stream for intensity computation
        self.assoc_db = db_assoc     # associator DB session
        self.nsta_db = db_nsta       # nsta24 station DB session
        # NOTE(review): polarity_mode is a *string* flag ('True'/'False'),
        # unlike the boolean Vector_Mode -- confirm callers pass strings.
        if polarity_mode == 'True':
            self.polarity_mode = True
        else:
            self.polarity_mode = False
        # Wood-Anderson instrument responses (sensitivity-2800 variant)
        self.paz_wa_v = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1, 'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]} # for Velocity
        self.paz_wa_a = {'sensitivity': 2800, 'zeros': [], 'gain': 1, 'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]} # for Accel
        #self.paz_wa_v = {'sensitivity': 2080, 'zeros': [0j], 'gain': 1, 'poles': [-5.49779 - 5.60886j, -5.49779 + 5.60886j]} # IASPEI 2012, for Velocity
        #self.paz_wa_a = {'sensitivity': 2080, 'zeros': [], 'gain': 1, 'poles': [-5.49779 - 5.60886j, -5.49779 + 5.60886j]} # IASPEI 2012, for Accel

    def get_assoc_ids(self):
        """Return associated-event rows with id < 99 from the associator DB."""
        ids = self.assoc_db.query(Associated).filter(Associated.id < 99).all()
        return ids
'''
A simple class-factory function.
'''
def record_factory(cls_name, field_names):
    """Build a simple slotted record class named *cls_name*.

    *field_names* may be an iterable of names, or a single string with
    names separated by spaces and/or commas.  Instances accept positional
    and keyword constructor arguments, iterate over their field values in
    declaration order, and render as "Name(field=value,...)".
    """
    try:
        field_names = field_names.replace(",", " ").split()
    except AttributeError:
        # already an iterable of names -- use it as-is
        pass
    field_names = tuple(field_names)

    def __init__(self, *args, **kwargs):
        # positional args bind to __slots__ in order; kwargs override/extend
        bound = dict(zip(self.__slots__, args))
        bound.update(kwargs)
        for attr_name, attr_value in bound.items():
            setattr(self, attr_name, attr_value)

    def __iter__(self):
        # field values in declaration order
        return (getattr(self, slot) for slot in self.__slots__)

    def __str__(self):
        pairs = zip(self.__slots__, self)
        body = ",".join("{}={}".format(slot, value) for slot, value in pairs)
        return "{}({})".format(type(self).__name__, body)

    cls_attrs = {
        "__slots__": field_names,
        "__init__": __init__,
        "__iter__": __iter__,
        "__str__": __str__,
    }
    return type(cls_name, (object,), cls_attrs)
if __name__ == "__main__":
    # Quick demonstration of the factory.
    Dog = record_factory("Dog", "name weight, owner")
    rex = Dog("Rex", 30, "Bob")
    print(rex)
    # instances are iterable in field order, so they unpack like tuples
    name, weight, _ = rex
    print(name, weight)
    print("{2}'s dog weights {1}kg".format(*rex))
    # slots are writable
    rex.weight = 32
    print(rex)
    print(Dog.__mro__)
|
#!/usr/bin/python
import spidev
import time
import os
import math
from datetime import datetime
# ADC reference voltage in volts -- presumably measured on this board; confirm.
ref_volt = 3.311
# Open SPI bus 0, chip-select 0 for the ADC.
spi = spidev.SpiDev()
spi.open(0,0)
def ReadChannel(channel):
    """Read one raw 10-bit sample from the given ADC channel over SPI.

    :param channel: ADC channel number (0-7)
    :return: raw reading in the range 0-1023
    """
    # start bit, single-ended mode + channel select, then one clock byte
    reply = spi.xfer2([1, (8 + channel) << 4, 0])
    # the 10-bit result spans the low 2 bits of byte 1 and all of byte 2
    return ((reply[1] & 3) << 8) + reply[2]
def ConvertVolts(data, places, ref=None):
    """Convert a raw 10-bit ADC reading to volts.

    :param data: raw reading (0-1023)
    :param places: number of decimal places to round to
    :param ref: reference voltage; defaults to the module-level ref_volt.
        (New optional parameter -- generalizes the hard-coded global while
        staying backward compatible with two-argument callers.)
    :return: rounded voltage
    """
    if ref is None:
        ref = ref_volt
    volts = (data * ref) / float(1023)
    return round(volts, places)
# Accumulators for the RMS computation: raw and low-pass filtered.
rms_ampno = 0
rms_amp = 0
channel0 = 0        # ADC channel for one side of the probe
channel1 = 1        # ADC channel for the other side
count = 0
sleepdelay = 1
v = [0]             # differential voltage samples (index 0 is a seed value)
f = open('ProbeToPlot.csv', 'w')
g = open('ProbeToPlot3.csv', 'w')
alpha = 0.5569      # exponential-smoothing factor -- presumably tuned; confirm
y = [0]             # filtered samples (seeded with 0)
NUMBER_OF_SAMPLES = 512
start_time = datetime.now()
# --- acquisition: read differential samples, accumulate sum of squares ---
for n in range (1, NUMBER_OF_SAMPLES):
    i = ReadChannel(channel0)
    j = ReadChannel(channel1)
    i = i - j       # differential reading between the two channels
    v.append(ConvertVolts(i, 9))
    rms_ampno+=v[n]*v[n]
#stoptime = datetime.now()
# RMS over the NUMBER_OF_SAMPLES-1 collected samples
rms_ampno=math.sqrt(rms_ampno/(NUMBER_OF_SAMPLES-1))
# scale to amperes -- NOTE(review): /33*2000 looks like a current-transformer
# burden/turns ratio; confirm against the probe hardware
rms_ampno=rms_ampno/33*2000
stoptime = datetime.now()
print("{}A".format(rms_ampno))
# NOTE(review): this writes the Python list repr, not one value per CSV row
f.write("{},".format(v))
f.close()
print("Probe time : {} milliseconds ".format((stoptime -start_time).microseconds/1000))
# --- post-processing: exponential moving average, then RMS again ---
for n in range (1, NUMBER_OF_SAMPLES):
    y.append(alpha*v[n]+(1-alpha)*y[n-1])
    rms_amp+=y[n]*y[n]
rms_amp=math.sqrt(rms_amp/(NUMBER_OF_SAMPLES-1))
rms_amp=rms_amp/33*2000
print("{}A".format(rms_amp))
g.write("{},".format(y))
g.close()
|
from fpdf import FPDF
import array
import numpy as np
import random
from numpy import *
# modified to allow for bigger numbers and more questions
def springMix(name, numbofquestions, skillLevel):
    """Write a mixed-operation math quiz to '<name>.pdf'.

    Operator mix is addition : subtraction : multiplication : division
    at a ratio of 1 : 1 : 5 : 3 (see returnOperator's stop arguments).

    :param name: quiz title, also used as the output file name
    :param numbofquestions: number of questions to lay out
    :param skillLevel: difficulty index controlling operand size
    """
    # produces math quiz with addition, subtraction, multiplication, and division
    # ratio is 1 : 1 : 5 : 3
    pdf = generateGeneralPdf(name)
    np_arr = generateNumberPairs(numbofquestions, skillLevel)
    counter = 0
    for x in range(numbofquestions):
        counter = counter + 1
        question = str(np_arr[x, 0]) + returnOperator(10, 10, 50, 30, x, numbofquestions) + str(np_arr[x, 1]) + " = "
        # five questions per row: ln=1 ends the line after every fifth cell
        if (counter % 5 == 0):
            pdf.cell(20)
            pdf.cell(10, 5, txt=question, ln=1)
        else:
            pdf.cell(20)
            pdf.cell(10, 5, txt=question, ln=0)
    pdf.output(name +".pdf")
def returnOperator(additionstop, subtractionstop, multiplicationstop, divisionstop, current, total):
    """Pick the arithmetic operator for question *current* of *total*.

    The *stop* arguments are percentage shares: questions up to
    additionstop% are addition, the next subtractionstop% subtraction,
    the next multiplicationstop% multiplication, and the remainder
    division (divisionstop itself is unused; division fills to 100%).

    :return: one of " + ", " - ", " * ", " / "
    """
    operands = {1: " + ", 2: " - ", 3: " * ", 4: " / "}
    # cumulative percentage boundary for each operator bucket
    stops = {
        1: additionstop,
        2: additionstop + subtractionstop,
        3: additionstop + subtractionstop + multiplicationstop,
        4: 100,
    }
    progress = current / total * 100
    # walk down from the last bucket until progress passes a boundary
    op_key = 4
    boundary = 3
    while progress < stops.get(boundary, -1):
        boundary -= 1
        op_key -= 1
    return operands.get(op_key, " * ")
def generateGeneralPdf(name):
    """Create a one-page PDF with *name* as a centered title line."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)
    pdf.cell(200, 10, txt=name, ln=1, align="C")
    return pdf
def generateDifficulties(count):
    """Map difficulty level -> largest operand value (level * 10).

    BUG FIXES vs the original: the parameter was named `len`, shadowing
    the builtin (callers pass it positionally, so renaming is safe), and
    numpy.arange produced numpy-int keys where plain ints suffice.

    :param count: number of difficulty levels to generate
    :return: dict {level: level * 10} for level in 0..count-1
    """
    return {level: level * 10 for level in range(int(count))}
def generateNumberPairs(numbofquestions, skillLevel):
    """Generate operand pairs for the quiz; higher skill => bigger numbers.

    BUG FIXES vs the original:
    - it returned numbofquestions + 1 rows (a seed row plus one per loop
      iteration); now exactly numbofquestions rows are returned;
    - it crashed for skill levels whose cap is below the lower bound of 11
      (randint low > high); the cap is now clamped to at least 11.

    :param numbofquestions: number of (a, b) pairs to produce
    :param skillLevel: difficulty level 0-9; unknown levels fall back to a
        cap of 10 (then clamped to 11)
    :return: numpy integer array of shape (numbofquestions, 2) with values
        in [11, cap]
    """
    # mirrors generateDifficulties(10): level -> level * 10
    difficulties = {level: level * 10 for level in range(10)}
    high = max(difficulties.get(skillLevel, 10), 11)
    # np.random.randint's upper bound is exclusive, hence high + 1
    return np.random.randint(11, high + 1, size=(numbofquestions, 2))
# Demo run: 150 questions at skill level 9.
# NOTE(review): runs on import; consider an `if __name__ == "__main__":` guard.
springMix("test 6", 150, 9)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'meijuTT_v.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
    """UI layout generated by pyuic5 from 'meijuTT_v.ui'.

    Do not hand-edit beyond comments: regenerating from the .ui file
    overwrites this class (see the header warning above).
    """
    def setupUi(self, mainWindow):
        """Create and position all widgets on *mainWindow*."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(800, 600)
        # window icon from the resource file
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/appicon/bat.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        mainWindow.setWindowIcon(icon)
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # search input + button row
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(90, 20, 113, 28))
        self.lineEdit.setObjectName("lineEdit")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(220, 20, 93, 28))
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 20, 72, 28))
        self.label.setObjectName("label")
        # main results area
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(20, 70, 751, 491))
        self.textEdit.setObjectName("textEdit")
        # status labels at the top right
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(450, 10, 61, 40))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(520, 10, 241, 40))
        self.label_3.setObjectName("label_3")
        mainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(mainWindow)
        self.statusbar.setObjectName("statusbar")
        mainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)

    def retranslateUi(self, mainWindow):
        """Install the translated display strings."""
        _translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "美剧爬虫 V0.2"))
        self.pushButton.setText(_translate("mainWindow", "搜索"))
        self.label.setText(_translate("mainWindow", "美剧名:"))
        self.textEdit.setText(_translate("mainWindow", "显示美剧剧集及下载链接"))
        self.label_2.setText(_translate("mainWindow", "Status:"))
        self.label_3.setText(_translate("mainWindow", "Ready!"))
|
"""
An iteratinv LQR algorithm for CARLA
Need an initial trajectory, possibly generated by the neural controller.
Based on the trajectory, solve a perturbation such that the perturbed trajectory has lower cost than the previous one.
In concern of safety, add safety constrain to the optimization problem and the constraints query for waypoints.
The constraints are resolved by turning the constrained problem into an unconstrained one.
To minimize the cost is to satisfy the safety constraint.
"""
import argparse
import os
import sys
import numpy as np
import theano.tensor as T
import time
from .dynamics import Dynamics, BatchAutoDiffDynamics, tensor_constrain, sigmoid_constrain, hard_sigmoid_constrain
from .cost import Cost, PathQRCost
from .controller import iLQR
### The bounds for the control
STEER_BOUND_UP = 1.0
STEER_BOUND_LOW = -1.0
THROTTLE_BOUND_UP = 1.0
THROTTLE_BOUND_LOW = 0.0
# Global list used to monitor the progress of the iLQR solver.
J_hist = []

def on_iteration(iteration_count, xs, us, J_opt, accepted, converged):
    """Record the current optimal cost and periodically log solver progress."""
    J_hist.append(J_opt)
    if converged:
        info = "converged"
    elif accepted:
        info = "accepted"
    else:
        info = "failed"
    # Log every 30th iteration together with the trailing state and control.
    if iteration_count % 30 == 0:
        print("iteration", iteration_count, info, J_opt, xs[-1], us[-1])
class CarDynamics(BatchAutoDiffDynamics):
    """
    Define car dynamics f.
    BatchAutoDiffDynamics will automatically calculate the jacobian
    """
    def __init__(self,
                 dt = 0.05,
                 l = 2.5,
                 constrain = True,
                 min_bounds = np.array([THROTTLE_BOUND_LOW, STEER_BOUND_LOW]),
                 max_bounds = np.array([THROTTLE_BOUND_UP, STEER_BOUND_UP]),
                 **kwargs):
        """
        Args:
            dt: the time span of executing the input control
            l: bicycle-model length parameter (divides the yaw rate below)
            constrain: the constraints for the input control
            min_bounds, max_bounds: the lower/upper bounds of the control
        Notes:
            state variables: [posx, posy, theta, v]
            action: [v_dot, theta_dot]
        """
        self.dt = dt
        self.l = l
        self.constrained = constrain
        self.min_bounds = min_bounds
        self.max_bounds = max_bounds
        def f(x, u, i):
            # Squash the raw control into its bounds before applying it.
            if self.constrained:
                u = hard_sigmoid_constrain(u, self.min_bounds, self.max_bounds)
            posx = x[..., 0]
            posy = x[..., 1]
            theta = x[..., 2]
            v = x[..., 3]
            v_dot = u[..., 0]
            # Steering command in [-1, 1] scaled to a +/- 70 degree wheel angle.
            delta = np.pi * 70./180 * u[..., 1]
            tanh_delta = T.tanh(delta)
            # Define dynamics model as in paper
            # Constrained Iterative LQR for On-Road Autonomous Driving Motion Planning
            theta_dot = v * tanh_delta/self.l
            # NOTE: the closure variable `dt` below equals self.dt (both set
            # from the same __init__ argument) — the mixed usage is intentional
            # but worth unifying.
            posx_ = posx + T.cos(theta) * (v * dt + 0.5 * v_dot * self.dt**2)
            posy_ = posy + T.sin(theta) * (v * dt + 0.5 * v_dot * self.dt**2)
            v_ = v + v_dot * dt
            theta_ = theta + theta_dot * self.dt
            return T.stack([
                posx_,
                posy_,
                theta_,
                v_
            ]).T
        super(CarDynamics, self).__init__(f, state_size = 4, action_size = 2, **kwargs)
class CarCost(Cost):
    """Quadratic Regulator Instantaneous Cost for trajectory following and barrier function."""

    def __init__(self, Q, q, R, r, b12, b34, q1, q2, q3, q4, x_path, x_avoids, Q_terminal = None, u_path = None):
        """
        Construct a Quadratic Cost with barriers
        Args:
            Q: Quadratic cost for the state variables [state_size, state_size]
            q: linear cost for the state variables [state_size, 1]
            R: Quadratic cost for the control variables [action_size, action_size]
            r: linear cost for the control variables [action_size, 1]
            b12: barrier radius for lane keeping, i.e. safe(x) = (x - wp)^2 - b^2 <= 0
            b34: barrier radius for obstacle avoidance
            q1, q2: scale coeffs for the lane-keeping barrier cost
                i.e. safe(x) => q1 * exp[q2 * ((x - wp)^2 - b^2)]
                grad safe(x) => 2q1 q2 (x - wp) exp[q2 * ((x - wp)^2 - b^2)]
                grad^2 safe(x) => 2q1q2 exp[q2*((x-wp)^2 - b^2)] + 4q1q2^2 (x-wp)^2 exp[q2*((x-wp)^2 - b^2)]
            q3, q4: scale coeffs for the obstacle-avoidance barrier cost
            x_path: nominal trajectory to be followed [N + 1, state_size]
            x_avoids: obstacle positions to stay away from [N + 1, state_size]
            Q_terminal: optional terminal-state quadratic cost; defaults to Q
            u_path: optional nominal control sequence; defaults to zeros
        The cost is:
            1/2 * x^T Q x + x^T q +
            1/2 * u^T R u + u^T r +
            1/2 * x^T q1q2exp(q2(Ax-b))A^T A + x^T q1q2exp(q2(Ax - b))A^T
        """
        self.Q = np.array(Q)
        self.q = np.array(q)
        self.R = np.array(R)
        self.r = np.array(r)
        self.b12 = b12
        self.b34 = b34
        self.q1 = q1
        self.q2 = q2
        self.q3 = q3
        self.q4 = q4
        # F, f hold the quadratic/linear terms contributed by the barriers;
        # they are recomputed inside l_x / l_xx for the current state.
        self.F = np.zeros(self.Q.shape)
        self.f = np.zeros(self.q.shape)
        self.x_path = np.asarray(x_path)
        state_size = self.Q.shape[0]
        action_size = self.R.shape[0]
        path_length = self.x_path.shape[0]
        if Q_terminal is None:
            self.Q_terminal = self.Q
        else:
            # BUG FIX: previously read self.Q_terminal before it was assigned,
            # raising AttributeError whenever a terminal cost was supplied.
            self.Q_terminal = np.array(Q_terminal)
        if u_path is None:
            self.u_path = np.zeros([path_length - 1, action_size])
        else:
            self.u_path = np.array(u_path)
        if x_avoids is None:
            self.x_avoids = np.zeros([path_length, state_size])
        else:
            self.x_avoids = np.array(x_avoids)
        assert self.Q.shape[0] == self.Q.shape[1], "Q must be square"
        assert self.q.shape[0] == self.Q.shape[0], "q mismatch"
        assert self.R.shape[0] == self.R.shape[1], "R must be square"
        assert self.r.shape[0] == self.R.shape[0], "r mismatch"
        assert state_size == self.x_path.shape[1], "Q & x_path mismatch"
        # Message uses the normalized attributes so it cannot itself crash
        # when u_path was passed as None.
        assert action_size == self.u_path.shape[1], "R & u_path mismatch {} vs {}".format(self.R.shape, self.u_path.shape)
        # Precompute some common constants.
        self._Q_plus_Q_T = self.Q + self.Q.T
        self._Q_plus_Q_T_terminal = self.Q_terminal + self.Q_terminal.T
        self._R_plus_R_T = self.R + self.R.T
        self._F_plus_F_T = self.F + self.F.T
        super(CarCost, self).__init__()

    def l(self, x, u, i, terminal=False):
        """Instantaneous cost function.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            Instantaneous cost (scalar).
        """
        Q = self.Q_terminal if terminal else self.Q
        r = self.r
        q = self.q
        R = self.R
        x_diff = x - self.x_path[i]
        x_dist = x - self.x_avoids[i]
        # Only the (x, y) position components enter the barrier costs.
        x_diff[2:] = 0.0
        x_dist[2:] = 0.0
        # Avoidance barrier grows when close to the obstacle; lane barrier
        # grows when far from the nominal waypoint.
        constant_cost = self.q3 * np.exp(self.q4 * (- x_dist.T.dot(x_dist) + self.b34 * self.b34))
        constant_cost += self.q1 * np.exp(self.q2 * (x_diff.T.dot(x_diff) - self.b12 * self.b12))
        squared_x_cost = x_diff.T.dot(Q).dot(x_diff)
        if terminal:
            return squared_x_cost
        u_diff = u - self.u_path[i]
        squared_u_cost = u_diff.T.dot(R).dot(u_diff)
        linear_cost = x_diff.T.dot(q) + u_diff.T.dot(r)
        return squared_x_cost + squared_u_cost + linear_cost + constant_cost

    def l_x(self, x, u, i, terminal=False):
        """Partial derivative of cost function with respect to x.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            dl/dx [state_size].
        """
        Q_plus_Q_T = self._Q_plus_Q_T_terminal if terminal else self._Q_plus_Q_T
        x_diff = np.reshape(x - self.x_path[i], (self.Q.shape[0], 1))
        x_diff[2:] = 0.0
        x_dist = np.reshape(x - self.x_avoids[i], (self.Q.shape[0], 1))
        x_dist[2:] = 0.0
        # Barrier gradients (see the grad formulas in __init__'s docstring).
        self.f = - self.q3 * self.q4 * np.exp(self.q4 * (- x_dist.T.dot(x_dist) + self.b34 * self.b34)) * 2 * x_dist
        self.f += self.q1 * self.q2 * np.exp(self.q2 * (x_diff.T.dot(x_diff) - self.b12 * self.b12)) * 2 * x_diff
        return x_diff.T.dot(Q_plus_Q_T) + self.f.T + self.q.T

    def l_u(self, x, u, i, terminal=False):
        """Partial derivative of cost function with respect to u.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            dl/du [action_size].
        """
        if terminal:
            # NOTE(review): this has shape [path_length - 1, action_size],
            # not [action_size] like the non-terminal branch — confirm what
            # the iLQR backward pass expects before changing it.
            return np.zeros_like(self.u_path)
        u_diff = u - self.u_path[i]
        return u_diff.T.dot(self._R_plus_R_T) + self.r.T

    def l_xx(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to x.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            d^2l/dx^2 [state_size, state_size].
        """
        x_diff = np.reshape(x - self.x_path[i], (self.Q.shape[0], 1))
        x_diff[2:] = 0.0
        x_dist = np.reshape(x - self.x_avoids[i], (self.Q.shape[0], 1))
        x_dist[2:] = 0.0
        # Barrier Hessians (see the grad^2 formulas in __init__'s docstring).
        self.F = - self.q3 * self.q4 * np.exp(self.q4 * (-x_dist.T.dot(x_dist) + self.b34 * self.b34)) * 2 + self.q3 * self.q4**2 * np.exp(self.q4 * (- x_dist.T.dot(x_dist) + self.b34 * self.b34)) * 4 * x_dist.dot(x_dist.T)
        self.F += self.q1 * self.q2 * np.exp(self.q2 * (x_diff.T.dot(x_diff) - self.b12 * self.b12)) * 2 + self.q1 * self.q2**2 * np.exp(self.q2 * (x_diff.T.dot(x_diff) - self.b12 * self.b12)) * 4 * x_diff.dot(x_diff.T)
        self._F_plus_F_T = self.F + self.F.T
        # Parentheses for clarity; the conditional expression already bound
        # this way (ternary has lower precedence than +).
        return (self._F_plus_F_T + self._Q_plus_Q_T_terminal) if terminal else (self._F_plus_F_T + self._Q_plus_Q_T)

    def l_ux(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to u and x.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            d^2l/dudx [action_size, state_size].
        """
        # State and control costs are separable, so the cross term is zero.
        return np.zeros((self.R.shape[0], self.Q.shape[0]))

    def l_uu(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to u.
        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.
        Returns:
            d^2l/du^2 [action_size, action_size].
        """
        if terminal:
            return np.zeros_like(self.R)
        return self._R_plus_R_T
class ILQRController():
    """Iterative-LQR trajectory tracker with exponential barrier costs."""

    def __init__(self, target_speed, steps_ahead = 10, dt = 0.25, l = 1.0, half_width = 2.0):
        # Target speed (km/h -> m/s) determines the distances during each step
        self.target_speed = target_speed * 1000./3600.
        # Number of steps to be optimized
        self.steps_ahead = steps_ahead
        # Step length
        self.dt = dt
        # Bicycle-model length parameter passed to the dynamics
        self.l = l
        # Spacing of the densified minor waypoints between carla waypoints
        self.x_path_density = self.target_speed * self.dt
        # Define car dynamics
        self.dynamics = CarDynamics(self.dt, self.l)
        # The cost is rebuilt each control step once the path is known
        self.cost = None
        # Nominal state and control trajectories
        self.x_path = np.zeros([self.steps_ahead, self.dynamics.state_size])
        self.u_path = np.zeros([self.steps_ahead - 1, self.dynamics.action_size])
        # Obstacles to avoid
        self.x_avoids = np.zeros([self.steps_ahead, self.dynamics.state_size])
        # Half of the road width (barrier radius)
        self.half_width = half_width
        # Current measured state
        self.measurements = {'posx': None, 'posy': None, 'v': None, 'theta': None}

    def get_state(self, measurements):
        """Get the state variables from measurements"""
        v = np.linalg.norm([measurements.v.x, measurements.v.y], ord = 2)
        theta = measurements.t.rotation.yaw * np.pi / 180.
        posx, posy = measurements.t.location.x, measurements.t.location.y
        self.measurements['posx'] = posx
        self.measurements['posy'] = posy
        self.measurements['v'] = v
        self.measurements['theta'] = theta
        #print("ilqr measurements:", self.measurements)

    def predict_trajectory(self, us_init, world = None):
        """Roll out the dynamics from the current measured state under us_init.

        BUG FIX: the original referenced undefined names (xs_init, x0) and
        always raised NameError. It now simulates from self.measurements;
        when us_init is None the stored nominal controls are used.
        Returns the predicted state sequence [steps_ahead, state_size].
        """
        if us_init is None:
            us_init = self.u_path
        x0 = np.asarray([self.measurements['posx'],
                         self.measurements['posy'],
                         self.measurements['theta'],
                         self.measurements['v']])
        xs_init = np.zeros([self.steps_ahead, self.dynamics.state_size])
        xs_init[0, :] = x0[:]
        for t in range(self.steps_ahead - 1):
            xs_init[t + 1, :] = self.dynamics.f(xs_init[t, :], us_init[t, :], t)
        assert xs_init.shape == (self.steps_ahead, self.dynamics.state_size)
        return xs_init

    def generate_nominal(self, future_wps_np = None):
        """Build the nominal trajectory from the next waypoints.

        The first steps_ahead waypoint positions become the (x, y) reference
        and the last state column is pinned to the target speed. Optional
        coordinate-transform / densification variants are available via the
        static helpers below.
        """
        self.x_path = np.zeros([self.steps_ahead, self.dynamics.state_size])
        self.x_path[:, -1] = self.target_speed
        self.x_path[:self.steps_ahead, :2] = future_wps_np[:self.steps_ahead, :2]

    def generate_avoidance(self, locations = None):
        """Fill x_avoids with one obstacle position per planning step.

        BUG FIX: the original indexed locations[i] whenever more than one
        sample was given, raising IndexError when fewer samples than steps
        were supplied; out-of-range steps now clamp to the last sample
        (a single sample is still broadcast to every step, as before).
        """
        if locations is not None:
            for i in range(self.steps_ahead):
                loc = locations[i] if i < len(locations) else locations[-1]
                self.x_avoids[i, 0] = loc.x
                self.x_avoids[i, 1] = loc.y
            print("Distance to obstacle: {}".format(\
                np.linalg.norm([self.measurements['posx'] - self.x_avoids[0, 0], \
                                self.measurements['posy'] - self.x_avoids[0, 1]])))

    def generate_cost(self):
        """Assemble the CarCost from the current path, obstacles and tuning."""
        # Quadratic state cost disabled; tracking enters via the barrier terms.
        Q = 0.0 * np.eye(self.dynamics.state_size)
        Q[3, 3] = 0.0
        Q[2, 2] = 0.0
        R = np.eye(self.dynamics.action_size)
        R[0, 0] = .0
        R[1, 1] = .0
        # For staying in lane
        q1 = 1.0E-2
        q2 = 1.0E-4
        # For avoidance (currently disabled via the zero factors)
        q3 = 0.0 * 1.
        q4 = 0.0 * 2.0
        b12 = self.half_width
        b34 = self.half_width
        q = np.zeros([self.dynamics.state_size, 1])
        r = np.zeros([self.dynamics.action_size, 1])
        self.cost = CarCost(Q = Q, q = q, R = R, r = r, b12 = b12, b34 = b34, \
                            q1 = q1, q2 = q2, q3 = q3, q4 = q4,\
                            x_path = self.x_path, x_avoids = self.x_avoids, u_path = self.u_path)
        #self.cost = PathQRCost(Q = Q, R = R, x_path = self.x_path)

    def run_ilqr(self, x0, us_init):
        """Solve the finite-horizon problem from x0 with warm-start us_init.

        Returns:
            xs: optimized states
            us: optimized controls
        """
        ilqr = iLQR(self.dynamics, self.cost, self.steps_ahead - 1)
        xs, us = ilqr.fit(x0, us_init, n_iterations = 50, on_iteration=on_iteration)
        return xs, us

    def control(self, future_wps_np, measurements, us_init = None, avoidances = None):
        """One full control step: read state, build cost, run iLQR.

        Args:
            future_wps_np: upcoming waypoint positions [>= steps_ahead, 2+]
            measurements: carla measurement object (see get_state)
            us_init: optional warm-start control sequence
            avoidances: optional obstacle location samples
        Returns:
            (xs, us) optimized state and control sequences.
        """
        # Collect state variables
        self.get_state(measurements)
        # Initial state is x0
        x0 = np.zeros([self.dynamics.state_size])
        x0[:4] = np.asarray([self.measurements['posx'], \
                             self.measurements['posy'], \
                             self.measurements['theta'], \
                             self.measurements['v']])
        print("Run ilqr from state ", x0)
        # Define nominal path
        self.generate_nominal(future_wps_np)
        # Define obstacles
        self.generate_avoidance(avoidances)
        # Choose initial control sequence
        if us_init is None:
            us_init = self.u_path
        else:
            us_init = us_init[:self.steps_ahead - 1, :]
        assert us_init.shape == (self.steps_ahead - 1, self.dynamics.action_size)
        # Define the cost
        self.generate_cost()
        # Run ilqr
        xs, us = self.run_ilqr(x0, us_init)
        return xs, us

    @staticmethod
    def transform_into_dense_points(pts, density):
        """ Uniformly add additional waypoints between consecutive waypoints."""
        # Firstly add the first waypoint
        pts_ = [pts[0, :]]
        for i in range(pts.shape[0] - 1):
            # np.linspace includes both endpoints, so start at j = 1 below to
            # avoid duplicating the shared waypoint of adjacent segments.
            num = int(np.floor(np.linalg.norm(pts[i + 1] - pts[i])/density)) + 2
            pts_i_0 = np.linspace(pts[i, 0], pts[i + 1, 0], num = num)
            pts_i_1 = np.linspace(pts[i, 1], pts[i + 1, 1], num = num)
            for j in range(1, num):
                pts_.append([pts_i_0[j], pts_i_1[j]])
        return np.asarray(pts_)

    @staticmethod
    def transform_into_cars_coordinate_system(pts, x, y, theta_cos, theta_sin):
        """Rotate/translate world points into the ego car's frame."""
        diff = (pts - [x, y])
        pts_ = np.zeros_like(diff)
        pts_[:, 0] = theta_cos * diff[:, 0] + theta_sin * diff[:, 1]
        pts_[:, 1] = - theta_sin * diff[:, 0] + theta_cos * diff[:, 1]
        return pts_

    @staticmethod
    def transform_into_world_coordinate_system(pts, x, y, theta_cos, theta_sin):
        """Inverse of transform_into_cars_coordinate_system."""
        diff = np.zeros_like(pts)
        diff[:, 0] = pts[:, 0] * theta_cos + pts[:, 1] * theta_sin
        diff[:, 1] = pts[:, 0] * theta_sin + pts[:, 1] * theta_cos
        pts_ = diff + [x, y]
        return pts_

    @staticmethod
    def find_trajectory_waypoints(pts, world):
        """Snap each point to the closest map waypoint via the world map.

        NOTE(review): `carla` is not imported anywhere in this module —
        this helper raises NameError unless the caller's module provides it.
        """
        m = world.get_map()
        pts_ = []
        for i in range(pts.shape[0]):
            wp = m.get_waypoint(carla.Location(x = pts[i, 0], y = pts[i, 1], z = 0.0))
            pts_.append([wp])
        return pts_
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 30 15:08:56 2020
@author: jinwensun
"""
class Solution(object):
def groupStrings(self, strings):
"""
:type strings: List[str]
:rtype: List[List[str]]
"""
asic_dict = dict()
for s in strings:
c_list = []
for i in range(len(s)):
if i == 0:
c_list.append(0)
else:
c_list.append((ord(s[i]) - ord(s[0]))%26)
if tuple(c_list) in asic_dict:
asic_dict[tuple(c_list)].append(s)
else:
asic_dict[tuple(c_list)] = [s]
return asic_dict.values()
|
# Competitive-programming snippet: for each j in [2, n] the nested loops
# overwrite out[j] with every i in [2, n) dividing j, so out[j] ends as j's
# largest proper divisor; untouched entries (primes, and j = 1) stay 1.
# The sorted values for indices 2..n are printed space-separated.
# NOTE(review): the intended problem/output is not documented — confirm.
n = int(input())
out = [1] * (n + 1)
for i in range(2, n):
    for j in range(2 * i, n + 1, i):
        out[j] = i
print(' '.join(map(str, sorted(out[2:]))))
|
from itertoolz import countby, frequencies
def even(x):
    """Predicate: is *x* divisible by two?"""
    return divmod(x, 2)[1] == 0
def test_frequencies():
    """frequencies() counts occurrences of each distinct element."""
    assert (frequencies(["cat", "pig", "cat", "eel",
                         "pig", "dog", "dog", "dog"]) ==
            {"cat": 2, "eel": 1, "pig": 2, "dog": 3})
    # Empty input yields an empty mapping.
    assert frequencies([]) == {}
    # A string is iterated per character.
    assert frequencies("onomatopoeia") == {"a": 2, "e": 1, "i": 1, "m": 1,
                                           "o": 4, "n": 1, "p": 1, "t": 1}
def test_countby():
    """countby() buckets items by key(item) and counts each bucket."""
    assert countby(even, [1, 2, 3]) == {True: 1, False: 2}
    assert countby(len, ['cat', 'dog', 'mouse']) == {3: 2, 5: 1}
|
from setuptools import setup
# Minimal packaging config: the `gridworld` package's sources live in ./src.
setup(
    name='gridworld',
    packages = ['gridworld'],
    package_dir = {'gridworld': 'src'},
)
|
import os
import subprocess
import time
from threading import Timer
class RemoteInstaller(object):
'''Psexec wrapper for deploy on remote host'''
_PS_TOOLS_ENV = 'PSTOOLS_PATH'
@classmethod
def IsPsTools(self):
return os.environ.has_key(self._PS_TOOLS_ENV)
@classmethod
def GetPsToolsEnv(self):
return self._PS_TOOLS_ENV
def _run(self, params):
start_time = time.time()
returncode = subprocess.call(params, shell=True)
print "operation time: {0:.2g} sec".format(time.time() - start_time)
return returncode
def __init__(self, address, user, password, connectTimeout):
self._address = address
self._user = user
self._password = password
self._connectTimeout = connectTimeout
self._psexec = os.environ[self._PS_TOOLS_ENV] + "\PsExec.exe"
def install(self, params):
cmd = "\"{0}\" -accepteula -n {4} \\\{1} -h -u {2} -p {3} -f -c {5}".\
format(self._psexec, self._address, self._user, self._password, self._connectTimeout, params)
print cmd
return self._run(cmd)
def uninstall(self, params):
cmd = "\"{0}\" -accepteula -n {4} \\\{1} -h -u {2} -p {3} {5}".\
format(self._psexec, self._address, self._user, self._password, self._connectTimeout, params)
print cmd
return self._run(cmd)
if __name__ == "__main__":
user = 'name'
pwd = '132456'
ip = '192.168.1.1'
ip_incorrect = '10.1.1.1'
path = 'C:\path'
uninstaller = 'Uninstall_file'
installer = RemoteInstaller(ip_incorrect, user, pwd, 5)
uninstall_cmd = '{0}/{1} /S'.format(path, uninstaller)
installer.uninstall(uninstall_cmd) |
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray
import glob
from skimage.io import imread
from matplotlib import cm
# Load the sky photo as a grayscale array and display it.
example_file=glob.glob ('Sky.JPG')[0]
im=imread(example_file, as_gray=True)
plt.imshow(im, cmap=cm.gray)
plt.show()
# Laplacian-of-Gaussian blob detection; each row of the result is (y, x, sigma).
blobs_log=blob_log(im, max_sigma=30, num_sigma=10, threshold=0.1)
#Compute radii in the 3rd column
blobs_log[:, 2]=blobs_log[:, 2] * sqrt(2)
numrows= len(blobs_log)
print('Number of Stars Counted:', numrows)
fig, ax = plt.subplots(1, 1)
plt.imshow(im, cmap=cm.gray)
# Circle each detected blob (radius padded by 5 px for visibility).
for blob in blobs_log:
    y, x, r = blob
    c = plt.Circle((x, y), r+5, color='lime', linewidth=2, fill=False)
    ax.add_patch(c)
plt.show()
import hashlib
import json
import logging
import logging.handlers

import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.escape import json_encode, json_decode
from tornado.options import define, options, parse_command_line

from dbservice import DBService
from json_keys import *
# Server port; override on the command line with --port.
define('port', default=8000, help='run on the given port', type=int)
# Maps user token -> open websocket handler for currently connected clients.
clients_online = dict()
# NOTE(review): `i` appears unused in this module — confirm before removing.
i = 0
# Shared database accessor singleton.
db_service = DBService.inst()
# Log to both a size-rotated file and stderr with one common format.
f = logging.Formatter(fmt='%(levelname)s:%(name)s: %(message)s '
                          '(%(asctime)s; %(filename)s:%(lineno)d)',
                      datefmt="%Y-%m-%d %H:%M:%S")
handlers = [
    logging.handlers.RotatingFileHandler('rotated.log', encoding='utf8',
                                         maxBytes=100000, backupCount=1),
    logging.StreamHandler()
]
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
for h in handlers:
    h.setFormatter(f)
    h.setLevel(logging.DEBUG)
    root_logger.addHandler(h)
class UsersHandler(tornado.web.RequestHandler):
    """GET /users — return every registered user as JSON."""

    def set_default_headers(self):
        # CORS headers for the local front-end dev server.
        self.set_header("Access-Control-Allow-Origin", "http://localhost:3000")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        # FIX: removed a stray trailing comma that turned this call into a
        # discarded 1-tuple expression.
        self.set_header("Access-Control-Allow-Credentials", "true")

    def data_received(self, chunk):
        # Streamed request bodies are not used.
        return

    def get(self):
        self.write(json_encode(db_service.get_all_users()))
class UserHandler(tornado.web.RequestHandler):
    """GET /user?<token> — look up a single user by token."""

    def set_default_headers(self):
        # CORS headers for the local front-end dev server.
        self.set_header("Access-Control-Allow-Origin", "http://localhost:3000")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        # FIX: removed a stray trailing comma (discarded 1-tuple expression).
        self.set_header("Access-Control-Allow-Credentials", "true")

    def data_received(self, chunk):
        # Streamed request bodies are not used.
        pass

    def get(self, *args, **kwargs):
        self.write(json_encode(db_service.get_user(self.get_argument(USER_TOKEN_FIELD))))
class IndexHandler(tornado.web.RequestHandler):
    """GET / — first visit creates a user and sets the token cookie."""

    def set_default_headers(self):
        # CORS headers for the local front-end dev server.
        self.set_header("Access-Control-Allow-Origin", "http://localhost:3000")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        # FIX: removed a stray trailing comma (discarded 1-tuple expression).
        self.set_header("Access-Control-Allow-Credentials", "true")

    def data_received(self, chunk):
        # Streamed request bodies are not used.
        pass

    def get(self, *args, **kwargs):
        """Issue a token cookie for first-time visitors and persist the user."""
        if not self.get_cookie(USER_TOKEN_FIELD):
            # Auto-name the user from the current user count (idiomatic
            # str(len(...)) replaces the old .__len__().__str__() chain).
            username = 'user_' + str(len(db_service.get_all_users()))
            token = hashlib.sha256(username.encode()).hexdigest()
            user = {
                USER_TOKEN_FIELD: token,
                USER_USERNAME_FIELD: username,
                USER_MESSAGES_LIST: [],
                USER_CHATS_LIST: []
            }
            db_service.insert_user(user)
            self.set_cookie(USER_TOKEN_FIELD, token)
        self.write("OK")
class ChatsHandler(tornado.web.RequestHandler):
    """REST endpoint listing / creating 1:1 chats for the cookie-identified user."""

    def data_received(self, chunk):
        # Streamed request bodies are not used.
        return

    def set_default_headers(self):
        # CORS headers for the local front-end dev server.
        # FIX: removed stray trailing commas (discarded 1-tuple expressions).
        self.set_header("Access-Control-Allow-Origin", "http://localhost:3000")
        self.set_header("Access-Control-Allow-Credentials", "true")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')

    def get(self):
        """Return the full user records the current user has open chats with."""
        users_to_chat_with = []
        user = db_service.get_user(self.get_cookie(USER_TOKEN_FIELD))
        if user:
            for chat in user[USER_CHATS_LIST]:
                users_to_chat_with.append(db_service.get_user(chat[CHAT_RECIPIENT_TOKEN_FIELD]))
        self.write(json_encode(users_to_chat_with))

    def post(self, *args, **kwargs):
        """Create a chat link in both directions between sender and recipient."""
        data = json_decode(self.request.body)
        recipient_token = data[CHAT_RECIPIENT_TOKEN_FIELD]
        if recipient_token:
            sender = db_service.get_user(self.get_cookie(USER_TOKEN_FIELD))
            recipient = db_service.get_user(recipient_token)
            if {CHAT_RECIPIENT_TOKEN_FIELD: sender[USER_TOKEN_FIELD]} not in recipient[USER_CHATS_LIST]:
                recipient[USER_CHATS_LIST].append({CHAT_RECIPIENT_TOKEN_FIELD: sender[USER_TOKEN_FIELD]})
                db_service.update_user(recipient)
            # BUG FIX: the dedup check compared against the whole recipient
            # *record* instead of the token, so it never matched and every
            # POST appended a duplicate chat entry on the sender's side.
            if sender and {CHAT_RECIPIENT_TOKEN_FIELD: recipient_token} not in sender[USER_CHATS_LIST]:
                sender[USER_CHATS_LIST].append({CHAT_RECIPIENT_TOKEN_FIELD: recipient_token})
                db_service.update_user(sender)
class WebSocketChatHandler(tornado.websocket.WebSocketHandler):
    """Per-connection chat socket; delivers queued messages on connect."""

    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)

    def check_origin(self, origin):
        # Allow cross-origin websocket connections (local dev setup).
        return True

    def open(self, *args):
        user = db_service.get_user(self.get_cookie(USER_TOKEN_FIELD))
        if user:
            if user[USER_TOKEN_FIELD] not in clients_online:
                clients_online[user[USER_TOKEN_FIELD]] = self
            # Flush messages that arrived while the user was offline.
            for msg in user[USER_MESSAGES_LIST]:
                self.write_message(json.dumps(msg))
            user[USER_MESSAGES_LIST].clear()
            db_service.update_user(user)

    def on_message(self, message):
        """Relay to an online recipient, or queue for later delivery."""
        msg = json.loads(message)
        recipient_token = msg[MSG_RECIPIENT_FIELD]
        if recipient_token in clients_online:
            clients_online[recipient_token].write_message(message)
        else:
            recipient = db_service.get_user(recipient_token)
            if recipient:
                recipient[USER_MESSAGES_LIST].append(msg)
                db_service.update_user(recipient)

    def on_close(self):
        # BUG FIX: a client that never registered (missing/unknown cookie) is
        # not in clients_online, so the old plain `del` raised KeyError on
        # disconnect; pop with a default is a safe no-op in that case.
        clients_online.pop(self.get_cookie(USER_TOKEN_FIELD), None)
# URL routing for the chat service.
app = tornado.web.Application([
    (r'/', IndexHandler),
    (r'/users', UsersHandler),
    (r'/chat', WebSocketChatHandler),
    (r'/chats', ChatsHandler),
    (r'/user', UserHandler)
])
if __name__ == '__main__':
    # Parse --port etc., bind, and serve until interrupted.
    parse_command_line()
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
"""
Implement rand7() using rand5()
"""
import random
def rand2():
    """
    Return 0 or 1 with equal probability using rand5().

    Mapping (rejection sampling):
    -- returns 1 if rand5() is 1 or 3
    -- returns 0 if rand5() is 2 or 4
    -- a roll of 5 is discarded and re-rolled

    BUG FIX: the old docstring claimed the function returns 1 or 2; it has
    always returned 0 or 1. The retry is now a loop instead of recursion,
    which avoids unbounded stack growth on long runs of 5s.
    """
    val = rand5()
    while val == 5:
        val = rand5()
    return val % 2
def rand5():
    """Uniform random integer in the closed range [1, 5]."""
    low, high = 1, 5
    return random.randint(low, high)
def rand7():
    """Uniform random integer in [1, 7], built from three fair coin flips."""
    # Three rand2() bits give a uniform value in 0..7; reject 0 and retry
    # so the remaining outcomes 1..7 stay equally likely.
    while True:
        bits = (rand2() << 2) | (rand2() << 1) | rand2()
        if bits != 0:
            return bits
|
#!/usr/bin/python
def get_input(filename):
    """Read *filename* and return its comma-separated integers as a list."""
    with open(filename) as handle:
        raw = handle.read().strip()
    return [int(token) for token in raw.split(',')]
def a():
    """Part 1: total fuel to align every position on the median (L1 cost)."""
    data = get_input('input_07.txt')
    print(min(data), max(data), len(data))
    data.sort()
    # The median minimises the sum of absolute distances.
    middle = len(data) // 2
    fuel = 0
    for pos in data:
        fuel += abs(data[middle] - pos)
    print(fuel)
def _gauss(x):
return (x * (x + 1)) / 2
# From Reddit:
def b():
    """Part 2: brute-force every target position; per-step cost grows as the
    triangular number of the distance (see _gauss)."""
    data = get_input('input_07.txt')
    print(min([
        sum([_gauss(abs(x - target)) for x in data])
        for target in range(max(data))
    ]))
def main():
    # Run both puzzle parts in order.
    a()
    b()
if __name__ == '__main__':
    main()
|
__author__ = 'dingxinhui'
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from encapsulation.encapsulation import UIHandle
from picture.report_picture import *
from constant.desired_caps_1 import *
import os
"""
搜索
"""
def search(name):
    """Search the app for the show called *name* and return its screenshot.

    NOTE(review): `webdriver`, `config` and `get_screenshot` come from the
    wildcard imports above — confirm which module actually provides each.
    """
    # Build an Appium driver from the desired-caps app configuration.
    driver = webdriver.Remote('http://localhost:4723/wd/hub', config())
    # Wrap the raw driver with the project's UI helper.
    ui_handle = UIHandle(driver)
    ui_handle.Click("搜索", "首页搜索")
    ui_handle.Input("搜索", "搜索页面搜索", name)
    ui_handle.Click("搜索", "搜索页面搜索")
    # Switch the device to the Sogou input method via adb.
    os.popen("adb shell ime set com.sohu.inputmethod.sogou/.SogouIME")
    # Press ENTER (keycode 66) to trigger the search.
    driver.press_keycode(66)
    # name = driver.find_element_by_xpath('//*[@text="新增服务号"]').text
    # Capture a screenshot of the results page.
    img = get_screenshot(driver)
    ui_handle.quit()
    # Return the screenshot wrapped in a single-element list.
    a = [img]
    return a
|
# 1º resposta
# metros = float(input("Digite uma medida em metros:"))
# print("Medida em centimetros é: ")
# centimetros = int(input(metros * 100))
# Praticando...(if e else)
# n1 = int(input("Digite primeiro numero: "))
# n2 = int(input("Digite segundo numero: "))
# if(n1 > n2):
# print(n1, "é maior que n2")
# else:
# print(n2, "é maior que n1")
#2º resposta
#hoje não podemos definir o genero como inválido
# genero = int(input("Digite 1 se Feminino, 2 se for Masculino:, ou 3 outros: "))
# if genero == 1:
# print("Genero Feminino:")
# if genero == 2:
# print("Genero Masculino:")
# if genero == 3:
# print("Outro genero")
#Depois quero tirar dúvidas sobre isso
#"# -*- coding: latin-1 -*"
# genero = str(input("Digite (F)-Feminino, (M)-Masculino:")).upper()
# if genero == "M":
# print("Genero Masculino:")
# elif genero =="F" :
# print("Genero Feminino:")
# else:
# print("Genero inválido")
#3º resposta
# print("Cadastro exercício de Marlon: ")
# usuario=input("LOGIN: ")
# senha=input("SENHA: ")
# while (usuario==senha):
# print("invalido: Senha não pode ser o usuário.")
# usuario=input("LOGIN: ")
# senha=input("SENHA: ")
# else:
# print("congratulation!!! cadastro realizado.")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Sachin Mane and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import os
from frappe.model.document import Document
from frappe.modules.utils import get_module_path
from latte.dashboard.doctype.dashboard_configuration.dashboard_configuration import make_json
from frappe.modules.export_file import export_to_files
class DashboardTheme(Document):
    """Frappe doctype controller for Dashboard Theme records."""

    def before_save(self):
        # Flag the record as standard (developer mode) before it is written.
        self.make_standard()

    def on_update(self):
        # In developer mode, export the record to the module's files so it is
        # tracked in source control; skipped during migrations.
        if frappe.get_conf().developer_mode and self.module and not frappe.flags.in_migrate:
            export_to_files(record_list=[[self.doctype, self.name]], record_module=self.module, create_init=True)

    def make_standard(self):
        # Developer-mode records are marked standard ("Yes").
        if frappe.get_conf().developer_mode:
            self.is_standard = "Yes"
class Solution:
def minDeletionSize(self, strs: List[str]) -> int:
matrix = []
for s in strs:
cur = [ord(e) for e in s]
matrix.append(cur)
ans = 0
n = len(strs[0])
for j in range(n):
tmp = [x[j] for x in matrix]
if tmp != sorted(tmp):
ans += 1
return ans |
import time
import math
import requests
import traceback
import datetime
from progressbar import ProgressBar
import crawler_utils
__version__ = '1.0.5'
client_id = "<CLIENT_ID>"
def convert_url_to_file_path(url):
    """Map an image URL to a local path under images/, suffixed with a
    second-resolution timestamp so repeated snapshots do not collide."""
    base = url.rsplit("/", 1)[-1].replace(".jpg", "")
    stamp = str(math.floor(time.time()))
    return "images/{0}.jpg".format(base + stamp)
def send_request(url):
    """GET *url* with the Twitch Client-ID header and a descriptive UA."""
    user_agent = 'python-requests/{requests_ver} ttvsnap/{this_ver}'.format(
        requests_ver=requests.__version__,
        this_ver=__version__
    )
    headers = {'user-agent': user_agent, 'Client-ID': client_id}
    # 60 s timeout guards against a hung connection.
    response = requests.get(url, timeout=60, headers=headers)
    return response
def get_all_pages(url):
    """Yield each page (response object) of a paginated /streams endpoint.

    Follows the `_links.next` cursor until a page contains no streams.
    """
    response = send_request(url)
    yield response
    data = response.json()
    while len(data.get("streams", [])) > 0:
        next_url = data["_links"]["next"]
        response = send_request(next_url)
        yield response
        data = response.json()
def get_image(data):
    """Build a preview URL from a stream payload: width 0, native height."""
    template = data['preview']['template']
    native_height = str(data['video_height'])
    return template.replace('{width}', '0').replace('{height}', native_height)
def get_image_by_channel(channel_name):
    """Return the preview image URL for a single channel's live stream.

    BUG FIX: the request URL was hard-coded to the channel "wraxu",
    silently ignoring *channel_name*; the parameter is now interpolated.
    NOTE(review): the kraken /streams/<channel> payload may nest the stream
    under a "stream" key — confirm what get_image() should receive here.
    """
    url = "https://api.twitch.tv/kraken/streams/{0}?api_version=3".format(channel_name)
    response = send_request(url)
    data = response.json()
    image_url = get_image(data)
    return image_url
def get_images_by_game(game_name):
    """Collect preview image URLs for every live stream of *game_name*."""
    url = "https://api.twitch.tv/kraken/streams?game={0}".format(game_name)
    image_urls = []
    for response in get_all_pages(url):
        streams_data = response.json()["streams"]
        for stream_data in streams_data:
            image_url = get_image(stream_data)
            if not image_url:
                continue
            image_urls.append(image_url)
    # De-duplicate; ordering is not preserved.
    return list(set(image_urls))
def download_images_by_game(game_name):
    """Download every current preview image for *game_name* into images/."""
    image_urls = get_images_by_game(game_name)
    # ProgressBar wraps the iterable to show download progress.
    pbar = ProgressBar()
    for image_url in pbar(image_urls):
        image_path = convert_url_to_file_path(image_url)
        crawler_utils.download_image(image_url, image_path)
def collect_images_periodically(game_name, wait_time_between_samples=600):
    """Loop forever, sampling *game_name* previews every N seconds (default 600)."""
    while True:
        print("{0} - Collection data for {1}".format(str(datetime.datetime.now()), game_name))
        download_images_by_game(game_name)
        time.sleep(wait_time_between_samples)
|
# -*- coding: utf-8 -*-
from stage import Stage
class Stage1(Stage):
    #---------------------------------------------------------------------------
    # This Stage's definitions
    #---------------------------------------------------------------------------
    # Display name shown for this stage (Portuguese: "Stage 1: warm-up").
    NAME = u"Fase 1:\naquecimento"
    # Message shown on success (Portuguese: "Advanced to the next stage!").
    WIN_MSG = u"Passou para a próxima fase!"
    # How many "natan" targets this stage spawns.
    NUMBER_OF_NATANS = 10
|
"""Utility functions for :mod:`iter_together`."""
from typing import List, Tuple, Iterable, Union
from pathlib import Path
__all__ = [
'iter_together',
'iter_together_file',
]
def iter_together(path_1: Union[str, Path], path_2: Union[str, Path]) -> List[Tuple[str, str]]:
    """Open two files and pair up their lines.

    :param path_1: The file path of the left file
    :param path_2: The file path of the right file
    :returns: Pairs of stripped lines, one tuple per line position
    """
    with open(path_1) as left, open(path_2) as right:
        return iter_together_file(left, right)
def iter_together_file(file_1: Iterable[str], file_2: Iterable[str]) -> List[Tuple[str, str]]:
    """Pair up lines from two iterables of strings.

    :param file_1: The left iterable
    :param file_2: The right iterable
    :returns: Pairs of stripped lines, truncated to the shorter input
    """
    return [
        (left.strip(), right.strip())
        for left, right in zip(file_1, file_2)
    ]
|
import numpy as np
# a=np.arange(15).reshape(3,5)
# print(a.itemsize,type(a))
#
# c=np.array([[1,2,3],[4,5,6]],dtype=complex)
# print(c)
#
#
# print(np.arange(10000).reshape(100,100))
# a=np.array([[1,2],[2,1]])
# b=np.array([[1,1,1],[1,1,1]])
# # print(a.dot(b))
# c=np.random.random((2,3))
# print(c)
#
#
# s=np.array([-1000])
# for i in s:
# print(i**(1/3))
# Build a 5x4 array where element (x, y) = 2**x + y.
b=np.fromfunction(lambda x,y:2**x+y,(5,4),dtype=int)
print(b)
# Transpose, reshape into a single 1x20 row, and flatten to 1-D.
print(b.T,'\n',b.reshape(1,20),'\n',b.ravel())
|
from anoky.expansion.expansion_context import ExpansionContext
from anoky.generation.generation_context import GenerationContext
from anoky.macros.macro import Macro, IdentifierMacro
from anoky.syntax.code import Code
from anoky.syntax.form import Form
from anoky.syntax.identifier import Identifier
from anoky.syntax.node import Element
class Alias(IdentifierMacro):
    """Identifier macro that expands an identifier into stored substitution code."""

    def __init__(self, id_alias:Identifier=None, substitution:Code=None):
        # id_alias is accepted for symmetry with DefAlias but not stored:
        # the macro-table key already carries the aliased name.
        self.substitution = substitution

    def expand(self, id_element, EC:ExpansionContext):
        # Splice a fresh copy of the substitution in place of the identifier,
        # then continue macro-expanding the freshly substituted code.
        id_element.expand(self.substitution.copy())
        EC.expand(id_element)
class DefAlias(Macro):
    """Macro for (defalias name form): registers *name* as an alias for *form*."""

    def expand(self, element:Element, EC:ExpansionContext):
        form = element.code
        # Must be exactly (defalias <identifier> <code>).
        assert isinstance(form, Form) and len(form) == 3
        alias_name = form[1].code
        replacement = form[2].code
        # Register an Alias so future occurrences of the identifier expand
        # into the replacement code.
        EC.id_macro_table[alias_name.name] = Alias(alias_name, replacement)

    def generate(self, element:Element, context:GenerationContext):
        # defalias only affects expansion; it has no generated representation.
        raise NotImplementedError()
|
#coding=utf-8
#author:Kingving time:2020/6/8 0:10
from selenium import webdriver
from day7.上次作业.mySetting import DOMAIN,driverPath
class Driver:
    """Class-level singleton provider of a logged-in selenium WebDriver."""

    # Browser driver instance; stored on the class so only one is ever created.
    _driver = None

    @classmethod
    def get_driver(cls, browser_name='Chrome'):
        """
        Get the shared browser driver, creating it and logging in on first use.
        :param browser_name: browser type, e.g. Chrome or IE
        :return: the selenium WebDriver instance
        """
        # Create the driver only if it does not exist yet; reuse it otherwise.
        if cls._driver is None:
            if browser_name=="Chrome":
                cls._driver=webdriver.Chrome(driverPath["Chrome"])
            elif browser_name=='Firefox':
                cls._driver=webdriver.Firefox(driverPath['Firefox'])
            # ... other browsers omitted
            # Maximize the browser window.
            cls._driver.maximize_window()
            # Visit the default URL (omitted in the original).
            # Perform the login once, right after the driver is created.
            cls.__login()
        return cls._driver

    @classmethod
    def __login(cls):
        # Login flow, run once when the driver is initialized.
        # Bug fix: WebElement's method is send_keys (not sendkeys) and the
        # locator is find_element_by_tag_name (not find_element_by_tagname);
        # the original raised AttributeError on every call.
        cls._driver.find_element_by_name('username').send_keys('libai')
        cls._driver.find_element_by_name('password').send_keys('opmsopms123')
        cls._driver.find_element_by_tag_name('button').click()
|
class Solution(object):
    def reverseVowels(self, s):
        """
        Reverse only the vowels of *s*, leaving all other characters in place.

        Case is preserved (comparison is case-insensitive). Improvements over
        the original: characters are joined once with ''.join instead of
        quadratic string concatenation, and pairs are swapped with tuple
        assignment instead of a temp variable.

        :type s: str
        :rtype: str
        """
        if not s:
            return s
        vowels = set('aeiou')
        # Positions of every vowel, left to right.
        positions = [i for i, ch in enumerate(s) if ch.lower() in vowels]
        chars = list(s)
        # Swap symmetric pairs of vowel positions.
        left, right = 0, len(positions) - 1
        while left < right:
            i, j = positions[left], positions[right]
            chars[i], chars[j] = chars[j], chars[i]
            left += 1
            right -= 1
        return ''.join(chars)
#!/usr/bin/env vpython
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Can be used to point environment variable to hermetic Go toolset.
Usage (on linux and mac):
$ eval `./env.py`
$ go version
Or it can be used to wrap a command:
$ ./env.py go version
"""
assert __name__ == '__main__'
import imp
import os
import pipes
import subprocess
import sys
# Do not want to mess with sys.path, load the module directly.
bootstrap = imp.load_source(
    'bootstrap', os.path.join(os.path.dirname(__file__), 'bootstrap.py'))
# Snapshot of the environment before the Go toolset variables are added,
# used later to emit only the variables that actually changed.
old = os.environ.copy()
def _escape_special(v):
"""Returns (str): The supplied value, with special shell characters escaped.
Replace special characters with their escaped form. This will allow them
to be interpreted by the shell using the $'...' notation.
Args:
v (str): The input value to escape.
"""
for f, r in (
('\n', '\\n'),
('\b', '\\b'),
('\r', '\\r'),
('\t', '\\t'),
('\v', '\\v')):
v = v.replace(f, r)
return v
if sys.platform == 'win32':
  def emit_env_var(key, value):
    # cmd.exe syntax: `set KEY=VALUE`.
    # TODO: The quoting here is probably insufficient for all corner cases.
    # We strip "'" because cmd.exe doesn't like it in PATH for some reason.
    print 'set %s=%s' % (key, pipes.quote(value).strip("'"))
else:
  def emit_env_var(key, value):
    # POSIX shell syntax: `export KEY=VALUE`, using bash's $'...' quoting
    # only when escaping actually changed the string.
    orig_value, value = value, _escape_special(value)
    # We will only use the $'...' notation if there was an escaped character
    # in the string.
    print 'export %s=%s%s' % (key, ('$') if orig_value != value else (''),
                              pipes.quote(value))
def main():
  """Either prints shell exports for the hermetic Go env, or runs a command in it."""
  args = sys.argv[1:]
  if args and args[0] == '--':
    args.pop(0)
  # Install/locate the hermetic Go toolset and compute the new environment.
  new = bootstrap.prepare_go_environ()
  if not args:
    # No command given: emit an export line for every variable that changed,
    # for use with `eval`.
    for key, value in sorted(new.iteritems()):
      if old.get(key) != value:
        emit_env_var(key, value)
    # VIRTUAL_ENV is added by the vpython wrapper. It usually *does not* exist
    # in os.environ of the outer shell that executes eval `./env.py`. Since we
    # are about to replace the native python in PATH with virtualenv's one, we
    # must also make sure the new environment has VIRTUAL_ENV set. Otherwise
    # some virtualenv-aware tools (like gcloud) get confused.
    #
    # Note that once env.py finishes execution, nothing is holding a lock on
    # vpython virtualenv directory, and it may eventually be garbage collected
    # (while the user is still inside a shell that references it). We assume it
    # is rare, and the users can manually recover (by reexecuting env.py). This
    # won't be happening on bots, since they don't use eval `./env.py`.
    if 'VIRTUAL_ENV' in old:
      emit_env_var('VIRTUAL_ENV', old['VIRTUAL_ENV'])
  else:
    # A command was given: run it inside the new environment.
    exe = args[0]
    if exe == 'python':
      exe = sys.executable
    else:
      # Help Windows to find the executable in new PATH, do it only when
      # executable is referenced by name (and not by path).
      if os.sep not in exe:
        exe = bootstrap.find_executable(exe, [bootstrap.WORKSPACE])
    sys.exit(subprocess.call([exe] + args[1:], env=new))
assert __name__ == '__main__'
main()
|
# Databricks notebook source
# MAGIC %md # Conditional Probability Activity & Exercise
# COMMAND ----------
# MAGIC %md Below is some code to create some fake data on how much stuff people purchase given their age range.
# MAGIC
# MAGIC It generates 100,000 random "people" and randomly assigns them as being in their 20's, 30's, 40's, 50's, 60's, or 70's.
# MAGIC
# MAGIC It then assigns a lower probability for young people to buy stuff.
# MAGIC
# MAGIC In the end, we have two Python dictionaries:
# MAGIC
# MAGIC "totals" contains the total number of people in each age group.
# MAGIC "purchases" contains the total number of things purchased by people in each age group.
# MAGIC The grand total of purchases is in totalPurchases, and we know the total number of people is 100,000.
# MAGIC
# MAGIC Let's run it and have a look:
# COMMAND ----------
from numpy import random
random.seed(0)  # fixed seed so the notebook is reproducible
# People counted per age decade, and purchases counted per age decade.
totals = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}
purchases = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}
totalPurchases = 0
for _ in range(100000):
    # Uniformly random age decade; purchase probability is decade/100, so
    # older people are deliberately more likely to buy (age and purchasing
    # are made dependent on purpose).
    ageDecade = random.choice([20, 30, 40, 50, 60, 70])
    purchaseProbability = float(ageDecade) / 100.0
    totals[ageDecade] += 1
    if (random.random() < purchaseProbability):
        totalPurchases += 1
        purchases[ageDecade] += 1
# COMMAND ----------
totals
# COMMAND ----------
purchases
# COMMAND ----------
totalPurchases
# COMMAND ----------
# MAGIC %md Let's play with conditional probability.
# MAGIC
# MAGIC First let's compute P(E|F), where E is "purchase" and F is "you're in your 30's". The probability of someone in their 30's buying something is just the percentage of how many 30-year-olds bought something:
# COMMAND ----------
# Conditional probability P(E|F): purchase given the person is in their 30s.
PEF = float(purchases[30]) / float(totals[30])
print('P(purchase | 30s): ' + str(PEF))
# COMMAND ----------
# MAGIC %md P(F) is just the probability of being 30 in this data set:
# COMMAND ----------
PF = float(totals[30]) / 100000.0
print("P(30's): " + str(PF))
# COMMAND ----------
# MAGIC %md And P(E) is the overall probability of buying something, regardless of your age:
# COMMAND ----------
PE = float(totalPurchases) / 100000.0
print("P(Purchase):" + str(PE))
# COMMAND ----------
# MAGIC %md If E and F were independent, then we would expect P(E | F) to be about the same as P(E). But they're not; P(E) is 0.45, and P(E|F) is 0.3. So, that tells us that E and F are dependent (which we know they are in this example.)
# COMMAND ----------
# MAGIC %md P(E,F) is different from P(E|F). P(E,F) would be the probability of both being in your 30's and buying something, out of the total population - not just the population of people in their 30's:
# COMMAND ----------
# Joint probability P(E,F): in the 30s AND purchased, over the whole population.
print("P(30's, Purchase)" + str(float(purchases[30]) / 100000.0))
# COMMAND ----------
# MAGIC %md Let's also compute the product of P(E) and P(F), P(E)P(F):
# COMMAND ----------
print("P(30's)P(Purchase)" + str(PE * PF))
# COMMAND ----------
# MAGIC %md Something you may learn in stats is that P(E,F) = P(E)P(F), but this assumes E and F are independent. We've found here that P(E,F) is about 0.05, while P(E)P(F) is about 0.075. So when E and F are dependent - and we have a conditional probability going on - we can't just say that P(E,F) = P(E)P(F).
# MAGIC
# MAGIC We can also check that P(E|F) = P(E,F)/P(F), which is the relationship we showed in the slides - and sure enough, it is:
# COMMAND ----------
# Sanity check: P(E,F)/P(F) should reproduce P(E|F) computed above.
print((purchases[30] / 100000.0) / PF)
# COMMAND ----------
# MAGIC %md ## Your Assignment
# COMMAND ----------
# MAGIC %md Modify the code above such that the purchase probability does NOT vary with age, making E and F actually independent.
# MAGIC
# MAGIC Then, confirm that P(E|F) is about the same as P(E), showing that the conditional probability of purchase for a given age is not any different than the a-priori probability of purchase regardless of age.
# COMMAND ----------
# adding some changes
#!/usr/bin/env python3
import os
import requests
import asyncio
from concurrent import futures
# Function to fetch the content from a URL, and swallow any
# exceptions that the fetching raises. If an exception is
# raised, the function returns None. This is used to make
# sure waiting on all the requests in a list doesn't raise
# exceptions but instead puts in None in the response list.
def get_url_or_none(url):
    """Fetch *url*, returning the Response, or None if the request fails.

    Swallowing request errors keeps the gather() over a batch of URLs from
    aborting; the caller treats None entries as failed downloads.
    """
    try:
        return requests.get(url)
    except requests.RequestException:
        # Bug fix: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt (and hid programming errors). Only network/HTTP
        # failures are expected here, so catch exactly those.
        return None
# Fetch the content of a set of URLs asynchronously. The
# return value is a list of Response objects and Nones, in
# the same order as the list or URLs passed as argument.
# Fetch the content of a set of URLs asynchronously. The
# return value is a list of Response objects and Nones, in
# the same order as the list or URLs passed as argument.
async def fetch_images_from_urls(urls):
    """Fetch all *urls* concurrently on a thread pool; results align with *urls*."""
    with futures.ThreadPoolExecutor(max_workers=16) as executor:
        # get_running_loop() is the correct call from inside a coroutine;
        # get_event_loop() is deprecated here and can bind the wrong loop.
        event_loop = asyncio.get_running_loop()
        tasks = [event_loop.run_in_executor(executor, get_url_or_none, url)
                 for url in urls]
        return await asyncio.gather(*tasks)
# Hardcoded four image types, with URL lists shipped in files
classes = ['kitty', 'puppy', 'creepies', 'ungulate']
# One URL-list file per class, e.g. 'data/kitty-urls.txt'. Note this is a
# lazy map object, consumed once by fetch() below.
urllists = map(lambda s: 'data/{}-urls.txt'.format(s), classes)
async def fetch():
    """Download every class's images into data/raw, recording URLs that failed."""
    for cls, urllist in zip(classes, urllists):
        # The downloaded images will be saved under data/raw
        os.makedirs('data/raw/{}'.format(cls), exist_ok=True)
        print(cls)
        with open(urllist, 'r',encoding="utf-8") as f:
            urls = f.read().splitlines()
        invalid_urls = []
        # Fire off all requests for this class concurrently.
        responses = await fetch_images_from_urls(urls)
        counter = 0
        for url, response in zip(urls, responses):
            # Each image gets a unique name with a running number
            filename = 'data/raw/{0}/{0}{1:04}.jpg'.format(cls, counter)
            # An image is saved only if the server returns a
            # successful response with a JPEG content type. All
            # other URLs are added to the list of invalid URLs.
            if (response is not None
                and response.status_code == 200
                and response.headers['content-type'] == 'image/jpeg'):
                with open(filename, 'wb') as f:
                    f.write(response.content)
                counter += 1
            else:
                invalid_urls.append(url)
        # All URLs that didn't return an image are saved and written into
        # the invalid URLs file afterwards.
        if invalid_urls:
            with open('data/{}-invalid-urls.txt'.format(cls), 'w') as f:
                for url in invalid_urls:
                    f.write('{}\n'.format(url))
asyncio.run(fetch())
|
"""."""
# internal modules
from datetime import datetime
# external modules
# relative modules
# from ...misc.load import load_cfg
# global attributes
__all__ = ('test',)
__doc__ = """."""
__filename__ = __file__.split('/')[-1].strip('.py')
__path__ = __file__.strip('.py').strip(__filename__)
# Timestamp captured at import time; stamped into every generated header.
date = str(datetime.now())
# Header template; _OBJECT_ is replaced with the target object's name.
t_header = ';\n' +\
    f'; File generated from {__package__} on {date}\n' +\
    '; Towards _OBJECT_\n' +\
    ';\n'
# Setup template; _C_/_I_/_B_/_F_ are count/interval/binning/filter.
t_setup = f'#count _C_\n' +\
    f'#interval _I_\n' +\
    f'#binning _B_\n' +\
    f'#filter _F_\n'
# One line per target: name, RA, Dec (tab separated).
t_target = f'_N_\t_RA_\t_DEC_\n'
def mosaic(data, output, obj, count=1, interval=1, binning=1, filt='B,V,R,I'):
    """Mosaic colours together."""
    # assuming data is 2d, with each row 3vals <name, ra, dec>
    # Per-filter settings arrive as comma-separated strings (or scalars);
    # each must supply one value per filter — a shorter list raises IndexError.
    count = str(count).split(',')
    interval = str(interval).split(',')
    binning = str(binning).split(',')
    filt = str(filt).split(',')
    _h = t_header.replace('_OBJECT_', obj)
    for ite, fil in enumerate(filt):
        # Strip stray spaces from this filter's settings.
        c, i, b, fil = list(map(lambda x: x.strip(' '),
                                [count[ite], interval[ite],
                                 binning[ite], fil]))
        _s = t_setup.replace('_C_', c).replace('_I_', i)\
            .replace('_B_', b).replace('_F_', fil)
        _t = ''
        # One target line per data row: name (spaces removed), RA, Dec.
        for x in data:
            _t += t_target.replace('_N_', x[0].replace(' ', ''))\
                .replace('_RA_', x[1])\
                .replace('_DEC_', x[2])
        _fin = _h + _s + _t
        if output:
            with open(output + f'_{fil}.txt', 'w') as f:
                f.write(_fin + '\n')
        else:
            # NOTE(review): returns True on the FIRST filter when no output
            # path is given (remaining filters are skipped), and returns None
            # when output is set. Verify this asymmetry is intentional.
            return True
def singlet():
    """Placeholder for single-target generation (not yet implemented)."""
    pass
def main():
    """Placeholder entry point (not yet implemented)."""
    pass
def test():
    """Testing function for module."""
    # Smoke test: one B-filter target with no output file -> mosaic returns True.
    assert mosaic((('Test Point 1', '1', '1'),), None, 'Test', 1, 1, 1, 'B')
if __name__ == "__main__":
    """Directly Called."""
    print('Testing module')
    test()
    print('Test Passed')
# end of code
|
# https://www.hackerrank.com/contests/hourrank-5/challenges/ann-jimmy
n = int(raw_input().strip())
s1 = round(N/3.0)
s2 = s1
s3 = N-s1-s2
print int(s1*s2*s3)
|
from numpy.random import seed
seed(33)
import numpy as np
import keras
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Reshape, Dropout
from keras.models import Model, Sequential
from keras.callbacks import History
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras import regularizers, optimizers
import matplotlib.pyplot as plt
import matplotlib.ticker
import time
import os
import logging
logging.getLogger('tensorflow').disabled = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.system('cls')
t = time.time()  # start of the wall-clock timer reported at the end
#Loading the dataset
TrainData = np.load('DataTrain.npy')
TrainLabel = np.load('LabelTrain.npy')
print('Training data: ',TrainData.shape)
ValidationData = np.load('DataValidation.npy')
ValidationLabel = np.load('LabelValidation.npy')
print('Validation data: ',ValidationData.shape)
TestData = np.load('DataTest.npy')
# Bug fix: the test labels were loaded from 'DataTest.npy' (the inputs),
# which made the final evaluation compare predictions against the input
# data. Follow the Data*/Label* naming pattern of the other two splits.
TestLabel = np.load('LabelTest.npy')
print ('Test data: ',TestData.shape)
#Normalization of the array values
# Scale each split by its own maximum so inputs lie in [0, 1].
# NOTE(review): each split is divided by a *different* max — confirm that
# per-split scaling (rather than the training max) is intended.
max_value1 = float(TrainData.max())
max_value2 = float(ValidationData.max())
max_value3 = float(TestData.max())
TrainData = TrainData.astype('float32') / max_value1
ValidationData = ValidationData.astype('float32') / max_value2
TestData = TestData.astype('float32') / max_value3
#Reshaping & Data augmentation
# Add a single channel axis: (samples, 100, 192, 1).
TrainData = TrainData.reshape(-1, 100, 192, 1)
ValidationData = ValidationData.reshape(-1, 100, 192, 1)
TestData = TestData.reshape(-1, 100, 192, 1)
# Random flips plus horizontal shifts up to 97 px; 'wrap' fills shifted-in pixels.
DataAugmentation = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, width_shift_range=97,fill_mode="wrap")
DataAugmentation.fit(TrainData, augment=True)
#CNN Architecture
# NOTE(review): the variable `Model` shadows the keras `Model` class imported
# above — consider renaming it (e.g. `model`) in a follow-up change.
Model = Sequential()
#Convolutional layers
# Six conv/pool blocks doubling the filter count (16 -> 512); each 2x2 max
# pooling halves the spatial size of the 100x192x1 input.
Model.add(Conv2D(16, kernel_size=(3, 3), strides=(2, 2), activation = 'relu', padding='same', input_shape=(100,192,1)))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Conv2D(32, kernel_size=(3, 3), activation = 'relu', padding='same'))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Conv2D(64, kernel_size=(3, 3), activation = 'relu', padding='same'))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Conv2D(128, kernel_size=(3, 3), activation = 'relu', padding='same'))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Conv2D(256, kernel_size=(3, 3), activation = 'relu', padding='same'))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Conv2D(512, kernel_size=(3, 3), activation = 'relu', padding='same'))
Model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
Model.add(Flatten())
#Fully connected layers
# Dense funnel 512 -> 32, ending in one linear unit (regression output).
Model.add(Dense(512, activation='relu'))
Model.add(Dense(256, activation='relu'))
Model.add(Dense(128, activation='relu'))
Model.add(Dense(64, activation='relu'))
Model.add(Dense(32, activation='relu'))
Model.add(Dense(1, activation='linear'))
#CNN hyperparameters
adam_mod = keras.optimizers.Adam(lr=0.0001)
Model.compile(optimizer=adam_mod, loss='mean_squared_error')
# Stop after 25 epochs without validation improvement; keep the best weights.
Stop = EarlyStopping(monitor='val_loss',
                     mode='min',
                     patience=25,
                     restore_best_weights=True)
Batch = 64
csv_logger = CSVLogger('Loss Log.csv', append=True, separator=';')
# Train on augmented batches; validate on the untouched validation split.
Train = Model.fit_generator(DataAugmentation.flow(TrainData, TrainLabel, batch_size=Batch),
                            epochs=100,
                            steps_per_epoch= TrainData.shape[0]//Batch,
                            validation_data= (ValidationData, ValidationLabel),
                            shuffle = True,
                            callbacks=[Stop, csv_logger])
# Predict on the test set; persist both .npy and .csv copies of the result.
Prediction = Model.predict(TestData)
np.save("Net Pay Estimation.npy", Prediction)
A=np.load('Net Pay Estimation.npy')
np.savetxt("Net Pay Estimation.csv", A, delimiter=",")
PredictionError = Model.evaluate(TestData, TestLabel)
print('Test loss (MSE):', PredictionError)
elapsed = time.time() - t
#Plot of loss function
loss = Train.history['loss']
val_loss = Train.history['val_loss']
plt.figure()
plt.plot(loss, 'b', label='Training loss')
plt.plot(val_loss, 'r', label='Validation loss')
# Major x ticks every 5 epochs, formatted as whole numbers.
locator = matplotlib.ticker.MultipleLocator(5)
plt.gca().xaxis.set_major_locator(locator)
formatter = matplotlib.ticker.StrMethodFormatter("{x:.0f}")
plt.gca().xaxis.set_major_formatter(formatter)
plt.title('Training and validation loss of the network')
plt.ylabel('Mean Squared Error (MSE)')
plt.xlabel('Iteration')
plt.legend()
plt.savefig('Loss PLot.jpg')
#Printing elapsed time
print("Elapsed time: %.2f" % (elapsed/60),"min")
from .astar_near import ASTAR_NEAR
from .iddfs_near import IDDFS_NEAR
from .mc_sampling import MC_SAMPLING
from .enumeration import ENUMERATION
from .genetic import GENETIC
from .rnn_baseline import RNN_BASELINE |
from flask import request
from flask_restplus import Resource
from ..util.dto import BookAuthorDto
from ..service.book_author_service import get_all_bookauthors
# DTO namespace and serialization model for book-author resources.
api = BookAuthorDto.api
_bookauthor = BookAuthorDto.bookauthor
@api.route('/')
class CategoryList(Resource):
    # NOTE(review): the class name says "Category" but the resource serves
    # book-authors — presumably copied from a category resource; consider
    # renaming once external references are checked.
    # @api.doc('get list of bookauthors')
    @api.marshal_list_with(_bookauthor, envelope='data')
    def get(self):
        """List all bookauthors"""
        return get_all_bookauthors()
|
from bs4 import BeautifulSoup
import urllib.request, requests, time
# The request is only served properly when a browser-like User-Agent header is sent.
hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
# 텔레그램 봇 요청
def sendTelegramMsg(APIKey, chatID, text):
    """Send *text* to Telegram channel *chatID* via bot *APIKey*; returns the Response.

    Bug fix: the message used to be pasted into the URL unencoded, so any
    '&', '#', space, or non-ASCII text corrupted the request. Passing the
    values via `params` lets requests URL-encode them correctly.
    """
    r = requests.get(
        "https://api.telegram.org/bot" + APIKey + "/sendMessage",
        params={"chat_id": chatID, "text": text, "parse_mode": "Markdown"})
    return r
# ================= Settings you must edit before use =================
# Telegram settings
TelAPI = "123456789:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # Telegram bot key
TelChan = "@channelid" # channel address
# Gallery settings {'gallery ID': latest post number}
# Leave the 'latest post number' at 0 unless you have a special reason.
gall = {'gall1':0, 'gall2':0}
updTime = 60 # polling interval (seconds)
# ========================================================================
# Timestamp display format
tType = "%Y-%m-%d %H-%M-%S"
print ("========DCBELL 설정 값========")
print ("Telegram 채널ID: " + TelChan)
print ("업데이트 간격: " + str(updTime) + "초")
print ("==============================")
while(1):
    print("[" + time.strftime(tType) + "] 요청 시작...")
    try:
        for g in gall.items():
            gallid = g[0] # gallery ID
            prev_postnum = g[1] # number of the last post we notified about
            print("[" + time.strftime(tType) + "] " + gallid + " 조회 시작...")
            # Determine whether this is a minor or a regular gallery.
            link = 'https://gall.dcinside.com/board/lists/?id=' + gallid
            r = requests.get(link, headers = hdr).text
            print('갤러리 형식:', end=' ')
            # Minor galleries redirect via location.replace, so rewrite the URL.
            if 'location.replace' in r: link = link.replace('board/','mgallery/board/'); print('마이너')
            else: print('정식')
            req = urllib.request.Request(link, headers = hdr)
            html = urllib.request.urlopen(req).read()
            soup = BeautifulSoup(html, "html.parser")
            link = soup.find_all("tr", { "class" : "ub-content us-post"})
            for m in link:
                # Post title (pinned notices have a <b> title and are skipped).
                tmp = m.find("td", { "class" : "gall_tit ub-word"})
                if "<b>" not in str(tmp):
                    title = tmp.a.text
                    postnum = m.find("td", { "class" : "gall_num"}).text # post number
                    tmp = m.find("td", { "class" : "gall_writer ub-writer"}) # author (IP shown for anonymous users)
                    name = tmp.find("em").text
                    ip = tmp.find("span", { "class" : "ip"})
                    if ip is not None: ip = ip.text
                    else: ip = "고닉"
                    # Add any extra filtering conditions below if desired.
                    if (int(postnum) > int(prev_postnum)):
                        print ("======새 글이 있습니다!=======")
                        print ("│갤러리: " + gallid)
                        print ("│글번호: " + postnum)
                        print ("│글제목: " + title)
                        print ("│닉네임(아이피): " + name + " (" + ip + ")")
                        # Skip the very first round so restarting the script
                        # doesn't re-send notifications for old posts.
                        if prev_postnum == 0:
                            print('│(최초 요청이므로 푸시를 보내지 않습니다)')
                        else:
                            print ("│푸시 보내는 중...")
                            sendTelegramMsg(TelAPI, TelChan, "*" + gallid + " 갤러리 새 글*\n"
                                            + title + " - " + name + "(" + ip + ")\n" + "[글 링크](https://gall.dcinside.com/"
                                            + gallid + "/" + postnum + ")")
                            print ("│보내기 완료")
                        gall[gallid] = postnum
                        print ("===========작업 끝============")
                # NOTE(review): this break executes after the FIRST row
                # regardless of the <b> check above, so if the first row is a
                # pinned notice no posts are examined — verify intended.
                break
            time.sleep(1)
    # On error, ignore and retry (the server errors out fairly often).
    except Exception as ex: print("[" + time.strftime(tType) + "] 오류 발생! 무시후 다시 시도합니다.", ex)
    print("[" + time.strftime(tType) + "] 대기중... (" + str(updTime) + "초)")
    time.sleep(updTime)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script runs test.py on the output of batch_train.
The input is a list of directories for each problem/model e.g.
experiments/serial_recall/dnc and executes on every run of the model in
that directory. I.e. if you tell it to run on serial_recall/dnc, it will
process every time you have ever run serial_recall with the DNC. This
should be fixed later.
"""
import os
import sys
import yaml
from multiprocessing.pool import ThreadPool
import subprocess
import numpy as np
from glob import glob
import pandas as pd
def find_nearest(array, value):
    """Return the element of *array* closest to *value*, plus its index."""
    arr = np.asarray(array)
    deltas = np.abs(arr - value)
    best = deltas.argmin()
    return arr[best], best
def main():
    """Run test.py on every experiment directory listed in the batch file (argv[1])."""
    batch_file = sys.argv[1]
    assert os.path.isfile(batch_file)
    # Load the list of yaml files to run
    with open(batch_file, 'r') as f:
        directory_checkpoints = [l.strip() for l in f.readlines()]
    for foldername in directory_checkpoints:
        assert os.path.isdir(foldername), foldername + " is not a file"
    experiments_list = []
    # Each listed directory contains one subdirectory per training run.
    for elem in directory_checkpoints:
        list_path = os.walk(elem)
        _, subdir, _ = next(list_path)
        for sub in subdir:
            checkpoints = os.path.join(elem, sub)
            experiments_list.append(checkpoints)
    # Keep only the folders that contain validation.csv and training.csv
    experiments_list = [elem for elem in experiments_list if os.path.isfile(
        elem + '/validation.csv') and os.path.isfile(elem + '/training.csv')]
    # check if the files are empty except for the first line
    # (24 bytes is roughly just the CSV header row)
    experiments_list = [elem for elem in experiments_list if os.stat(
        elem + '/validation.csv').st_size > 24 and os.stat(elem + '/training.csv').st_size > 24]
    # Run in as many threads as there are CPUs available to the script
    with ThreadPool(processes=len(os.sched_getaffinity(0))) as pool:
        pool.map(run_experiment, experiments_list)
def run_experiment(path: str):
    """Locate the best checkpoint under *path* and run test.py on it."""
    # Load yaml file. To get model name and problem name.
    # safe_load avoids executing arbitrary Python object tags from the file;
    # yaml.load without an explicit Loader is deprecated and unsafe.
    with open(path + '/train_settings.yaml', 'r') as yaml_file:
        params = yaml.safe_load(yaml_file)
    # print path
    print(path)
    valid_csv = pd.read_csv(path + '/validation.csv', delimiter=',', header=0)
    train_csv = pd.read_csv(path + '/training.csv', delimiter=',', header=0)
    # Best training point (minimum training loss) and its episode.
    index_val_loss = pd.Series.idxmin(train_csv.loss)
    train_episodes = train_csv.episode.values.astype(
        int)  # episode numbers as ints
    best_train_ep = train_episodes[index_val_loss]  # episode of best train loss
    best_train_loss = train_csv.loss[index_val_loss]
    # Best validation point (minimum validation loss) and its episode.
    index_val_loss = pd.Series.idxmin(valid_csv.loss)
    valid_episodes = valid_csv.episode.values.astype(
        int)  # episode numbers as ints
    best_valid_ep = valid_episodes[index_val_loss]  # episode of best valid loss
    best_valid_loss = valid_csv.loss[index_val_loss]
    ### Find the best model ###
    models_list3 = glob(path + '/models/model_episode*')
    models_list2 = [os.path.basename(os.path.normpath(e))
                    for e in models_list3]
    models_list = [int(e.split('_')[-1].split('.')[0]) for e in models_list2]
    # check if models list is empty
    if models_list:
        # Pick the saved checkpoint closest to the best validation episode.
        best_num_model, idx_best = find_nearest(models_list, best_valid_ep)
        last_model, idx_last = find_nearest(models_list, valid_episodes[-1])
        # Run the test
        command_str = "cuda-gpupick -n0 python3 test.py --model {0}".format(
            models_list3[idx_best]).split()
        with open(os.devnull, 'w') as devnull:
            result = subprocess.run(command_str, stdout=devnull)
        if result.returncode != 0:
            print("Testing exited with code:", result.returncode)
    else:
        print('There is no model in checkpoint {} '.format(path))
# Script entry point.
if __name__ == '__main__':
    main()
|
import os
import time
from contextlib import contextmanager
import requests
from bs4 import BeautifulSoup as bs
@contextmanager
def goto(dir):
    """Temporarily chdir into *dir*, creating it first if it doesn't exist.

    Bug fix: the original never restored the previous working directory when
    the managed body raised an exception; the restore now runs in a
    ``finally`` block so the cwd is always put back.
    """
    cwd = os.getcwd()
    try:
        os.chdir(dir)
    except FileNotFoundError:
        os.mkdir(dir)
        os.chdir(dir)
    try:
        yield None
    finally:
        os.chdir(cwd)
# Seconds to sleep between image downloads (politeness delay).
INTERVAL = 0.5
class AlbumSpider:
    """Douban image crawler for albums, user photo pages, celebrities and movies.

    ``run`` dispatches on the URL shape; each download method saves images
    into a directory named after the page and recurses into the next page
    while a pagination link exists.
    """
    def __init__(self, url):
        # The URL whose type `run` will dispatch on.
        self.url = url
    def downloadAlbum(self, albumUrl):
        """Download a single album:
        downloadAlbum('https://www.douban.com/photos/album/123456789/')
        """
        album = requests.get(albumUrl)
        soup = bs(album.text, "html.parser")
        # Album title is the last '-'-separated part of the page heading.
        albumName = soup.find('div', class_='info').h1.string.split('-')[-1]
        nextpage = soup.find('span', class_='next')
        thumbs = soup.find_all('a', class_='photolst_photo')
        # Swapping 'thumb' for 'large' in the URL selects the full-size image.
        imgUrls = [a.img['src'].replace('thumb', 'large')
                   for a in thumbs]
        with goto(albumName):
            for img in imgUrls:
                imgName = img.split('/')[-1]
                with open(imgName, 'wb') as imgdate:
                    imgdate.write(requests.get(img).content)
                time.sleep(INTERVAL)
        # Recurse into the next page of the album, if any.
        if nextpage and nextpage.a:
            self.downloadAlbum(nextpage.a['href'])
    def downloadAllAlbums(self, albumsUrl):
        """Download all albums of a person, the albumsUrl is the url of a person's albumlist.
        downloadAllAlbums('https://www.douban.com/people/abcde/photos')
        """
        allAlbums = requests.get(albumsUrl)
        soup = bs(allAlbums.text, "html.parser")
        people = soup.find('div', class_='info').h1.string
        nextpage = soup.find('span', class_='next')
        albums = soup.find_all('a', class_='album_photo')
        albumUrls = [album['href'] for album in albums]
        # Each album is downloaded inside a directory named after the person.
        with goto(people):
            for albumUrl in albumUrls:
                self.downloadAlbum(albumUrl)
        if nextpage and nextpage.a:
            self.downloadAllAlbums(nextpage.a['href'])
    def downloadCelebrity(self, celebrityUrl):
        """Download a celebrity's photos, the celebrityUrl is the celebrity's 'allphoto'
        downloadCelebrity('https://movie.douban.com/celebrity/:celebrityID/photos/')
        """
        celebrity = requests.get(celebrityUrl).text
        soup = bs(celebrity, "html.parser")
        celebrityName = "影人" + soup.find('div', id='content').h1.string.split(' ')[0]
        nextpage = soup.find('span', class_='next')
        photodivs = soup.find_all('div', class_='cover')
        # (detail-page URL, thumbnail URL) pairs for every photo on the page.
        photos = [(div.a['href'], div.a.img['src']) for div in photodivs]
        with goto(celebrityName):
            for photo in photos:
                photoName = photo[1].split('/')[-1]
                with open(photoName, 'wb') as photodata:
                    # 'raw' rendition; the detail page is sent as the referer.
                    rawphoto = photo[1].replace('thumb', 'raw')
                    photodata.write(requests.get(rawphoto, headers={'referer':photo[0]}).content)
                time.sleep(INTERVAL)
        if nextpage and nextpage.a:
            self.downloadCelebrity(nextpage.a['href'])
    def downloadMovie(self, movieUrl):
        """Download a movie's photos.
        downloadMovie('https://movie.douban.com/subject/1234567/photos')
        """
        moviePhotos = requests.get(movieUrl)
        soup = bs(moviePhotos.text, "html.parser")
        # NOTE(review): relies on the page <title> having 5+ space-separated
        # parts — fragile if Douban changes the title format.
        title = soup.title.string.split(' ')[4]
        nextpage = soup.find('span', class_='next')
        photodivs = soup.find_all('div', class_='cover')
        photos = [(div.a['href'], div.a.img['src']) for div in photodivs]
        with goto(title):
            for photo in photos:
                photoName = photo[1].split('/')[-1]
                with open(photoName, 'wb') as photodata:
                    rawphoto = photo[1].replace('thumb', 'raw')
                    photodata.write(requests.get(rawphoto, headers={'referer':photo[0]}).content)
                # NOTE(review): unlike the other download methods, there is no
                # time.sleep(INTERVAL) here — confirm whether that is intended.
        if nextpage and nextpage.a:
            self.downloadMovie(nextpage.a['href'])
    def run(self):
        # Dispatch on the URL shape: person page, celebrity page, movie page,
        # or (by default) a single album.
        if 'people' in self.url:
            self.downloadAllAlbums(self.url)
        elif 'celebrity' in self.url:
            self.downloadCelebrity(self.url)
        elif 'subject' in self.url:
            self.downloadMovie(self.url)
        else:
            self.downloadAlbum(self.url)
|
#!/usr/bin/env python
# encoding: utf-8
#
# htmlify.py
#
# Copyright 2009 Denis <denis@denis-desktop>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Htmlify squeezes all resources an HTML page needs into one big
HTML file.
http://github.com/Akral/PyHtmlify
'''
import os
import re
import base64
import mimetypes
from pymins.HtmlMinifier import HtmlMinifier
from pymins.CssMinifier import CssMinifier
from pymins.JavascriptMinifier import JavascriptMinifier
class Error(Exception):
    """Root of the htmlify exception hierarchy."""

class OverwriteError(Error):
    """Raised when the destination exists and force mode is off."""

class WritingError(Error):
    """Raised when the destination cannot be opened for writing."""

class MimeError(Error):
    """Raised when a resource file's MIME type cannot be guessed."""

class EncodingError(Error):
    """Raised when a resource file appears to be content-encoded."""
class Htmlifier:
    """Inline every external resource of an HTML page into one file.

    Stylesheets and scripts are minified and embedded; remaining local
    href/src targets become base64 data: URIs.
    NOTE(review): this module is Python 2 code (see main() below).
    """
    force = False    # overwrite an existing output file when True
    addGpl3 = True   # prepend the GPLv3 notice to the output when True
    __gpl3 = '''<!--
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
-->
'''
    def htmlify(self, input, output):
        """Minify `input` HTML, embed its resources, and write `output`.

        Raises OverwriteError when `output` exists and force is off;
        WritingError when `output` cannot be opened for writing.
        """
        if os.path.exists(output) and not self.force:
            raise OverwriteError('Destination exists and force mode is off.')
        inf = open(input)
        html = inf.read()
        inf.close()
        html = HtmlMinifier(html).minify().get()
        # Relative resource paths are resolved against the input's directory.
        parser = self.__Parser(os.path.dirname(input))
        html = parser.feed(html)
        if self.addGpl3 == True:
            html = self.__gpl3 + html
        try:
            outf = open(output, 'w')
        except IOError:
            raise WritingError('Destination cannot be opened for writing. Check your permissions.')
        outf.write(html)
        outf.close()
    class __Parser(object):
        """Rewrites an HTML string so it references no external files."""
        def __init__(self, path):
            # Base directory for resolving relative resource paths.
            self.path = path
            # Absolute URI schemes that must be left untouched.
            self.__uriRe = re.compile('^(((http|ftp)s?|file)://|(mailto|gopher):)', re.I)
        def feed(self, html):
            """Return `html` with stylesheets, scripts and other resources inlined."""
            # Inline <link rel="stylesheet"> tags as minified <style> blocks.
            for scr in re.finditer(r'\<link [^>]*rel=[\'"]?stylesheet[^>]*>', html):
                orig = scr.group(0)
                path = re.search(r'href=[\'"]*([^ ]+)[\'"]', orig).group(1)
                css = CssMinifier(self.__file(path)).minify().get()
                html = html.replace(orig, '<style type="text/css">%s</style>' % css)
            # Inline <script src=...> tags as minified inline scripts.
            for scr in re.finditer(r'\<script [^>]*src=[\'"]?([^ >\'"]+)[^>]*>', html):
                orig = scr.group(0)
                path = scr.group(1)
                js = JavascriptMinifier(self.__file(path)).minify().get()
                html = html.replace(orig, '<script type="text/javascript">%s</script>' % js)
            # Convert any remaining local href/src targets into data: URIs.
            for r in re.finditer(r'(\<[^>]+(?:href|src)=[\'"])([^ >\'"]+)', html):
                path = r.group(2)
                html = html.replace(r.group(0), '%s%s' % (r.group(1), self.__uriToData(path)))
            # @TODO:
            # Handle other resources in CSS data
            # Correctify regexes to account for pairing quotes
            return html
        def __uriToData(self, uri):
            """Return a base64 data: URI for a local file, or `uri` unchanged."""
            if self.__uriRe.match(uri):
                return uri  # absolute URI: leave as-is
            path = os.path.join(self.path, uri)
            if not os.path.exists(path) or not os.path.isfile(path):
                return uri
            f = open(path)
            data = f.read()
            f.close()
            # NOTE(review): Python 2 str semantics; on Python 3 this would
            # need bytes in and produce bytes out.
            data = base64.b64encode(data)
            mime, encoding = mimetypes.guess_type(path, False)
            if not mime:
                raise MimeError('Cannot guess MIME type of file "%s". Give it a better name.' % path)
            if encoding:
                raise EncodingError('File "%s" appears to be encoded. I can\'t work with encoded files. Yet.' % path)
            return 'data:%s;base64,%s' % (mime, data)
        def __file(self, path):
            """Read and return the contents of `path` relative to the base dir."""
            path = os.path.join(self.path, path)
            f = open(path)
            data = f.read()
            f.close()
            return data
def main():
from optparse import OptionParser
parser = OptionParser(version='%prog 1.0',
usage='Usage: %prog [options] input output',
description='''Htmlify squeezes all resources an HTML page needs into one big HTML file.
http://github.com/Akral/PyHtmlify''')
parser.add_option("-f", "--force", dest="force", default=False,
action="store_true", help="Overwrite files.")
parser.add_option("-g", "--addgpl3", dest="addgpl3", default=False,
action="store_true", help="Add GPLv3 license on top of the output.")
(options, args) = parser.parse_args()
if len(args) <> 2:
parser.error('You must specify two arguments.')
h = Htmlifier()
h.force = options.force
h.addGpl3 = options.addgpl3
try:
h.htmlify(input=args[0], output=args[1])
except OverwriteError:
parser.error('%s exists. Use -f to overwrite.' % args[1])
return 1
except Error:
import sys
print 'Fatal error. %s' % sys.exc_info()[1]
return 5
return 0
if __name__ == '__main__':
main()
|
from django.urls import path
from . import views
# Namespace for reversing, e.g. {% url 'author:books' author_id %}.
app_name = 'author'
urlpatterns = [
    path('', views.index, name='index'),
    # e.g. /5/books -> books by the author with id 5
    path('<int:author_id>/books', views.books, name='books'),
]
import re
# Start times look like "11:43 PM"; durations like "24:20" (hours may be > 24).
regStart = re.compile(r'(\d+):(\d+) (PM|AM)')
regDuration = re.compile(r'(\d+):(\d+)')
# Week-day name <-> index lookups (Sunday == 0).
dayToNumber = {
    "sunday": 0,
    "monday": 1,
    "tuesday": 2,
    "wednesday": 3,
    "thursday": 4,
    "friday": 5,
    "saturday": 6
}
numberToDay = [
    "Sunday",
    "Monday",
    "Tuesday",
    "Wednesday",
    "Thursday",
    "Friday",
    "Saturday"
]
def add_time(start, duration, startingDay=None):
    """Add a duration to a 12-hour clock time.

    start: "H:MM AM|PM"; duration: "H:MM" (hours may exceed 24);
    startingDay: optional week-day name, any capitalisation.
    Returns strings such as "6:10 PM", "2:02 PM, Monday",
    "12:03 AM, Thursday (2 days later)".

    Bug fix: the original mapped "12:xx PM" to hour 24 (reported as next-day
    AM) and left "12:xx AM" at hour 12 (reported as PM). 12 AM is hour 0 and
    12 PM is hour 12 on the 24-hour clock.
    """
    day_names = ["Sunday", "Monday", "Tuesday", "Wednesday",
                 "Thursday", "Friday", "Saturday"]
    m = re.fullmatch(r'(\d+):(\d+) (PM|AM)', start)
    hour = int(m.group(1)) % 12          # 12 AM -> 0; 12 PM -> 0 (+12 below)
    minute = int(m.group(2))
    if m.group(3) == "PM":
        hour += 12
    d = re.fullmatch(r'(\d+):(\d+)', duration)
    minute += int(d.group(2))
    hour += int(d.group(1)) + minute // 60
    minute %= 60
    days, hour = divmod(hour, 24)        # whole days rolled over
    meridiem = "PM" if hour >= 12 else "AM"
    hour %= 12
    if hour == 0:
        hour = 12                        # a 12-hour clock shows 12, not 0
    day = ""
    if startingDay:
        idx = (day_names.index(startingDay.capitalize()) + days) % 7
        day = f', {day_names[idx]}'
    if days == 1:
        nod = " (next day)"
    elif days > 1:
        nod = f' ({days} days later)'
    else:
        nod = ""
    return f'{hour}:{minute:02d} {meridiem}{day}{nod}'
from django.conf.urls.defaults import *
from etc.settings import PROJECT_DIR, DEBUG
# NOTE(review): patterns() with string view references is the legacy
# pre-1.8 Django URL API -- this file targets an old Django release.
urlpatterns = patterns('',
    url(r'^admin/jsi18n/$', 'django.views.i18n.javascript_catalog'),
)
# Serve static files directly only in development.
if DEBUG == True:
    urlpatterns += patterns('',
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': PROJECT_DIR + '/media'}),
    )
# URL redirections.
urlpatterns += patterns('django.views.generic.simple',
    url(r'^request/$', 'redirect_to', {'url':'/faculty_request_form.html'}),
    url(r'^lending/$', 'redirect_to', {'url':'/student_request_form.html'}),
    url(r'^equipment/$', 'redirect_to', {'url':'/equipment_description.html'}),
    url(r'^policies/$', 'redirect_to', {'url':'/lending_policies.html'}),
    url(r'^service/$', 'redirect_to', {'url':'/service_request.html'}),
    url(r'^student_request_form.html$', 'redirect_to', {'url':'/static/pdf/student_request_form.pdf'}),
)
# Links to Etcetera.
urlpatterns += patterns('django.views.generic.simple',
    url(r'^service_request.html$', 'redirect_to', {'url': '/etcetera/service/form'}),
    url(r'^faculty_request_form.html$', 'redirect_to', {'url': '/etcetera/checkout/form/'}),
)
urlpatterns += patterns('',
    url(r'^$', 'django.views.generic.simple.direct_to_template', {'template': 'index.html'}),
    url(r'^operating_instructions.html$', 'django.views.generic.simple.direct_to_template', {'template': 'operating_instructions.html'}),
    # The big one: catch-all handled by the flat-page style view.
    url(r'^(?P<path>.*)$', 'etc.views.page'),
)
class DeviceSearchException(Exception):
    """Raised when a device lookup fails."""

class RouterException(Exception):
    """Raised for router-level failures."""

class DeviceException(Exception):
    """Raised for general device failures."""
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 10 14:29:56 2020
output the .csv file of RGB value
@author: kic.guest
"""
import glob
import os
import click
from silhouette_coeff import kMeans
import pandas as pd
import numpy as np
import time
from tqdm import tqdm
# Click option
@click.command()
@click.option('-p', '--path', required=True)
@click.option('-mi','--min_k', default=2, help='The min_k, minimum is 2, default is 2')
@click.option('-ma','--max_k', default=8, help='The max_k, default is 8')
def cli(path, min_k, max_k):
    '''Read the image by path and return RGB value'''
    # Thin CLI wrapper: all the work happens in savefunction().
    savefunction(path,min_k,max_k)
def savefunction(path,min_k,max_k):
    """Extract dominant RGB values (via kMeans) for every .jpg under `path`.

    Progress is checkpointed: on abort/error the partial results go to
    temp.csv and the last processed file name to temp.txt, so a rerun can
    resume where it left off (see check_marker).
    """
    temp_name = 'temp.csv'
    df_name = 'dataframe.csv'
    file_list = glob.glob(os.path.join(path, '*.jpg'))
    # Drop files already processed in a previous (aborted) run.
    file_list_checked,exist = check_marker(file_list, path)
    # NOTE(review): this binds the DataFrame *class*, not an instance; if
    # the try block fails before the real assignment, the finally block
    # below operates on the class object.
    df = pd.DataFrame
    # `flag` tracks the last processed file name; splitting on '\\' assumes
    # Windows-style paths -- TODO confirm on POSIX.
    flag = file_list_checked[0].split('\\')[-1]
    try:
        if exist: # temp.txt exist
            # Resume: reload the partial results from the aborted run.
            df = pd.read_csv(path+temp_name)
            df = df.drop(['Unnamed: 0'],axis=1)
        else:
            df = pd.DataFrame({'Image':[], 'R':[],'G':[],'B':[]})
        print('\nStart extracting RGB value...')
        t0 = time.time()
        speed_count = 0
        ## color extraction
        for filename in tqdm(file_list_checked):
            result = kMeans(filename, min_k, max_k)
            name = filename.split('\\')
            # One output row per extracted colour, each tagged with the
            # source file name.
            names = np.asarray([[name[-1]]]*result.shape[0])
            add = np.append(names, result, axis=1)
            df = df.append(pd.DataFrame(add, columns=['Image','R','G','B']), ignore_index=True)
            flag = name[-1]
            speed_count += 1 # for testing speed
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # programming errors; it writes the resume marker regardless.
        with open((path+'temp.txt'),'w+') as f:
            f.write(flag)
        print('\nExtraction aborted, .csv file and temp.txt exported')
    finally:
        if df.empty != True: #save temp.csv file if aborted or error occur
            save_path = path+temp_name
            df.to_csv(save_path, header=True)
        if flag == file_list[-1].split('\\')[-1]: #remove temp file when extraction completed
            save_path = path+df_name
            df.to_csv(save_path, header=True)
            if os.path.isfile((path+'temp.txt')):
                os.remove((path+'temp.txt'))
            if os.path.isfile((path+temp_name)):
                os.remove((path+temp_name))
            print('\nExtraction completed, .csv file exported')
            print(df)
            if speed_count != 0:
                print(f'Speed: {(time.time()-t0)/speed_count}s for each image')
## check the temp.txt file and return the list of files(dropped)
def check_marker(file_list, path):
    """Return (remaining_files, resumed) based on a temp.txt checkpoint.

    temp.txt (written by savefunction on abort) holds the name of the last
    file already processed. If that file is found in `file_list`, the list
    is trimmed to the files still to do and `resumed` is True; otherwise
    the full list is returned with False.

    Fix: the original wrapped the whole body in a bare `except:`, which
    silently turned *any* error (including bugs) into "start from zero".
    Only the checkpoint read is guarded now, and only for OS errors.
    """
    exist = False
    try:
        with open((path+'temp.txt'),'r') as f:
            marker = f.read()
    except OSError:
        # No readable checkpoint: start from scratch.
        print('\ntemp.txt not found, extraction start from zero')
        return file_list, False
    # The marker stores a bare file name; rebuild the Windows-style path
    # used in file_list (assumes '\\' separators -- TODO confirm on POSIX).
    search = (path+marker).replace('/','\\')
    if search in file_list:
        print('\nRecord matched')
        exist = True
        idx = file_list.index(search)+1 # Flag is already finished KMeans, so +1
        return file_list[idx:], exist
    print('\ncannot match with previous record')
    return file_list, exist
if __name__ == '__main__':
cli() |
# Generated by Django 2.2.9 on 2020-05-11 13:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an English title column (`title_en`) to Collections and
    LandingPages.

    Auto-generated by Django 2.2.9. The field is nullable on both models
    but form-blankable only on Collections.
    """
    dependencies = [
        ('application', '0026_auto_20200506_1005'),
    ]
    operations = [
        migrations.AddField(
            model_name='collections',
            name='title_en',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Otsikko EN'),
        ),
        migrations.AddField(
            model_name='landingpages',
            name='title_en',
            field=models.CharField(max_length=255, null=True, verbose_name='Otsikko EN'),
        ),
    ]
|
#Importing the necessary files
from prettytable import PrettyTable
#Creating a class
class Wmn():
    """Collects and reports goal tallies for four footballers.

    NOTE(review): all state flows through module-level globals (a/b/c, the
    numbered copies a0..a3/b0..b3/c0..c3, and the loop variable `x` holding
    the current instance), so these methods only behave correctly when
    driven by the module-level loop below, in order, for num = 1..4.
    """
    #Initialising the class
    def __init__(self,num):
        # 1-based index of the footballer currently being entered.
        self.num = num
    #Collecting the info from the user
    def info1(self):
        global tpl, a,b,c
        b = input('Name footballer {} --> '.format(self.num))
        a = int(input('How many Goals did they score --> '))
        c = input('What country did they play for --> ')
    #Sorting the collected info into different categories
    def sort(self):
        # Copies the freshly-entered a/b/c globals into this player's slot;
        # after the 4th player, totals the goals and prints the report.
        global a0,a1,a2,a3
        global b0,b1,b2,b3
        global c0,c1,c2,c3
        global l,sum
        if self.num == 1:
            a0 = a
            b0 = b
            c0 = c
        elif self.num == 2:
            a1 = a
            b1 = b
            c1 = c
        elif self.num == 3:
            a2 = a
            b2 = b
            c2 = c
        elif self.num == 4:
            a3 = a
            b3 = b
            c3 = c
        if self.num == 4:
            # NOTE(review): shadows the builtin sum().
            sum = a0 + a1 + a2 + a3
            x.sort2()
            x.final()
            x.sort3()
    #Sorting the collected data on the basis of the highest score
    def sort2(self):
        global hghst_scr,l
        l = (a0, a1, a2, a3)
        if a0 == max(l):
            hghst_scr = b0
        elif a1 == max(l):
            hghst_scr = b1
        elif a2 == max(l):
            hghst_scr = b2
        elif a3 == max(l):
            hghst_scr = b3
    #Arranging the collected data in a tabular form
    def sort3(self):
        print('The details of the players are :')
        mytable = PrettyTable(['Player Name', 'No. of Goals Scored', 'Country'])
        mytable.add_row([b0,a0,c0])
        mytable.add_row([b1,a1,c1])
        mytable.add_row([b2,a2,c2])
        mytable.add_row([b3,a3,c3])
        print(mytable)
    #Displaying the final output
    def final(self):
        print('The highest scorer is {}'.format(hghst_scr))
        print('These players scored {} goals between them'.format(sum))
# Drive the data entry: one Wmn instance per footballer. `x` is read as a
# global inside Wmn.sort(), so it must always be the most recent instance.
for i in range(1, 5):
    x = Wmn(i)
    x.info1()
    x.sort()
|
import torch
import dgl
from ..utils import cuda
from collections import Counter
def random_walk_sampler(G, nodeset, restart_prob, max_nodes):
    '''
    G: DGLGraph
    nodeset: 1D CPU Tensor of node IDs
    restart_prob: float
    max_nodes: int
    return: list[list[Tensor]]
    '''
    # Thin wrapper over DGL's random-walk-with-restart sampler: one entry
    # (a list of node-ID tensors, one per walk segment) per seed node.
    traces = dgl.contrib.sampling.bipartite_single_sided_random_walk_with_restart(
        G, nodeset, restart_prob, max_nodes)
    return traces
# Note: this function is not friendly to giant graphs since we use a matrix
# with size (num_nodes_in_nodeset, num_nodes_in_graph).
def random_walk_distribution(G, nodeset, restart_prob, max_nodes):
    """Return per-seed visit counts over all nodes of G.

    Result is a dense float tensor of shape
    (len(nodeset), G.number_of_nodes()) whose [i, j] entry counts how often
    node j appeared in walks started from nodeset[i] (hence the giant-graph
    warning above).
    """
    n_nodes = nodeset.shape[0]
    n_available_nodes = G.number_of_nodes()
    traces = random_walk_sampler(G, nodeset, restart_prob, max_nodes)
    visited_counts = torch.zeros(n_nodes, n_available_nodes)
    for i in range(n_nodes):
        visited_nodes = torch.cat(traces[i])
        # Accumulate 1.0 per visit at each visited node ID.
        visited_counts[i].scatter_add_(0, visited_nodes, torch.ones_like(visited_nodes, dtype=torch.float32))
    return visited_counts
def random_walk_distribution_topt(G, nodeset, restart_prob, max_nodes, top_T):
    """Return the top-T most-visited neighbors of each node in `nodeset`,
    together with their normalized importance weights.
    """
    visit_counts = random_walk_distribution(G, nodeset, restart_prob, max_nodes)
    topk_weights, topk_nodes = visit_counts.topk(top_T, 1)
    # Normalize each row so the neighbor weights sum to one.
    topk_weights = topk_weights / topk_weights.sum(1, keepdim=True)
    return topk_weights, topk_nodes
def random_walk_nodeflow(G, nodeset, n_layers, restart_prob, max_nodes, top_T):
    '''
    returns a list of triplets (
        "active" node IDs whose embeddings are computed at the i-th layer (num_nodes,)
        weight of each neighboring node of each "active" node on the i-th layer (num_nodes, top_T)
        neighboring node IDs for each "active" node on the i-th layer (num_nodes, top_T)
    )
    '''
    dev = nodeset.device
    # Sampling runs on CPU; results are moved back to the original device.
    nodeset = nodeset.cpu()
    nodeflow = []
    cur_nodeset = nodeset
    # Build layers from the output layer backwards: each earlier layer must
    # cover all sampled neighbors plus the current nodes themselves.
    for i in reversed(range(n_layers)):
        nb_weights, nb_nodes = random_walk_distribution_topt(G, cur_nodeset, restart_prob, max_nodes, top_T)
        nodeflow.insert(0, (cur_nodeset.to(dev), nb_weights.to(dev), nb_nodes.to(dev)))
        cur_nodeset = torch.cat([nb_nodes.view(-1), cur_nodeset]).unique()
    return nodeflow
|
import json
import socket
from queue import Queue
from threading import Thread
import requests
from exceptions import ErrorResponse
class Backend:
    """Singleton facade for talking to the game server.

    HTTP requests go to `host`:8000; message traffic then flows over a
    dedicated socket port handed out by /join (see Client below).
    NOTE(review): endpoint semantics are inferred from usage here only.
    """
    host = None        # base URL, e.g. "http://1.2.3.4" -- set by the caller
    __instance = None  # singleton storage
    read = None        # Queue of messages received for this player
    write = None       # Queue of messages to send to the server
    client = None      # background Client thread
    @staticmethod
    def get_instance():
        # Lazily create the singleton on first access.
        if Backend.__instance == None:
            Backend.__instance = Backend()
        return Backend.__instance
    def start(self):
        """Ask the server to start the game; returns this player's cards."""
        req = requests.get(self.host + ":8000/join/start").content
        response = json.loads(req)
        if "error" in response:
            raise ErrorResponse(response["error"])
        else:
            return response["player"]["cards"]
    def end(self):
        """Stop the background client thread and tell the server we left."""
        if self.client is not None:
            # NOTE(review): kill=False stops Client.run() (its loop runs
            # while kill is True) -- the flag name is inverted.
            self.client.kill = False
        if self.host is not None:
            req = requests.get(self.host + ":8000/join/end", timeout=1)
    def join(self):
        """Register with the server; stores and returns the assigned id."""
        req = requests.get(self.host + ":8000/join", timeout=1).content
        response = json.loads(req)
        if "error" in response:
            raise ErrorResponse(response["error"])
        else:
            self.id = response["id"]
            self.port = response["port"]
            return self.id
    def communication(self):
        """Open the message socket and start the reader/writer thread."""
        self.read = Queue()
        self.write = Queue()
        self.client = Client(self.read, self.write, self.host, self.id, self.port)
        self.client.start()
class Message:
    """Value object bundling a sender id, its address, and the payload."""
    def __init__(self, id, ip, msg):
        self.id, self.ip, self.msg = id, ip, msg
class Client(Thread):
    """Background socket client relaying messages between the game server
    and the local read/write queues.

    Wire protocol (as used in run()): messages end with '/', fields are
    separated by '+', and the addressee's id is the second field.
    """
    host = "localhost"
    def __init__(self, read, write, host, id, port):
        Thread.__init__(self)
        # Loop keeps running while True; Backend.end() sets this False.
        self.kill = True
        # BUG FIX: host.strip("http://") strips any of the characters
        # h/t/p/:/ / from BOTH ends of the string (it could also eat a
        # trailing 'p' or 't'), so remove the scheme prefix explicitly.
        if host.startswith("http://"):
            host = host[len("http://"):]
        self.host = host
        self.read = read
        self.write = write
        self.id = id
        self.port = port
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.host, port))
    def run(self):
        # Alternate between receiving (until a message for us arrives) and
        # sending whatever the application queued.
        receive = True
        try:
            while self.kill:
                if receive:
                    received = self.s.recv(1024).decode()
                    if "/" in received:
                        # receive=re.sub('?.*','',receive)
                        received = received.strip("/")
                        msg = received.split("+")
                        # Only forward messages addressed to this client id.
                        if msg[1] == str(self.id):
                            self.read.put(msg[0])
                            receive = False
                if not self.write.empty():
                    res = self.write.get() + "/"
                    self.s.send(res.encode())
                    receive = True
                    if "end" in res:
                        break
            # Raises SystemExit in this thread once the loop is told to stop.
            exit()
        except Exception as e:
            print(e)
            return
        finally:
            self.s.close()
|
from src.trending_strategies.base_trending_strategy import BaseTrendingStrategy
from src.trending_strategies.EJ57D_trending_tracks_strategy import z
from src.trending_strategies.trending_type_and_version import (
TrendingType,
TrendingVersion,
)
class TrendingPlaylistsStrategyEJ57D(BaseTrendingStrategy):
    """EJ57D version of the playlist trending strategy.

    Per-track scoring delegates to the shared EJ57D scoring function `z`.
    """
    def __init__(self):
        super().__init__(TrendingType.PLAYLISTS, TrendingVersion.EJ57D)
    def get_track_score(self, time_range, track):
        # Same per-track score as the EJ57D tracks strategy.
        return z(time_range, track)
    def get_score_params(self):
        # Tuning constants consumed by the scoring function; semantics of
        # zq/xf/pt/mt are defined by the EJ57D scorer -- TODO confirm there.
        return {"zq": 1000, "xf": True, "pt": 0, "mt": 3}
|
from amuse.support.core import late
from amuse.support import exceptions
from amuse import config
from amuse.rfi.tools.create_code import GenerateASourcecodeString
from amuse.rfi.tools.create_code import GenerateASourcecodeStringFromASpecificationClass
from amuse.rfi.tools.create_code import DTypeSpec
from amuse.rfi.tools.create_code import dtypes
from amuse.rfi.tools.create_code import DTypeToSpecDictionary
from amuse.rfi.tools import create_definition
from amuse.rfi.core import LegacyFunctionSpecification
# Maps AMUSE numpy dtype names to the Fortran buffer names, the header-count
# constant, and the Fortran/legacy type names used by the generated worker
# code. Strings are transported as integer indices into a character buffer,
# hence the integer spec for 'string'.
dtype_to_spec = DTypeToSpecDictionary({
    'int32' : DTypeSpec('integers_in','integers_out','HEADER_INTEGER_COUNT', 'integer', 'integer'),
    'int64' : DTypeSpec('longs_in', 'longs_out', 'HEADER_LONG_COUNT', 'integer*8', 'long'),
    'float32' : DTypeSpec('floats_in', 'floats_out', 'HEADER_FLOAT_COUNT', 'real*4', 'float'),
    'float64' : DTypeSpec('doubles_in', 'doubles_out', 'HEADER_DOUBLE_COUNT', 'real*8', 'double'),
    'bool' : DTypeSpec('booleans_in', 'booleans_out', 'HEADER_BOOLEAN_COUNT', 'logical', 'boolean'),
    'string' : DTypeSpec('strings_in', 'strings_out', 'HEADER_STRING_COUNT', 'integer*4', 'integer'),
})
CONSTANTS_STRING = """
integer HEADER_FLAGS, HEADER_CALL_ID, HEADER_FUNCTION_ID, HEADER_CALL_COUNT, &
HEADER_INTEGER_COUNT, HEADER_LONG_COUNT, HEADER_FLOAT_COUNT, &
HEADER_DOUBLE_COUNT, HEADER_BOOLEAN_COUNT, HEADER_STRING_COUNT, &
HEADER_SIZE, MAX_COMMUNICATORS
parameter (HEADER_FLAGS=1, HEADER_CALL_ID=2, HEADER_FUNCTION_ID=3, &
HEADER_CALL_COUNT=4, HEADER_INTEGER_COUNT=5, HEADER_LONG_COUNT=6, &
HEADER_FLOAT_COUNT=7, HEADER_DOUBLE_COUNT=8, &
HEADER_BOOLEAN_COUNT=9, HEADER_STRING_COUNT=10, &
HEADER_SIZE=11, MAX_COMMUNICATORS = 2048)
"""
ARRAY_DEFINES_STRING = """
integer*4, target :: header_in(HEADER_SIZE)
integer*4, target :: header_out(HEADER_SIZE)
integer*4, allocatable, target :: integers_in(:)
integer*4, allocatable, target :: integers_out(:)
integer*8, allocatable, target :: longs_in(:)
integer*8, allocatable, target :: longs_out(:)
real*4, allocatable, target :: floats_in(:)
real*4, allocatable, target :: floats_out(:)
real*8, allocatable, target :: doubles_in(:)
real*8, allocatable, target :: doubles_out(:)
logical*1, allocatable, target :: c_booleans_in(:)
logical*1, allocatable, target :: c_booleans_out(:)
logical, allocatable, target :: booleans_in(:)
logical, allocatable, target :: booleans_out(:)
integer*4, allocatable, target :: string_sizes_in(:)
integer*4, allocatable, target :: string_sizes_out(:)
character (len=256), allocatable, target :: strings_in(:)
character (len=256), allocatable, target :: strings_out(:)
character (len=100000) :: characters_in
character (len=100000) :: characters_out
"""
ISO_ARRAY_DEFINES_STRING = """
integer (c_int32_t), target :: header_in(HEADER_SIZE)
integer (c_int32_t), target :: header_out(HEADER_SIZE)
integer (c_int32_t), allocatable, target :: integers_in(:)
integer (c_int32_t), allocatable, target :: integers_out(:)
integer (c_int64_t), allocatable, target :: longs_in(:)
integer (c_int64_t), allocatable, target :: longs_out(:)
real (c_float), allocatable, target :: floats_in(:)
real (c_float), allocatable, target :: floats_out(:)
real (c_double), allocatable, target :: doubles_in(:)
real (c_double), allocatable, target :: doubles_out(:)
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
logical, allocatable, target :: booleans_in(:)
logical, allocatable, target :: booleans_out(:)
integer (c_int32_t), allocatable, target :: string_sizes_in(:)
integer (c_int32_t), allocatable, target :: string_sizes_out(:)
character (c_char), allocatable, target :: strings_in(:) * 256
character (c_char), allocatable, target :: strings_out(:) * 256
character (len=1000000) :: characters_in
character (len=1000000) :: characters_out
character (kind=c_char), target :: c_characters_in(1000000)
character (kind=c_char), target :: c_characters_out(1000000)
"""
MODULE_GLOBALS_STRING = """
integer, save :: polling_interval = 0
integer, save :: last_communicator_id = 0
integer, save :: communicators(MAX_COMMUNICATORS)
integer, save :: id_to_activate = -1
integer, save :: active_communicator_id = -1
"""
NOMPI_MODULE_GLOBALS_STRING = """
integer, save :: polling_interval = 0
"""
MPI_INTERNAL_FUNCTIONS_STRING = """
FUNCTION internal__open_port(outval)
USE mpi
IMPLICIT NONE
character(len=MPI_MAX_PORT_NAME+1), intent(out) :: outval
INTEGER :: internal__open_port
INTEGER :: ierror
call MPI_Open_port(MPI_INFO_NULL, outval, ierror);
internal__open_port = 0
END FUNCTION
FUNCTION internal__accept_on_port(port_identifier, comm_identifier)
USE mpi
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__accept_on_port
INTEGER :: ierror, rank
INTEGER :: mcommunicator, communicator
last_communicator_id = last_communicator_id + 1
IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN
last_communicator_id = last_communicator_id - 1
comm_identifier = -1
internal__accept_on_port = -1
return;
END IF
call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror);
IF (rank .EQ. 0) THEN
call MPI_Comm_accept(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror)
call MPI_Intercomm_merge(communicator, .FALSE., mcommunicator, ierror)
call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 1, 65, communicators(last_communicator_id), ierror)
call MPI_Comm_free(mcommunicator, ierror)
call MPI_Comm_free(communicator, ierror)
ELSE
call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror)
END IF
comm_identifier = last_communicator_id;
internal__accept_on_port = 0
END FUNCTION
FUNCTION internal__connect_to_port(port_identifier, comm_identifier)
USE MPI
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__connect_to_port
INTEGER :: ierror, rank
INTEGER :: mcommunicator, communicator
last_communicator_id = last_communicator_id + 1
IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN
last_communicator_id = last_communicator_id - 1
comm_identifier = -1
internal__connect_to_port = -1
return;
END IF
call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror);
IF (rank .EQ. 0) THEN
call MPI_Comm_connect(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror)
call MPI_Intercomm_merge(communicator, .TRUE., mcommunicator, ierror)
call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 0, 65, communicators(last_communicator_id), ierror)
call MPI_Comm_free(mcommunicator, ierror)
call MPI_Comm_free(communicator, ierror)
ELSE
call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror)
END IF
comm_identifier = last_communicator_id;
internal__connect_to_port = 0
END FUNCTION
FUNCTION internal__activate_communicator(comm_identifier)
USE mpi
IMPLICIT NONE
INTEGER, intent(in) :: comm_identifier
INTEGER :: internal__activate_communicator
if ((comm_identifier .LT. 0) .OR. (comm_identifier .GT. last_communicator_id)) then
internal__activate_communicator = -1
return
end if
internal__activate_communicator = 0
id_to_activate = comm_identifier
END FUNCTION
FUNCTION internal__become_code(number_of_workers, modulename, classname)
IMPLICIT NONE
character(len=*), intent(in) :: modulename, classname
integer, intent(in) :: number_of_workers
INTEGER :: internal__become_code
internal__become_code = 0
END FUNCTION
"""
NOMPI_INTERNAL_FUNCTIONS_STRING = """
FUNCTION internal__open_port(outval)
IMPLICIT NONE
character(len=*), intent(out) :: outval
INTEGER :: internal__open_port
outval = ""
internal__open_port = 0
END FUNCTION
FUNCTION internal__accept_on_port(port_identifier, comm_identifier)
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__accept_on_port
comm_identifier = -1;
internal__accept_on_port = 0
END FUNCTION
FUNCTION internal__connect_to_port(port_identifier, comm_identifier)
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__connect_to_port
comm_identifier = -1
internal__connect_to_port = 0
END FUNCTION
FUNCTION internal__activate_communicator(comm_identifier)
IMPLICIT NONE
INTEGER, intent(in) :: comm_identifier
INTEGER :: internal__activate_communicator
internal__activate_communicator = 0
END FUNCTION
FUNCTION internal__become_code(number_of_workers, modulename, classname)
IMPLICIT NONE
character(len=*), intent(in) :: modulename, classname
integer, intent(in) :: number_of_workers
INTEGER :: internal__become_code
internal__become_code = 0
END FUNCTION
"""
INTERNAL_FUNCTIONS_STRING = MPI_INTERNAL_FUNCTIONS_STRING
POLLING_FUNCTIONS_STRING = """
FUNCTION internal__get_message_polling_interval(outval)
INTEGER,intent(out) :: outval
INTEGER :: internal__get_message_polling_interval
outval = polling_interval
internal__get_message_polling_interval = 0
END FUNCTION
FUNCTION internal__set_message_polling_interval(inval)
INTEGER,intent(in) :: inval
INTEGER :: internal__set_message_polling_interval
polling_interval = inval
internal__set_message_polling_interval = 0
END FUNCTION
"""
RECV_HEADER_SLEEP_STRING = """
SUBROUTINE mpi_recv_header(parent, ioerror)
use iso_c_binding
use mpi
implicit none
integer,intent(in) :: parent
integer,intent(inout) :: ioerror
integer :: request_status(MPI_STATUS_SIZE),header_request
logical is_finished
INTERFACE
INTEGER (C_INT) FUNCTION usleep(useconds) bind(C)
!SUBROUTINE usleep(useconds) bind(C)
use iso_c_binding
implicit none
INTEGER(c_int32_t), value :: useconds
END
END INTERFACE
call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror)
if(polling_interval.GT.0) then
is_finished = .false.
call MPI_Test(header_request, is_finished, request_status, ioerror)
DO WHILE(.NOT. is_finished)
ioerror = usleep(int(polling_interval, c_int32_t))
call MPI_Test(header_request, is_finished, request_status, ioerror)
END DO
call MPI_Wait(header_request, request_status, ioerror)
else
call MPI_Wait(header_request, request_status, ioerror)
endif
END SUBROUTINE
"""
RECV_HEADER_WAIT_STRING = """
SUBROUTINE mpi_recv_header(parent, ioerror)
use mpi
implicit none
integer,intent(in) :: parent
integer,intent(inout) :: ioerror
integer :: request_status(MPI_STATUS_SIZE),header_request
call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror)
call MPI_Wait(header_request, request_status, ioerror)
END SUBROUTINE
"""
EMPTY_RUN_LOOP_MPI_STRING = """
SUBROUTINE run_loop_mpi
implicit none
END SUBROUTINE
"""
RUN_LOOP_MPI_STRING = """
SUBROUTINE run_loop_mpi
use mpi
implicit none
integer :: provided
integer :: rank, parent, ioerror, max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer i, offset, call_count
call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, ioerror)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call MPI_COMM_GET_PARENT(parent, ioerror)
call MPI_COMM_RANK(parent, rank, ioerror)
last_communicator_id = last_communicator_id + 1
communicators(1) = parent
active_communicator_id = 1
must_run_loop = 1
do while (must_run_loop .eq. 1)
if ((id_to_activate .GE. 0) .AND. (id_to_activate .NE. active_communicator_id)) then
active_communicator_id = id_to_activate
id_to_activate = -1
parent = communicators(active_communicator_id)
call MPI_COMM_RANK(parent, rank, ioerror)
end if
call mpi_recv_header(parent, ioerror)
!print*, 'fortran: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, parent, ioError);
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, parent, ioError);
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, parent, ioError);
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, parent, ioError);
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
! some older MPI do not define MPI_C_BOOL; this seems to work ok
! maybe booleans_in in this call should be replaced by char (more portable) or logical*1
call MPI_BCast(c_booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, parent, ioError);
do i=1,header_in(HEADER_BOOLEAN_COUNT)
booleans_in(i)=logical(c_booleans_in(i))
enddo
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, parent, ioError);
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, parent, ioError);
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
!print*, 'fortran: sending header ', header_out
if (rank .eq. 0 ) then
call MPI_SEND(header_out, HEADER_SIZE, MPI_INTEGER, 0, 999, parent, ioerror);
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call MPI_SEND(integers_out, header_out(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call MPI_SEND(longs_out, header_out(HEADER_LONG_COUNT), MPI_INTEGER8, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call MPI_SEND(floats_out, header_out(HEADER_FLOAT_COUNT), MPI_REAL, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call MPI_SEND(doubles_out, header_out(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i=1,header_out(HEADER_BOOLEAN_COUNT)
c_booleans_out(i)=booleans_out(i)
enddo
call MPI_SEND(c_booleans_out, header_out(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call MPI_SEND(string_sizes_out, header_out(HEADER_STRING_COUNT), MPI_INTEGER, 0, 999, parent, ioerror)
call MPI_SEND(characters_out, offset -1, MPI_CHARACTER, 0, 999, parent, ioerror)
end if
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
do i = 1, last_communicator_id, 1
call MPI_COMM_DISCONNECT(communicators(i), ioerror);
end do
call MPI_FINALIZE(ioerror)
return
end subroutine
"""
# Fortran source template: the worker's receive/dispatch/send event loop for
# the plain sockets channel (no MPI).  Interpolated verbatim into the
# generated worker program; the template text is runtime data and must not
# be edited casually — buffer names must match the ISO_ARRAY_DEFINES block.
RUN_LOOP_SOCKETS_STRING = """
SUBROUTINE run_loop_sockets
use iso_c_binding
use FortranSocketsInterface
implicit none
integer :: max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer :: i, offset, call_count, port
character(len=32) :: port_string
character(kind=c_char, len=64) :: host
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call get_command_argument(1, port_string)
call get_command_argument(2, host)
read (port_string,*) port
!add a null character to the end of the string so c knows when the string ends
host = trim(host) // c_null_char
call forsockets_init(host, port)
must_run_loop = 1
do while (must_run_loop .eq. 1)
call receive_integers(c_loc(header_in), HEADER_SIZE)
!print*, 'fortran sockets: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT))
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT))
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT))
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT))
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT))
do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1
booleans_in(i) = logical(c_booleans_in(i))
end do
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT))
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call receive_string(c_loc(c_characters_in), total_string_length)
! this trick is necessary on older gfortran compilers (~<4.9)
! as c_loc needs character(len=1)
do i=1, total_string_length
characters_in(i:i)=c_characters_in(i)
enddo
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
!print*, 'fortran: sending header ', header_out
call send_integers(c_loc(header_out), HEADER_SIZE)
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT))
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT))
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT))
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT))
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1
c_booleans_out(i) = logical(booleans_out(i), c_bool)
end do
call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT))
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
do i=1, total_string_length
c_characters_out(i)=characters_out(i:i)
enddo
call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT))
call send_string(c_loc(c_characters_out), offset-1 )
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
call forsockets_close()
return
end subroutine
"""
# Fallback template used when the worker is built without iso_c_binding
# support: the sockets channel cannot work, so the stub only prints a notice.
EMPTY_RUN_LOOP_SOCKETS_STRING = """
subroutine run_loop_sockets
print*, 'fortran: sockets channel not supported in this worker'
return
end subroutine
"""
# Fortran source template: event loop for the sockets channel with an MPI
# worker group.  Rank 0 talks to the socket and broadcasts every request to
# the other ranks; only rank 0 sends results back.  Runtime data — keep the
# buffer names in sync with the ISO_ARRAY_DEFINES block.
RUN_LOOP_SOCKETS_MPI_STRING = """
SUBROUTINE run_loop_sockets_mpi
use iso_c_binding
use FortranSocketsInterface
use mpi
implicit none
integer :: provided
integer :: max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer :: i, offset, call_count, port, rank, ioerror
character(len=32) :: port_string
character(kind=c_char, len=64) :: host
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call mpi_init_thread(mpi_thread_multiple, provided, ioerror)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ioerror)
if (rank .eq. 0) then
call get_command_argument(1, port_string)
call get_command_argument(2, host)
read (port_string,*) port
!add a null character to the end of the string so c knows when the string ends
host = trim(host) // c_null_char
call forsockets_init(host, port)
end if
must_run_loop = 1
do while (must_run_loop .eq. 1)
if (rank .eq. 0) then
call receive_integers(c_loc(header_in), HEADER_SIZE)
end if
call MPI_BCast(header_in, HEADER_SIZE , MPI_INTEGER, 0, MPI_COMM_WORLD, ioerror)
!print*, 'fortran sockets mpi: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT))
end if
call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError);
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT))
end if
call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, MPI_COMM_WORLD, ioError);
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT))
end if
call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT))
end if
call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT))
do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1
booleans_in(i) = logical(c_booleans_in(i))
end do
end if
call MPI_BCast(booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_LOGICAL, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
if (rank .eq. 0) then
call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT))
end if
call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError);
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
if (rank .eq. 0) then
call receive_string(c_loc(c_characters_in), total_string_length)
endif
do i=1, total_string_length
characters_in(i:i)=c_characters_in(i)
enddo
call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, MPI_COMM_WORLD, ioError);
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
call MPI_Barrier(MPI_COMM_WORLD, ioerror)
if (rank .eq. 0) then
!print*, 'fortran: sending header ', header_out
call send_integers(c_loc(header_out), HEADER_SIZE)
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT))
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT))
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT))
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT))
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1
c_booleans_out(i) = logical(booleans_out(i), c_bool)
!print*, 'fortran sockets mpi: sending boolean', booleans_out(i) , i, ' send as ', c_booleans_out(i)
end do
call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT))
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_Worker reports too large string message"
stop
endif
do i=1, total_string_length
c_characters_out(i)=characters_out(i:i)
enddo
call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT))
call send_string(c_loc(c_characters_out), offset-1 )
end if
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
if (rank .eq. 0) then
call forsockets_close()
end if
call MPI_FINALIZE(ioerror)
return
end subroutine
"""
# Fallback template for workers built without iso_c_binding support: the
# sockets+MPI channel cannot work, so the stub only prints a notice.
EMPTY_RUN_LOOP_SOCKETS_MPI_STRING = """
subroutine run_loop_sockets_mpi
print*, 'fortran: sockets channel not supported in this worker'
return
end subroutine
"""
# Fortran template for the worker's program body: selects the communication
# channel from the command line.  0 arguments -> MPI channel; 3 arguments
# (port, host, use-mpi flag) -> sockets, optionally with an MPI worker group.
MAIN_STRING = """
integer :: count
logical :: use_mpi
character(len=32) :: use_mpi_string
count = command_argument_count()
use_mpi = NEEDS_MPI
if (count .eq. 0) then
call run_loop_mpi()
else if (count .eq. 3) then
call get_command_argument(3, use_mpi_string)
if (use_mpi_string .eq. 'true') then
use_mpi = .true.
else if (use_mpi_string .eq. 'false') then
use_mpi = .false.
else
print*, 'fortran worker: need either true or false as mpi enable arguments, not', use_mpi_string
stop
end if
if (use_mpi) then
call run_loop_sockets_mpi()
else
call run_loop_sockets()
end if
else
print*, 'fortran worker: need either 0 or 3 arguments, not', count
stop
end if
"""
# Fortran template for chdir/getcwd wrappers.  The "{0}" placeholder is
# filled by str.format with an extra "use" line (e.g. " use ifport" when
# building with ifort, which needs it for chdir/getcwd) or "" otherwise.
GETSET_WORKING_DIRECTORY="""
function set_working_directory(directory) result(ret)
{0}
integer :: ret
character(*), intent(in) :: directory
ret = chdir(directory)
end function
function get_working_directory(directory) result(ret)
{0}
integer :: ret
character(*), intent(out) :: directory
ret = getcwd(directory)
end function
"""
class GenerateAFortranStringOfAFunctionSpecification(GenerateASourcecodeString):
    """Generate the Fortran ``CASE(<id>)`` branch that dispatches one remote call.

    For a single legacy-function specification this emits: the call to the
    user function with arguments taken from the typed ``*_in`` buffers,
    the copy of INOUT values into the ``*_out`` buffers, and the output
    counts written to ``header_out``.  The result ends up in ``self._result``.
    """

    # Fixed length of Fortran character buffers in the generated worker.
    MAX_STRING_LEN = 256

    @late
    def specification(self):
        # A specification must be injected by the caller before start().
        raise exceptions.AmuseException("No specification set, please set the specification first")

    @late
    def underscore_functions_from_specification_classes(self):
        # Specification classes whose functions need a trailing underscore
        # appended (compiler name-mangling); empty by default.
        return []

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    def index_string(self, index, must_copy_in_to_out = False):
        """Return the Fortran index expression for argument slot *index*.

        The expression depends on the call style: ``must_handle_array``
        passes whole-array slices (offset from the slot start),
        ``can_handle_array`` is called per element inside a loop over ``i``,
        and the scalar case uses a plain 1-based index.
        """
        if self.specification.must_handle_array and not must_copy_in_to_out:
            if index == 0:
                return '1'
            else:
                return '( %d * call_count) + 1' % (index )
        elif self.specification.can_handle_array or (self.specification.must_handle_array and must_copy_in_to_out):
            if index == 0:
                return 'i'
            else:
                if index == -1:
                    return "i - 1"
                else:
                    return '( %d * call_count) + i' % index
        else:
            # Scalar call: note this returns an int, not a string; the
            # out-stream accepts both.
            return index + 1

    def start(self):
        """Emit the complete CASE branch for ``self.specification``."""
        self.specification.prepare_output_parameters()
        self.output_casestmt_start()
        self.out.indent()
        #self.output_lines_before_with_clear_out_variables()
        #self.output_lines_before_with_clear_input_variables()
        if self.specification.must_handle_array:
            pass
        elif self.specification.can_handle_array:
            # Per-element call style: wrap the call in a loop over call_count.
            self.out.lf() + 'do i = 1, call_count, 1'
            self.out.indent()
        #self.output_lines_before_with_inout_variables()
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        self.output_lines_with_inout_variables()
        if self.specification.must_handle_array:
            if not self.specification.result_type is None:
                # The function returned one result for the whole array;
                # replicate it so every element of the slot is filled.
                spec = self.dtype_to_spec[self.specification.result_type]
                self.out.lf() + 'DO i = 2, call_count'
                self.out.indent()
                self.out.lf() + spec.output_var_name + '(i)' + ' = ' + spec.output_var_name + '(1)'
                self.out.dedent()
                self.out.lf() + 'END DO'
        elif self.specification.can_handle_array:
            self.out.dedent()
            self.out.lf() + 'end do'
        self.output_lines_with_number_of_outputs()
        self.output_casestmt_end()
        self.out.dedent()
        self._result = self.out.string

    def output_function_parameters(self):
        """Emit the actual-argument list, one buffer reference per parameter."""
        self.out.indent()
        first = True
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            # Fortran line continuation: '&' after the opening paren /
            # previous argument, ',&' between arguments.
            if first:
                first = False
                self.out + ' &'
            else:
                self.out + ' ,&'
            if parameter.direction == LegacyFunctionSpecification.IN:
                # if parameter.datatype == 'string':
                #     self.out.n() + 'input_characters('
                #     self.out + '( (' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ')'
                #     self.out + ':' + '(((' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ') +'
                #     self.out + '(' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')' + '-'
                #     self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , '+spec.input_var_name +') ))'
                #     self.out + ')'
                # else:
                if parameter.datatype == 'string':
                    self.out.n() + 'strings_in(' + self.index_string(parameter.input_index) + ')'
                else:
                    self.out.n() + spec.input_var_name
                    self.out + '(' + self.index_string(parameter.input_index) + ')'
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                # INOUT arguments are passed from the input buffer; the copy
                # to the output buffer happens afterwards in
                # output_lines_with_inout_variables().
                # if parameter.datatype == 'string':
                #     self.out.n() + 'output_characters('
                #     self.out + '((' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')'
                #     self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                #     self.out + ')'
                # else:
                # if parameter.datatype == 'string':
                #     self.out.n() + spec.input_var_name
                #     self.out + '(' + self.index_string(parameter.input_index) + ', :)'
                # else:
                self.out.n() + spec.input_var_name
                self.out + '(' + self.index_string(parameter.input_index) + ')'
            elif parameter.direction == LegacyFunctionSpecification.OUT:
                # if parameter.datatype == 'string':
                #     self.out.n() + 'output_characters('
                #     self.out + '((' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')'
                #     self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                #     self.out + ')'
                # else:
                # if parameter.datatype == 'string':
                #     self.out.n() + spec.output_var_name
                #     self.out + '(' + self.index_string(parameter.output_index) + ')(1:50)'
                # else:
                self.out.n() + spec.output_var_name
                self.out + '(' + self.index_string(parameter.output_index) + ')'
            elif parameter.direction == LegacyFunctionSpecification.LENGTH:
                # LENGTH parameters receive the number of elements in the call.
                self.out.n() + 'call_count'
        self.out.dedent()

    def output_lines_with_inout_variables(self):
        """Copy every INOUT parameter from its input slot to its output slot."""
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                if self.specification.must_handle_array:
                    # Whole-array style: copy element-wise in a loop.
                    self.out.lf() + 'DO i = 1, call_count'
                    self.out.indent()
                self.out.n() + spec.output_var_name
                self.out + '(' + self.index_string(parameter.output_index, must_copy_in_to_out = True) + ')'
                self.out + ' = '
                self.out + spec.input_var_name + '(' + self.index_string(parameter.input_index, must_copy_in_to_out = True) + ')'
                if self.specification.must_handle_array:
                    self.out.dedent()
                    self.out.lf() + 'END DO'

    def output_lines_before_with_clear_out_variables(self):
        # Currently unused (commented out in start()); clears the string
        # output buffer once if any output parameter is a string.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.is_output():
                if parameter.datatype == 'string':
                    self.out.lf() + 'output_characters = "x"'
                    return

    def output_lines_before_with_clear_input_variables(self):
        # Currently unused (commented out in start()); clears the string
        # input buffer once if any input parameter is a string.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.is_input():
                if parameter.datatype == 'string':
                    self.out.lf() + 'input_characters = "x"'
                    return

    def output_lines_before_with_inout_variables(self):
        # Currently unused (commented out in start()); historical code that
        # staged string parameters into fixed-size character windows.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.direction == LegacyFunctionSpecification.IN:
                if parameter.datatype == 'string':
                    self.out.n() + 'input_characters('
                    self.out + '( (' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ')'
                    self.out + ':' + '(((' + self.index_string(parameter.input_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                    self.out + ') = &'
                    self.out.lf()
                    self.out + 'characters('
                    self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , '+spec.input_var_name +')'
                    self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')'
                    self.out + ')'
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                if parameter.datatype == 'string':
                    self.out.n() + 'output_characters('
                    self.out + '( (' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')'
                    self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                    self.out + ') = &'
                    self.out.lf()
                    self.out + 'characters('
                    self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , '+spec.input_var_name +')'
                    self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')'
                    self.out + ')'

    def output_lines_with_number_of_outputs(self):
        """Write the per-dtype output counts into ``header_out``."""
        dtype_to_count = {}
        for parameter in self.specification.output_parameters:
            count = dtype_to_count.get(parameter.datatype, 0)
            dtype_to_count[parameter.datatype] = count + 1
        if not self.specification.result_type is None:
            # The function result counts as one extra output of its dtype.
            count = dtype_to_count.get(self.specification.result_type, 0)
            dtype_to_count[self.specification.result_type] = count + 1
        for dtype in dtype_to_count:
            spec = self.dtype_to_spec[dtype]
            count = dtype_to_count[dtype]
            self.out.n() + 'header_out(' + spec.counter_name + ') = ' + count + ' * call_count'
        pass

    def output_function_end(self):
        # Close the argument list opened in output_function_start().
        self.out + ' &'
        self.out.n() + ')'

    def output_function_start(self):
        """Emit the assignment/CALL prefix and the function name."""
        self.out.n()
        if not self.specification.result_type is None:
            spec = self.dtype_to_spec[self.specification.result_type]
            # if self.specification.result_type == 'string':
            #     self.out + 'output_characters('
            #     self.out + '( (' + self.index_string(0) + ')* ' + self.MAX_STRING_LEN + ')'
            #     self.out + ':' + '(((' + self.index_string(0) + ')+1)*' + self.MAX_STRING_LEN + '-1)'
            #     self.out + ') = &'
            #     self.out.lf()
            # else:
            self.out + spec.output_var_name
            self.out + '(' + self.index_string(0) + ')' + ' = '
        else:
            # Subroutine-style function: no result, use CALL.
            self.out + 'CALL '
        self.out + self.specification.name
        if self.must_add_underscore_to_function(self.specification):
            self.out + '_'
        self.out + '('

    def output_casestmt_start(self):
        self.out + 'CASE(' + self.specification.id + ')'

    def output_casestmt_end(self):
        self.out.n()

    def must_add_underscore_to_function(self, x):
        # True when any registered specification class defines this function,
        # meaning the generated call needs a trailing underscore.
        for cls in self.underscore_functions_from_specification_classes:
            if hasattr(cls, x.name):
                return True
        return False
class GenerateAFortranSourcecodeStringFromASpecificationClass(GenerateASourcecodeStringFromASpecificationClass):
    """Assemble the complete Fortran worker program for a specification class.

    Stitches the template constants (run loops, helpers, maximum-size
    parameters) together with a generated ``handle_call`` dispatch function,
    one CASE branch per interface function.
    """

    # Fixed length of Fortran character buffers in the generated worker.
    MAX_STRING_LEN = 256

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    @late
    def number_of_types(self):
        return len(self.dtype_to_spec)

    @late
    def length_of_the_header(self):
        # Header layout: two bookkeeping fields plus one count per dtype.
        return 2 + self.number_of_types

    @late
    def underscore_functions_from_specification_classes(self):
        return []

    def output_sourcecode_for_function(self):
        """Factory for the per-function CASE-branch generator."""
        result = GenerateAFortranStringOfAFunctionSpecification()
        result.underscore_functions_from_specification_classes = self.underscore_functions_from_specification_classes
        return result

    def output_needs_mpi(self):
        # Emit the NEEDS_MPI parameter consulted by MAIN_STRING.
        self.out.lf() + 'logical NEEDS_MPI'
        if (hasattr(self, 'needs_mpi') and self.needs_mpi) and self.must_generate_mpi:
            self.out.lf() + 'parameter (NEEDS_MPI=.true.)'
        else:
            self.out.lf() + 'parameter (NEEDS_MPI=.false.)'
        self.out.lf().lf()

    def start(self):
        """Emit the full worker program into ``self._result``."""
        self.use_iso_c_bindings = config.compilers.fc_iso_c_bindings
        # ifort needs "use ifport" for chdir/getcwd; other compilers do not.
        self.out + GETSET_WORKING_DIRECTORY.format("" if not config.compilers.ifort_version else " use ifport")
        self.out + 'program amuse_worker_program'
        self.out.indent()
        self.output_modules()
        if self.use_iso_c_bindings:
            self.out.n() + 'use iso_c_binding'
        self.out.n() + 'implicit none'
        self.out.n() + CONSTANTS_STRING
        self.output_needs_mpi()
        self.output_maximum_constants()
        if self.must_generate_mpi:
            self.out.lf().lf() + MODULE_GLOBALS_STRING
        else:
            self.out.lf().lf() + NOMPI_MODULE_GLOBALS_STRING
        if self.use_iso_c_bindings:
            self.out.n() + ISO_ARRAY_DEFINES_STRING
        else:
            self.out.n() + ARRAY_DEFINES_STRING
        self.out.lf().lf() + MAIN_STRING
        self.out.lf().lf() + 'CONTAINS'
        self.out + POLLING_FUNCTIONS_STRING
        self.out + GETSET_WORKING_DIRECTORY.format("" if not config.compilers.ifort_version else " use ifport")
        if self.must_generate_mpi:
            self.out + INTERNAL_FUNCTIONS_STRING
            if self.use_iso_c_bindings:
                self.out + RECV_HEADER_SLEEP_STRING
            else:
                self.out + RECV_HEADER_WAIT_STRING
            self.out + RUN_LOOP_MPI_STRING
        else:
            self.out + NOMPI_INTERNAL_FUNCTIONS_STRING
            self.out + EMPTY_RUN_LOOP_MPI_STRING
        # The sockets channel requires iso_c_binding; fall back to stubs
        # that only print an error message when it is unavailable.
        if self.use_iso_c_bindings:
            self.out.n() + RUN_LOOP_SOCKETS_STRING
            if self.must_generate_mpi:
                self.out.n() + RUN_LOOP_SOCKETS_MPI_STRING
            else:
                self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING
        else:
            self.out.n() + EMPTY_RUN_LOOP_SOCKETS_STRING
            self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING
        self.output_handle_call()
        self.out.dedent()
        self.out.n() + 'end program amuse_worker_program'
        self._result = self.out.string

    def output_mpi_include(self):
        self.out.n() + "USE mpi"

    def output_modules(self):
        # Emit one "use" line per module declared on the specification class.
        self.out.n()
        if hasattr(self.specification_class, 'use_modules'):
            for x in self.specification_class.use_modules:
                self.out.n() + 'use ' + x

    def must_include_declaration_of_function(self, x):
        # Functions marked internal_provided are implemented elsewhere and
        # need no declaration here.
        if hasattr(x.specification,"internal_provided"):
            return False
        return True

    def output_declarations_for_the_functions(self):
        """Declare the result type of every dispatched function.

        Skipped entirely when the interface comes from modules, since the
        module already carries the interfaces.
        """
        if not hasattr(self.specification_class, 'use_modules'):
            for x in self.interface_functions:
                if not self.must_include_declaration_of_function(x):
                    continue
                specification = x.specification
                if specification.id == 0:
                    continue
                if specification.result_type is None:
                    continue
                if specification.result_type == 'string':
                    type = 'character(len=255)'
                else:
                    spec = self.dtype_to_spec[specification.result_type]
                    type = spec.type
                self.out.lf() + type + ' :: ' + specification.name
                if self.must_add_underscore_to_function(x):
                    self.out + '_'

    def must_add_underscore_to_function(self, x):
        # Mirror of the per-function generator's name-mangling check.
        for cls in self.underscore_functions_from_specification_classes:
            if hasattr(cls, x.specification.name):
                return True
        return False

    def output_handle_call(self):
        """Emit the ``handle_call`` dispatch function (SELECT CASE on id).

        Returns 1 to keep the run loop going, 0 (function id 0) to stop.
        Unknown ids set the error flag (bit 256) and an error string.
        """
        self.out.lf() + 'integer function handle_call()'
        self.out.indent().n()
        self.out.lf() + 'implicit none'
        self.output_declarations_for_the_functions()
        self.out.lf() + 'integer i, call_count'
        self.out.lf() + 'call_count = header_in(HEADER_CALL_COUNT)'
        self.out.lf() + 'handle_call = 1'
        self.out.lf() + 'SELECT CASE (header_in(HEADER_FUNCTION_ID))'
        self.out.indent().n()
        self.out.lf() + 'CASE(0)'
        self.out.indent().lf()+'handle_call = 0'
        self.out.dedent()
        self.output_sourcecode_for_functions()
        self.out.lf() + 'CASE DEFAULT'
        self.out.indent()
        self.out.lf() + 'header_out(HEADER_STRING_COUNT) = 1'
        self.out.lf() + 'header_out(HEADER_FLAGS) = IOR(header_out(HEADER_FLAGS), 256) '
        self.out.lf() + "strings_out(1) = 'error, illegal function id'"
        self.out.dedent()
        self.out.dedent().n() + 'END SELECT'
        self.out.n() + 'return'
        self.out.dedent()
        self.out.n() + 'end function'

    def output_maximum_constants(self):
        """Emit the MAX_*_IN / MAX_*_OUT parameters used to size the buffers."""
        self.out.lf() + 'integer MAX_INTEGERS_IN, MAX_INTEGERS_OUT, MAX_LONGS_IN, MAX_LONGS_OUT, &'
        self.out.lf() + 'MAX_FLOATS_IN, MAX_FLOATS_OUT, MAX_DOUBLES_IN,MAX_DOUBLES_OUT, &'
        self.out.lf() + 'MAX_BOOLEANS_IN,MAX_BOOLEANS_OUT, MAX_STRINGS_IN, MAX_STRINGS_OUT'
        self.out.lf()
        for dtype in self.dtype_to_spec.keys():
            dtype_spec = self.dtype_to_spec[dtype]
            maximum = self.mapping_from_dtype_to_maximum_number_of_inputvariables.get(dtype,0)
            self.out.n() + 'parameter (MAX_' + dtype_spec.input_var_name.upper() + '=' + maximum + ')'
            maximum =self.mapping_from_dtype_to_maximum_number_of_outputvariables.get(dtype,0)
            self.out.n() + 'parameter (MAX_' + dtype_spec.output_var_name.upper() + '=' + maximum + ')'
class GenerateAFortranStubStringFromASpecificationClass\
        (GenerateASourcecodeStringFromASpecificationClass):
    """Generate a Fortran stub (empty implementations) for a specification class.

    The stubs are optionally wrapped in the first module named in
    ``use_modules`` so they compile against the expected interfaces.
    """

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    @late
    def ignore_functions_from_specification_classes(self):
        # Specification classes whose functions should not get a stub.
        return []

    @late
    def underscore_functions_from_specification_classes(self):
        return []

    def output_sourcecode_for_function(self):
        """Factory for the per-function stub generator."""
        result = create_definition.CreateFortranStub()
        result.output_definition_only = False
        return result

    def start(self):
        """Emit the stub source into ``self._result``."""
        if hasattr(self.specification_class, 'use_modules'):
            # Wrap the stubs in the first declared module.
            self.out.lf() + 'module {0}'.format(self.specification_class.use_modules[0])
            self.out.indent()
        # Skip the first module: it is the one being defined here.
        self.output_modules(1)
        if hasattr(self.specification_class, 'use_modules'):
            self.out.lf() + "contains"
        self.out.lf()
        self.output_sourcecode_for_functions()
        self.out.lf()
        if hasattr(self.specification_class, 'use_modules'):
            self.out.dedent()
            self.out.lf() + "end module"
        self.out.lf()
        self._result = self.out.string

    def must_include_interface_function_in_output(self, x):
        # Exclude internally provided functions and anything defined on an
        # ignored specification class.
        if hasattr(x.specification,"internal_provided"):
            return False
        for cls in self.ignore_functions_from_specification_classes:
            if hasattr(cls, x.specification.name):
                return False
        return True

    def output_modules(self,skip=0):
        # Emit "use" lines for the declared modules, skipping the first
        # *skip* entries (e.g. the module currently being generated).
        self.out.n()
        if hasattr(self.specification_class, 'use_modules'):
            for x in self.specification_class.use_modules[skip:]:
                self.out.n() + 'use ' + x
|
#coding=utf-8
from __future__ import print_function
import rarfile
import zipfile
from sim import *
import os
import filetype
import shutil
#Create directory to store the code for each question.
if os.path.isdir("AS2_Q1") == False:
os.mkdir("AS2_Q1")
else:
shutil.rmtree('AS2_Q1')
os.mkdir("AS2_Q1")
if os.path.isdir("AS2_Q2") == False:
os.mkdir("AS2_Q2")
else:
shutil.rmtree('AS2_Q2')
os.mkdir("AS2_Q2")
if os.path.isdir("AS2_Q3") == False:
os.mkdir("AS2_Q3")
else:
shutil.rmtree('AS2_Q3')
os.mkdir("AS2_Q3")
if os.path.isdir("AS2_Q4") == False:
os.mkdir("AS2_Q4")
else:
shutil.rmtree('AS2_Q4')
os.mkdir("AS2_Q4")
if os.path.isdir("AS2_Q5") == False:
os.mkdir("AS2_Q5")
else:
shutil.rmtree('AS2_Q5')
os.mkdir("AS2_Q5")
if os.path.isdir("AS2_Q6") == False:
os.mkdir("AS2_Q6")
else:
shutil.rmtree('AS2_Q6')
os.mkdir("AS2_Q6")
#Get the list of current directory.
dir_list = os.listdir("AS2")
os.chdir("AS2")
#print(dir_list)
#delete the .DS_Store file in MAC OS.
for i in dir_list:
if "DS_Store" in i:
#os.remove(i)
dir_list.remove(i)
#print(dir_list)
Submitted_Student = []
#To expand the file, and
#判断解压出来的文件夹里面有没有directory, 如果有, 那就再往后面进一步。 Unfinished
for i in dir_list:
#print(i.find("July, 2020)_")+1)
Student_ID = i[int(i.find("July, 2020)_"))+12:int(i.find("July, 2020)_"))+21]
Submitted_Student.append(Student_ID)
#print(Student_ID)
kind = filetype.guess(i)
print(i)
if kind.extension == "zip":
if os.path.isdir(Student_ID) == False:
os.mkdir(Student_ID)
rf = zipfile.ZipFile(i)
rf.extractall(os.path.splitext(Student_ID)[0])
rf.close()
File_List_Under_ID = os.listdir(Student_ID)
#Only save the python and directory.
for f in File_List_Under_ID:
try:
os.chdir(Student_ID)
#os.chdir("..")
except:
continue
#print(f)
if ".py" in f:
#print(f)
if "1" in f:
Question_ID = Student_ID+"_Q1"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q1")
elif "5" in f:
Question_ID = Student_ID+"_Q5"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q5")
elif "3" in f:
Question_ID = Student_ID+"_Q3"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q3")
elif "4" in f:
Question_ID = Student_ID+"_Q4"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q4")
elif "6" in f:
Question_ID = Student_ID+"_Q6"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q6")
elif "2" in f:
Question_ID = Student_ID+"_Q2"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q2")
else:
continue
try:
os.chdir("..")
except:
continue
elif kind.extension == "rar":
if os.path.isdir(Student_ID) == False:
os.mkdir(Student_ID)
rf = rarfile.RarFile(i)
rf.extractall(os.path.splitext(Student_ID)[0])
rf.close()
File_List_Under_ID = os.listdir(Student_ID)
#Only save the python and directory.
for f in File_List_Under_ID:
try:
os.chdir(Student_ID)
#os.chdir("..")
except:
continue
if ".py" in f:
if "1" in f:
Question_ID = Student_ID+"_Q1"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q1")
elif "5" in f:
Question_ID = Student_ID+"_Q5"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q5")
elif "3" in f:
Question_ID = Student_ID+"_Q3"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q3")
elif "4" in f:
Question_ID = Student_ID+"_Q4"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q4")
elif "6" in f:
Question_ID = Student_ID+"_Q6"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q6")
elif "2" in f:
Question_ID = Student_ID+"_Q2"
os.rename(f,Question_ID+'.py')
shutil.copy(Question_ID+'.py',"../../AS2_Q2")
else:
continue
try:
os.chdir("..")
except:
continue
elif kind.extension == "7z":
continue
else:
print("Do not suppport this kind of file now.")
|
class No:
    """A singly linked list node holding a value and the next-node link."""
    def __init__(self, v):
        self.value = v
        self.next = None


class ListaEncadenada:
    """A singly linked list supporting prepend, append and tail lookup.

    BUG FIXES versus the original:
    * ``append`` only assigned local variables (``final = no``, ``tail = no``)
      and never linked the new node into the list; it now walks to the last
      node and links it.
    * ``__init__``/``prepend`` set a ``self.tail`` attribute that shadowed
      the ``tail()`` method, making ``lst.tail()`` raise; the attribute has
      been removed and the last node is found by traversal.
    """
    def __init__(self):
        self.head = None

    def prepend(self, v):
        """Insert ``v`` at the front of the list."""
        no = No(v)
        no.next = self.head
        self.head = no

    def append(self, v):
        """Insert ``v`` at the end of the list."""
        no = No(v)
        if self.head is None:
            self.head = no
            return
        last = self.head
        while last.next is not None:
            last = last.next
        last.next = no

    def tail(self):
        """Return the last node of the list, or None when it is empty."""
        node = self.head
        if node is None:
            return None
        while node.next is not None:
            node = node.next
        return node
|
# computes the entropy of P(m | w) using
# word pair relatedness and trigram probabilities
# from wordPair_relatedness_smoothedTrigrams_*.csv
# homophone log probability from homophones_unigram_*.csv.
# NOTE(review): this is a Python 2 script (print statement, iteritems below).
import sys, re, string, itertools
import math

# parameters
# indicates whether puns are identical or homophone puns
punType = sys.argv[1]
# whether trigram (1) or unigram (0) priors are used for the content words
useTrigrams = bool(int(sys.argv[2]))
# relatedness assigned to the observed homophone with itself
self_relatedness = float(sys.argv[3])
# additive scaling applied to the log relatedness of in-focus words
scaling_parameter = float(sys.argv[4])

ngramType = "unigram"
if useTrigrams:
    ngramType = "trigram"

# output file, named after the parameter combination
filename = "../ModelOutputs/" + punType + "_" + ngramType + "_" + str(int(self_relatedness)) + "_" + str(int(scaling_parameter)) + ".csv"
print filename
# NOTE(review): mode "wr" is non-standard; "w" is presumably intended - confirm.
writeFile = open(filename, "wr")
# The output is formatted as follows:
writeFile.write("punType,sentenceID,sentenceType,sum_m1_ngram,sum_m2_ngram,sum_m1_relatedness,sum_m2_relatedness,p_m1_given_w,p_m2_given_w,entropy,m1KL,m2KL,KL,m1Focus,m2Focus\n")
# A function that takes in a list and normalizes it to a given total.
def normListSumTo(L, sumTo=1):
    """Return a copy of L rescaled so its elements sum to `sumTo` (default 1).

    Fixes versus the original: uses the builtin sum() instead of the
    Python-2-only builtin reduce(), and no longer shadows the builtin
    name `sum` with a local variable.
    """
    total = float(sum(L))  # float() keeps true division on Python 2 as well
    return [x / total * sumTo for x in L]
# a dictionary holding unigram probabilities for m1 (original homophone)
# indexed by the original homophone
m1ProbDict = dict()
# a dictionary holding unigram probabilities for m2 (modified homophone)
# indexed by the original homophone
m2ProbDict = dict()

# homophone unigram file: one row per homophone pair; column 2 is the
# original homophone, columns 4/5 the m1/m2 probabilities
unigramFile = open("../ProcessedData/homophones_unigram_" + punType + ".csv", "r")
firstLine = 0
for l in unigramFile:
    if firstLine == 0:
        # skip the CSV header row
        firstLine = 1
    else:
        l = l.strip()
        toks = l.split(",")
        key = toks[2].lower()
        m1Prob = float(toks[4])
        m2Prob = float(toks[5])
        # probabilities are stored in log space
        m1ProbDict[key] = math.log(m1Prob)
        m2ProbDict[key] = math.log(m2Prob)
# a dictionary holding word pair information, with each entry being a sentence
sentenceDict = dict()

pairFile = open("../ProcessedData/wordPair_relatedness_smoothedTrigrams_" + punType + ".csv", "r")
firstLine = 0
for l in pairFile:
    if firstLine == 0:
        # skip the CSV header row
        firstLine = 1
    else:
        l = l.strip()
        toks = l.split(",")
        sentenceID = int(toks[0])
        sentenceType = toks[1]
        # observed homophone
        m1 = toks[2]
        # content word
        word = toks[3]
        # relatedness of observed word with observed homophone (m1)
        m1_relatedness = float(toks[4])
        # relatedness of observed word with alternative homophone (m2)
        m2_relatedness = float(toks[5])
        # prior trigram probability of the content word with observed homophone (m1)
        m1_ngram = math.log(float(toks[6]))
        # prior trigram probability of the content word with the alternative homophone (m2)
        m2_ngram = math.log(float(toks[7]))
        # prior unigram probability of the content word
        content_unigram = math.log(float(toks[8]))
        # if this is the first word pair entry for the sentence,
        # initializes all the relevant information and puts it in
        # the dictionary indexed by sentence ID
        if sentenceID not in sentenceDict:
            # wordArray is an array of observed words for each sentence
            wordArray = [word]
            # hom1RelatednessArray is an array of relatedness for the observed words
            # and the original homophone (h1)
            # NOTE(review): the first entry uses self_relatedness / 0 rather
            # than the relatedness values read from this row - presumably
            # the first row pairs the homophone with itself; confirm.
            m1RelatednessArray = [self_relatedness]
            # hom2RelatednessArray is an array of relatedness for the observed words
            # and the modified homophone (h2)
            m2RelatednessArray = [0]
            # m1NgramArray is an array of ngram probabilities for the observed words and the original homophone
            m1NgramArray = [m1_ngram]
            # m2NgramArray is an array of ngram probs for the observed words and the modified homophone
            m2NgramArray = [m2_ngram]
            # contentUnigramArray is an array of unigram probs for the observed words
            contentUnigramArray = [content_unigram]
            # infoArray is an array of all the relevant information for a sentence,
            # namely whether it is a pun, the original homophone, the array of observed words,
            # the array of relatedness for the observed words and h1, and the array of relatedness
            # for the observed words and h2
            infoArray = [sentenceType, m1, wordArray, m1RelatednessArray, m2RelatednessArray, m1NgramArray, m2NgramArray, contentUnigramArray]
            # places the infoArray for the sentence in the dictionary
            sentenceDict[sentenceID] = infoArray
        # if the sentence is already in the dictionary, updates the information for that sentence
        # with information from the new pair
        else:
            # retrieves the current infoArray for the sentence
            infoArray = sentenceDict[sentenceID]
            # the array of observed words. Updates it with the observed word from current pair
            infoArray[2].append(word)
            # the array of relatedness with m1. Updates it with relatedness from current pair
            infoArray[3].append(m1_relatedness)
            # the array of relatedness with m2. Updates it with relatedness from current pair
            infoArray[4].append(m2_relatedness)
            # the array of ngram with m1
            infoArray[5].append(m1_ngram)
            # the array of ngram with m2
            infoArray[6].append(m2_ngram)
            # the array of unigram for content word
            infoArray[7].append(content_unigram)
            # puts the updated infoArray into the dictionary indexed by sentenceID
            sentenceDict[sentenceID] = infoArray
# Main loop: for every sentence, marginalize over all focus vectors to get
# P(m1|w) and P(m2|w), the entropy over meanings, and the KL divergence
# between the focus distributions under m1 and m2.
for k, v in sentenceDict.iteritems():
    # sentenceID
    sentenceID = str(k)
    # isPun
    sentenceType = v[0]
    # the original homophone (not necessarily the one observed, just the more standard one)
    m1 = v[1]
    #print hom
    #print m1ProbDict
    # the log probability of the original homophone m1
    m1PriorProb = m1ProbDict[m1]
    # the log probability of the modified homophone h2
    m2PriorProb = m2ProbDict[m1]
    # array of all observed words in the sentence
    words = v[2]
    # number of content words in sentence
    numWords = len(words)
    # array of relatedness measures with all words and h1
    m1Relatedness = v[3]
    # array of relatedness measures with all words and h2
    m2Relatedness = v[4]
    if useTrigrams:
        # array of ngram with all words and h1
        m1Ngram = v[5]
        # array of ngram with all words and h2
        m2Ngram = v[6]
    else:
        # array of unigram of all words (same prior under both meanings)
        m1Ngram = v[7]
        m2Ngram = v[7]
    # makes a list of all possible focus vectors (2^numWords of them)
    focusVectors = list(itertools.product([False, True], repeat=numWords))
    #print focusVectors
    # vector containing probabilities for each f,w combination given m1
    fWGivenM1 = []
    # vector containing probabilities for each f,w combination given m2
    fWGivenM2 = []
    sumOverMF = 0
    sumM1OverF = 0
    sumM2OverF = 0
    # iterates through all possible focus vectors
    for fVector in focusVectors:
        # probability of each word being in focus (coin weight)
        probWordInFocus = 0.5 # can be tweaked
        # Probability of a focus vector
        # Determined by the number of words in focus (number of "True" in vector) vs not
        numWordsInFocus = sum(fVector)
        probFVector = math.pow(probWordInFocus, numWordsInFocus) * math.pow(1 - probWordInFocus, numWords - numWordsInFocus)
        wordsInFocus = []
        sumLogProbWordsGivenM1F = 0
        sumLogProbWordsGivenM2F = 0
        for j in range(numWords):
            wordj = words[j]
            if fVector[j] is True:
                # in-focus words contribute relatedness (scaled) plus ngram prior
                wordsInFocus.append(wordj)
                logProbWordGivenM1 = m1Ngram[j] + m1Relatedness[j] + scaling_parameter
                logProbWordGivenM2 = m2Ngram[j] + m2Relatedness[j] + scaling_parameter
                sumLogProbWordsGivenM1F = sumLogProbWordsGivenM1F + logProbWordGivenM1
                sumLogProbWordsGivenM2F = sumLogProbWordsGivenM2F + logProbWordGivenM2
            else:
                # out-of-focus words contribute only the ngram prior
                logProbWordGivenM1_ngram = m1Ngram[j]
                logProbWordGivenM2_ngram = m2Ngram[j]
                sumLogProbWordsGivenM1F = sumLogProbWordsGivenM1F + logProbWordGivenM1_ngram
                sumLogProbWordsGivenM2F = sumLogProbWordsGivenM2F + logProbWordGivenM2_ngram
        # with homophone prior, calculate P(m,F | words)
        probM1FGivenWords = math.exp(m1PriorProb + math.log(probFVector) + sumLogProbWordsGivenM1F)
        probM2FGivenWords = math.exp(m2PriorProb + math.log(probFVector) + sumLogProbWordsGivenM2F)
        # P(F | words, m) \propto P(w | m, f)P(f | m)
        # since f, m are independent, this is just P(f)
        probFGivenWordsM1 = math.exp(math.log(probFVector) + sumLogProbWordsGivenM1F)
        probFGivenWordsM2 = math.exp(math.log(probFVector) + sumLogProbWordsGivenM2F)
        fWGivenM1.append(probFGivenWordsM1)
        fWGivenM2.append(probFGivenWordsM2)
        # sums over all possible focus vectors for P(m1|w)
        sumM1OverF = sumM1OverF + probM1FGivenWords
        sumM2OverF = sumM2OverF + probM2FGivenWords
        sumOverMF = sumOverMF + probM1FGivenWords + probM2FGivenWords
    # normalizes and calculates entropy over the two meanings
    probM1 = sumM1OverF / sumOverMF
    probM2 = sumM2OverF / sumOverMF
    entropy = - (probM1 * math.log(probM1) + probM2 * math.log(probM2))
    # normalizes probability vectors of F to sum to 1 for m1 and m2
    normalizedFWGivenM1 = normListSumTo(fWGivenM1, 1)
    normalizedFWGivenM2 = normListSumTo(fWGivenM2, 1)
    # the most probable focus vector under each meaning
    maxM1FocusVector = focusVectors[normalizedFWGivenM1.index(max(normalizedFWGivenM1))]
    maxM2FocusVector = focusVectors[normalizedFWGivenM2.index(max(normalizedFWGivenM2))]
    # find words in focus given maxM1FocusVector and maxM2FocusVector
    maxM1FocusWords = []
    maxM2FocusWords = []
    for i in range(len(maxM1FocusVector)):
        if maxM1FocusVector[i] is True:
            maxM1FocusWords.append(words[i])
        if maxM2FocusVector[i] is True:
            maxM2FocusWords.append(words[i])
    # computes KL between the two focus distributions (both directions)
    KL1 = 0
    KL2 = 0
    for i in range(len(normalizedFWGivenM1)):
        KL1 = KL1 + math.log(normalizedFWGivenM1[i] / normalizedFWGivenM2[i]) * normalizedFWGivenM1[i]
        KL2 = KL2 + math.log(normalizedFWGivenM2[i] / normalizedFWGivenM1[i]) * normalizedFWGivenM2[i]
    # one CSV row per sentence (columns match the header written above)
    writeFile.write(punType + "," + sentenceID + "," + sentenceType + "," + str(sum(m1Ngram)) + "," + str(sum(m2Ngram)) + "," + str(sum(m1Relatedness)) + "," + str(sum(m2Relatedness)) + "," + str(probM1) + "," + str(probM2) + "," + str(entropy) + "," + str(KL1) + "," + str(KL2) + "," + str(KL1 + KL2) + "," + ";".join(maxM1FocusWords) + "," + ";".join(maxM2FocusWords)+"\n")
|
from functools import partial
from data_generator.job_runner import JobRunner
from epath import job_man_dir
# There is four-level hierarchy for generating data for robust
# 1. JobRunner : this is basic job runner
# 2. Worker : RobustWorker -> give range of queries to generator
# 3. Generator : RobustTrainGen, RobustPredictGen : Whether to make instance paired or not
# 4. Encoder : How the each query/document pair is encoded
from tlm.data_gen.adhoc_datagen import OverlappingSegmentsEx
from tlm.data_gen.robust_gen.dense import RobustDenseGen, RobustWorkerPerQuery
def generate_robust_all_seg_for_predict():
    """Configure and launch the dense robust predict-data generation job.

    Documents are split into overlapping 128-token segments (step 16) and
    one worker handles each query via RobustWorkerPerQuery.
    """
    seg_length = 128
    stride = 16
    segment_encoder = OverlappingSegmentsEx(seg_length, stride)
    generator = RobustDenseGen(segment_encoder, seg_length, "desc")
    make_worker = partial(RobustWorkerPerQuery, generator)
    job_count = 250
    runner = JobRunner(job_man_dir, job_count - 1, "robust_dense_desc_128", make_worker)
    runner.start()


if __name__ == "__main__":
    generate_robust_all_seg_for_predict()
|
# -*- coding: utf-8 -*-
import os

# Launch Google Chrome on Windows.
# BUG FIX: the path is now a raw string so the backslashes are not treated
# as escape sequences, and the mangled folder name "Program Filesx86" has
# been restored to the standard "Program Files (x86)" install location.
os.startfile(r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
from __future__ import print_function, division, absolute_import

from odin.utils import ArgController, stdio, get_logpath, get_modelpath

# Command-line configuration: dataset, training hyper-parameters, feature
# set-up and label mode (the help strings below list the accepted values).
args = ArgController(
).add('-ds', 'sami, estonia, finnish', 'estonia'
# for training
).add('-bs', 'batch size', 32
).add('-lr', 'learning rate', 0.0001
).add('-epoch', 'number of epoch', 8
# for features
).add('-feat', 'spec, mfcc, mspec', 'mfcc'
).add('-stack', 'whether stack the features or sequencing them', False
).add('-ctx', 'context, how many frames are grouped', 48
).add('-hop', 'together with context, number of shift frame', 1
).add('-mode', 'binary, laugh, all, emotion', 'binary'
).add('-bmode', 'batch mode: all, mul (multiple)', 'mul'
).add('-ncpu', 'number of CPU for feeder', 6
).parse()

# Identical name for model (encodes the argument combination)
MODEL_NAME = (args['ds'][:3] + args['feat'] + str(args['stack'])[0] + str(args['ctx']) +
              str(args['hop']) + args['mode'][:3] + '_' + args['bmode'])
# store log
stdio(path=get_logpath(name=MODEL_NAME, override=True))

# ODIN must be configured through the environment BEFORE importing odin's
# backend, which is why these imports come after the env assignment.
import os
os.environ['ODIN'] = 'float32,gpu,tensorflow,cnmem=0.2,seed=1208'
from six.moves import cPickle
import numpy as np
np.random.seed(1208)
from odin import backend as K, nnet as N, fuel as F
from odin import training, visual
from odin.utils import Progbar
from odin.basic import has_roles, WEIGHT, PARAMETER
from utils import get_data, laugh_labels, evaluate

# ===========================================================================
# Const
# ===========================================================================
# Pick output size, activation, loss and metrics for the chosen label mode.
if args['mode'] == 'binary':
    nb_classes = 1
    final_activation = K.sigmoid
    cost_func = K.binary_crossentropy
    score_func1 = K.binary_crossentropy
    score_func2 = K.binary_accuracy
    labels = range(2)
elif args['mode'] == 'laugh':
    nb_classes = 3
    final_activation = K.softmax
    cost_func = lambda x, y: K.bayes_crossentropy(x, y, nb_classes)
    score_func1 = K.categorical_crossentropy
    score_func2 = K.categorical_accuracy
    labels = range(3)
else:
    nb_classes = len(laugh_labels)
    final_activation = K.softmax
    cost_func = lambda x, y: K.bayes_crossentropy(x, y, nb_classes)
    score_func1 = K.categorical_crossentropy
    score_func2 = K.categorical_accuracy
    labels = range(nb_classes)
# ====== get data ====== #
print("MODEL NAME:", MODEL_NAME)
print("#Classes:", nb_classes)
print("Output Activation:", final_activation)
train, valid, test = get_data(args['ds'], args['feat'], args['stack'],
                              args['ctx'], args['hop'], args['mode'],
                              args['bmode'], args['ncpu'])
print('Train shape:', train.shape)
print('Valid shape:', valid.shape)
print('Test shape:', test.shape)
# prog = Progbar(train.shape[0])
# n = 0
# for X, y in train.set_batch(batch_size=32, seed=1208, shuffle_level=2):
#     print(X.shape, y.shape)
#     print(X)
#     print(y)
#     raw_input()
#     n += X.shape[0]
#     print(n, train.shape[0])
# ===========================================================================
# Create model
# ===========================================================================
X = K.placeholder(shape=(None,) + train.shape[1:], name='X')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
# Non-stacked features keep their 2D frame structure -> small CNN;
# stacked features are flat vectors -> plain MLP.
if not args['stack']:
    f = N.Sequence([
        N.Dimshuffle((0, 1, 2, 'x')),
        N.Conv(32, (3, 3), strides=1, pad='same', activation=K.relu),
        N.Pool(pool_size=2, mode='max'),
        N.BatchNorm(axes='auto'),
        N.Conv(64, (3, 3), strides=1, pad='same', activation=K.relu),
        N.Pool(pool_size=2, mode='max'),
        N.BatchNorm(axes='auto'),
        N.Conv(64, (3, 3), strides=1, pad='same', activation=K.relu),
        N.Pool(pool_size=2, mode='max'),
        N.BatchNorm(axes='auto'),
        N.Flatten(outdim=2),
        N.Dense(nb_classes, activation=final_activation),
    ], debug=True, name=MODEL_NAME)
else:
    f = N.Sequence([
        N.Dense(512, activation=K.relu),
        N.BatchNorm(axes=0),
        N.Dense(256, activation=K.relu),
        N.BatchNorm(axes=0),
        N.Dense(nb_classes, activation=final_activation),
    ], debug=True, name=MODEL_NAME)
# Build separate train/eval graphs (train mode enables dropout/BN updates).
K.set_training(1); y_pred_train = f(X)
K.set_training(0); y_pred_eval = f(X)
weights = [w for w in f.parameters if has_roles(w, WEIGHT)]
L1 = K.L1(weights)
L2 = K.L2(weights)
# ====== cost function ====== #
cost_train = K.mean(cost_func(y_pred_train, y))
cost_pred_1 = K.mean(score_func1(y_pred_eval, y))
cost_pred_2 = K.mean(score_func2(y_pred_eval, y))
# For the binary head the single sigmoid output is expanded to two columns
# so the confusion matrix sees per-class scores.
if args['mode'] == 'binary':
    y_pred_confuse = K.concatenate([1. - y_pred_eval, y_pred_eval], axis=-1)
else:
    y_pred_confuse = y_pred_eval
confusion_matrix = K.confusion_matrix(y_pred_confuse, y, labels=labels)
optimizer = K.optimizers.RMSProp(lr=args['lr'])
updates = optimizer.get_updates(cost_train,
                                [i for i in f.parameters])
print('Building train function ...')
f_train = K.function([X, y], cost_train, updates)
print('Building score function ...')
f_eval = K.function([X, y], [cost_pred_1, cost_pred_2, confusion_matrix])
print('Building pred function ...')
f_pred = K.function(X, y_pred_eval)
# ===========================================================================
# Create training
# ===========================================================================
print("Preparing main loop ...")
main = training.MainLoop(batch_size=args['bs'], seed=12082518, shuffle_level=2)
main.set_save(
    get_modelpath(name=MODEL_NAME, override=True),
    [f, args]
)
main.set_task(f_train, data=train,
              epoch=args['epoch'], name='Train')
# Validation runs twice per training epoch (freq=0.5).
main.set_subtask(f_eval, data=valid,
                 freq=0.5, name='Valid')
main.set_callback([
    training.ProgressMonitor(name='Train', format='Results: {:.4f}'),
    training.ProgressMonitor(name='Valid', format='Results: {:.4f}, {:.4f}',
                             tracking={2: lambda x: sum(x)}),
    training.NaNDetector(name='Train', patience=2, rollback=True),
    training.History(),
    training.EarlyStopGeneralizationLoss(name='Valid', threshold=5, patience=2),
])
main.run()
# ===========================================================================
# Visualization
# ===========================================================================
main['History'].print_batch('Train')
main['History'].print_epoch('Valid')
try:
    print('[Train] Benchmark batch:', main['History'].benchmark('Train', 'batch_end').mean)
    print('[Train] Benchmark epoch:', main['History'].benchmark('Train', 'epoch_end').mean)
    print('[Valid] Benchmark batch:', main['History'].benchmark('Valid', 'batch_end').mean)
    print('[Valid] Benchmark epoch:', main['History'].benchmark('Valid', 'epoch_end').mean)
except:
    pass
# ====== evaluation ====== #
evaluate(get_modelpath(name=MODEL_NAME, override=False))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 12:45:35 2021
@author: Hanuel
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

df = pd.read_csv('eda_data.csv')

# Things to do
# 1. Choose relevant columns
df.columns
df_model = df[['avg_salary','Rating', 'Size', 'Type of ownership', 'Industry', \
'Sector', 'Revenue', 'num_comp', 'hourly', 'employer_provided', \
'job_state', 'same_state', 'age', 'python_yn', 'spark', 'aws', 'excel', \
'job_simp', 'seniority', 'desc_len']]

# 2. Get dummy data (one-hot encode the categorical columns)
df_dum = pd.get_dummies(df_model)

# 3. Create train-test split
from sklearn.model_selection import train_test_split

X = df_dum.drop('avg_salary', axis = 1)
y = df_dum.avg_salary.values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 4. Make models (linear, lasso, rf)
# OLS via statsmodels for the coefficient summary
import statsmodels.api as sm

# NOTE(review): this rebinds X to the constant-augmented matrix AFTER the
# split above, so the split used the un-augmented X - confirm intended.
X_sm = X = sm.add_constant(X)
model = sm.OLS(y, X_sm)
model.fit().summary()

# Sklearn linear regression, scored by 3-fold CV negative MAE
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

lm = LinearRegression()
lm.fit(X_train, y_train)

np.mean(cross_val_score(lm, X_train, y_train, scoring = 'neg_mean_absolute_error', cv=3))

# Lasso Regression
from sklearn.linear_model import Lasso
lm_l = Lasso()
lm_l.fit(X_train, y_train)
np.mean(cross_val_score(lm_l, X_train, y_train, scoring = 'neg_mean_absolute_error', cv=3))

# Sweep the regularization strength from 0.01 to 0.99 and keep the CV error.
alpha = []
error = []

for i in range(1,100):
    alpha.append(i/100)
    lml = Lasso(alpha = (i/100))
    error.append(np.mean(cross_val_score(lml, X_train, y_train, scoring = 'neg_mean_absolute_error', cv=3)))

plt.plot(alpha, error)

# Best alpha = the one with the largest (least negative) CV score.
err = tuple(zip(alpha, error))
df_err = pd.DataFrame(err, columns = ['alpha', 'error'])
df_err[df_err.error == max(df_err.error)]

# Random Forest
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state = 42)

np.mean(cross_val_score(rf, X_train, y_train, scoring = 'neg_mean_absolute_error', cv=3))

# 5. Tune models using GridSearchCV
# NOTE(review): the 'mse'/'mae' criterion names were renamed to
# 'squared_error'/'absolute_error' in newer scikit-learn - confirm version.
from sklearn.model_selection import GridSearchCV
parameters = {'n_estimators': range(10, 100, 10),
              'criterion': ['mse', 'mae'],
              'max_features': ['auto', 'sqrt', 'log2']}

gs = GridSearchCV(rf, parameters, scoring = 'neg_mean_absolute_error', cv=3)
gs.fit(X_train, y_train)

gs.best_score_
gs.best_estimator_

# 6. Test samples: evaluate each model on the held-out split, plus a simple
# average ensemble of the linear and random-forest predictions.
tpred_lm = lm.predict(X_test)
tpred_lml = lm_l.predict(X_test)
tpred_rf = gs.best_estimator_.predict(X_test)

from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, tpred_lm)
mean_absolute_error(y_test, tpred_lml)
mean_absolute_error(y_test, tpred_rf)
mean_absolute_error(y_test, (tpred_lm + tpred_rf) / 2)

print(gs.best_estimator_)

# Persist the tuned random forest and sanity-check a reloaded prediction.
import pickle
regressor = gs.best_estimator_
pickle.dump(regressor, open("model.pkl", "wb"))

filename = 'model.pkl'
model = pickle.load(open('model.pkl','rb'))

model.predict(X_test.iloc[1,:].values.reshape(1, -1))

list(X_test.iloc[1,:])
|
Smarty Affix Zeroes
Smarty Affix Zeroes: The program must accept two integers M and N as the input. The program must print the integers from M to N with smarty affix zeroes as the output.
Boundary Condition(s):
1 <= M < N <= 10^8
Input Format:
The first line contains M and N separated by a space.
Output Format:
The first line contains the integers from M to N with smarty affix zeroes.
Example Input/Output 1:
Input:
8 103
Output:
008 009 010 011 012 013 014 015 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030 031 032 033 034 035 036 037 038 039 040 041 042 043 044 045 046 047 048 049 050 051 052 053 054 055 056 057 058 059 060 061 062 063 064 065 066 067 068 069 070 071 072 073 074 075 076 077 078 079 080 081 082 083 084 085 086 087 088 089 090 091 092 093 094 095 096 097 098 099 100 101 102 103
Example Input/Output 2:
Input:
990 1020
Output:
0990 0991 0992 0993 0994 0995 0996 0997 0998 0999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
# Read the range endpoints M and N and print every integer in [M, N]
# left-padded with zeroes to the width of the largest value.
lo, hi = map(int, input().split())
width = len(str(hi))
for value in range(lo, hi + 1):
    print(f"{value:0{width}d}", end=" ")
|
import cv2
import numpy as np
import neurolab as nl

# Print numpy arrays without scientific notation.
np.set_printoptions(suppress=True)

# Debug helper: show an image in a window and block until a key is pressed.
def mostrar(img):
    cv2.imshow('detected circles',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Crop the lemon out of the image: detect a circle with the Hough transform
# and return a 30x30 resized crop around it; returns [0] when nothing is
# found. (Python 2 script - note the print statement below.)
def cortadora(img):
    aux = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    cimg = img
    circles = cv2.HoughCircles(aux,cv2.HOUGH_GRADIENT,1,2000, param1=59,param2=50,minRadius=50,maxRadius=10000)
    #cv2.HoughCircles
    circles = np.uint16(np.around(circles))
    # NOTE(review): HoughCircles returns None when no circle is detected, so
    # np.around above would raise before this length check fires - confirm.
    if len(circles) != 0:
        # circle layout: i[0]=center x, i[1]=center y, i[2]=radius
        for i in circles[0,:]:
            # NOTE(review): these slice bounds are floats (radius * 0.72);
            # modern numpy requires integer indices - this only worked on
            # older numpy/Python 2.
            crop_cimg = cimg[(i[1]-i[2]*0.72):(i[1]+i[2]*0.72), (i[0]-i[2]*0.72):(i[0]+i[2]*0.72)]
            final = cv2.resize(crop_cimg,(30, 30), interpolation = cv2.INTER_CUBIC)
            return final
    else:
        print "No se ha encontrado ningun limon"
        return [0]
def colores(linea):
    """Count how many pixels of `linea` fall into each of five ripeness
    buckets, returning the counts as a length-5 numpy array.

    Each pixel is indexed as pixel[2] / pixel[1] (two colour channels);
    buckets are tried in order and a pixel is counted in the first one
    whose two channel ranges both match. Pixels matching no bucket are
    ignored.
    """
    # (chan2_lo, chan2_hi, chan1_lo, chan1_hi), checked in this order.
    buckets = [
        (20, 30, 40, 70),
        (30, 60, 70, 120),
        (60, 120, 120, 140),
        (120, 200, 140, 200),
        (0, 20, 0, 40),
    ]
    counts = [0, 0, 0, 0, 0]
    for pixel in linea:
        chan2 = pixel[2]
        chan1 = pixel[1]
        for idx, (lo2, hi2, lo1, hi1) in enumerate(buckets):
            if lo2 <= chan2 <= hi2 and lo1 <= chan1 <= hi1:
                counts[idx] += 1
                break
    return np.array(counts)
# Translate the network's five-way output into a (Spanish) ripeness message;
# a bucket's message is printed when its activation is at least 0.95.
# Index order: 0=ripe, 1=good, 2=past, 3=very past, 4=rotten (per the
# printed messages below). Python 2 print statements.
def interprete(resul):
    if resul[0] >= 0.95:
        print "El limon se encuentra en estado maduro"
        print "Espere 5 dias para consumirlo"
    if resul[1] >= 0.95:
        print "El limon se encuentra en estado bueno"
        print "Le qudan 8 dias para consumirlo en estado optimo"
    if resul[2] >= 0.95:
        print "El limon se encuentra en estado pasado"
        print "Le qudan 3 dias para consumirlo antes de que se pase"
    if resul[3] >= 0.95:
        print "El limon se encuentra en estado muy pasado"
        print "Se recomienda ya no consumirlo, aunque si lo hace debe ser inmediatamente"
    if resul[4] >= 0.95:
        print "El limon se encuentra en estado podrido"
        print "No consuma este limon"
# Load the sample image, crop the lemon and flatten its pixels.
im1 = cv2.imread('a5.jpg',1)
out = cortadora(im1)
#mostrar(out)
listaux = []
# cortadora returns [0] (length 1) when no lemon was found.
if len(out) != 1:
    for j in out:
        for k in j:
            listaux.append(k)
    listaux = np.array( listaux )
    # Histogram the pixels into the five colour buckets.
    fin = np.array( colores(listaux) )
    #print fin
    # Load the trained neurolab network and classify the bucket counts.
    net = nl.load("funcional.net")
    resultado = net.sim([fin])
    print resultado
    interprete(resultado[0])
#
|
import os
import numpy as np
from edflow.data.believers.meta import MetaDataset
from edflow.data.believers.meta_util import store_label_mmap
from edflow.util import retrieve, get_obj_from_str, walk
from tqdm.autonotebook import tqdm
class MetaViewDataset(MetaDataset):
"""The :class:`MetaViewDataset` implements a way to render out a view of a
base dataset without the need to rewrite/copy the load heavy data in the
base dataset.
To use the MetaViewDataset you need to define two things:
1. A base dataset as import string in the ``meta.yaml`` file. Use the
key ``base_dset`` for this. This should preferably be a function or
class, which is passed the kwargs ``base_kwargs`` as defined in the
``meta.yaml``..
2. A view in the form of a numpy ``memmap`` or a nested object of
``dict``s and ``list``s with ``memmaps`` at the leaves, each
storing the indices used for the view in this dataset. The arrays
can be of any dimensionality, but no value must be outside the
range ``[0, len(base dataset)]`` and they must all be of the same
length.
The dimensionality of the view is reflected in the nestednes of the
resulting examples.
### Example
You have a base dataset, which contains video frames. It has length ``N``.
Say you want to have a combination of two views on your dataset: One
contains all ``M`` possible subsequences of length 5 of videos contained in
the dataset and one contains an appearance image per each example with the
same person as in the sequence.
All you need is to define two numpy arrays, one with the indices belonging
to the sequenced frames and one with indices of examples of the appearence
images. They should look something like this:
.. code-block:: python
# Sequence indices
seq_idxs = [[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
...
[N-4, N-3, N-2, N-1, N],
print(seq_idxs.shape) # [M, 5]
# Sequence indices
app_idxs = [12,
12,
15,
10,
..
109],
print(app_idxs.shape) # [M]
Knowing your views, create a folder, where you want to store your view
dataset, i.e. at some path ``ROOT``. Create a folder ``ROOT/labels`` and
store the views according to the label naming scheme as defined in the
:class:`MetaDataset`. You can use the function
:func:`edflow.data.believers.meta_util.store_label_mmap` for this.
You can also store the views in any subfolder of labels, which might come
in handy if you have a lot of labels and want to keep things clean.
Finally create a file ``ROOT/meta.yaml``.
Our folder should look something like this:
.. code-block:: bash
ROOT/
├ labels/
│ ├ app_view-*-{M}-*-int64.npy
│ └ seq_view-*-{M}x5-*-int64.npy
└ meta.yaml
Now let us fill the ``meta.yaml``. All we need to do is specify the base
dataset and how we want to use our views:
.. code-block:: yaml
# meta.yaml
description: |
This is our very own View on the data.
Let's have fun with it!
base_dset: import.path.to.dset_object
base_kwargs:
stuff: needed_for_construction
views:
appearance: app_view
frames: seq_view
Now we are ready to construct our view on the base dataset!
Use ``.show()`` to see how the dataset looks like. This works especially
nice in a jupyter notebook.
.. code-block:: python
ViewDset = MetaViewDataset('ROOT')
print(ViewDset.labels.keys()) # ['appearance', 'frames']
print(len(ViewDset)) # {M}
ViewDset.show() # prints the labels and the first example
"""
def __init__(self, root):
super().__init__(root)
base_import = retrieve(self.meta, "base_dset")
base_kwargs = retrieve(self.meta, "base_kwargs")
self.base = get_obj_from_str(base_import)(**base_kwargs)
self.base.append_labels = False
views = retrieve(self.meta, "views", default="view")
def get_label(key):
return retrieve(self.labels, key)
self.views = walk(views, get_label)
if not os.path.exists(os.path.join(root, ".constructed.txt")):
def constructor(name, view):
folder_name = name
savefolder = os.path.join(root, "labels", folder_name)
os.makedirs(savefolder, exist_ok=True)
for key, label in tqdm(
self.base.labels.items(), desc=f"Exporting View {name}"
):
savepath = os.path.join(root, "labels", name)
label_view = np.take(label, view, axis=0)
store_label_mmap(label_view, savepath, key)
walk(self.views, constructor, pass_key=True)
with open(os.path.join(root, ".constructed.txt"), "w+") as cf:
cf.write(
"Do not delete, this reduces loading times.\n"
"If you need to re-render the view, you can safely "
"delete this file."
)
# Re-initialize as we need to load the labels again.
super().__init__(root)
def get_example(self, idx):
    """Return the base-dataset example selected by ``view[idx]``.

    Any loader outputs are merged into the returned example.
    """
    def index_view(view):
        return view[idx]

    selected = walk(self.views, index_view)
    example = walk(selected, self.base.__getitem__, walk_np_arrays=True)
    if len(self.loaders) > 0:
        example.update(super().get_example(idx))
    return example
|
# Calories burned at 4.2 kcal/minute for 10..30 minutes in 5-minute steps.
for minute in (10, 15, 20, 25, 30):
    calories = 4.2 * minute
    print('Congrats! For ', minute,"minutes you've burned", calories, 'calories')
#!/usr/bin/env python
try:
import paramiko
except ImportError:
print "Cannot import 'Paramiko' - please check that it's installed"
print "Exiting..."
sys.exit()
import getpass
import os
import sys
server = raw_input("Server: ")
username = raw_input("Username: ")
password = getpass.getpass()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname=server, username=username, password=password)
stdin, stdout, stderr = ssh.exec_command("cat /proc/interrupts")
for line in stdout.readlines():
print line.strip()
print "Done. Closing."
ssh.close()
except paramiko.AuthenticationException:
print "Authentication Failed"
sys.exit()
|
import os
from pydrive.auth import GoogleAuth, AuthenticationError
from pydrive.drive import GoogleDrive
from pydrive.drive import GoogleDriveFile as GDFile
from zipper import zip_all, extract
def start():
    """Initializes the Google Drive object that acts as a filesystem abstraction"""
    gauth = GoogleAuth()
    # Try to load saved client credentials from disk.
    gauth.LoadCredentialsFile("gdrive_credentials.txt")
    try:
        gauth.Authorize()
    except AuthenticationError:
        # No valid cached credentials: fall back to the local web flow.
        gauth.LocalWebserverAuth()
    # Persist the (possibly refreshed) credentials for the next run.
    gauth.SaveCredentialsFile("gdrive_credentials.txt")
    return GoogleDrive(gauth)
def get_drive_data(drive):
    """Return Drive account metadata via pydrive's ``GetAbout()`` call."""
    return drive.GetAbout()
def get_folder_id(drive):
    """Return the Drive id of the CFS_Manager root folder, creating it if absent."""
    root_entries = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
    for entry in root_entries:
        if entry['title'] == 'CFS_Manager':
            return entry['id']
    # No such folder yet: create it and return the fresh id.
    folder = drive.CreateFile({'title': 'CFS_Manager',
                               "mimeType": "application/vnd.google-apps.folder"})
    folder.Upload()
    return folder['id']
def get_all_files(folder, drive):
    """Return every non-trashed file under *folder* as a list of metadata dicts."""
    children_query = "'{}' in parents and trashed=false".format(folder)
    return drive.ListFile({'q': children_query}).GetList()
def get_file(all_files, field, value):
    """Return the first file whose *field* equals *value*.

    Prints a notice and returns ``None`` when nothing matches.
    """
    match = next((entry for entry in all_files if entry[field] == value), None)
    if match is None:
        print("No file with "+ field +" of "+ value)
    return match
def upload_archives(currdir, folder_id, drive):
    """Zip everything in *currdir* and upload each archive into *folder_id*."""
    for archive in zip_all(currdir):
        remote = GDFile()
        remote.SetContentFile(archive.filename)
        # Title the remote file after the archive's basename.
        remote['title'] = os.path.split(archive.filename)[1]
        remote['parents'] = [{'id': folder_id}]
        remote.Upload()
def remove_file(file):
    """Move *file* to the Drive trash, tolerating a missing file object."""
    try:
        file.Trash()
    except AttributeError:
        # No real file object was passed, so .Trash() is undefined on it.
        print("No file", file, "exists")
def download_file(file, filename):
    """Downloads a file (including zipped directories) from GDrive into cfs-m's swap folder"""
    swap_dir = os.path.join(os.getcwd(), "file_swap")
    os.makedirs(swap_dir, exist_ok=True)
    destination = os.path.join("file_swap", filename)
    try:
        file.GetContentFile(destination)
        return extract(destination)
    except AttributeError:
        # The requested file doesn't exist; nothing to download.
        pass
def inspect(file):
    """Summarise a file's metadata as ``"key : value"`` display strings.

    Dict-valued entries get a trailing ``" (dict)"`` marker.

    :param file: mapping of metadata keys to values.
    :return: list of display strings, one per key.
    """
    info = []
    for key in file:
        statement = str(key) + " : " + str(file[key])
        # isinstance is the idiomatic type check; the original used
        # ``type(...) == dict``, which also rejects dict subclasses.
        if isinstance(file[key], dict):
            statement += " (dict)"
        info.append(statement)
    return info
"""Model for the ISIS ISO2709-based file format.
This file format specification can be found at:
https://wiki.bireme.org/pt/img_auth.php/5/5f/2709BR.pdf
"""
from collections import defaultdict
from itertools import accumulate
from construct import Array, Bytes, Check, Computed, \
Const, Container, Default, ExprAdapter, \
FocusedSeq, Prefixed, RawCopy, \
Rebuild, Select, Struct, Terminated
from .ccons import IntASCII, LineSplitRestreamed, \
DEFAULT_LINE_LEN, DEFAULT_NEWLINE
from .fieldutils import con_pairs, DEFAULT_FTF_TEMPLATE, FieldTagFormatter
from .streamutils import should_be_file, TightBufferReadOnlyBytesStreamWrapper
# Default terminators and text encoding for this ISO2709 dialect.
DEFAULT_FIELD_TERMINATOR = b"#"
DEFAULT_RECORD_TERMINATOR = b"#"
DEFAULT_ISO_ENCODING = "cp1252"
# Tag formatter keeping tags as formatted strings rather than ints.
DEFAULT_ISO_FTF = FieldTagFormatter(DEFAULT_FTF_TEMPLATE, int_tags=False)
TOTAL_LEN_LEN = 5  # digits of the leading total-record-length field
LEADER_LEN = TOTAL_LEN_LEN + 19  # full leader size in bytes
TAG_LEN = 3  # digits per directory tag
DEFAULT_LEN_LEN = 4  # digits of each directory "field length" entry
DEFAULT_POS_LEN = 5  # digits of each directory "field position" entry
DEFAULT_CUSTOM_LEN = 0  # extra custom bytes per directory entry
def create_record_struct(
    field_terminator=DEFAULT_FIELD_TERMINATOR,
    record_terminator=DEFAULT_RECORD_TERMINATOR,
    line_len=DEFAULT_LINE_LEN,
    newline=DEFAULT_NEWLINE,
):
    """Create a construct parser/builder for a whole record object.

    The result handles one ISO2709 record: the leader (including the
    5-digit total length prefix), the directory of per-field
    (tag, length, position) entries, and the terminated field data.
    If ``line_len`` is set, the byte stream is additionally split into
    fixed-width lines separated by ``newline``.
    """
    ft_len = len(field_terminator)
    prefixless = Struct(
        # Build time pre-computed information
        # (these Computed helpers are None while parsing, when the
        # container has no "fields" entry yet).
        "_build_len_list" / Computed(
            lambda this: None if "fields" not in this else
            [len(field) + ft_len for field in this.fields]
        ),
        "_build_pos_list" / Computed(
            lambda this: None if "fields" not in this else
            list(accumulate([0] + this._build_len_list))
        ),
        "_build_dir_len" / Computed(
            lambda this: None if "fields" not in this else
            len(this.fields) * (
                TAG_LEN
                + this.get("len_len", DEFAULT_LEN_LEN)
                + this.get("pos_len", DEFAULT_POS_LEN)
                + this.get("custom_len", DEFAULT_CUSTOM_LEN)
            )
        ),
        # Record leader/header (apart from the leading total_len)
        "status" / Default(Bytes(1), b"0"),
        "type" / Default(Bytes(1), b"0"),
        "custom_2" / Default(Bytes(2), b"00"),
        "coding" / Default(Bytes(1), b"0"),
        "indicator_count" / Default(IntASCII(1), 0),
        "identifier_len" / Default(IntASCII(1), 0),
        # Offset where field data starts: leader + directory + terminator.
        "base_addr" / Rebuild(IntASCII(5),
            lambda this: LEADER_LEN + this._build_dir_len + ft_len),
        "custom_3" / Default(Bytes(3), b"000"),
        # Directory entry map (trailing part of the leader)
        "len_len" / Default(IntASCII(1), DEFAULT_LEN_LEN),
        "pos_len" / Default(IntASCII(1), DEFAULT_POS_LEN),
        "custom_len" / Default(IntASCII(1), DEFAULT_CUSTOM_LEN),
        "reserved" / Default(Bytes(1), b"0"),
        # The ISO leader/header doesn't have the number of fields,
        # but it can be found from the base address
        "num_fields" / Computed(lambda this:
            (this.base_addr - LEADER_LEN - ft_len) //
            (TAG_LEN + this.len_len + this.pos_len + this.custom_len)
        ),
        Check(lambda this:
            "fields" not in this or this.num_fields == len(this.fields)
        ),
        # Directory
        "dir" / Struct(
            "tag" / Bytes(TAG_LEN),
            "len" / Rebuild(IntASCII(lambda this: this._.len_len),
                lambda this: this._._build_len_list[this._index]),
            "pos" / Rebuild(IntASCII(lambda this: this._.pos_len),
                lambda this: this._._build_pos_list[this._index]),
            "custom" / Default(Bytes(lambda this: this._.custom_len),
                lambda this: b"0" * this._.custom_len),
        )[lambda this: this.num_fields],
        # Directory entries must be contiguous: each field starts where
        # the previous one ended.
        Check(lambda this: this.num_fields == 0 or (
            this.dir[0].pos == 0 and
            all(
                this.dir[idx + 1].pos == entry.pos + entry.len
                for idx, entry in enumerate(this.dir[:-1])
            )
        )),
        Const(field_terminator),
        # We should now be exactly at the announced base address.
        Check(lambda this: this._io.tell() + TOTAL_LEN_LEN == this.base_addr),
        # Field data
        "fields" / Array(
            lambda this: this.num_fields,
            FocusedSeq(
                "value",
                "value" / Bytes(
                    lambda this: this._.dir[this._index].len - ft_len
                ),
                Const(field_terminator),
            ),
        ),
        # There should be no more data belonging to this record
        Const(record_terminator),
    )
    # This includes (and checks) the total_len prefix
    result = ExprAdapter(
        RawCopy(Prefixed(
            lengthfield=IntASCII(TOTAL_LEN_LEN),
            subcon=prefixless,
            includelength=True,
        )),
        lambda obj, ctx: Container(total_len=obj.length, **obj.value),
        lambda obj, ctx: {"value": obj},
    )
    if line_len is None or line_len == 0:
        return result
    return LineSplitRestreamed(result, line_len=line_len, newline=newline)


# Module-wide default record parser/builder (default terminators/lengths).
DEFAULT_RECORD_STRUCT = create_record_struct()
@should_be_file("iso_file")
def iter_con(iso_file, record_struct=DEFAULT_RECORD_STRUCT):
    """Generator of records as parsed construct objects."""
    # At each step, either a full record or a clean end-of-stream matches.
    struct_or_eof = Select(record_struct, Terminated)
    while True:
        wrapped = TightBufferReadOnlyBytesStreamWrapper(iso_file)
        parsed = struct_or_eof.parse_stream(wrapped)
        if parsed is None:
            return  # Terminated matched: no more records
        yield parsed
def iter_records(iso_file, encoding=DEFAULT_ISO_ENCODING, **kwargs):
    """Generator of records as dictionaries."""
    for record_con in iter_con(iso_file, **kwargs):
        yield con2dict(record_con, encoding=encoding)
def iter_raw_tl(iso_file, *,
                only_active=True, prepend_mfn=False, prepend_status=False,
                ftf=DEFAULT_ISO_FTF,
                record_struct=DEFAULT_RECORD_STRUCT):
    """Generator of records as lists of raw ``(tag, field)`` bytes pairs.

    :param only_active: skip records whose status isn't b"0" (active).
    :param prepend_mfn: prepend a (b"mfn", b"<number>") pair.
    :param prepend_status: prepend a (b"status", <status byte>) pair.
    """
    containers = iter_con(iso_file, record_struct=record_struct)
    for mfn, con in enumerate(containers, 1):
        if only_active and con.status != b"0":
            continue
        result = []
        if prepend_mfn:
            result.append((b"mfn", b"%d" % mfn))
        if prepend_status:
            # ``con.status`` is already a one-byte bytes object (see the
            # record struct); the original ``b"%d" % con.status`` raised
            # TypeError because bytes %d formatting requires an integer.
            result.append((b"status", con.status))
        result.extend(con_pairs(con, ftf=ftf))
        yield result
def iter_tl(iso_file, encoding=DEFAULT_ISO_ENCODING, **kwargs):
    """Like ``iter_raw_tl``, but with tags/fields decoded to ``str``."""
    for pairs in iter_raw_tl(iso_file, **kwargs):
        yield [(raw_tag.decode("ascii"), raw_field.decode(encoding))
               for raw_tag, raw_field in pairs]
def con2dict(con, encoding=DEFAULT_ISO_ENCODING, ftf=DEFAULT_ISO_FTF):
    """Parsed construct object to dictionary record converter."""
    record = defaultdict(list)
    for raw_tag, raw_field in con_pairs(con, ftf=ftf):
        record[raw_tag.decode("ascii")].append(raw_field.decode(encoding))
    return record
def dict2bytes(
    data,
    encoding=DEFAULT_ISO_ENCODING,
    record_struct=DEFAULT_RECORD_STRUCT,
):
    """Encode/build the raw ISO string from a single dict record."""
    directory, fields = [], []
    for tag, values in data.items():
        # Tags are zero-padded to three ASCII characters.
        encoded_tag = tag.encode("ascii").zfill(3)
        for value in values:
            directory.append({"tag": encoded_tag})
            fields.append(value.encode(encoding))
    return record_struct.build({"dir": directory, "fields": fields})
|
"""
This problem was asked by Microsoft.
Given a string and a pattern, find the starting indices of all occurrences of the pattern in the string.
For example, given the string "abracadabra" and the pattern "abr", you should return [0, 7].
"""
def startIndicesOfPattern(s, pattern):
    """Return the start indices of every occurrence of *pattern* in *s*.

    Overlapping matches are included.
    """
    window = len(pattern)
    return [i for i in range(len(s) - window + 1)
            if s.startswith(pattern, i)]
if __name__ == "__main__":
    # Interactive demo: prompt for the string, then the pattern.
    text = input("Enter the string: ")
    pattern = input("Enter the pattern: ")
    print(startIndicesOfPattern(text, pattern))
|
from collections import Counter
import re
import pandas as pd
def bacaKamus(isi):
    """Tokenise *isi* into lowercase word tokens (runs of \\w characters)."""
    lowered = isi.lower()
    return re.findall(r'\w+', lowered)
# dataset=pd.read_csv('kata_dasar_kbbi.csv')
# isidata=dataset.iloc[:,0].values

# Word-frequency table built from the bundled Indonesian corpus.
# A context manager closes the handle promptly; the original called
# open(...).read() and left the file object to the garbage collector.
with open('kamusindo.txt', encoding="utf8") as _corpus:
    kamus = Counter(bacaKamus(_corpus.read()))
def prob(kata, N=sum(kamus.values())):
    """Return the relative frequency of *kata* in the dictionary.

    ``N`` is evaluated once at import time, so it reflects the corpus
    size when the module was loaded, not later additions to ``kamus``.
    """
    # Probability of the input word.
    return kamus[kata] / N
def koreksi(kata):
    """Return the most probable correction for each whitespace-separated word.

    The result carries a trailing space after the last word, matching
    the original implementation.
    """
    corrected = [max(kandidatKoreksi(word), key=prob)
                 for word in kata.split()]
    return "".join(word + " " for word in corrected)
def kandidatKoreksi(kata):
    """Generate possible spelling corrections for word."""
    # Prefer the word itself, then 1-edit, then 2-edit candidates that
    # appear in the dictionary; fall back to the word unchanged.
    for candidates in ([kata], edits1(kata), edits2(kata)):
        known = cekKata(candidates)
        if known:
            return known
    return [kata]
def cekKata(kata):
    """Return the subset of *kata* that appears in the dictionary."""
    return {w for w in kata if w in kamus}
def edits1(kata):
    """All strings one edit (delete/transpose/replace/insert) from *kata*."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    results = set()
    for i in range(len(kata) + 1):
        head, tail = kata[:i], kata[i:]
        if tail:
            results.add(head + tail[1:])                      # delete
            for ch in alphabet:                               # replace
                results.add(head + ch + tail[1:])
        if len(tail) > 1:                                     # transpose
            results.add(head + tail[1] + tail[0] + tail[2:])
        for ch in alphabet:                                   # insert
            results.add(head + ch + tail)
    return results
def edits2(word):
    """Lazily yield all strings two edits away from *word*."""
    for once in edits1(word):
        for twice in edits1(once):
            yield twice
def addKataKeKamus(kata):
    """Append *kata* (space-padded) to the on-disk dictionary file.

    The ``with`` block closes the file automatically; the original also
    called ``close()`` explicitly inside the block, which was redundant.
    """
    with open("kamusindo.txt", "a") as fileku:
        fileku.write(" %s " % kata)
from manimlib.imports import *
class LS(Scene):
    """Animate a 2x2 matrix, its transpose, and a row-space combination."""

    @staticmethod
    def _make_tex(spec):
        """Build one TextMobject from a (tex, color, scale, position) spec."""
        tex, color, scale_factor, position = spec
        mob = TextMobject(tex)
        if scale_factor != 1:
            mob.scale(scale_factor)
        mob.set_color(color)
        mob.move_to(position)
        return mob

    def construct(self):
        shift = 2 * RIGHT

        # Matrix A: caption, brackets, and the two colored rows.
        a_mobs = [self._make_tex(spec) for spec in (
            (r"Consider a matrix $A =$", DARK_BLUE, 1, 3.5 * LEFT + 3 * UP + shift),
            (r"[", DARK_BLUE, 2, 0.75 * LEFT + 3 * UP + shift),
            (r"$\begin{array}{c c} 1 & -2\end{array}$", PURPLE, 1, 3.25 * UP + shift),
            (r"$\begin{array}{c c} 1 & -1\end{array}$", YELLOW, 1, 2.75 * UP + shift),
            (r"]", DARK_BLUE, 2, 0.75 * RIGHT + 3 * UP + shift),
        )]
        self.play(*[FadeIn(mob) for mob in a_mobs])
        self.wait()

        # A^T: same pieces with transposed entries, one row lower.
        t_mobs = [self._make_tex(spec) for spec in (
            (r"$A^T =$", DARK_BLUE, 1, 2 * LEFT + 1.5 * UP + shift),
            (r"[", DARK_BLUE, 2, 1 * LEFT + 1.5 * UP + shift),
            (r"$\begin{array}{c} 1 \\ -2\end{array}$", PURPLE, 1, 0.5 * LEFT + 1.5 * UP + shift),
            (r"$\begin{array}{c} 1 \\ -1\end{array}$", YELLOW, 1, 0.5 * RIGHT + 1.5 * UP + shift),
            (r"]", DARK_BLUE, 2, 1 * RIGHT + 1.5 * UP + shift),
        )]
        self.play(*[FadeIn(mob) for mob in t_mobs])

        # Row space of A written as a combination of the columns of A^T.
        row_space = TextMobject(
            r"Row Space of $A$ = Column Space of $A^T = a_1$",
            r"$\left[\begin{array}{c} 1 \\ -2\end{array}\right]$",
            r"$+a_2$",
            r"$\left[\begin{array}{c} 1 \\ -1\end{array}\right]$",
        )
        row_space[1].set_color(PURPLE)
        row_space[3].set_color(YELLOW)
        row_space.move_to(2 * DOWN + 1.5 * LEFT)
        row_space.scale(0.75)
        self.play(Write(row_space))
        self.wait()

        # Arrows pointing from the transpose down to the matching columns.
        col_arrows = [
            Arrow(start=1.5 * RIGHT + UP, end=1.25 * (DOWN + RIGHT)),
            Arrow(start=2.5 * RIGHT + UP, end=1.25 * DOWN + 3.25 * RIGHT),
        ]
        for arrow, color in zip(col_arrows, (PURPLE, YELLOW)):
            arrow.scale(1.25)
            arrow.set_color(color)
        self.play(*[ShowCreation(arrow) for arrow in col_arrows])
        self.wait(2)
|
from plotnine import *
import pandas as pd
kROOT_DIR = "2019_acl_modularity"
def datadir(filename):
    """Return the path of *filename* inside the project's data folder."""
    return "{}/data/{}".format(kROOT_DIR, filename)
def gfxdir(filename):
    """Return the path of *filename* inside the generated-figures folder."""
    return "{}/auto_fig/{}".format(kROOT_DIR, filename)
def plot_ablation():
    """Scatter predicted vs. actual accuracy for the ablation study."""
    frame = pd.read_csv(datadir("ablation_study.csv"))
    fig = ggplot(frame, aes(x="Actual Classification Accuracy",
                            y="Predicted Accuracy",
                            shape="lang", color="factor(dim)"))
    fig += geom_point(size=4)
    fig += geom_abline(intercept=0, slope=1)  # y = x reference line
    fig += xlim(0.0, 1.0)
    fig += ylim(0.0, 1.0)
    fig += theme(text=element_text(size=14))
    fig += guides(color=guide_legend(title="dim"))
    fig += facet_wrap("~label", ncol=2)
    # Annotate each facet with its r^2 value.
    fig += geom_text(aes(x=0.25, y=0.9, label="r_sq"), color="black", size=14, stat="unique")
    fig.save(filename=gfxdir("ablation_study.pdf"), format="pdf", height=5, width=7)
def plot_hyperparam_k():
    """Line plot of correlation strength as the k-NN hyperparameter varies."""
    frame = pd.read_csv(datadir("k_nn_hyperparam.csv"))
    fig = ggplot(frame, aes(x="k", y="Absolute Correlation", group="Correlation Type"))
    fig += geom_line(aes(linetype="Correlation Type", color="Correlation Type"))
    fig += geom_point(aes(shape="Correlation Type", color="Correlation Type"), size=4)
    fig += labs(title="")
    fig += theme(legend_position="top")
    fig += theme(legend_title=element_text(text=""))
    fig += theme(legend_entry_spacing=10)
    fig += theme(text=element_text(size=16))
    fig.save(filename=gfxdir("k_nn_diff_k_corr.pdf"), format="pdf", height=5, width=7)
def plot_bli_corr():
    """Scatter of BLI precision@1 against modularity, with a linear fit."""
    frame = pd.read_csv(datadir("corr_bli.csv"))
    frame["Dim"] = frame["Dim"].astype(str)  # treat dimension as a category
    fig = ggplot(frame, aes(x="Precision@1", y="Modularity"))
    fig += geom_point(aes(shape="Lang", color="Dim"), size=3)
    fig += ylim(0.35, 0.7)
    fig += geom_smooth(method='lm', se=False)
    fig += theme(text=element_text(size=18))
    fig.save(filename=gfxdir("corr_bli.pdf"), format="pdf")
def plot_cldc_corr():
    """Scatter of CLDC accuracy against modularity, with a linear fit."""
    frame = pd.read_csv(datadir("corr_cldc.csv"))
    frame["Dim"] = frame["Dim"].astype(str)  # treat dimension as a category
    fig = ggplot(frame, aes(x="Classification Accuracy", y="Modularity"))
    fig += geom_point(aes(shape="Lang", color="Dim"), size=3)
    fig += ylim(0.35, 0.7)
    fig += geom_smooth(method='lm', se=False)
    fig += theme(text=element_text(size=18))
    fig.save(filename=gfxdir("corr_cldc.pdf"), format="pdf")
def plot_tsne():
    """Annotated 2-D embedding snippet of English/Japanese word vectors."""
    frame = pd.read_csv(datadir("en_jp_w2v_snippet.csv"))
    fig = ggplot(frame, aes(x="dim0", y="dim1", fill="lang", label="word"))
    fig += geom_point(size=5, show_legend=False)
    fig += geom_text(size=14, position=position_nudge(y=0.085, x=0.03),
                     show_legend=False, color="black")
    fig += theme(text=element_text(size=14, family="Arial Unicode MS"),
                 axis_title_x=element_blank(), axis_title_y=element_blank())
    fig += xlim(-1.01, 1.0)
    fig += scale_fill_manual(values=("#FFFFFF", "#FFA07A"))
    fig.save(filename=gfxdir("en_jp_w2v_snippet.pdf"), format="pdf", height=5, width=7)
# Render every figure when the module is executed.
for _plot in (plot_ablation, plot_hyperparam_k, plot_bli_corr,
              plot_cldc_corr, plot_tsne):
    _plot()
|
from tkinter import *
import tkinter.simpledialog
class Field(Frame):
    """One labeled entry row backed by a Tk variable."""

    def __init__(self, parent, name, value=None, label=None, var_type=StringVar, state=NORMAL):
        Frame.__init__(self, parent)
        self.name = name
        # Fall back to the field name when no explicit label is given.
        self.label = name if label is None else label
        self.state = state
        # Tk variable mirroring the entry's content.
        self.var = var_type(self)
        # Create and cache the sub-widgets.
        self._label = Label(self, text=self.label)
        self._entry = Entry(self, textvariable=self.var, state=state)
        if value is not None:
            self.set(value)

    def get(self):
        """Return the field's current value."""
        return self.var.get()

    def set(self, val):
        """Set the field's value."""
        self.var.set(val)

    def bind(self, *args, **kwargs):
        """Forward event bindings to the inner entry widget."""
        self._entry.bind(*args, **kwargs)

    def pack(self, label_width=None, *args, **kwargs):
        """Lay out label (left) and growing entry (right), then pack the frame."""
        width = len(self.label) if label_width is None else label_width
        self._label.config(width=width)
        self._label.pack(side=LEFT)
        self._entry.pack(side=RIGHT, expand=YES, fill=X)
        Frame.pack(self, *args, **kwargs)
class VariableForm(Frame):
    """A vertical stack of Field rows with dict-style access by field name."""

    def __init__(self, parent, title=None, default_field_state=NORMAL):
        Frame.__init__(self, parent)
        self.default_field_state = default_field_state
        if not title is None:
            Label(self, text=title).pack(side=TOP, fill=X)
        self.fields = []      # fields in creation order
        self.field_dict = {}  # name -> Field lookup

    def new_field(self, *args, **kwargs):
        """Create a Field, defaulting its state to the form-wide default."""
        # Use default state if not specified.
        if kwargs.get('state', None) is None:
            kwargs['state'] = self.default_field_state
        field = Field(self, *args, **kwargs)  # create Field sub-frame
        self.fields.append(field)             # cache in order
        self.field_dict[field.name] = field   # lookup by name

    def __getitem__(self, key):
        """Return the value of the field named *key*."""
        field = self.field_dict[key]
        return field.get()

    def __setitem__(self, key, val):
        """Set the value of the field named *key*."""
        field = self.field_dict[key]
        field.set(val)

    def to_dict(self):
        """Snapshot all field values as a plain dict."""
        return dict([(name, field.get()) for name, field in list(self.field_dict.items())])

    def get_field_widget(self, name):
        """Return the Field widget registered under *name*."""
        return self.field_dict[name]

    def bind_field_override(self, field_name, binding,
                            command=lambda field: None):
        """On *binding*, pop a float dialog that overrides the field's value."""
        field = self.get_field_widget(field_name)
        msg = "Enter new value for field '%s':" % field.label
        wm_title = 'Override Field'
        # Create override dialog wrapper.
        def override_func(event):
            val = tkinter.simpledialog.askfloat(wm_title, msg, parent=self)
            if not val is None:
                field.set(val)
                # Run the command function.
                # NOTE(review): the source dump's indentation is ambiguous
                # here; this assumes command runs only after a real override
                # (the default command is a no-op either way) — confirm.
                command(field)
        field.bind(binding, override_func)

    def pack(self, *args, **kwargs):
        """Pack all fields with aligned labels, then pack the form itself."""
        fields = self.fields
        # Obtain the largest field label length for alignment.
        max_label_width = max([len(field.label) for field in fields])
        # Pack all the fields, knowing the form constraints.
        for field in fields:
            field.pack(label_width=max_label_width,
                       side=TOP,
                       fill=X)
        Frame.pack(self, *args, **kwargs)
if __name__ == "__main__":
    # Small demo: a read-only three-field form.
    root = Tk()
    form = VariableForm(root, default_field_state='readonly')
    form.new_field(name='name', label='Name and stuff')
    form.new_field(name='job', label='Job')
    form.new_field(name='pay', label='Pay')
    form.pack(expand=YES, fill=BOTH)
    form['name'] = 'Mike'
    form['job'] = 'student'
    form['pay'] = '30000.00'
    root.mainloop()
|
# coding=utf-8
import sys
import datetime
import threading
sys.path.append('../')
from base import *
from tool import Tools, Message, Event
from flask import render_template, redirect, session, url_for, request
import json
# Handles to the currently running removal worker threads (None = idle).
THREAD_RM_TAR_RUNNING = None
THREAD_RM_IMAGE_RUNNING = None
@app.route('/rm/file/', methods=['GET', 'POST'])
def rm_file():
    """Render the tar-removal page, gated on login, lock and node setup."""
    if 'username' not in session:
        return render_template('page-login.html')
    if 'lock_stat' in session:
        return redirect(url_for('lock'))
    # No nodes configured yet: send the user to system setup first.
    if not Node.query.filter().all():
        return redirect(url_for('sys_set'))
    return render_template('rm-tar.html')
@app.route('/rm/info/', methods=['POST'])
def get_rm_info():
    """Dispatch tar/image removal requests onto background worker threads.

    Returns JSON with 'success' when a worker was started or 'busy' when
    a previous removal of the same kind is still running.
    NOTE(review): unauthenticated requests fall through and return None
    (an empty 200) — confirm this is intended.
    """
    global THREAD_RM_IMAGE_RUNNING, THREAD_RM_TAR_RUNNING
    if 'username' in session:
        receive = request.get_json()
        receive_type = receive.get('type')
        receive_node_ip = receive.get('node')
        receive_files_name = receive.get('files_name')
        info = {}
        if receive_type == 'submit_rm_tar':
            if not check_thread_busy('tar'):
                # One tar-removal worker at a time; daemon threads don't
                # block interpreter shutdown.
                THREAD_RM_TAR_RUNNING = threading.Thread(target=rm_tar_file,
                                                         args=(receive_node_ip, receive_files_name, session['username']),
                                                         name='thread-rm-tar')
                THREAD_RM_TAR_RUNNING.setDaemon(True)
                THREAD_RM_TAR_RUNNING.start()
                info['rm_tar_status'] = 'success'
            else:
                info['rm_tar_status'] = 'busy'
        elif receive_type == 'submit_rm_image':
            if not check_thread_busy('image'):
                receive_files_status = receive.get('files_status')
                THREAD_RM_IMAGE_RUNNING = threading.Thread(target=rm_image_file,
                                                           args=(receive_node_ip, receive_files_name, receive_files_status, session['username']),
                                                           name='thread-rm-image')
                THREAD_RM_IMAGE_RUNNING.setDaemon(True)
                THREAD_RM_IMAGE_RUNNING.start()
                info['rm_image_status'] = 'success'
            else:
                info['rm_image_status'] = 'busy'
        return json.dumps(info)
@app.route('/rm/image/', methods=['GET', 'POST'])
def rm_image():
    """Render the Docker-image removal page, gated on login, lock and nodes."""
    if 'username' not in session:
        return render_template('page-login.html')
    if 'lock_stat' in session:
        return redirect(url_for('lock'))
    # No nodes configured yet: send the user to system setup first.
    if not Node.query.filter().all():
        return redirect(url_for('sys_set'))
    return render_template('rm-image.html')
def rm_tar_file(ip, files_name, username):
    """Delete image tar files on the node at *ip* and report a summary.

    :param ip: node address the files live on
    :param files_name: iterable of file names to delete
    :param username: recipient of progress messages and the audit event
    :return: None
    """
    Message.write_message('开始删除镜像文件,清稍候', username)
    connect_node = Tools.get_connect_node()
    # Pause background refreshing and wait for any in-flight refresh.
    connect_node.bool_flush = False
    while connect_node.flush_status:
        pass
    exec_status = []
    cmd = 'rm -rf {path}/{file_name}'
    path = connect_node.get_ip_attr(ip, 'dir')
    # Delete each file in turn.
    for file_name in files_name:
        # Escape '/' in the name before splicing it into the shell command.
        if '/' in file_name:
            file_name = file_name.replace('/', '\/')
        rm_cmd = cmd.format(path=path, file_name=file_name)
        # Execute the rm command on the node.
        result = connect_node.cmd(ip, rm_cmd)
        exec_status.append(result[1])
    # Tally per-file results and report them.
    success_num = len([x for x in exec_status if x == 'success'])
    fail_num = len(exec_status) - success_num
    message_info = '镜像删除完成:%d成功 %d失败' % (success_num, fail_num)
    if 'defeated' in exec_status:
        Message.write_message(message_info, username, grade='danger')
    else:
        Message.write_message(message_info, username)
    Event.write_event(username, '从{ip}删除了 {number_file} 个镜像文件'.format(ip=ip, number_file=success_num),
                      datetime.datetime.now())
    # Resume background refreshing.
    connect_node.bool_flush = True
def rm_image_file(ip, files_name, files_status, username):
    """Delete Docker images on the node at *ip*.

    Approach:
    1. Check whether each image is currently in use.
    2. If it is in use, find and remove the containers using it first.
    3. Remove the image itself.

    :param ip: node address
    :param files_name: list of image names ("name:tag")
    :param files_status: per-image usage flags ('NoUse' means unused)
    :param username: recipient of progress messages and the audit event
    :return: None
    """
    # Message.write_message('开始删除Docker镜像,清稍候', username)
    connect_node = Tools.get_connect_node()
    # Pause background refreshing and wait for any in-flight refresh.
    connect_node.bool_flush = False
    while connect_node.flush_status:
        pass
    exec_status = []
    # Look up the ids of containers that use a given image.
    get_container_id = "docker ps -a|awk '/{image_name}/ {{print $1}}'"
    # Stop and remove a container.
    rm_container_cmd = 'docker kill {container_id} & docker rm {container_id}'
    # Remove an image.
    rm_image_cmd = 'docker rmi {docker_image}'
    for index in xrange(len(files_name)):
        # Escape '/' in the image name before splicing into shell commands.
        if '/' in files_name[index]:
            files_name[index] = files_name[index].replace('/', '\/')
        image_name = files_name[index].split(':')[0]
        image_tag = files_name[index].split(':')[1]
        exec_rm_image_cmd = rm_image_cmd.format(docker_image=files_name[index])
        # Refuse empty image names: the awk filter would match every container.
        if len(image_name) == 0:
            return
        # Unused image: remove it directly.
        if files_status[index] == 'NoUse':
            result = connect_node.cmd(ip, exec_rm_image_cmd)
            exec_status.append(result[1])
        else:
            exec_tag_container_id = get_container_id.format(image_name=files_name[index])
            exec_no_tag_container_id = get_container_id.format(image_name=image_name)
            # Look up by full "name:tag" first; if nothing matches and the
            # tag is 'latest', retry with the bare image name.
            container_id = connect_node.cmd(ip, exec_tag_container_id)
            container_id_list = container_id[2][0].readlines()
            if len(container_id_list) == 0:
                if image_tag == 'latest':
                    container_id = connect_node.cmd(ip, exec_no_tag_container_id)
                    container_id_list = container_id[2][0].readlines()
            print container_id_list
            # Remove the containers that use the image.
            for container in container_id_list:
                container_id = container.split('\n')[0]
                exec_rm_container_cmd = rm_container_cmd.format(container_id=container_id)
                connect_node.cmd(ip, exec_rm_container_cmd)
            # Now remove the image itself.
            result = connect_node.cmd(ip, exec_rm_image_cmd)
            exec_status.append(result[1])
    # Tally results and report them.
    success_num = len([x for x in exec_status if x == 'success'])
    fail_num = len(exec_status) - success_num
    message_info = '镜像删除完成:%d成功 %d失败' % (success_num, fail_num)
    if 'defeated' in exec_status:
        Message.write_message(message_info, username, grade='danger')
    else:
        Message.write_message(message_info, username)
    Event.write_event(username, '从{ip}删除了 {number_file} 个Docker镜像'.format(ip=ip, number_file=success_num),
                      datetime.datetime.now())
    connect_node.bool_flush = True
def check_thread_busy(check_type):
    """Return whether a removal worker thread is still running.

    :param check_type: either 'tar' or 'image'; other values are ignored.
    :return: True if busy, False if idle, None for an unknown type.
    """
    global THREAD_RM_TAR_RUNNING, THREAD_RM_IMAGE_RUNNING
    # Bug fixed: the original compared strings with ``is``, which tests
    # object identity, not equality, and only worked by the accident of
    # CPython's string interning.
    if check_type == 'tar':
        if THREAD_RM_TAR_RUNNING is None:
            return False
        if not THREAD_RM_TAR_RUNNING.isAlive():
            # Worker finished: clear the handle so a new one may start.
            THREAD_RM_TAR_RUNNING = None
            return False
        return True
    elif check_type == 'image':
        if THREAD_RM_IMAGE_RUNNING is None:
            return False
        if not THREAD_RM_IMAGE_RUNNING.isAlive():
            THREAD_RM_IMAGE_RUNNING = None
            return False
        return True
    return None
from gtts import gTTS
import os
def make_speech(file):
    """Render the text file at *file* to an MP3 under /pfs/tts.

    Bug fixed: the original interpolated the FULL input path into
    "/pfs/tts/{}.mp3", producing a nested, nonexistent path such as
    /pfs/tts//pfs/tts/name.mp3 when called with an absolute path.
    Only the basename (without extension) is used now.
    """
    with open(file) as f:
        text = f.read()
    tts = gTTS(text=text, lang='en')
    base = os.path.splitext(os.path.basename(file))[0]
    tts.save("/pfs/tts/{}.mp3".format(base))
# Convert every .txt transcript under /pfs/tts to speech.
for dirpath, dirs, files in os.walk("/pfs/tts"):
    for file in files:
        # Only feed text files to the TTS engine; the original processed
        # every file, including its own previously generated .mp3 output.
        if file.endswith(".txt"):
            make_speech(os.path.join(dirpath, file))
|
# 3_4: dinner-guest invitation list.
inv_people = ["Beauvoir", "ZeDong Mao", "Enlai Zhou", "Frank"]
print("We invited", len(inv_people), "people to come to the dinner.")
● Basic idea: predict the label of a data point by:
● looking at the 'k' closest labelled data points
● taking a majority vote
# NOTE(review): these are course-style snippets, not a runnable script —
# ``df``, ``X_new``, ``np``, ``plt``, ``X_train``/``y_train``/``X_test``/
# ``y_test`` are assumed to exist in the surrounding session.

# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier

# Create arrays for the features and the response variable
y =df['party'].values
X =df.drop('party', axis=1).values

# Create a k-NN classifier with 6 neighbors
knn =KNeighborsClassifier(n_neighbors=6)

# Fit the classifier to the data
knn.fit(X,y)
#KNeighborsClassifier(algorithm='auto',
#leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=6,
#p=2, weights='uniform')

# Predict the labels for the training data X
y_pred = knn.predict(X)

# Predict and print the label for the new data point X_new
new_prediction = knn.predict(X_new)
print("Prediction: {}".format(new_prediction))

# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over different values of k to visualise over/under-fitting.
for i, k in enumerate(neighbors):
    # Setup a k-NN Classifier with k neighbors: knn
    knn = KNeighborsClassifier(n_neighbors=k)
    # Fit the classifier to the training data
    knn.fit(X_train, y_train)
    #Compute accuracy on the training set
    train_accuracy[i] = knn.score(X_train, y_train)
    #Compute accuracy on the testing set
    test_accuracy[i] = knn.score(X_test, y_test)

# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
|
import sys
import time
import numpy as np
# Optional log file: first CLI argument, opened in append mode.
# NOTE(review): the handle is never closed explicitly; it lives for the
# whole process, which is acceptable for this benchmark script.
if len(sys.argv) > 1:
    log_filename = sys.argv[1]
    log_file = open(log_filename, 'a')
else:
    log_file = None
def test_cluster_sklearn(runs, data, k):
import sklearn.cluster
global log_file
np.random.seed(int(time.time()))
clocks = np.empty((runs,))
times = np.empty((runs,))
inertias = np.empty((runs,))
for i in xrange(runs):
t1 = time.time()
c1 = time.clock()
KMeans = sklearn.cluster.KMeans(k)
KMeans.fit(data)
c2 = time.clock()
t2 = time.time()
dt = t2 - t1
dc = c2 - c1
clocks[i] = c2 - c1
times[i] = t2 - t1
inertias[i] = KMeans.inertia_
mean_clock = np.mean(clocks)
std_clock = np.std(clocks)
mean_time = np.mean(times)
std_time = np.std(times)
print 'sklearn: %d objects, %d features, %d clusters: clocks=%f +- %f, times=%f +- %f' % (data.shape[0], data.shape[1], k, mean_clock, std_clock, mean_time, std_time)
print ' inertias:', inertias
if log_file is not None:
print >> log_file, '%d objects, %d features, %d clusters: clocks=%f +- %f, times=%f +- %f' % (data.shape[0], data.shape[1], k, mean_clock, std_clock, mean_time, std_time)
print >> log_file, ' inertias:', inertias
return mean_time, std_time, mean_clock, std_clock, inertias
def test_cluster_kmeans(runs, data, k, n_init=10):
    """Benchmark the project's own k-means with sklearn-style restarts.

    For each of *runs* repetitions, performs ``n_init`` restarts seeded by
    sklearn's k-means++ initialisation and keeps the best (lowest) final
    inertia.

    :param runs: number of repetitions
    :param data: 2-D array of samples
    :param k: number of clusters
    :param n_init: restarts per repetition
    :return: array of best inertias, one per repetition
    """
    import sklearn.cluster
    from src.core import cluster
    global log_file
    np.random.seed(int(time.time()))
    inertias = np.zeros((runs,))
    def _tolerance(X, tol):
        """Return a tolerance which is independent of the dataset"""
        variances = np.var(X, axis=0)
        return np.mean(variances) * tol
    tol = 1e-4
    tol = _tolerance(data, tol)
    # subtract of mean of x for more accurate distance computations
    data = data.copy()
    data_mean = data.mean(axis=0)
    data -= data_mean
    for i in xrange(runs):
        best_inertia = None
        for n in xrange(n_init):
            # k-means++ seeding from sklearn, clustering from project code.
            clusters = sklearn.cluster.k_means_.k_init(data, k)
            partition, clusters, inertia = cluster.cluster_kmeans(
                data,
                k,
                clusters=clusters,
                use_ccluster=False,
                clusters_tol=tol
            )
            # ``inertia`` appears to be a per-iteration history; only the
            # final entry is compared — TODO confirm against cluster_kmeans.
            if best_inertia is None or inertia[-1] < best_inertia:
                best_inertia = inertia[-1]
        inertias[i] = best_inertia
    print 'kmeans: %d objects, %d features, %d clusters' % (data.shape[0], data.shape[1], k)
    print ' inertias:', inertias
    if log_file is not None:
        print >> log_file, '%d objects, %d features, %d clusters' % (data.shape[0], data.shape[1], k)
        print >> log_file, ' inertias:', inertias
    return inertias
import cPickle

# Load the full object/feature matrix once; the ``with`` block guarantees
# the file handle is closed (the original used open()/close() without
# try/finally, leaking the handle on an unpickling error).
with open('/g/pepperkok/hepp/cell_objects_COP.pic', 'r') as f:
    original_data = cPickle.Unpickler(f).load()

runs = 3
#N = [1000, np.sqrt(10)*1000, 10000, np.sqrt(10)*10000, 100000]
#K = [100, 200]
N = [1000]
K = [5]

for n in N:
    n = int(n)
    for k in K:
        # Draw a random subset of n rows for this benchmark round.
        a = np.arange(original_data.shape[0])
        np.random.shuffle(a)
        data = original_data[a[:n]]
        data = np.asarray(data, dtype=np.float32)
        test_cluster_kmeans(runs, data, k)
        test_cluster_sklearn(runs, data, k)
|
import unittest
class unitest(unittest.TestCase):
    """Regression tests for Solution.titleToNumber."""

    def testNone(self):
        # An empty title maps to column number 0.
        self.assertEqual(Solution().titleToNumber(""), 0)

    def testSample(self):
        # "AAA" is Excel column 703.
        self.assertEqual(Solution().titleToNumber("AAA"), 703)
class Solution():
    """Excel-style column title to column number converter."""

    def titleToNumber(self, s):
        """Interpret *s* as a base-26 numeral with digits A=1 .. Z=26."""
        total = 0
        for ch in s:
            total = total * 26 + (ord(ch) - ord("A") + 1)
        return total
if __name__ == '__main__':
    # Run the unittest suite when executed directly.
    unittest.main()
|
from flask import Flask,jsonify,request
app = Flask(__name__)

# In-memory demo credential store.
# NOTE(review): passwords are stored and compared in plain text — fine for
# a toy example only, never for real authentication.
data = [
    {'name': 'blacky',
     'pass': '123'},
    {'name': 'boki',
     'pass': '12345'}
]
@app.route('/', methods=['GET'])
def home():
    """Landing page for the demo API."""
    heading = "<h1> API Authentication</h1>"
    return heading
@app.route('/valid', methods=['GET'])
def valid():
    """Check the name/pass query parameters against the user list."""
    # Guard clauses: both parameters are required.
    if 'name' not in request.args:
        return "No name provided!"
    name = request.args['name']
    if 'pass' not in request.args:
        return "pass not provided"
    pass1 = request.args['pass']
    for record in data:
        if record['name'] == name and record['pass'] == pass1:
            return "Authenticated!"
    return "not Authenticated!"
# Start the development server (debug mode) on port 5001.
app.run(debug=True, port=5001)
|
from os.path import abspath, dirname, basename, join
try:
import social_auth
except ImportError:
import sys
sys.path.insert(0, "..")
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Filesystem location of this settings module and the derived project name.
ROOT_PATH = abspath(dirname(__file__))
PROJECT_NAME = basename(ROOT_PATH)

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

# Example database: a local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/admin-media/'
MEDIA_URL = ''

# NOTE(security): hard-coded key is acceptable only for this example
# project; never commit a real deployment's SECRET_KEY.
SECRET_KEY = 't2eo^kd%k+-##ml3@_x__$j0(ps4p0q6eg*c4ttp9d2n(t!iol'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
# NOTE: the trailing comma is required. Without it the parentheses are
# plain grouping and TEMPLATE_DIRS becomes a *string*, which Django
# would then iterate character by character instead of as one path.
TEMPLATE_DIRS = (
    join(ROOT_PATH, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'social_auth',
    'app',
)

# Social backends are consulted in order before the standard ModelBackend.
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.twitter.TwitterBackend',
    'social_auth.backends.facebook.FacebookBackend',
    'social_auth.backends.google.GoogleOAuthBackend',
    'social_auth.backends.google.GoogleOAuth2Backend',
    'social_auth.backends.google.GoogleBackend',
    'social_auth.backends.yahoo.YahooBackend',
    'social_auth.backends.contrib.linkedin.LinkedinBackend',
    'social_auth.backends.contrib.flickr.FlickrBackend',
    'social_auth.backends.contrib.instagram.InstagramBackend',
    'social_auth.backends.contrib.vkontakte.VkontakteBackend',
    'social_auth.backends.OpenIDBackend',
    'social_auth.backends.contrib.livejournal.LiveJournalBackend',
    'social_auth.backends.browserid.BrowserIDBackend',
    'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.contrib.messages.context_processors.messages',
    'social_auth.context_processors.social_auth_by_type_backends',
)
LOGIN_REDIRECT_URL = '/'
# Allow an optional local_settings.py to override anything above.
try:
    from local_settings import *
except ImportError:
    # Only a missing module is expected here; the previous bare `except:`
    # also swallowed real errors (e.g. a SyntaxError inside local_settings).
    pass
|
from tkinter import *

# Active demo: a label and a disabled button on the default root window.
Label(None, text='label', fg='green', bg='black').pack()
Button(None, text='button', fg='blue', state="disabled", bg='black').pack()
mainloop()

# The triple-quoted strings below are earlier experiments kept as inert
# module-level string literals (effectively commented-out code).
"""
# keep button from resizing when text appeared in it
from tkinter import *
from itertools import cycle
root = Tk()
root.title("Tic-Tac-Toe")
root.minsize(width=100, height=100)
root.geometry('800x875')
test_frame = Frame(root)
test_frame.grid(row=0, column=0)
fonts = cycle((('Helvetica', '11'), ('Helvetica', '15'), ('Helvetica', '20')))
def chg():
    button.config(font=next(fonts))
# relief=SUNKEN width=40 height=20
button = Button(root, text="Click Me!", padx=100, pady=100, width=1, height=1, command=chg)
button.grid(row=0, column=0)
root.mainloop()
"""

# Experiment: fixed-size window with a menu bar.
"""
# static size of window
from tkinter import *
root = Tk()
menu = Menu(root)
root.config(menu=menu)
# try fiddling with these root.geometry values
root.title('My tkinter size experiment')
root.minsize(width=100, height=100)
root.geometry('1000x920+0+0')
subMenu = Menu(menu)
menu.add_cascade(label="File",menu=subMenu)
subMenu.add_command(label="New")
subMenu.add_command(label="Open File...")
subMenu.add_command(label="Close")
subMenu.add_separator()
subMenu.add_command(label="Exit", command=quit)
editMenu = Menu(menu)
menu.add_cascade(label="Edit",menu=editMenu)
editMenu.add_command(label="Undo")
editMenu.add_command(label="Redo")
editMenu = Menu(menu)
menu.add_cascade(label="?",menu=editMenu)
editMenu.add_command(label="Check For Updates")
editMenu.add_command(label="Change log")
editMenu.add_command(label="About")
root.mainloop()
"""

# Experiment: Tic-Tac-Toe style click-once buttons with player flipping.
"""
# Disabling buttons once clicked edited for TicTacToe
from tkinter import Tk, Button, GROOVE
from itertools import cycle
root = Tk()
current_player = "X"
def when_clicked(index):
    # Disable the button by index
    buttons[index].config(text=current_player, state="disabled")
    flip_player()
def flip_player():
    global current_player
    # if current player was x then change it to o
    if current_player == "X":
        current_player = "O"
    # if current player was o then change it to x
    elif current_player == "O":
        current_player = "X"
    return
# A collection (list) to hold the references to the buttons created below
buttons = []
for index in range(9):
    button = Button(root, bg="White", width=5, height=1, relief=GROOVE, command=lambda index=index: when_clicked(index))
    # Add the button to the window
    button.grid(padx=2, pady=2, row=index % 3, column=int(index / 3))
    # Add a reference to the button to 'buttons'
    buttons.append(button)
root.mainloop()
"""

# Experiment: letter buttons that disable themselves when clicked.
"""
# Disabling buttons once clicked
from tkinter import Tk, Button, GROOVE
root = Tk()
def appear(index, letter):
    # This line would be where you insert the letter in the textbox
    print(letter)
    # Disable the button by index
    buttons[index].config(state="disabled")
letters=["A", "T", "D", "M", "E", "A", "S", "R", "M"]
# A collection (list) to hold the references to the buttons created below
buttons = []
for index in range(9):
    n = letters[index]
    button = Button(root, bg="White", text=n, width=5, height=1, relief=GROOVE,
                    command=lambda index=index, n=n: appear(index, n))
    # Add the button to the window
    button.grid(padx=2, pady=2, row=index % 3, column=index)
    # Add a reference to the button to 'buttons'
    buttons.append(button)
root.mainloop()
"""
import csv
import numpy as np
from sklearn import linear_model, datasets
from PIL import Image
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage.measure import LineModelND, ransac, CircleModel
from scipy.spatial.transform import Rotation as R
from envmap import EnvironmentMap
# Distance (metres) from the upper body to the circle centre (the camera
# stick); assigned in __main__ before use.
people2camera_meter = None
# Radius (metres) of the circle the camera traces on the stick; assigned
# in __main__ before use.
radius_meter = None
def spherical2dcm(spherical_coordinate, dcm):
    """Fill *dcm* in place with the DCM for a spherical rotation.

    :param spherical_coordinate: Euler angles [phi, theta] in degrees
    :param dcm: 3x3 ndarray receiving the Direction Cosine Matrix
    :return: None (result is written into *dcm*)
    """
    phi, theta = spherical_coordinate[0], spherical_coordinate[1]
    # Intrinsic xyz order: theta about x, phi about y, nothing about z.
    rotation = R.from_euler("xyz", [theta, phi, 0], degrees=True)
    dcm[:] = rotation.as_matrix()
def rotate_image(data, rotation):
    """Rotate a latlong panorama in place using skylibs.

    :param data: image array, modified in place
    :param rotation: spherical rotation [phi, theta] in degrees
    :return: True when a rotation was applied, False for the identity
    """
    if rotation == [0.0, 0.0]:
        return False

    # Build the Direction Cosine Matrix for the requested rotation.
    dcm = np.zeros([3, 3])
    spherical2dcm(rotation, dcm)

    # Let skylibs resample the panorama, then write the result back.
    rotated = EnvironmentMap(data, format_='latlong').rotate("DCM", dcm).data
    data[:] = rotated.astype(np.uint8)
    return True
def rotate_image_fast(data, rotation):
    """Rotate a panoramic image in place, taking a fast path when possible.

    When the rotation is a pure yaw (phi) mapping to a whole number of
    pixel columns, the image is shifted with np.roll (no interpolation);
    otherwise it falls back to the skylibs-based rotate_image().

    :param data: image array rotated in place
    :param rotation: [phi, theta] in degrees
    :return: True when a rotation was applied, False for the identity

    NOTE(review): the original docstring claims dimension 4
    [x, width, height, 3], but create_mask() passes a 3-D (H, W, 1)
    array; for that shape np.roll(..., axis=2) shifts the channel axis,
    which is a no-op for a single channel -- confirm the intended shape
    and roll axis.
    """
    if [0.0, 0.0] == rotation:
        return False
    # Number of pixel columns corresponding to the phi rotation.
    phi_roll_numb = rotation[0] / 360.0 * np.shape(data)[1]
    if phi_roll_numb == int(phi_roll_numb) and rotation[1] == 0.0:
        # do not need to interpolate, use numpy roll operation
        data[:] = np.roll(data, int(phi_roll_numb), axis=2)
        return True
    # interpolate (rotate) with skylibs
    return rotate_image(data, rotation)
def estimate_photographer_position_ransac_circle(position_list):
    """Fit a quadric z = A*x^2 + B*x + C*y^2 + D*y + E*z^2 + F with RANSAC
    and plot the inliers/outliers.

    :param position_list: Nx3 array of XYZ camera positions; only rows
        300:900 are used (exploratory/debug code).
    NOTE(review): the model also feeds z*z as a feature while z is the
    target, and the plotted surface subtracts the intercept -- both look
    questionable but are preserved as-is.
    """
    # AX + By + C = z
    # (x-h)^2 + (y-k)^2 = r^2
    x = position_list[300:900, 0]
    y = position_list[300:900, 1]
    z = position_list[300:900, 2]
    # A*x^2 + B*x + C*y^2 + D*y + E*z^2 + F = z
    # Build the feature matrix column-wise. The original call
    # np.stack(x*x, x, y*y, y, z*z) was invalid: np.stack takes a
    # *sequence* of arrays plus an axis argument, so it raised TypeError.
    X = np.stack([x*x, x, y*y, y, z*z], axis=1)
    Y = z
    ransac = linear_model.RANSACRegressor()
    ransac.fit(X, Y)
    inlier_mask = ransac.inlier_mask_
    outlier_mask = np.logical_not(inlier_mask)
    # Compare estimated coefficients
    print("Estimated coefficients (linear regression, RANSAC):")
    print(ransac.estimator_.coef_, ransac.estimator_.intercept_)
    # Evaluate the fitted surface on a grid spanning the x/x^2 features.
    xx, yy = np.meshgrid(np.linspace(X[:, 0].min(), X[:, 0].max(
    ), 20), np.linspace(X[:, 1].min(), X[:, 1].max(), 20))
    zz = xx * xx * ransac.estimator_.coef_[0] \
        + xx * ransac.estimator_.coef_[1] \
        + yy * yy * ransac.estimator_.coef_[2] \
        + yy * ransac.estimator_.coef_[3] \
        - ransac.estimator_.intercept_
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(xx, yy, zz, color=(0.3, 0.3, 0.3, 0.5))
    ax.scatter(X[inlier_mask][:, 0], X[inlier_mask][:, 1],
               Y[inlier_mask], color='yellowgreen', marker='.', label='Inliers')
    ax.scatter(X[outlier_mask][:, 0], X[outlier_mask][:, 1],
               Y[outlier_mask], color='gold', marker='x', label='Outliers')
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()
def estimate_photographer_position_ransac_plane(position_list, plot_result = False):
    """Fit the 3-D plane z = A*x + B*y + C to the trajectory with RANSAC.

    :param position_list: Nx3 array of XYZ camera positions
    :param plot_result: when True, show a 3-D plot of the fitted plane,
        inliers/outliers and the trimmed start/end poses
    :return: plane normal direction [A, B, -1] (un-normalised)
    """
    # get rid of the start and end unstable pose
    start_idx = int(np.shape(position_list)[0] * 0.15)
    end_idx = int(np.shape(position_list)[0] * 0.85)
    X = position_list[start_idx:end_idx, :2]
    y = position_list[start_idx:end_idx, 2]
    # X = position_list[:, :2]
    # y = position_list[:, 2]
    ransac = linear_model.RANSACRegressor(stop_probability = 0.80)
    ransac.fit(X, y)
    inlier_mask = ransac.inlier_mask_
    outlier_mask = np.logical_not(inlier_mask)
    # Compare estimated coefficients
    print("Estimated coefficients (linear regression, RANSAC):")
    print(ransac.estimator_.coef_, ransac.estimator_.intercept_)
    A = ransac.estimator_.coef_[0]
    B = ransac.estimator_.coef_[1]
    C = ransac.estimator_.intercept_
    if plot_result:
        # Visualise the fitted plane together with the data split.
        xx, yy = np.meshgrid(np.linspace(X[:, 0].min(), X[:, 0].max(), 20), np.linspace(X[:, 1].min(), X[:, 1].max(), 20))
        zz = xx * A + yy * B + C
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(xx, yy, zz, color=(0.3, 0.3, 0.3, 0.5))
        ax.scatter(X[inlier_mask][:, 0], X[inlier_mask][:, 1], y[inlier_mask], color='yellowgreen', marker='.', label='Inliers')
        ax.scatter(X[outlier_mask][:, 0], X[outlier_mask][:, 1], y[outlier_mask], color='gold', marker='x', label='Outliers')
        ax.scatter(position_list[:start_idx, 0], position_list[:start_idx, 1], position_list[:start_idx,2], color='blue', marker='>', label='Start')
        ax.scatter(position_list[end_idx:, 0], position_list[end_idx:, 1], position_list[end_idx:,2], color='blue', marker='>', label='End')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        plt.show()
    # z = A*x + B*y + C  =>  A*x + B*y - z + C = 0, so the normal is
    # proportional to (A, B, -1).
    return [A/1.0, B/1.0, -1.0]
def estimate_photographer_position(traj_list, plot_result = False):
    """Estimate the photographer's 3-D position from the camera trajectory.

    The camera is assumed to swing on a stick around the person, tracing
    an (approximate) circle; the person sits along the circle's up-axis.

    :param traj_list: trajectory rows [timestamp, x, y, z, qx, qy, qz, qw]
    :param plot_result: show a 3-D scatter of the estimate when True
    :return: estimated XYZ position of the photographer
    """
    # Use only the middle of the trajectory; start/end poses are unstable.
    tranlation_array = traj_list[:,1:4]
    start_idx = int(np.shape(tranlation_array)[0] * 0.35)
    end_idx = int(np.shape(tranlation_array)[0] * 0.95)

    # Circle centre = mean of the stable camera positions.
    center_circle = np.average(tranlation_array[start_idx: end_idx], axis = 0)

    # Circle radius = mean distance from the centre (vectorised norm
    # replaces the hand-rolled per-component sum of squares).
    radius_vector = tranlation_array[start_idx: end_idx] - center_circle
    radius = np.average(np.linalg.norm(radius_vector, axis=1))

    # Upright direction from a RANSAC plane fit; OpenVSLAM's up is -Y,
    # so flip the normal when it points the wrong way.
    up_vector = estimate_photographer_position_ransac_plane(traj_list[:, 1:4], True)
    if up_vector[1] < 0:
        up_vector = [-up_vector[0], -up_vector[1], -up_vector[2]]

    # Scale SLAM units to metres via the known stick radius, then step
    # from the circle centre towards the person along the up direction.
    people2camera = radius / radius_meter * people2camera_meter
    people2camera_vec = up_vector / np.linalg.norm(up_vector) * people2camera
    ptgpr_position = center_circle + people2camera_vec
    if plot_result:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(tranlation_array[:start_idx, 0], tranlation_array[:start_idx, 1], tranlation_array[:start_idx,2], color='blue', marker='.', label='Camera_start')
        ax.scatter(tranlation_array[start_idx:end_idx, 0], tranlation_array[start_idx:end_idx, 1], tranlation_array[start_idx:end_idx,2], color='yellowgreen', marker='.', label='Camera_used')
        ax.scatter(tranlation_array[end_idx:, 0], tranlation_array[end_idx:, 1], tranlation_array[end_idx:,2], color='blue', marker='.', label='Camera_end')
        ax.scatter(ptgpr_position[0], ptgpr_position[ 1], ptgpr_position[2], color='blue', marker='x', label='People')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        plt.show()
    return ptgpr_position
def get_spherical_coord(src_trans, src_rot, tar_trans):
    """
    Right hand coordinate system.
    Get the target theta & phi relative to the source camera.
    The theta, phi as https://developers.google.com/vr/jump/rendering-ods-content.pdf

    @param src_trans: source camera translation, XYZ
    @param src_rot: source camera rotation quaternion (x, y, z, w)
    @param tar_trans: target translation, XYZ
    @return theta, phi (degrees)
    """
    # Flip X and Y to move from OpenVSLAM's convention to the working frame.
    openvslam_coord_transform = np.array((-1.0, -1.0, 1.0))
    src_trans = openvslam_coord_transform * src_trans
    tar_trans = openvslam_coord_transform * tar_trans

    # A rotation matrix is orthogonal, so its inverse is its transpose;
    # this replaces np.linalg.inv (cheaper and numerically exact).
    src_rot_mat_inv = R.from_quat(src_rot).as_matrix().T
    src2tar_trans = np.dot(src_rot_mat_inv, tar_trans - src_trans)

    x = src2tar_trans[0]
    y = src2tar_trans[1]
    z = src2tar_trans[2]
    radius = np.sqrt(np.sum(src2tar_trans * src2tar_trans))
    # transform to OpenVSLAM coordinate
    theta = np.arctan2(z,x) * 180 / np.pi
    phi = np.arcsin(y/radius) * 180 / np.pi
    return theta, phi
def create_mask(initial_mask_file_path, traj_file_path, mask_output_path):
    """Generate one photographer mask image per trajectory frame.

    OpenVSLAM use right hand coordinate: \
    up (0, -1, 0), forward (0, 0, 1), left (-1, 0, 0)

    @param initial_mask_file_path: mask image aligned with the first frame
    @param traj_file_path: the original openvslam output csv file, corresponding the first frame
    https://github.com/xdspacelab/openvslam/blob/5a0b1a5f52b4d29b699624052c9d5dc4417d9882/src/openvslam/io/trajectory_io.cc#L148
    << timestamp << trans_wc(0) << " " << trans_wc(1) << " " << trans_wc(2) << " "
    << quat_wc.x() << " " << quat_wc.y() << " " << quat_wc.z() << " " << quat_wc.w()
    @param mask_output_path: directory prefix for the per-frame mask JPEGs
    """
    initial_mask_file = Image.open(initial_mask_file_path)
    initial_mask = initial_mask_file.convert("L") # convert image to black and white
    # warp initial mask base on the camera pose
    traj_file = open(traj_file_path)
    traj_csv_handle = csv.reader(traj_file, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC)
    traj_list = [traj_csv_item for traj_csv_item in traj_csv_handle]
    transformation_array = np.asarray(traj_list)
    # the people position
    ptgpr_position = estimate_photographer_position(transformation_array, True)
    # Spherical direction of the photographer seen from the first frame;
    # later frames are rotated relative to this reference.
    theta_init, phi_init = \
        get_spherical_coord(transformation_array[0, 1:4], transformation_array[0, 4:8], ptgpr_position)
    for idx in range(len(traj_list)):
        if idx % 10 == 0:
            print("generate the {}th frame mask".format(idx))
        # NOTE(review): 'term' is assigned but never used.
        term = traj_list[idx]
        # compute the relative rotation from initial fot current frame
        translation = transformation_array[idx, 1:4]
        rotation_quat = transformation_array[idx, 4:8]
        # ration the mask
        theta_cur, phi_cur = get_spherical_coord(translation, rotation_quat, ptgpr_position)
        rotation = [theta_cur - theta_init , phi_cur - phi_init]
        # print(rotation)
        # rotation
        if idx % 1 == 0:
            # output mask: rotate a copy of the initial mask and save it
            # as a zero-padded numbered JPEG.
            mask = np.copy(initial_mask)[..., np.newaxis]
            rotate_image_fast(mask, rotation)
            im = Image.fromarray(mask[..., 0])
            im.save(mask_output_path + r"{:04d}.jpg".format(idx))
            # plt.imshow(mask, interpolation='nearest')
            # plt.show()
if __name__ == "__main__":
people2camera_meter = 0.1
radius_meter = 0.65
traj_file_path = "D:/workdata/KobeGarden6/openvslam_result_Apr_23/KobeGarden6_traj.csv"
initial_mask_file_path = "D:/workdata/KobeGarden6/mask.png"
mask_output_path = "D:/workdata/KobeGarden6/mask/"
create_mask(initial_mask_file_path, traj_file_path, mask_output_path)
|
from Abstractas.NodoArbol import NodoArbol
from Abstractas.NodoAST import NodoAST
class Return(NodoAST):
    """AST node for a ``return`` statement."""

    def __init__(self, valor, fila, columna):
        # valor: returned expression (NodoAST) or None;
        # fila/columna: source position of the statement.
        self.valor = valor
        self.fila = fila
        self.columna = columna

    def ejecutar(self, tree, table):
        """Evaluate the returned expression, if any.

        Returns a new Return carrying the evaluated value, or ``self``
        when there is nothing to evaluate.
        """
        if isinstance(self.valor, NodoAST):
            valor = self.valor.ejecutar(tree, table)
            return Return(valor, self.fila, self.columna)
        return self

    def getNodo(self):
        """Build the display/AST tree node for this statement."""
        NodoNuevo = NodoArbol("RETURN")
        NodoNuevo.agregarHijo("return")
        # 'is not None' replaces '!= None': identity comparison is the
        # correct way to test for None.
        if self.valor is not None:
            NodoNuevo.agregarHijoNodo(self.valor.getNodo())
        NodoNuevo.agregarHijo(";")
        return NodoNuevo
return NodoNuevo |
class TestObserver(object):
    """Records every 'change' event fired by the observed object.

    The observer passed in must expose ``bind(event_name, callback)``;
    events received via the callback accumulate in ``self.events``.
    """

    def __init__(self, observer):
        self.events = []
        observer.bind('change', self.handler)

    def handler(self, event):
        """Append the received event to the log."""
        self.events.append(event)
# @Author : Shusheng Wang
# @Time : 2021/2/2 6:04 下午
# @Email : lastshusheng@163.com
from echarts_data.extensions import db
from datetime import datetime
from .base_mod import DataMixin
class Table1(db.Model, DataMixin):
    """
    Table 1
    """
    __tablename__ = 'tb_table1'
    title = db.Column(db.String(32))
    value = db.Column(db.Integer, default=0, server_default='0')  # switch to a DECIMAL column here if the values need decimal precision
class Table2(db.Model, DataMixin):
    """
    Table 2
    """
    __tablename__ = 'tb_table2'
    title = db.Column(db.String(32))
    value = db.Column(db.Integer, default=0, server_default='0')  # switch to a DECIMAL column here if the values need decimal precision
|
#!/usr/bin/env python
#
# Copyright (C) 2008 Telefonica I+D (Spain)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# This code has been developed within the context of the DRIVE
# research project.
#
# Testing: nc -l -u 7777 (listen incomming UDP connections)
#
# Authors:
# Jessica Colom <jess2188@mit.edu>
# Javi Roman <javiroman@kernel-labs.org>
# Jose A. Olivera <jaoo@tid.es>
import sys
import socket
import signal
import syslog
import time
import os
buf = 102400  # maximum datagram size (bytes) read per recvfrom() call

def main():
    """Listen on localhost:7777 and print every received UDP datagram."""
    # Create socket
    UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # bind the address to the socket
    UDPSock.bind(("localhost", 7777))
    # Receive forever (Python 2 print statement).
    while 1:
        data, addr = UDPSock.recvfrom(buf)
        print data

if __name__ == "__main__":
    main()
|
from datetime import datetime, timezone
def do(*handlers):
    """Combine several zero-argument callables into a single one.

    The returned callable invokes every handler in the order given.
    """
    def _run_all():
        for callback in handlers:
            callback()
    return _run_all
def turn_lamp_off(lamp):
    """Return a thunk that switches *lamp* off when called."""
    def _off():
        return lamp.off()
    return _off
def turn_lamp_on(lamp):
    """Return a thunk that switches *lamp* on when called."""
    def _on():
        return lamp.on()
    return _on
def when(filter, handler):
    """Return a callable that invokes *handler*() only for accepted values.

    *filter* is a predicate applied to the incoming value.
    NOTE: the parameter name ``filter`` shadows the builtin; it is kept
    for backward compatibility with keyword callers.
    """
    def _guarded(value):
        if not filter(value):
            return
        handler()
    return _guarded
def with_current_time(*handlers):
    """Return a callable that passes the current UTC time to every handler.

    All handlers receive the *same* timestamp, captured once per call.
    """
    def _invoke():
        timestamp = datetime.now(timezone.utc)
        for callback in handlers:
            callback(timestamp)
    return _invoke
|
#!/usr/bin/python
# Cloudwatch query script for use with Zabbix (or any other monitoring tool)
# Queries for the values over the last 20 minutes and returns the most recent value
# Author: Stefan Radu
# Web: http://rstefan.blogspot.com
## Parameters:
# MetricName. Eg: CurrConnections
# Function. Can be one of the following: Average, Sum, SampleCount, Maximum, or Minimum.
# Dimension. Eg: CacheClusterId=cache,CacheNodeId=0001
# Region. Eg: eu-west-1
# AWS_Access_Key
# AWS_Secret_Access_Key
import boto.ec2.cloudwatch
import sys
import datetime
try:
metName = sys.argv[1]
funcName = sys.argv[2]
dimSpace = sys.argv[3]
region = sys.argv[4]
accessKey = sys.argv[5]
secretKey = sys.argv[6]
except:
print "Usage: get_aws.py MetricName Function Dimension Region AWS_ACCESS_KEY AWS_SECRET_ACCESS_KEY"
print "Example: get_aws.py CurrConnections Average \"CacheClusterId=cache,CacheNodeId=0001\" eu-west-1 ACCESS_KEY SECRET_ACCESS_KEY"
sys.exit(1)
# Parse the "Key1=val1,Key2=val2" dimension string into a dict.
dim = {}
firstSplit = dimSpace.split(',')
for word in firstSplit:
    secondSplit = word.split('=')
    dim[secondSplit[0]] = secondSplit[1]
# Resolve the region object matching the requested region name.
regions = boto.ec2.cloudwatch.regions()
reg = ''
for r in regions:
    if region == r.name:
        reg = r
c = boto.ec2.cloudwatch.CloudWatchConnection(aws_access_key_id=accessKey, aws_secret_access_key=secretKey, region=reg)
metrics = c.list_metrics(dimensions=dim)
# Query the last 20 minutes of datapoints for the requested metric.
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=20)
dataPoints = [];
for met in metrics:
    if met.name == metName:
        dataPoints = met.query(start, end, funcName)
if len(dataPoints) > 0:
    # Find the most recent datapoint by timestamp.
    # NOTE(review): 'max' shadows the builtin here; it works, but should
    # be renamed if this code is ever touched again.
    max = datetime.datetime.now() - datetime.timedelta(hours=1)
    index = 0
    for i in range(0,len(dataPoints)):
        if max < dataPoints[i][u'Timestamp']:
            max = dataPoints[i][u'Timestamp']
            index = i
    # Print the value of the requested statistic (Python 2 print).
    for key in dataPoints[index].keys():
        if funcName in key:
            value = dataPoints[index][key]
            print value
else:
    print 'Error! No response from Amazon.'
    sys.exit(2)
|
# -*- coding: utf-8 -*-
import sflow.gpu1 as tf
# @tf.scope
# def generator(z):
# deconv = dict(kernel=4, stride=2, padding='SAME')
#
# with tf.default_args(deconv=deconv):
# net = z
# net = net.dense(4*3*512).bn().relu()
# net = net.reshape((-1, 4, 3, 512)) # (4, 3)
# net = net.deconv(256).bn().relu() # (8, 6)
# net = net.deconv(128).bn().relu() # (16, 12)
# net = net.deconv(64).bn().relu() # (32, 24)
# net = net.deconv(32).bn().relu() # (64, 48)
# net = net.deconv(3, bias=True) # (128, 96)
#
# return tf.summary_image(net.sigmoid(), name='fake')
# @tf.scope
# def generator(z):
# conv = dict(kernel=4, stride=1, padding='SAME')
#
# with tf.default_args(conv=conv):
# net = z
# net = net.dense(4*3*512).bn().relu()
# net = net.reshape((-1, 4, 3, 512)) # (4, 3)
# net = net.sizeup(2).conv(256).bn().relu() # (8, 6)
# net = net.sizeup(2).conv(128).bn().relu() # (16, 12)
# net = net.sizeup(2).conv(64).bn().relu() # (32, 24)
# net = net.sizeup(2).conv(32).bn().relu() # (64, 48)
# net = net.sizeup(2).conv(3, bias=True) # (128, 96)
#
# return tf.summary_image(net.sigmoid(), name='fake')
@tf.scope
def generator(z):
    """Generate a (128, 96, 3) fake image from latent code *z*.

    Five subpixel (factor-2) upsampling stages take a 4x3x512 seed to
    128x96; the sigmoid output is logged to summaries as 'fake'.
    NOTE(review): uses the project's sflow fluent tensor API -- the
    semantics of subpixel/bn/conv defaults come from sflow, not raw TF.
    """
    conv = dict(kernel=4, stride=1, padding='SAME')
    subpixel = dict(kernel=3, factor=2, padding='SAME', bias=True)
    with tf.default_args(conv=conv, subpixel=subpixel):
        net = z
        net = net.dense(4*3*512).bn().relu()
        net = net.reshape((-1, 4, 3, 512))  # (4, 3)
        net = net.subpixel().bn().relu().conv(256).bn().relu()  # (8, 6)
        net = net.subpixel().bn().relu().conv(128).bn().relu()  # (16, 12)
        net = net.subpixel().bn().relu().conv(64).bn().relu()  # (32, 24)
        net = net.subpixel().bn().relu().conv(32).bn().relu()  # (64, 48)
        net = net.subpixel().bn().relu().conv(3, bias=True)  # (128, 96)
    return tf.summary_image(net.sigmoid(), name='fake')
@tf.scope
def discriminator(x, zdim):
    """Score real/fake and predict attributes plus a continuous code.

    Returns a tuple (disc, klass, cont):
      disc  -- real/fake logit, squeezed
      klass -- 40 attribute logits (CelebA attributes)
      cont  -- zdim-dim continuous code, sigmoid-activated
    NOTE(review): sflow fluent API; the VALID-padded convs shrink the
    (128, 96) input down to (3, 2) before the dense head.
    """
    conv = dict(kernel=3, padding='VALID', bias=True)
    with tf.default_args(conv=conv, dense=dict(bias=True)):
        net = x
        net = net.conv(16, stride=2).leaky(0.2).dropout(0.5)  # (63, 47)
        net = net.conv(32).leaky(0.2).dropout(0.5)  # (31, 23)
        net = net.conv(64, stride=2).leaky(0.2).dropout(0.5)  # (15, 11)
        net = net.conv(128).leaky(0.2).dropout(0.5)  # (7, 5)
        net = net.conv(256, stride=2).leaky(0.2).dropout(0.5)  # (3, 2)
        net = net.flat2d().dense(41 + zdim, bias=True)
        # Split the head into 1 real/fake logit, 40 attributes, zdim code.
        disc, klass, cont = tf.split(net, [1, 40, zdim], axis=1)
    return disc.squeeze(), klass, cont.sigmoid()
# @tf.scope
# def discriminator(x, zdim):
# deconv = dict(kernel=3, padding='SAME', bias=True)
#
# with tf.default_args(deconv=deconv, dense=dict(bias=True)):
# net = x
# net = net.sizedown(2).deconv(16).leaky(0.2).dropout(0.5) # (64, 48)
# net = net.sizedown(2).deconv(32).leaky(0.2).dropout(0.5) # (32, 24)
# net = net.sizedown(2).deconv(64).leaky(0.2).dropout(0.5) # (16, 12)
# net = net.sizedown(2).deconv(128).leaky(0.2).dropout(0.5) # (8, 6)
# net = net.sizedown(2).deconv(256).leaky(0.2).dropout(0.5) # (4, 3)
#
# net = net.flat2d().dense(41 + zdim, bias=True)
# disc, klass, cont = tf.split(net, [1, 40, zdim], axis=1)
#
# return disc.squeeze(), klass, cont.sigmoid()
# @tf.scope
# def discriminator(x, zdim):
# atrous = dict(kernel=3, rate=3, padding='SAME', bias=True)
#
# with tf.default_args(atrous=atrous, dense=dict(bias=True)):
# net = x
# net = net.sizedown(2).atrous(16).leaky(0.2).dropout(0.5) # (64, 48)
# net = net.sizedown(2).atrous(32).leaky(0.2).dropout(0.5) # (32, 24)
# net = net.sizedown(2).atrous(64).leaky(0.2).dropout(0.5) # (16, 12)
# net = net.sizedown(2).atrous(128).leaky(0.2).dropout(0.5) # (8, 6)
# net = net.sizedown(2).atrous(256).leaky(0.2).dropout(0.5) # (4, 3)
#
# net = net.flat2d().dense(41 + zdim, bias=True)
# disc, klass, cont = tf.split(net, [1, 40, zdim], axis=1)
#
# return disc.squeeze(), klass, cont.sigmoid()
# @tf.scope
# def discriminator(x, zdim):
# conv = dict(kernel=3, padding='SAME', bias=True)
# dwconv = dict(kernel=2, stride=2, padding='VALID', bias=True)
#
# with tf.default_args(conv=conv, dwconv=dwconv, dense=dict(bias=True)):
# net = x
# net = net.dwconv().relu().conv(32).relu().dropout() # (32, 24)
# net = net.dwconv().relu().conv(64).relu().dropout() # (16, 12)
# net = net.dwconv().relu().conv(16).relu().dropout() # (64, 48)
# net = net.dwconv().relu().conv(128).relu().dropout() # (8, 6)
# net = net.dwconv().relu().conv(256).relu().dropout() # (4, 3)
#
# net = net.flat2d().dense(41 + zdim, bias=True)
# disc, klass, cont = tf.split(net, [1, 40, zdim], axis=1)
#
# return disc.squeeze(), klass, cont.sigmoid()
def model_celeb_gan():
    """Build the CelebA GAN graph; return (loss_disc, loss_gen, loss_klass).

    Real images and generated fakes are concatenated along the batch axis
    and scored by one shared discriminator that also predicts the 40
    CelebA attributes and a continuous code.
    NOTE(review): tf.concat(axis, values) uses the old TF argument order
    as wrapped by sflow.
    """
    from sflow.data import celeba
    batch = 16
    zdim = 100
    # shape...
    data = celeba.attribute_trainset(batch=batch, size=(128, 96), threads=8)
    x = data.image  # input image
    x = tf.summary_image(x, name='real')
    # Discriminator targets: 1 for real images, 0 for fakes.
    y = tf.ones(data.batch)
    y_disc = tf.concat(0, [y, tf.zeros(batch)])
    # Generator input: random attribute bits plus a continuous code.
    z_klass = tf.random_uniform((batch, 40), 0., 1.).greater(0.5).to_float()  # data.label.to_float()
    z_cond = tf.random_uniform((batch, zdim), 0., 1.)
    z = tf.concat(1, [z_klass, z_cond])
    fake = generator(z)
    # Score real and fake together; keep the fake half where needed.
    xx = tf.concat(0, [x, fake])
    disc, klass, cont = discriminator(xx, zdim)
    _, disc_fake = tf.split(disc, 2)
    _, cont = tf.split(cont, 2)
    # Attribute targets: true labels for reals, sampled bits for fakes.
    target = tf.concat(0, [data.label, z_klass])
    loss_disc = tf.binary_cross_entropy(disc, y_disc).mean()
    loss_gen = tf.binary_cross_entropy(disc_fake, y).mean()
    loss_klass = tf.binary_cross_entropy(klass, target).mean()  # + 0.01 * tf.abs(cont - z_cond).mean()
    return loss_disc, loss_gen, loss_klass
def train():
    """Alternating GAN training loop with summaries and checkpointing."""
    loss_disc, loss_gen, loss_klass = model_celeb_gan()
    tf.summary_loss(loss_disc, name='loss_disc')
    tf.summary_loss(loss_gen, name='loss_gen')
    tf.summary_loss(loss_klass, name='loss_klass')
    # Separate optimizers: D also learns the attribute classifier; G uses
    # a 10x larger learning rate.
    train_d = tf.optim.Adam(lr=0.0001, beta1=0.5).minimize(loss_disc + loss_klass, scope='discriminator')
    train_g = tf.optim.Adam(lr=0.001, beta1=0.5).minimize(loss_gen + loss_klass, scope='generator')
    writer = tf.summary_writer(logdir='train/face/celeb_sub_conv')
    sess = tf.get_default_session()
    saver = tf.saver()
    for ep, gstep in tf.trainstep(maxep=100, epochper=60000//16, savers=saver):
        # One discriminator step, then one generator step per iteration.
        loss_d = sess.run([loss_disc, train_d])[0]
        loss_g = sess.run([loss_gen, train_g])[0]
        print([ep, gstep, loss_d, loss_g])
        if gstep % 30 == 0:
            writer.add_summary(gstep, sess=sess)
# Script entry point.
if __name__ == '__main__':
    train()
|
import numpy as np
def macierz(m):
    """Return an m x m integer matrix with entries 2 + 2*|row - col|.

    Equivalent to the original sum of banded diagonals (2 on the main
    diagonal, 4 on the first off-diagonals, 6 on the second, ...), but
    built in one vectorized expression instead of O(m) np.diag
    allocations and additions.
    """
    indices = np.arange(m)
    # Outer difference of the index vector gives |row - col| directly.
    return 2 + 2 * np.abs(indices[:, None] - indices[None, :])
# Demo: print the 5x5 banded matrix.
print(macierz(5))
|
# -*- coding=utf-8
'''
Created on 2016年9月23日
庄家规则
@author: zhaol
'''
from majiang2.ai.play_mode import MPlayMode
from majiang2.mao_rule.mao_base import MMaoRuleBase
class MMaoRuleFactory(object):
    """Factory that returns the dealer ("mao") rule for a play mode."""

    def __init__(self):
        super(MMaoRuleFactory, self).__init__()

    @classmethod
    def getMaoRule(cls, playMode):
        """Return the mao-rule object for the given play mode.

        Input:
            playMode - the play mode (MPlayMode)
        Returns:
            the mao rule for that play mode (currently always the base rule)
        """
        return MMaoRuleBase()
|
"""Iterators for paging through API responses.
These iterators
simplify the process
of paging through API responses
where the response
is a list of results
with a ``nextPageToken``.
To make an iterator work,
just override the ``get_items_from_response`` method
so that given a response
(containing a page of results)
it parses those results
into an iterable
of the actual objects you want::
class MyIterator(Iterator):
def get_items_from_response(self, response):
items = response.get('items', [])
for item in items:
yield MyItemClass.from_dict(item, other_arg=True)
You then can use this
to get **all** the results
from a resource::
>>> iterator = MyIterator(...)
>>> list(iterator) # Convert to a list (consumes all values).
Or you can walk your way through items
and call off the search early
if you find what you're looking for
(resulting in possibly fewer requests)::
>>> for item in MyIterator(...):
>>> print item.name
>>> if not item.is_valid:
>>> break
"""
from gcloud.storage.exceptions import StorageError
class Iterator(object):
    """Generic paging iterator over Cloud Storage list responses.

    Subclasses override :meth:`get_items_from_response` to turn one page
    of API results into the objects to yield.

    :type connection: :class:`gcloud.storage.connection.Connection`
    :param connection: Connection used to issue API requests.

    :type path: string
    :param path: Path queried for the list of items.
    """

    def __init__(self, connection, path):
        self.connection = connection
        self.path = path
        self.page_number = 0
        self.next_page_token = None

    def __iter__(self):
        """Yield every item from every available page, in order."""
        while self.has_next_page():
            page = self.get_next_page_response()
            for item in self.get_items_from_response(page):
                yield item

    def has_next_page(self):
        """Return whether another page of results is available.

        :rtype: bool
        :returns: True before the first request; afterwards, whether the
                  last response carried a ``nextPageToken``.
        """
        return self.page_number == 0 or self.next_page_token is not None

    def get_query_params(self):
        """Return query parameters for the next request, if any.

        :rtype: dict or None
        :returns: ``{'pageToken': ...}`` when a token is pending, else None.
        """
        if self.next_page_token:
            return {'pageToken': self.next_page_token}
        return None

    def get_next_page_response(self):
        """Fetch the next page and record its paging state.

        :rtype: dict
        :returns: The parsed JSON response of the next page's contents.
        :raises: RuntimeError when no pages remain.
        """
        if not self.has_next_page():
            raise RuntimeError('No more pages. Try resetting the iterator.')

        response = self.connection.api_request(
            method='GET', path=self.path, query_params=self.get_query_params())

        self.page_number += 1
        self.next_page_token = response.get('nextPageToken')

        return response

    def reset(self):
        """Rewind the iterator to its initial state."""
        self.page_number = 0
        self.next_page_token = None

    def get_items_from_response(self, response):
        """Parse one page of results into items; subclasses must override.

        Given the API response for one page, return an iterable of the
        actual objects the iterator should yield (typically constructing
        a Bucket or a Key per result).

        :type response: dict
        :param response: The response of asking for the next page of items.

        :rtype: iterable
        :returns: Items that the iterator should yield.
        """
        raise NotImplementedError
class KeyDataIterator(object):
    """An iterator listing data stored in a key.

    You shouldn't have to use this directly,
    but instead should use the helper methods
    on :class:`gcloud.storage.key.Key` objects.

    :type key: :class:`gcloud.storage.key.Key`
    :param key: The key from which to list data..
    """

    def __init__(self, key):
        self.key = key
        # NOTE: These variables will be initialized by reset().
        self._bytes_written = None
        self._total_bytes = None
        self.reset()

    def __iter__(self):
        # Stream the key's data chunk by chunk until exhausted.
        while self.has_more_data():
            yield self.get_next_chunk()

    def reset(self):
        """Resets the iterator to the beginning."""
        self._bytes_written = 0
        self._total_bytes = None

    def has_more_data(self):
        """Determines whether or not this iterator has more data to read.

        :rtype: bool
        :returns: Whether the iterator has more data or not.
        """
        if self._bytes_written == 0:
            return True
        elif not self._total_bytes:
            # self._total_bytes **should** be set by this point.
            # If it isn't, something is wrong.
            raise ValueError('Size of object is unknown.')
        else:
            return self._bytes_written < self._total_bytes

    def get_headers(self):
        """Gets range header(s) for next chunk of data.

        :rtype: dict
        :returns: A dictionary of query parameters.
        """
        start = self._bytes_written
        end = self._bytes_written + self.key.CHUNK_SIZE - 1
        # An open-ended range ('bytes=N-') is sent when the computed end
        # would run past the known object size.
        if self._total_bytes and end > self._total_bytes:
            end = ''
        return {'Range': 'bytes=%s-%s' % (start, end)}

    def get_url(self):
        """Gets URL to read next chunk of data.

        :rtype: string
        :returns: A URL.
        """
        return self.key.connection.build_api_url(
            path=self.key.path, query_params={'alt': 'media'})

    def get_next_chunk(self):
        """Gets the next chunk of data.

        Uses CHUNK_SIZE to determine how much data to get.

        :rtype: string
        :returns: The chunk of data read from the key.
        :raises: :class:`RuntimeError` if no more data or
                 :class:`gcloud.storage.exceptions.StorageError` in the
                 case of an unexpected response status code.
        """
        if not self.has_more_data():
            raise RuntimeError('No more data in this iterator. Try resetting.')

        response, content = self.key.connection.make_request(
            method='GET', url=self.get_url(), headers=self.get_headers())

        # 200 = whole object returned, 206 = partial content for the Range.
        # NOTE(review): response is used both for .status and mapping-style
        # access -- presumably an httplib2 Response; confirm if changed.
        if response.status in (200, 206):
            self._bytes_written += len(content)

            # The Content-Range trailer ('bytes a-b/total') carries the
            # object's total size; remember it to know when to stop.
            if 'content-range' in response:
                content_range = response['content-range']
                self._total_bytes = int(content_range.rsplit('/', 1)[1])

            return content

        # Expected a 200 or a 206. Got something else, which is unknown.
        raise StorageError(response)
|
# Read a count `a` and a divisor `b`, then count how many of the next
# `a` integers read from stdin are divisible by `b`.
a, b = map(int, input().split())
count = 0
for _ in range(a):
    value = int(input())
    if value % b == 0:
        count += 1
print(count)
|
from django.http.response import Http404
from django.shortcuts import redirect
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from apis.fitbit import utils
from apis.fitbit.models import UserFitbit
from apis.fitbit.serializers import FitbitAPIRequestSerializer
from apis.fitbit.tasks import import_user_fitbit_history_via_api
from apis.fitbit.utils import is_integrated
class FitbitLoginView(APIView):
    """Return the Fitbit OAuth authorization URL for the requesting client."""

    def get(self, request):
        # Remember where to send the user after the OAuth dance completes.
        next_url = request.GET.get('next', None)
        if next_url:
            request.session['fitbit_next'] = next_url
        else:
            request.session.pop('fitbit_next', None)

        callback_uri = request.build_absolute_uri(reverse('fitbit-complete'))
        fb = utils.create_fitbit(callback_uri=callback_uri)
        # authorize_token_url returns (token_url, code); code is unused here.
        token_url, _ = fb.client.authorize_token_url(redirect_uri=callback_uri)
        return Response({'url': token_url})
class FitbitCompleteView(APIView):
    """Exchange the posted OAuth ``code`` for tokens and store them."""

    def post(self, request):
        try:
            code = request.data['code']
        except KeyError:
            # No authorization code was posted back by the client.
            return redirect('/500')

        callback_uri = request.build_absolute_uri(reverse('fitbit-complete'))
        fb = utils.create_fitbit(callback_uri=callback_uri)
        try:
            token = fb.client.fetch_access_token(code, callback_uri)
            access_token = token['access_token']
            fitbit_user_id = token['user_id']
        except KeyError:
            raise Http404('Invalid Token')

        UserFitbit.objects.update_or_create(user=request.user, defaults={
            'fitbit_user_id': fitbit_user_id,
            'access_token': access_token,
            'refresh_token': token['refresh_token'],
            'expires_at': token['expires_at'],
        })

        # Prefer the URL stashed by FitbitLoginView, else the configured default.
        next_url = request.session.pop('fitbit_next', None) or utils.get_setting('FITBIT_LOGIN_REDIRECT')
        return Response({'next_url': next_url})
class FitbitUserAuthCheck(APIView):
    """Report whether the requesting user has authorized Fitbit credentials.

    Used by the frontend to decide which modal to display.
    """
    permission_classes = (IsAuthenticated, )
    url = 'fitbit-user-auth-check'

    def get(self, request):
        return Response(is_integrated(request.user))
class FitbitUserUpdateSleepHistory(APIView):
    """Kick off an asynchronous import of the user's Fitbit sleep history.

    NOTE: this endpoint is more RPC/SOA-style than RESTful, but it is not
    clearly worth modelling the import itself as a resource.
    """
    permission_classes = (IsAuthenticated,)
    throttle_scope = 'fitbit-api-sync'
    url = 'fitbit-user-update-sleep-history'

    def post(self, request):
        try:
            initial_data = {
                'start_date': request.data['start_date'],
                'end_date': request.data['end_date'],
            }
        except (MultiValueDictKeyError, KeyError) as exc:
            return Response('Missing POST parameters {}'.format(exc), status=400)

        serializer = FitbitAPIRequestSerializer(data=initial_data)
        serializer.is_valid(raise_exception=True)
        # Hand the heavy lifting to celery so the request returns quickly.
        import_user_fitbit_history_via_api.delay(user=request.user, **serializer.validated_data)
        return Response(status=202)
|
from . import utils
from . import sizes
from . import vision
from . import deepfake
from . import kaggle
from . import torch
from .utils import *
from .sizes import *
from .deepfake import ImageReader
from .torch import Learner
|
import cv2
import numpy as np
import os
# Target size (width, height) for the saved gesture crops.
image_x, image_y = 50, 50

def create_folder(folder_name):
    """Create *folder_name* (including missing parents) if it doesn't exist.

    BUG FIX: the original exists()+os.mkdir() pair failed when the parent
    directory (e.g. ``gestures/``) did not exist yet, and was racy between
    the check and the mkdir.  makedirs(..., exist_ok=True) handles both.
    """
    os.makedirs(folder_name, exist_ok=True)
def store_images(g_id):
    """Capture up to 1200 hand-gesture images for gesture *g_id* from the webcam.

    Each frame is skin-masked in HSV space, blurred and thresholded; the
    largest contour inside the capture rectangle is cropped, resized to
    (image_x, image_y) and written to ``gestures/<g_id>/<n>.jpg``.
    Press 'c' to toggle capturing on/off.
    """
    total_pics = 1200
    cap = cv2.VideoCapture(0)
    x, y, w, h = 300, 50, 350, 350  # capture rectangle (on the flipped frame)
    create_folder("gestures/" + str(g_id))
    pic_no = 0
    flag_start_capturing = False
    frames = 0
    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        # BUG FIX: cv2.Color_BGR2HSV does not exist (AttributeError);
        # the constant is COLOR_BGR2HSV.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(hsv, np.array([2, 50, 60]), np.array([25, 150, 255]))
        res = cv2.bitwise_and(frame, frame, mask=mask2)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        median = cv2.GaussianBlur(gray, (5, 5), 0)
        kernel_square = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(median, kernel_square, iterations=2)
        opening = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel_square)
        ret, thresh = cv2.threshold(opening, 30, 255, cv2.THRESH_BINARY)
        thresh = thresh[y:y + h, x:x + w]
        # findContours returns 2 values on OpenCV 2.x/4.x and 3 on 3.x;
        # pick the contour list in a version-agnostic way instead of [1].
        found = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        contours = found[0] if len(found) == 2 else found[1]
        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(contour) > 10000 and frames > 50:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                pic_no += 1
                save_img = thresh[y1:y1 + h1, x1:x1 + w1]
                save_img = cv2.resize(save_img, (image_x, image_y))
                cv2.putText(frame, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
                cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", save_img)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
        cv2.imshow("capturing gesture", frame)
        cv2.imshow("thresh", thresh)
        keypress = cv2.waitKey(1)
        if keypress == ord("c"):
            # BUG FIX: the original used '==' (comparison) instead of '='
            # (assignment), so the flag never toggled, frames never
            # advanced past 50, and no image was ever captured.
            if flag_start_capturing:
                flag_start_capturing = False
                frames = 0
            else:
                flag_start_capturing = True
        if flag_start_capturing:
            frames += 1
        if pic_no == total_pics:
            break
    # Release the camera and close windows (missing in the original).
    cap.release()
    cv2.destroyAllWindows()
import numpy as np
from keras.layers import Dense,Flatten,Conv2D
from keras.layers import MaxPooling2D,Dropout
from keras.utils import np_utils,print_summary
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
import pandas as pd
import keras.backend as K
# Load the flattened 50x50 gesture images: column 0 is the label,
# columns 1..2500 are pixel values.
data = pd.read_csv("trainfoo.csv")
dataset = np.array(data)
np.random.shuffle(dataset)
X = dataset
Y = dataset
X = X[:, 1:2501]
# BUG FIX: the original read "y = y[:, 0]", which raises NameError ('y'
# was never defined) and would have left Y as the whole dataset; the
# label column is column 0 of Y.
Y = Y[:, 0]
X_train = X[0:12000, :]
X_train = X_train / 255
X_test = X[12000:13201, :]
X_test = X_test / 255
Y = Y.reshape(Y.shape[0], 1)
Y_train = Y[0:12000, :]
Y_train = Y_train.T
Y_test = Y[12000:13201, :]
Y_test = Y_test.T
image_x = 50
image_y = 50
# One-hot encode the labels, then drop the leading transpose axis.
train_y = np_utils.to_categorical(Y_train)
test_y = np_utils.to_categorical(Y_test)
train_y = train_y.reshape(train_y.shape[1], train_y.shape[2])
test_y = test_y.reshape(test_y.shape[1], test_y.shape[2])
# Reshape flat pixel rows into (samples, 50, 50, 1) image tensors.
X_train = X_train.reshape(X_train.shape[0], image_x, image_y, 1)
X_test = X_test.reshape(X_test.shape[0], image_x, image_y, 1)
def keras_model(image_x, image_y):
    """Build the gesture-classification CNN and its checkpoint callback.

    :param image_x: input image width
    :param image_y: input image height
    :returns: (compiled Sequential model, list of training callbacks)
    """
    num_of_classes = 12
    layers = [
        Conv2D(filters=32, kernel_size=(5, 5), input_shape=(image_x, image_y, 1), activation="relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"),
        Conv2D(64, (5, 5), activation="relu"),
        MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding="same"),
        Flatten(),
        Dense(1024, activation="relu"),
        Dropout(0.6),
        Dense(num_of_classes, activation="softmax"),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    # Checkpoint only the best model (by validation accuracy) during training.
    checkpoint = ModelCheckpoint("face-rec_256.h5", monitor="val_acc", verbose=1,
                                 save_best_only=True, mode="max")
    return model, [checkpoint]
# Build the CNN and train for 10 epochs, checkpointing the best weights.
model,callbacks_list=keras_model(image_x,image_y)
model.fit(X_train,train_y,validation_data=(X_test,test_y),epochs=10,batch_size=64,callbacks=callbacks_list)
# Report held-out error rate (scores[1] is accuracy).
scores=model.evaluate(X_test,test_y,verbose=0)
print("CNN error: %.2f%%" % (100-scores[1]*100))
print_summary(model)
# NOTE(review): the checkpoint saves to "face-rec_256.h5" while the final
# model is saved as "handEmo.h5" -- confirm both filenames are intended.
model.save("handEmo.h5")
"""An automatic testing framework to compare VB and Python versions of a function"""
import os
import pprint
from vb2py import converter, vbparser, config
# Autotest configuration: defaults from autotest.ini, with user overrides
# read from ../vb2pyautotest.
Config = config.VB2PYConfigObject("autotest.ini", "../vb2pyautotest")
# << AutoTest Functions >> (1 of 3)
class TestMaker:
    """Base class for generating VB/Python comparison unit tests.

    Subclasses override makeTestForFunction() to emit a test script in
    their target language.
    """

    default_filename = "testscript.txt"

    def __init__(self, filename):
        """Initialize the test maker with the VB source file to convert."""
        self.filename = filename

    def parseVB(self):
        """Parse the VB code."""
        self.parser = converter.VBConverter(converter.importTarget("PythonCard"), converter.FileParser)
        self.parser.doConversion(self.filename)

    def createTests(self):
        """Create the test signatures from the parsed project."""
        self.tests = self.extractSignatures(self.parser)

    def makeTestForFunction(self, testid, modulename, fnname, paramlist):
        """Make a test script for one function (subclass responsibility).

        The function resides in *modulename* and takes the parameters
        described by *paramlist*, each entry pairing a parameter name
        with the values to sweep.  Results are written to a file named
        after *testid*.
        """
        raise NotImplementedError

    def extractSignatures(self, project):
        """Extract function test signatures from a project."""
        signatures = []
        for module in project.modules:
            for defn in module.code_structure.locals:
                if not isinstance(defn, vbparser.VBFunction):
                    continue
                ranges = []
                for param in defn.parameters:
                    try:
                        thisrange = Config["DefaultRanges", param.type]
                    except config.ConfigParser.NoOptionError:
                        thisrange = []
                    # NOTE(review): eval() of a config-file value --
                    # acceptable only because the config is trusted input.
                    ranges.append((param.identifier, eval(thisrange)))
                signatures.append([
                    "test_%s_%s" % (module.name, defn.identifier),
                    module.name,
                    defn.identifier,
                    ranges,
                ])
        return signatures

    def createTestScript(self):
        """Create a single script containing all the tests."""
        return "\n".join(self.makeTestForFunction(*test) for test in self.tests)

    def writeTestsToFile(self, filename=None):
        """Write the test script to *filename* (default: default_filename)."""
        if filename is None:
            filename = self.default_filename
        f = open(filename, "w")
        f.write(self.createTestScript())
        f.close()

    def makeTestFile(self, filename=None):
        """Translate the VB source and write out the generated tests."""
        self.parseVB()
        self.createTests()
        self.writeTestsToFile(filename)
# << AutoTest Functions >> (2 of 3)
class PythonTestMaker(TestMaker):
    """Generates Python unit-test scripts for the converted functions."""

    default_filename = "testscript.py"

    def makeTestForFunction(self, testid, modulename, fnname, paramlist):
        """Make a Python test script for a function.

        The function lives in *modulename*; each (name, values) pair in
        *paramlist* supplies the values to sweep for that parameter.
        The generated script writes its results to '<testid>_py.txt'.
        """
        lines = ["from %s import %s" % (modulename, fnname), "results = []"]
        indent = ""
        # One nested loop per swept parameter.
        for name, values in paramlist:
            lines.append("%sfor %s in %s:" % (indent, name, values))
            indent += "\t"
        arg_list = ",".join(pair[0] for pair in paramlist)
        lines.append("%sresults.append((%s(%s), %s))" % (indent, fnname, arg_list, arg_list))
        lines.append("f = open('%s_py.txt', 'w')" % testid)
        lines.append(r"f.write('# vb2Py Autotest results\n')")
        lines.append(r"f.write('\n'.join([', '.join(map(str, x)) for x in results]))")
        lines.append(r"f.close()")
        return "\n".join(lines)
# << AutoTest Functions >> (3 of 3)
class VBTestMaker(TestMaker):
    """Generates VB unit-test scripts for the original functions."""

    default_filename = "testscript.bas"

    def makeTestForFunction(self, testid, modulename, fnname, paramlist):
        """Make a VB test script for a function.

        Each (name, values) pair in *paramlist* becomes a nested
        ``For Each`` loop; the generated script writes its results to
        '<testid>_vb.txt'.
        """
        lines = ["Dim Results()", "ReDim Results(0)"]
        indent = ""
        # One nested For Each per swept parameter.
        for name, values in paramlist:
            lines.append("%sfor each %s in Array%s" % (indent, name, tuple(values)))
            indent += "\t"
        arg_list = ",".join(pair[0] for pair in paramlist)
        lines.append("%sRedim Preserve Results(UBound(Results)+1)" % indent)
        lines.append("%sAnswer = %s(%s)" % (indent, fnname, arg_list))
        # Record the answer followed by the parameter values, comma separated.
        result_list = ' & "," & '.join("Str(%s)" % item
                                       for item in ["Answer"] + [pair[0] for pair in paramlist])
        lines.append("%sResults(Ubound(Results)) = %s" % (indent, result_list))
        for name, values in paramlist:
            indent = indent[:-1]
            lines.append("Next %s" % name)
        lines.extend(("Chn = NextFile",
                      "Open '%s_vb.txt' For Output As #Chn" % testid,
                      'Print #Chn, "# vb2Py Autotest results"',
                      "For Each X In Results",
                      " Print #Chn, X",
                      "Next X",
                      "Close #Chn"))
        return "\n".join(lines)
# -- end -- << VBTestMaker methods >>
# -- end -- << AutoTest Functions >>
if __name__ == "__main__":
    # NOTE(review): hard-coded Windows path; the makers are only
    # constructed here -- makeTestFile() is never invoked. Confirm this
    # is intended (it may be a scratch/demo entry point).
    filename = 'c:\\development\\python22\\lib\\site-packages\\vb2py\\vb\\test3\\Globals.bas'
    p = PythonTestMaker(filename)
    v = VBTestMaker(filename)
|
#!/usr/bin/env python
'''
Define a function that can receive two integral numbers in string form and compute their sum and then print it in console.
Hints: Use int() to convert a string to integer.
'''

def sum(num1, num2):
    """Print the sum of two integers supplied as strings.

    NOTE(review): shadows the builtin ``sum``; the name is kept for
    interface compatibility with the exercise statement.
    """
    total = int(num1) + int(num2)
    print(total)

num1 = "3"
num2 = "5"
sum(num1, num2)
|
# 543
from utils.treeNode import TreeNode
# time: O(N)
# space: O(N)
class Solution:
    """Diameter of a binary tree via a single DFS (LeetCode 543).

    time:  O(N)
    space: O(N) recursion stack
    """

    def dimater(self, root):
        # NOTE(review): method name looks like a typo for "diameter";
        # kept for interface compatibility with callers below.
        self.result = 0
        if not root:
            return self.result

        def longest_path(node):
            # Depth (in edges) of the deeper branch below *node*; updates
            # the best through-this-node path length as a side effect.
            down_left = 1 + longest_path(node.left) if node.left else 0
            down_right = 1 + longest_path(node.right) if node.right else 0
            self.result = max(self.result, down_left + down_right)
            return max(down_left, down_right)

        longest_path(root)
        return self.result
# time = O(N)
# space = O(N)
class Solution2:
    """Diameter of a binary tree via memoized heights (LeetCode 543).

    time:  O(N)
    space: O(N)
    """

    def __init__(self):
        # BUG FIX: the cache was a class attribute, shared by every
        # instance and never cleared -- it kept all previously-seen
        # nodes alive forever and returned stale heights if a tree was
        # mutated between calls. A per-instance cache keeps the
        # memoization without those problems.
        self.heights_map = dict()

    def dimater(self, root):
        """Return the diameter (in edges) of the tree rooted at *root*."""
        if not root:
            return 0
        return max(
            self.height(root.left) + self.height(root.right),
            self.dimater(root.left),
            self.dimater(root.right)
        )

    def height(self, root):
        """Height in nodes of *root* (0 for None), memoized per node."""
        if not root:
            return 0
        if root in self.heights_map:
            return self.heights_map[root]
        height = 1 + max(self.height(root.left), self.height(root.right))
        self.heights_map[root] = height
        return height
# Build a test tree (longest path: 4-3-2-5-6-7, i.e. diameter 5):
#   1 -L- 2 -L- 3 -L- 4
#          \R- 5 -R- 6 -R- 7
one, two, three, four = TreeNode(1), TreeNode(2), TreeNode(3), TreeNode(4)
five, six, seven = TreeNode(5), TreeNode(6), TreeNode(7)
one.left = two
two.left = three
three.left = four
two.right = five
five.right = six
six.right = seven

print(one)
print('===============')
solution = Solution()
print(solution.dimater(one))
class SOlution:
    """Diameter of a binary tree via per-call memoized depths (LeetCode 543).

    NOTE(review): class name 'SOlution' looks like a typo (perhaps for
    'Solution3'); kept for interface compatibility.
    """

    def get_diameter(self, root):
        """Return the diameter (in edges) of the tree rooted at *root*."""
        depth_map = dict()
        if not root:
            return 0

        def depth(node):
            # Depth in nodes (a leaf is 1), memoized by node identity.
            if not node:
                return 0
            if node in depth_map:
                return depth_map[node]
            result = 1 + max(depth(node.left), depth(node.right))
            depth_map[node] = result
            return result

        # BUG FIX: the through-root path length in edges is
        # depth(left) + depth(right); the original added an extra 1,
        # over-counting every diameter by one (a single node returned 1
        # instead of 0, disagreeing with Solution and Solution2 above).
        return max(
            depth(root.left) + depth(root.right),
            self.get_diameter(root.left),
            self.get_diameter(root.right)
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.