text stringlengths 38 1.54M |
|---|
from MidiEvents import MetaEvent
from MidiEvents import ContinuationEvent
from MidiEvents import MidiEvent
def readFourBytes(f):
    """Read a big-endian unsigned 32-bit integer from file object *f*.

    Uses one-byte slices instead of indexing, because on Python 3 indexing a
    bytes object yields an int and the original ord(dat[0]) raised TypeError;
    slicing works identically on Python 2 str and Python 3 bytes.
    """
    dat = f.read(4)
    value = 0
    for i in range(4):
        value = (value << 8) | ord(dat[i:i + 1])
    return value
def readTwoBytes(f):
    """Read a big-endian unsigned 16-bit integer from file object *f*.

    One-byte slices keep this working on both Python 2 (str) and Python 3
    (bytes); the original ord(dat[0]) failed on Python 3.
    """
    dat = f.read(2)
    return (ord(dat[0:1]) << 8) | ord(dat[1:2])
def readByte(f):
    """Return the next byte of *f* as an int in [0, 255]."""
    dat = f.read(1)
    return ord(dat)
def readDelta(f):
    """Read a MIDI variable-length quantity (7 bits per byte, big-endian).

    The high bit of each byte flags continuation; the first byte with the
    high bit clear terminates the value.
    """
    value = 0
    while True:
        byte = readByte(f)
        value = (value << 7) | (byte & 0x7F)
        if byte < 0x80:
            return value
def readHeaderChunk(f):
    """Read the SMF 'MThd' chunk; return (format, ntracks, division).

    :param f: binary file positioned at the start of the file
    :raises Exception: if the magic tag is missing or the declared header
        length is not the mandatory 6 bytes.
    """
    dat = f.read(4)
    # bytes literal so the comparison also works on Python 3, where
    # f.read() on a binary file returns bytes, never str.
    if dat != b"MThd":
        raise Exception("Missing 'MThd' header")
    en = readFourBytes(f)
    if en != 6:
        raise Exception("Expected header length to be 6 bytes but got " + str(en))
    return (readTwoBytes(f), readTwoBytes(f), readTwoBytes(f))
def readTrackChunk(f, num):
    """Parse one SMF 'MTrk' chunk and return its events as a list.

    :param f: binary file positioned at the start of the track chunk
    :param num: track index (currently unused; kept for callers)
    :return: list of MetaEvent / MidiEvent / ContinuationEvent objects
    :raises Exception: on a missing 'MTrk' tag or a not-yet-implemented
        event command.
    """
    ret = []
    dat = f.read(4)
    # bytes literal so the check also works on Python 3 binary reads
    if dat != b"MTrk":
        raise Exception("Missing 'MTrk' header")
    trackSize = readFourBytes(f)
    eot = f.tell() + trackSize  # absolute offset of the end of this track
    prev = None  # last full channel event, used for running status
    while f.tell() != eot:
        delta = readDelta(f)
        d = readByte(f)
        if d == 0xFF:
            # meta event: type byte, variable-length size, payload
            t = readByte(f)
            tl = readDelta(f)
            td = f.read(tl)
            ret.append(MetaEvent(delta, t, td))
            continue
        com = d >> 4    # upper nibble: command
        chan = d & 0xF  # lower nibble: channel
        if com < 8:
            # first byte has no status bit set -> running status: this event
            # continues the previous channel event's status.
            e = ContinuationEvent(delta, prev, [d, readByte(f)])
            ret.append(e)
            continue
        elif com == 9:
            # Note On: note number + velocity
            nn = readByte(f)
            ve = readByte(f)
            e = MidiEvent(delta, chan, "NoteOn", [nn, ve])
            prev = e
            ret.append(e)
            continue
        elif com == 11:
            # Controller Change: controller number + value
            cn = readByte(f)
            va = readByte(f)
            e = MidiEvent(delta, chan, "ControllerChange", [cn, va])
            prev = e
            ret.append(e)
            continue
        elif com == 12:
            # Program Change: single program byte.
            # BUG FIX: the original built the MidiEvent without assigning it
            # to `e`, so the *previous* event was appended again instead.
            prg = readByte(f)
            e = MidiEvent(delta, chan, "ProgramChange", [prg])
            prev = e
            ret.append(e)
            continue
        # Commands 8 (NoteOff), 10 (aftertouch), 13, 14 and 15 are not
        # implemented yet; fail loudly with the offending command/channel
        # (replaces the Python-2-only `print com,chan` + bare raise).
        raise Exception("Implement me: command %d on channel %d" % (com, chan))
    return ret
def parseFile(filename):
    """Parse a Standard MIDI File.

    :param filename: path to the .mid file
    :return: (time_division, tracks) where tracks is a list of event lists,
        one per 'MTrk' chunk.
    """
    with open(filename, "rb") as f:
        (_, numTracks, time) = readHeaderChunk(f)
        tracks = []
        # range() instead of the Python-2-only xrange(); same behaviour.
        for n in range(numTracks):
            events = readTrackChunk(f, n)
            tracks.append(events)
        return (time, tracks)
|
from pathlib import Path
import tensorflow as tf
from unsupervised_dna import (
LoadImageEncoder,
LoadImageVAE,
)
# Let tf.data choose parallelism / prefetch buffer sizes dynamically.
AUTOTUNE = tf.data.AUTOTUNE
class DatasetVAE:
    """tf.data pipeline over a folder of .jpg k-mer images.

    Yields (input, target) pairs — both the same image — suitable for
    training an autoencoder/VAE. Images are assumed square with side
    2**kmer, matching LoadImageVAE.
    """

    def __init__(self, data_dir: Path, batch_size: int, kmer: int, shuffle: bool = True):
        self.data_dir = Path(data_dir)
        self.batch_size = batch_size
        self.kmer = kmer
        self.shuffle = shuffle
        self.img_loader = self.get_img_loader()
        self.charge_dataset()

    def charge_dataset(self,):
        """Collect every *.jpg under data_dir into self.ds."""
        pattern = str(self.data_dir / '*.jpg')
        self.ds = tf.data.Dataset.list_files(pattern)
        total = len(self.ds)
        print("dataset loaded from: {} | Total files: {}".format(str(self.data_dir), total))

    def split_train_val(self, val_size):
        """Split self.ds into (train, val); *val_size* is a fraction of the files."""
        n_val = int(len(self.ds) * val_size)
        return self.ds.skip(n_val), self.ds.take(n_val)

    def charge_img_loader(self, ds):
        """Map the image loader over *ds*, decoding files in parallel."""
        return ds.map(self.img_loader, num_parallel_calls=AUTOTUNE)

    def configure_for_performance(self, ds):
        """cache -> (optional) shuffle -> batch -> prefetch."""
        ds = ds.cache()
        if self.shuffle is True:
            ds = ds.shuffle(buffer_size=len(ds))
        return ds.batch(self.batch_size).prefetch(buffer_size=AUTOTUNE)

    def preprocessing(self, ds):
        """Rescale input and target pixel values from [0, 255] to [0, 1]."""
        rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
        return ds.map(lambda x, y: (rescale(x), rescale(y)))

    def get_img_loader(self,):
        """Loader producing (image, image) pairs of side 2**kmer."""
        side = 2 ** self.kmer
        return LoadImageVAE(side, side)

    def __call__(self, for_training: bool = True, val_size: float = 0.2):
        """
        given a directory, returns train and val sets if 'for_training == True'
        otherwise, returns one dataset
        """
        if for_training is True:
            train_ds, val_ds = self.split_train_val(val_size)
            # loader -> performance config -> rescaling, for each split
            train_ds = self.preprocessing(self.configure_for_performance(self.charge_img_loader(train_ds)))
            val_ds = self.preprocessing(self.configure_for_performance(self.charge_img_loader(val_ds)))
            return train_ds, val_ds
        # single dataset over all files
        ds = self.charge_img_loader(self.ds)
        ds = self.configure_for_performance(ds)
        return self.preprocessing(ds)
class DatasetEncoder(DatasetVAE):
    """Dataset yielding single rescaled images (no targets), e.g. to feed the
    trained encoder for embedding extraction. Defaults to no shuffling so
    outputs keep file order."""

    def __init__(self, data_dir: Path, batch_size: int, kmer: int, shuffle: bool = False):
        # The original duplicated DatasetVAE.__init__ line for line; delegate
        # instead so the setup logic lives in one place. Only the default for
        # `shuffle` differs (False here).
        super().__init__(data_dir, batch_size, kmer, shuffle)

    def __call__(self,):
        """Return the full dataset: loader -> performance config -> rescale."""
        ds = self.charge_img_loader(self.ds)
        ds = self.configure_for_performance(ds)
        ds = self.preprocessing(ds)
        return ds

    def get_img_loader(self,):
        # Same (2**kmer, 2**kmer) images as the parent, loaded without a target.
        return LoadImageEncoder(2**self.kmer, 2**self.kmer)

    def preprocessing(self, ds):
        """Rescale pixel values from [0, 255] to [0, 1] (single image, no pair)."""
        normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
        ds = ds.map(lambda x: normalization_layer(x))
        return ds
|
from os import path
# Enable CSRF protection for Flask-WTF forms.
WTF_CSRF_ENABLED = True
# SECURITY NOTE(review): secret key committed in source control — should be
# loaded from an environment variable or a secrets store instead.
SECRET_KEY = 'princesse123'
# SQLite database file located next to this config module.
db_filename = path.join(path.dirname(__file__), 'templog.db')
|
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN, Dropout
#1. 데이터
x = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6]]) # (4, 3)
y = np.array([4,5,6,7]) # (4,)
print(x.shape, y.shape)
x = x.reshape(4, 3, 1) # (batch_size, timesteps, feature)
print(x)
#2. 모델구성
model = Sequential()
model.add(SimpleRNN(units=10, activation='relu', input_shape=(3, 1)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(1))
# model.summary()
# simple_rnn (SimpleRNN) (None, 10) 120
# 파라미터값이 120이 나오는지 구하라
# ( unit 개수 * unit 개수 ) + ( input_dim(feature) 수 * unit 개수 ) + ( 1 * unit 개수)
# (10*10) + (1*10) + (1*10) = 120
# (Input + bias) * output + output * output --> (Input + baias + output) * output
#3. 컴파일, 훈련
model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=100, batch_size=1)
#4. 평가, 예측
x_input = np.array([5,6,7]).reshape(1,3,1)
results = model.predict(np.array(x_input))
print(results)
# [[8.2254505]]
# [[7.8925486]] |
from lxml import etree
import os
import sqlite3
import db_tools
def populate_questions_answers_tables(elem, c):
    """Insert one Posts.xml row element into the questions or answers table.

    :param elem: lxml element for a <row>; all fields are XML attributes
    :param c: open DB connection exposing cursor() and commit()
    """
    # BUG FIX: the original compared with `is "1"` / `is "2"` (object
    # identity, not equality). Identity with a string literal is never
    # guaranteed for strings parsed from XML, so rows could be silently
    # skipped; use == instead.
    post_type = elem.attrib['PostTypeId']
    is_question = post_type == "1"
    has_accepted_answer = 'AcceptedAnswerId' in elem.attrib
    if is_question and has_accepted_answer:
        q_id = int(elem.attrib['Id'])
        # strip non-ASCII characters rather than fail on them
        title = elem.attrib['Title'].encode('ascii', 'ignore')
        body = elem.attrib['Body'].encode('ascii', 'ignore')
        score = int(elem.attrib['Score'])
        views = int(elem.attrib['ViewCount'])
        accepted_answer_id = int(elem.attrib['AcceptedAnswerId'])
        q_cur = c.cursor()
        q_cur.execute('INSERT OR IGNORE INTO questions (id, title, body, score, views, acceptedanswerid) VALUES (?, ?, ?, ?, ?, ?)',
                      (q_id, title, body, score, views, accepted_answer_id))
        c.commit()
    if post_type == "2":
        a_id = int(elem.attrib['Id'])
        body = elem.attrib['Body'].encode('ascii', 'ignore')
        score = int(elem.attrib['Score'])
        q_id = int(elem.attrib['ParentId'])
        a_cur = c.cursor()
        a_cur.execute('INSERT OR IGNORE INTO answers (id, body, score, pid) VALUES (?, ?, ?, ?)',
                      (a_id, body, score, q_id))
        c.commit()
def fast_iter(context, c, limit=None):
    """Stream over iterparse *context*, inserting each element into the DB
    via populate_questions_answers_tables, and free parsed elements as we go
    to keep memory flat on very large XML files.

    :param context: lxml etree.iterparse iterator
    :param c: open DB connection
    :param limit: optional maximum number of elements to process
    """
    has_limit = limit is not None
    ct = 0
    for event, elem in context:
        if has_limit:
            if ct > limit:
                break
        # progress report every 50k rows
        if ct % 50000 == 0:
            print("completed %i rows." % (ct))
        populate_questions_answers_tables(elem, c)
        ct += 1
        # free the element and all already-processed preceding siblings so
        # the parsed tree does not grow unbounded
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
if __name__=='__main__':
    # DATASET MUST BE IN SOURCE FOLDER WITHIN A FOLDER CALLED 'datasets'
    static_path = os.getcwd()
    # NOTE(review): `users` is assigned but never used below.
    users = static_path+'/../datasets/Users.xml'
    posts = static_path+'/../datasets/Posts.xml'
    # connect to the dataset (streaming XML parse)
    context = etree.iterparse(posts)
    # connect to the database
    print("Connecting to DB")
    connection = db_tools.connect()
    print("Setting up DB (if necessary)")
    db_tools.setup_db(connection)
    print("Populating DB")
    fast_iter(context, connection, limit=10000)
    connection.close()
    del context
|
from datetime import date
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrule, WEEKLY, WE
# Reference date that month counting starts from.
START_DATE = date(2018, 11, 1)
# A partial month counts as a whole month once this many days have passed.
MIN_DAYS_TO_COUNT_AS_MONTH = 10
MONTHS_PER_YEAR = 12
def calc_months_passed(year, month, day):
    """Construct a date object from the passed in arguments.
    If this fails due to bad inputs reraise the exception.
    Also if the new date is < START_DATE raise a ValueError.
    Then calculate how many months have passed since the
    START_DATE constant. We suggest using dateutil.relativedelta!
    One rule: if a new month is >= 10 (MIN_DAYS_TO_COUNT_AS_MONTH)
    days in, it counts as an extra month.
    For example:
    date(2018, 11, 10) = 9 days in => 0 months
    date(2018, 11, 11) = 10 days in => 1 month
    date(2018, 12, 11) = 1 month + 10 days in => 2 months
    date(2019, 12, 11) = 1 year + 1 month + 10 days in => 14 months
    etc.
    See the tests for more examples.
    Return the number of months passed int.
    """
    # NOTE(review): this first attempt is immediately shadowed by the second
    # definition of calc_months_passed below; only the later one is callable.
    constr_date = date(year=year, month=month, day=day)
    #if isinstance(year , str) or isinstance(month, str) or isinstance(day, str):
    #raise TypeError
    if constr_date < START_DATE: raise ValueError
    # Shift one month forward once enough days of the current month passed.
    if constr_date.day > MIN_DAYS_TO_COUNT_AS_MONTH:
        constr_date = constr_date + relativedelta(months=+1)
    return relativedelta(constr_date, START_DATE).years * 12 + relativedelta(constr_date, START_DATE).months
#pybites
def calc_months_passed(year, month, day):
    """Return the number of whole months between START_DATE and the given date.

    A partially elapsed month counts as a full month once at least
    MIN_DAYS_TO_COUNT_AS_MONTH (10) of its days have passed.

    :raises ValueError: if the date precedes START_DATE (invalid
        year/month/day values raise through the date() constructor).
    """
    new_date = date(year=year, month=month, day=day)
    if new_date < START_DATE:
        # message added for easier debugging; exception type unchanged
        raise ValueError("date must be on or after {}".format(START_DATE.isoformat()))
    diff = relativedelta(new_date, START_DATE)
    month_count = diff.years * MONTHS_PER_YEAR + diff.months
    if diff.days >= MIN_DAYS_TO_COUNT_AS_MONTH:
        month_count += 1
    return month_count
|
# -*- coding: utf-8 -*-
#use:
#python opencv_labeling.py img/520f8a8d.jpg
import cv2
import numpy as np
import sys
# Command-line handling: an optional first argument overrides the default image.
args = sys.argv
image_path = ""
if len(args) < 2:
    image_path = "img/520f8a8d.jpg"
else:
    image_path = str(args[1])
# NOTE(review): this module-level read appears unused — main() reads the
# image again from image_path.
img = cv2.imread(image_path)
def main():
    """Label the connected components of the input image and display them,
    each component drawn in a random colour, until 'q' is pressed."""
    # Load the input image
    im = cv2.imread(image_path)
    # Convert to grayscale
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # Binarization (Otsu threshold)
    # gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    gray = cv2.threshold(gray, 0, 255, cv2.THRESH_TOZERO | cv2.THRESH_OTSU)[1]
    # Connected-component labeling
    n, label = cv2.connectedComponents(gray)
    # Prepare one random colour per label (n labels)
    rgbs = np.random.randint(0,255,(n+1,3))
    # print(type(label))
    # print(len(label))
    # Debug output: number of labeled pixels in each row
    for arr in label:
        print(np.count_nonzero(arr))
    # Colour every labeled pixel; background (label 0) becomes black
    for y in range(0, gray.shape[0]):
        for x in range(0, gray.shape[1]):
            if label[y, x] > 0:
                im[y, x] = rgbs[label[y, x]]
            else:
                im[y, x] = [0, 0, 0]
    while(1):
        # cv2.imshow("Labeling", gray)
        # Show the result window
        cv2.imshow("Labeling", im)
        # Quit when 'q' is pressed
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
|
""" Script "output_analysis_v5.py" from 12.05.2020.
It was used for simulation and analysis of "S90_DSNB_CCatmo_reactor_NCatmo".
output_analysis_v5.py:
The Script is a function to display and analyze the results of the MCMC analysis.
The MCMC analysis is done either with analyze_spectra_v7_local.py or with analyze_spectra_v7_server2.py.
The script output_analysis_v5.py is used in the script output_analysis_v4_server.py
(when you display the results on the server)
"""
import numpy as np
import datetime
# define the function, which analyzes the output of the analysis:
def output_analysis(save_data, dm_mass, path_output, path_analysis, file_info_analysis, file_number_start,
                    file_number_stop):
    """
    :param save_data: boolean value to define, if the result of output_analysis_v3.py is saved (boolean)
    :param dm_mass: DM mass in MeV (float)
    :param path_output: path of the output folder (output folder is "dataset_output_{}") (string)
    :param path_analysis: the path of the analysis folder (analysis folder is "analysis_mcmc") (string)
    :param file_info_analysis: path of the file, where the information about the analysis is saved (string)
    :param file_number_start: number of the files, that should be read in (is equal to dataset_start and dataset_stop)
    (integer)
    :param file_number_stop: number of the files, that should be read in (is equal to dataset_start and dataset_stop)
    (integer)
    :return:
    number_of_entries: number of files, that are read in (Equivalent to the number of entries in the result array)
    (integer)
    lower_energy_bound: lower bound of the energy window in MeV (float)
    upper_energy_bound: upper bound of the energy window in MeV (float)
    s_mode: mode of the total number of signal events (np.array of float)
    s_50: mean of the array s_mode (float)
    s_50_sigma: standard deviation of the array s_mode (float)
    s_50_16: 16% confidence level of the array s_mode (float)
    s_50_84: 84% confidence level of the array s_mode (float)
    s_50_2_5: 2.5% confidence level of the array s_mode (float)
    s_50_97_5: 97.5% confidence level of the array s_mode (float)
    s_50_0_15: 0.15% confidence level of the array s_mode (float)
    s_50_99_85: 99.85% confidence level of the array s_mode (float)
    signal_expected: number of expected signal events in the energy window (float)
    s_90_limit: values of upper 90 percent limit of the number of signal events (np.array of float)
    s_90: mean of s_90_limit (float)
    s_90_sigma: std of s_90_limit (float)
    s_90_16: 16% confidence level of s_90_limit (float)
    s_90_84: 84% confidence level of s_90_limit (float)
    s_90_2_5: 2.5% confidence level of s_90_limit (float)
    s_90_97.5: 97.5% confidence level of s_90_limit (float)
    s_90_0_15: 0.15% confidence level of s_90_limit (float)
    s_90_99_85: 99.85% confidence level of s_90_limit (float)
    dsnb_mode: mode of the total number of DSNB background events (np.array of float)
    dsnb_50: mean of dsnb_mode (float)
    dsnb_50_sigma: std of dsnb_mode (float)
    dsnb_50_16: 16% confidence level of dsnb_mode (float)
    dsnb_50_84: 84% confidence level of dsnb_mode (float)
    dsnb_expected: number of expected DSNB background events in the energy window (float)
    ccatmo_p_mode: mode of the total number of CCatmo background events on p (np.array of float)
    ccatmo_p_50: mean of ccatmo_mode (float)
    ccatmo_p_50_sigma: std of ccatmo_mode (float)
    ccatmo_p_50_16: 16% C.L. of ccatmo_mode (float)
    ccatmo_p_50_84: 84% C.L. of ccatmo_mode (float)
    ccatmo_p_expected: number of expected atmospheric CC background events on p in the energy window (float)
    reactor_mode: mode of the total number of reactor background events (np.array of float)
    reactor_50: mean of reactor_mode (float)
    reactor_50_sigma: std of reactor_mode (float)
    reactor_50_16: 16% C.L. of reactor_mode (float)
    reactor_50_84: 84% C.L. of reactor_mode (float)
    reactor_expected: number of expected reactor background events in the energy window (float)
    ncatmo_mode: mode of the total number of NCatmo background events (np.array of float)
    ncatmo_50: mean of ncatmo_mode (float)
    ncatmo_50_sigma: std of ncatmo_mode (float)
    ncatmo_50_16: 16% C.L. of ncatmo_mode (float)
    ncatmo_50_84: 84% C.L. of ncatmo_mode (float)
    ncatmo_expected: number of expected atmospheric NC background events in the energy window (float)
    ccatmo_c12_mode: mode of the total number of CCatmo background events on C12 (np.array of float)
    ccatmo_c12_50: mean of ccatmo_c12_mode (float)
    ccatmo_c12_50_sigma: std of ccatmo_c12_mode (float)
    ccatmo_c12_50_16: 16% C.L. of ccatmo_c12_mode (float)
    ccatmo_c12_50_84: 84% C.L. of ccatmo_c12_mode (float)
    ccatmo_c12_expected: number of expected CCatmo background events on C12 in the energy window (float)
    """
    """ Variable, which defines the date and time of running the script: """
    # get the date and time, when the script was run:
    date = datetime.datetime.now()
    now = date.strftime("%Y-%m-%d %H:%M")
    # calculate the number of files, that are read in (Equivalent to the number of entries in the result array)
    # (integer):
    number_of_entries = file_number_stop - file_number_start + 1
    """ Preallocate the arrays, where the results of the analysis of the different datasets should be appended: """
    # mode of the total number of signal events (np.array of float):
    s_mode = np.array([])
    # values of upper 90 percent limit of the number of signal events (np.array of float):
    s_90_limit = np.array([])
    # mode of the total number of DSNB background events (np.array of float):
    dsnb_mode = np.array([])
    # mode of the total number of CCatmo background events on protons (np.array of float):
    ccatmo_p_mode = np.array([])
    # mode of the total number of reactor background events (np.array of float):
    reactor_mode = np.array([])
    # mode of the total number of NCatmo background events (np.array of float):
    ncatmo_mode = np.array([])
    # mode of the total number of CCatmo background events on C12 (np.array of float):
    ccatmo_c12_mode = np.array([])
    """ Read in the files, where the results of the analysis are saved and read the result-values: """
    for number in np.arange(file_number_start, file_number_stop + 1, 1):
        # load the file corresponding to Dataset{number}_mcmc_analysis.txt:
        result_analysis = np.loadtxt(path_analysis + "/Dataset{0:d}_mcmc_analysis.txt".format(number))
        # get value of mode of S (float):
        value_mode_s = result_analysis[0]
        # get value of 90 percent limit S (float):
        value_s_90_limit = result_analysis[1]
        # get value of mode of B_DSNB (float):
        value_mode_dsnb = result_analysis[2]
        # get value of mode of B_CCatmo_p (float):
        value_mode_ccatmo_p = result_analysis[3]
        # get value of mode of B_CCatmo_c12 (float):
        value_mode_ccatmo_c12 = result_analysis[4]
        # get value of mode of B_reactor (float):
        value_mode_reactor = result_analysis[5]
        # get value of mode of B_NCatmo (float):
        value_mode_ncatmo = result_analysis[6]
        # Append the values to the arrays (np.array of float):
        s_mode = np.append(s_mode, value_mode_s)
        s_90_limit = np.append(s_90_limit, value_s_90_limit)
        dsnb_mode = np.append(dsnb_mode, value_mode_dsnb)
        ccatmo_p_mode = np.append(ccatmo_p_mode, value_mode_ccatmo_p)
        reactor_mode = np.append(reactor_mode, value_mode_reactor)
        ncatmo_mode = np.append(ncatmo_mode, value_mode_ncatmo)
        ccatmo_c12_mode = np.append(ccatmo_c12_mode, value_mode_ccatmo_c12)
    """ Calculate the mean and probability interval: """
    # calculate the mean and 16% and 84% confidence level and 2.5% and 97.5% CL and 0.15% and 99.85% CL of the
    # array S_mode (float):
    s_50 = np.mean(s_mode)
    s_50_sigma = np.std(s_mode)
    s_50_16, s_50_84 = np.percentile(s_mode, [16, 84])
    s_50_2_5, s_50_97_5 = np.percentile(s_mode, [2.5, 97.5])
    s_50_0_15, s_50_99_85 = np.percentile(s_mode, [0.15, 99.85])
    # calculate the mean and 16% and 84% confidence level of the array s_90_limit (float):
    s_90 = np.mean(s_90_limit)
    s_90_sigma = np.std(s_90_limit)
    s_90_16, s_90_84 = np.percentile(s_90_limit, [16, 84])
    s_90_2_5, s_90_97_5 = np.percentile(s_90_limit, [2.5, 97.5])
    s_90_0_15, s_90_99_85 = np.percentile(s_90_limit, [0.15, 99.85])
    # calculate the mean and 16% and 84% confidence level of the array DSNB_mode (float):
    dsnb_50 = np.mean(dsnb_mode)
    dsnb_50_sigma = np.std(dsnb_mode)
    dsnb_50_16, dsnb_50_84 = np.percentile(dsnb_mode, [16, 84])
    # calculate the mean and 16% and 84% confidence level of the array CCatmo_p_mode (float):
    ccatmo_p_50 = np.mean(ccatmo_p_mode)
    ccatmo_p_50_sigma = np.std(ccatmo_p_mode)
    ccatmo_p_50_16, ccatmo_p_50_84 = np.percentile(ccatmo_p_mode, [16, 84])
    # calculate the mean and 16% and 84% confidence level of the array Reactor_mode (float):
    reactor_50 = np.mean(reactor_mode)
    reactor_50_sigma = np.std(reactor_mode)
    reactor_50_16, reactor_50_84 = np.percentile(reactor_mode, [16, 84])
    # calculate the mean and 16% and 84% confidence level of the array NCatmo_mode (float):
    ncatmo_50 = np.mean(ncatmo_mode)
    ncatmo_50_sigma = np.std(ncatmo_mode)
    ncatmo_50_16, ncatmo_50_84 = np.percentile(ncatmo_mode, [16, 84])
    # calculate the mean and 16% and 84% confidence level of the array CCatmo_c12_mode (float):
    ccatmo_c12_50 = np.mean(ccatmo_c12_mode)
    ccatmo_c12_50_sigma = np.std(ccatmo_c12_mode)
    ccatmo_c12_50_16, ccatmo_c12_50_84 = np.percentile(ccatmo_c12_mode, [16, 84])
    """ Load the analysis information file to get the expected/true number of events in the energy window and to get the
    energy window: """
    # load the txt file (np.array of float):
    information_analysis = np.loadtxt(file_info_analysis)
    # lower bound of the energy window in MeV (float):
    lower_energy_bound = information_analysis[1]
    # upper bound of the energy window in MeV (float):
    upper_energy_bound = information_analysis[2]
    # number of expected signal events in the energy window (float):
    signal_expected = information_analysis[4]
    # number of expected DSNB backgrounds events in the energy window (float):
    dsnb_expected = information_analysis[6]
    # number of expected atmospheric CC background events on protons in the energy window (float):
    ccatmo_p_expected = information_analysis[7]
    # number of expected reactor background events in the energy window (float):
    reactor_expected = information_analysis[9]
    # number of expected atmospheric NC background events in the energy window (float):
    ncatmo_expected = information_analysis[10]
    # number of expected atmopsheric CC background events on C12 in the energy window (float):
    # NOTE(review): same index [10] as ncatmo_expected above — likely should be
    # [11]; confirm against the layout of the analysis information file.
    ccatmo_c12_expected = information_analysis[10]
    if save_data:
        # save all summary statistics into one text file with a self-describing header
        np.savetxt(path_output + "/result_mcmc/result_dataset_output_{0:d}.txt".format(dm_mass),
                   np.array([lower_energy_bound, upper_energy_bound, number_of_entries,
                             signal_expected, s_50, s_50_sigma, s_50_16, s_50_84,
                             s_90, s_90_sigma, s_90_16, s_90_84,
                             dsnb_expected, dsnb_50, dsnb_50_sigma, dsnb_50_16, dsnb_50_84,
                             ccatmo_p_expected, ccatmo_p_50, ccatmo_p_50_sigma, ccatmo_p_50_16, ccatmo_p_50_84,
                             reactor_expected, reactor_50, reactor_50_sigma, reactor_50_16, reactor_50_84,
                             ncatmo_expected, ncatmo_50, ncatmo_50_sigma, ncatmo_50_16, ncatmo_50_84,
                             ccatmo_c12_expected, ccatmo_c12_50, ccatmo_c12_50_sigma, ccatmo_c12_50_16,
                             ccatmo_c12_50_84,
                             s_50_2_5, s_50_97_5, s_50_0_15, s_50_99_85,
                             s_90_2_5, s_90_97_5, s_90_0_15, s_90_99_85]),
                   fmt="%4.5f",
                   header="Results of the analysis of the spectra in dataset_output_{3} "
                          "(with output_analysis_v7_server.py, {0}):\n"
                          "Analysis of Dataset_{1:d}.txt to Dataset_{2:d}.txt\n"
                          "\n"
                          "Information to the values below:\n"
                          "Lower bound of the energy window in MeV, upper bound of the energy window in MeV\n"
                          "Number of datasets that were analyzed,\n"
                          "Expected number of signal events from simulation,\n"
                          "Mean of the observed number of signal events,\n"
                          "Standard deviation of the observed number of signal events,\n"
                          "16 % confidence level of the observed number of signal events,\n"
                          "84 % confidence level of the observed number of signal events,\n"
                          "Mean of the 90% probability limit of the observed number of signal events,\n"
                          "Standard deviation of the 90% probability limit of the observed number of signal events,\n"
                          "16 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n"
                          "84 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n"
                          "Expected number of DSNB background events from simulation,\n"
                          "Mean of the observed number of DSNB background events,\n"
                          "Standard deviation of the observed number of DSNB background events,\n"
                          "16 % confidence level of the observed number of DSNB background events,\n"
                          "84 % confidence level of the observed number of DSNB background events,\n"
                          "Expected number of CCatmo background events on p from simulation,\n"
                          "Mean of the observed number of atmo. CC background events on p,\n"
                          "Standard deviation of the observed number of atmo. CC background events on p,\n"
                          "16 % confidence level of the observed number of atmo. CC background events on p,\n"
                          "84 % confidence level of the observed number of atmo. CC background events on p,\n"
                          "Expected number of reactor background events from simulation,\n"
                          "Mean of the observed number of Reactor background events,\n"
                          "Standard deviation of the observed number of Reactor background events,\n"
                          "16 % confidence level of the observed number of Reactor background events,\n"
                          "84 % confidence level of the observed number of Reactor background events,\n"
                          "Expected number of NCatmo background events from simulation,\n"
                          "Mean of the observed number of atmo. NC background events,\n"
                          "Standard deviation of the observed number of atmo. NC background events,\n"
                          "16 % confidence level of the observed number of atmo. NC background events,\n"
                          "84 % confidence level of the observed number of atmo. NC background events,\n"
                          "Expected number of CCatmo background events on C12 from simulation,\n"
                          "Mean of the observed number of atmo. CC background events on C12,\n"
                          "Standard deviation of the observed number of atmo. CC background events on C12,\n"
                          "16 % confidence level of the observed number of atmo. CC background events on C12,\n"
                          "84 % confidence level of the observed number of atmo. CC background events on C12,\n"
                          "2.5 % confidence level of the observed number of signal events,\n"
                          "97.5 % confidence level of the observed number of signal events,\n"
                          "0.15 % confidence level of the observed number of signal events,\n"
                          "99.85 % confidence level of the observed number of signal events,\n"
                          "2.5 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n"
                          "97.5 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n"
                          "0.15 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n"
                          "99.85 % confidence level of the 90% probability limit of the observed number of signal "
                          "events,\n:"
                          .format(now, file_number_start, file_number_stop, dm_mass))
        # print message, that result data is saved in file:
        print("result data is saved in the file result_dataset_output_{0:d}.txt".format(dm_mass))
    return (number_of_entries, lower_energy_bound, upper_energy_bound,
            s_mode, s_50, s_50_sigma, s_50_16, s_50_84, signal_expected,
            s_90_limit, s_90, s_90_sigma, s_90_16, s_90_84,
            dsnb_mode, dsnb_50, dsnb_50_sigma, dsnb_50_16, dsnb_50_84, dsnb_expected,
            ccatmo_p_mode, ccatmo_p_50, ccatmo_p_50_sigma, ccatmo_p_50_16, ccatmo_p_50_84, ccatmo_p_expected,
            reactor_mode, reactor_50, reactor_50_sigma, reactor_50_16, reactor_50_84, reactor_expected,
            ncatmo_mode, ncatmo_50, ncatmo_50_sigma, ncatmo_50_16, ncatmo_50_84, ncatmo_expected,
            ccatmo_c12_mode, ccatmo_c12_50, ccatmo_c12_50_sigma, ccatmo_c12_50_16, ccatmo_c12_50_84,
            ccatmo_c12_expected,
            s_50_2_5, s_50_97_5, s_50_0_15, s_50_99_85, s_90_2_5, s_90_97_5, s_90_0_15, s_90_99_85)
|
# DIFFERENCE BETWEEN COROUTINES AND GENERATORS
# it is possible to add arguments to coroutines during execution
# not possible for generators
from asyncio import coroutine
def coroutine(func):
    """Decorator that "primes" a generator-based coroutine: calling the
    decorated factory creates the generator and advances it to its first
    yield, so callers can .send() immediately.

    NOTE: this local definition intentionally shadows the deprecated
    asyncio.coroutine imported above (removed in Python 3.11).
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped generator function's name/docstring
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        next(cr)
        return cr
    return start
# def uuid_gen(idx=1):
# while True:
# idx = yield idx
# idx += 1
@coroutine
def filter_first_list(target):
    """Pipeline stage: receive a list, drop its first element and forward the
    remainder to *target*. Finishes (with the final value) when *target*
    raises StopIteration."""
    while True:
        try:
            incoming = yield
            target.send(incoming[1:])
        except StopIteration as stop:
            print('Filter: I am done')
            return stop.value
@coroutine
def double_first_list(target):
    """Pipeline stage: double the first element of each received list and
    forward the rebuilt list to *target*. Finishes when *target* raises
    StopIteration."""
    while True:
        try:
            incoming = yield
            rebuilt = [incoming[0] * 2] + list(incoming[1:])
            target.send(rebuilt)
        except StopIteration as stop:
            print('Double: I am done')
            return stop.value
@coroutine
def square_list():
    """Terminal stage: square every element of the single list it receives,
    delivering the result via StopIteration.value."""
    incoming = yield
    return [element ** 2 for element in incoming]
def get_data(coro, iterable):
    """Feed *iterable* into the primed coroutine *coro* and print the final
    value once the pipeline finishes (carried by StopIteration)."""
    try:
        coro.send(iterable)
    except StopIteration as result:
        print(result.value)
# Drive the pipeline: send one list through filter -> double -> square and
# print the final value carried out via StopIteration.
get_data(filter_first_list(double_first_list(square_list())), [2, 3, 4, 5])
# Leftover unpacking experiments:
# e = 1, 2, 3, 4, 5
# q, w, *t = e
# x = [5, 6, 7, 8]
# l = [1, 2, *x]
# a, *b, c = x
|
from data_conversion import *
# Sorted list of (buid, profile_id) pairs; link_buid() binary-searches it.
buids = []
def init():
    """Build the buid lookup table (must run before link_buid is used)."""
    get_buids()
def get_buids():
    """Fill the global `buids` list with (buid, profile_id) pairs collected
    from every profile document, then sort it for binary search."""
    for profile in db["profiles"].find():
        # profiles without a "buids" field (or with an empty list) contribute nothing
        for buid in profile.get("buids", []):
            buids.append((buid, str(profile["_id"])))
    buids.sort()
    print("Done with the setup of the buids.")
def link_buid(entry):
    """Binary-search the sorted global `buids` table for the entry's first
    buid. Returns the matching profile id, or None when it is unknown."""
    target = entry["buid"][0]
    lo, hi = 0, len(buids) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate, profile_id = buids[mid]
        if candidate == target:
            return profile_id
        if candidate > target:
            hi = mid - 1
        else:
            lo = mid + 1
def get_session_duration(entry):
    """Return the session length in whole seconds (session_end - session_start)."""
    delta = entry["session_end"] - entry["session_start"]
    return int(delta.total_seconds())
def get_normalized_tables_id(search_value, search_name):
    """Look up the _id of *search_value* in normalized table *search_name*
    (table and column share the same name). Raises IndexError when the value
    is absent.

    SECURITY NOTE(review): values are interpolated into the SQL with only
    single-quote doubling; acceptable for trusted internal data, but a
    parameterized query would be safer.
    """
    if search_value is not None:
        # escape single quotes for the SQL string literal
        search_value = search_value.replace("'", "''")
        cursor.execute(f"select _id from {search_name} where {search_name} = '{search_value}'")
    else:
        cursor.execute(f"select _id from {search_name} where {search_name} is null")
    results = cursor.fetchall()
    return results[0][0]
# Thin wrappers: each maps one entry field onto its normalized lookup table.
def get_brand_id(entry):
    return get_normalized_tables_id(entry["brand"], "brand")
def get_category_id(entry):
    return get_normalized_tables_id(entry["category"], "category")
def get_sub_category_id(entry):
    return get_normalized_tables_id(entry["sub_category"], "sub_category")
def get_sub_sub_category_id(entry):
    return get_normalized_tables_id(entry["sub_sub_category"], "sub_sub_category")
def get_color_id(entry):
    return get_normalized_tables_id(entry["color"], "color")
def get_gender_id(entry):
    return get_normalized_tables_id(entry["gender"], "gender")
def bought_profile_id(entry):
    """Return the buyer's profile id for a sale entry, or -1 when there is no
    sale or the profile cannot be resolved."""
    try:
        if entry["has_sale"]:
            profile_id = link_buid(entry)
            # link_buid returns None for unknown buids
            return profile_id if profile_id is not None else -1
        else:
            return -1
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any data error (missing key, bad buid) means "unknown".
        return -1
def bought_product_id(entry):
    """Return the id of the last product bought in *entry*, or -1.

    Side effect: every bought product except the last is appended to the
    global `upload_values` as a (profile_id, product_id) pair.
    """
    try:
        if entry["has_sale"] and len(entry["order"]["products"]) >= 1:
            profile_id = link_buid(entry)
            # all but the last product go into the bulk-upload buffer;
            # the last one is handed back to the caller
            for product_index in range(len(entry["order"]["products"]) - 1):
                upload_values.append((profile_id, entry["order"]["products"][product_index]["id"]))
            return entry["order"]["products"][-1]["id"]
        else:
            return -1
    except Exception:
        # narrowed from a bare `except:`; malformed entries mean "no product"
        return -1
def viewed_product_id(entry):
    """Return the id of the last previously-viewed product for *entry*, or -1.

    Side effects: all but the last viewed product are appended to the global
    `upload_values` as (profile_id, product_id) pairs, and the global
    `counter` is incremented once per previously-viewed product.
    """
    try:
        global counter
        if entry["recommendations"]["viewed_before"]:
            profile_id = str(entry["_id"])
            for product_index in range(len(entry["recommendations"]["viewed_before"]) - 1):
                upload_values.append((profile_id, entry["recommendations"]["viewed_before"][product_index]))
                counter += 1
            return entry["recommendations"]["viewed_before"][-1]
        else:
            return -1
    except Exception:
        # narrowed from a bare `except:`; malformed entries mean "no product"
        return -1
|
# Generated by Django 3.2.7 on 2021-10-06 08:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.7): creates the SkillItem model — a named
    skill with a 1-5 proficiency level, optionally linked to a user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("details", "0016_alter_experienceitem_experience_type"),
    ]

    operations = [
        migrations.CreateModel(
            name="SkillItem",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "skill_name",
                    models.CharField(max_length=120, verbose_name="Skill Name"),
                ),
                (
                    "skill_level",
                    models.IntegerField(
                        choices=[
                            (1, "Poor"),
                            (2, "Average"),
                            (3, "Good"),
                            (4, "Verygood"),
                            (5, "Excellent"),
                        ],
                        verbose_name="Skill Level",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        null=True,
                        # deleting the user also deletes their skill items
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
|
import os
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
# app.config['SECRET_KEY'] = 'mysecretkey'
# absolute path of the directory containing this module
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__, static_url_path='/static')
# SECURITY NOTE(review): live database credentials and the secret key are
# hard-coded in source — they should be moved to environment variables and
# these values rotated.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://tinyfee4_Team:k-k)6ih8URbs@162.241.219.131/tinyfee4_sources'
app.config['SECRET_KEY'] = 'secret'
# :3306
db = SQLAlchemy(app)
# -------------------------------- Blueprints -------------------------------- #
# Must be defined after import db
from server.sectors.views import sectors_blueprint
app.register_blueprint(sectors_blueprint,url_prefix="/sectors")
|
from django.db import models
from django.contrib.auth.models import User
from address.models import AddressField
from star_ratings.models import Rating
# Create your models here.
class Profile(models.Model):
    """Per-user profile data attached one-to-one to the auth ``User``."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    surname = models.CharField(max_length=50)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')
    cellphone = models.CharField(max_length=10)
    address1 = AddressField()
    # Optional secondary address; '+' suppresses the reverse relation.
    address2 = AddressField(related_name='+', blank=True, null=True)
    ratings = Rating()

    def __str__(self):
        # Bug fix: two additional return statements that followed this one
        # were unreachable dead code and have been removed.
        return f'{self.user.username} Profile'
# from django.contrib.auth.models import AbstractUser
# class User(AbstractUser):
# otp1 = models.IntegerField(null=True)
# phoneno1 = models.CharField(max_length=10) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import etree
import requests
import xlwt
# Spoof a desktop Chrome user agent so the site serves normal HTML pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
}
# Base URL used to turn relative job-detail links into absolute ones.
info_base_url = 'https://hr.tencent.com/'
# 1. Build listing-page URLs, e.g. https://hr.tencent.com/position.php?lid=&tid=&keywords=python&start=10#a
def creat_url_list(maxNum):
    """Return listing-page URLs, one per page of 10 results up to *maxNum*."""
    base_url = "https://hr.tencent.com/position.php?lid=&tid=&keywords=python&start={0}#a"
    return [base_url.format(offset) for offset in range(0, maxNum, 10)]
# 2. Request each listing page and extract the job-detail links.
def request_list(url_list):
    """Fetch each listing page and collect absolute job-detail URLs.

    Idiom fix: the inner loop used to rebind the outer loop variable
    ``item``; the two loops now use distinct names.

    @param url_list: iterable of listing-page URLs (see ``creat_url_list``).
    @return: list of absolute detail-page URLs.
    """
    print('请求链接-列表:{0} ...'.format(url_list))
    info_url_list = []
    for page_url in url_list:
        resp = requests.get(page_url, headers=headers)
        html = etree.HTML(resp.text)
        info_urls = html.xpath("//td[@class ='l square']/a/@href")
        for href in info_urls:
            info_url_list.append(str(info_base_url) + str(href))
    return info_url_list
# 4. Request a detail page and parse the job fields out of its HTML.
def request_info_page(url):
    """Fetch one job-detail page and extract its fields as a dict.

    NOTE(review): the ``return`` appears to sit inside the ``for`` loop, so
    only the first zipped row is returned; if any xpath list is empty the
    function implicitly returns ``None`` — confirm callers handle that.
    """
    print('请求链接-详情:{0} ...'.format(url))
    resp2 = requests.get(url, headers=headers)
    html2 = etree.HTML(resp2.text)
    # Each xpath targets one cell of the detail table.
    title_list = html2.xpath("//*[@id='sharetitle']/text()")
    place_list = html2.xpath('//tr[2]/td[1]/text()')
    position_list = html2.xpath('//tr[2]/td[2]/text()')
    num_list = html2.xpath('//tr[2]/td[3]/text()')
    duty_list = html2.xpath('//tr[3]//ul[@class="squareli"]/li/text()')
    requir_list = html2.xpath('//tr[4]//ul[@class="squareli"]/li/text()')
    for title,place,position,num,duty,requir in zip( title_list,place_list,position_list,num_list,duty_list,requir_list):
        info_data = {
            '职位': title,
            '工作地点': place,
            '职位类别': position,
            '招聘人数': num,
            '工作职责': duty,
            '工作要求': requir
        }
        return info_data
# 6. Crawl everything and return the collected info_list.
def run():
    """Crawl the first listing pages and return a list of job-detail dicts."""
    url_list = creat_url_list(maxNum=30)
    info_url_list = request_list(url_list)
    return [request_info_page(url=detail_url) for detail_url in info_url_list]
# print(info_url_list)
# print(info_list)
# for item in info_list:
# print(item)
def save_info(info_list):
    """Write the scraped job dicts to ``7.xls``, one row each, under a header."""
    workbook = xlwt.Workbook(encoding='utf-8')
    first_sheet = workbook.add_sheet('电影天堂')
    row = ["职位", "工作地点", "职位类别", "招聘人数", "工作职责", "工作要求"]
    # Header row at index 0.
    for col, heading in enumerate(row):
        first_sheet.write(0, col, heading)
    # Data rows start at index 1; columns follow each dict's key order.
    for row_idx, record in enumerate(info_list, start=1):
        for col, key in enumerate(record):
            first_sheet.write(row_idx, col, record[key])
    workbook.save('7' + ".xls")
if __name__ == '__main__':
    # Crawl the site, then persist the results to an .xls workbook.
    info_list = run()
    save_info(info_list)
# workbook = xlwt.Workbook(encoding='utf-8')
# first_sheet = workbook.add_sheet('电影天堂')
# row = ["职位", "工作地点", "职位类别", "招聘人数", "工作职责", "工作要求"]
#
# for h in range(len(row)):
# first_sheet.write(0, h, row[h])
#
# a = 1
# for aa in lists:
# b = 0
# for data in aa:
# first_sheet.write(a, b, aa[data])
# b += 1
# a += 1
# workbook.save('7' + ".xls")
|
import numpy as np
# Pixel coordinates of three reference stars on the image.
x, y = np.loadtxt('coordinate pixel.txt', unpack = True)
# Celestial coordinates (azimuth az, polar angle pol) of the same stars, degrees.
az, pol = np.loadtxt('coordinate_celesti.txt', unpack = True)
# Degrees -> radians.
az = (az*np.pi)/180
pol= (pol*np.pi)/180
# Angular separation of each star pair (spherical law of cosines).
A = np.cos(pol[0])*np.cos(pol[1])+ np.sin(pol[0])*np.sin(pol[1])*np.cos(az[0]-az[1])
alfa1 = np.arccos(A)
A = np.cos(pol[1])*np.cos(pol[2])+ np.sin(pol[1])*np.sin(pol[2])*np.cos(az[1]-az[2])
alfa2 = np.arccos(A)
A = np.cos(pol[2])*np.cos(pol[0])+ np.sin(pol[2])*np.sin(pol[0])*np.cos(az[2]-az[0])
alfa3 = np.arccos(A)
# Corresponding pixel distances on the image plane.
d1 = np.sqrt((x[0] - x[1])**2 + (y[0] - y[1])**2)
d2 = np.sqrt((x[1] - x[2])**2 + (y[1] - y[2])**2)
d3 = np.sqrt((x[2] - x[0])**2 + (y[2] - y[0])**2)
# Scale (radians per pixel) from each pair, averaged, then back to degrees.
f1 = alfa1/d1
f2 = alfa2/d2
f3 = alfa3/d3
f = (f1+f2+f3)/3
f = (f/np.pi)*180
print('%.2f' %(f))
# a: angle scaled by 197.6, with da as its propagated uncertainty.
a = f*197.6
da = f*0.2
print('%.2f +- %.2f' %(a, da))
a = a*(np.pi/180)
n = (np.sin((a + (np.pi)/3)/2))/(np.sin(np.pi/6))
# NOTE(review): the parenthesisation below differs from the sin() line above —
# cos((a + (pi/3)/2)) vs sin((a + pi/3)/2). The error-propagation term was
# presumably meant to be cos((a + pi/3)/2) (and divided by 2); confirm against
# the derivation before trusting dn.
dn = abs(np.cos((a +(np.pi/3)/2)))*da
print('%.2f +- %.2f' %(n, dn))
""" Query catsim """
def catsim_query(stack_version, **kwargs):
    """Dispatch to the query routine matching the given stack version.

    Versions below 10 use the stack-8 code path; everything else uses
    the stack-10+ path.
    """
    handler = catsim_query_stack8 if stack_version < 10 else catsim_query_stack10
    return handler(**kwargs)
def catsim_query_stack8(objid, constraint, obs_metadata, dia):
    """ Query catsim and make a catalog for stack 8
    @param [in] objid of the catsim table
    @param [in] constraint is sql constraint for the catsim table
    @param [in] obs_metadata is the metadata for the visit
    @param [in] dia is boolean which determines the catalog class
    i.e if full diasource will be emitted
    """
    # Imported lazily so the module loads without the stack-8 LSST install.
    from lsst.sims.catalogs.generation.db import DBObject
    dbobj = DBObject.from_objid(objid)
    # Emit the full diasource catalog only when requested.
    catalog = 'variable_stars_dia' if dia else 'variable_stars'
    # filename = 'test_reference.dat'
    # t.write_catalog(filename, chunk_size=10)
    return dbobj.getCatalog(catalog,
                            obs_metadata=obs_metadata,
                            constraint=constraint)
def catsim_query_stack10 (objid, constraint, obs_metadata, dia):
    """ Query catsim and make a catalog for stack 10+
    @param [in] objid of the catsim table
    @param [in] constraint is sql constraint for the catsim table
    @param [in] obs_metadata is the metadata for the visit
    @param [in] dia is boolean which determines the catalog class
    i.e if full diasource will be emitted
    """
    # Imported lazily so the module loads without the stack-10 LSST install.
    from lsst.sims.catalogs.db import CatalogDBObject
    dbobj = CatalogDBObject.from_objid(objid)
    # Emit the full diasource catalog only when requested.
    catalog = 'variable_stars_dia' if dia else 'variable_stars'
    # dbobj.show_db_columns()
    # dbobj.show_mapped_columns()
    # filename = 'test_reference.dat'
    # t.write_catalog(filename, chunk_size=10)
    return dbobj.getCatalog(catalog,
                            obs_metadata=obs_metadata, constraint=constraint)
|
#!/usr/bin/env python
import os
from munin import MuninPlugin
from sense_hat import SenseHat
# Degrees subtracted from every Sense HAT reading to correct for CPU heat.
ADJUSTMENT=9
class TempPlugin(MuninPlugin):
    """Munin plugin reporting Sense HAT temperatures, each lowered by ADJUSTMENT."""

    title = "Adjusted Temperature"
    # args = "--base 1000 -l 0"
    vlabel = "adjusted temp (-{0})".format(ADJUSTMENT)
    scale = False
    category = "sense"

    @property
    def fields(self):
        # Thresholds come from the environment with sensible defaults.
        warning = str(os.environ.get('temp_warn', 25))
        warning_low = str(os.environ.get('temp_warn_low', 20))
        critical = str(os.environ.get('temp_crit', 30))
        critical_low = str(os.environ.get('temp_crit_low', 17))

        def make_field(label, info):
            # Shared GAUGE definition with low:high warning/critical ranges.
            return dict(
                label=label,
                info=info,
                type="GAUGE",
                min="0",
                warning="{0}:{1}".format(warning_low, warning),
                critical="{0}:{1}".format(critical_low, critical),
            )

        return [
            ("temp", make_field("temp", 'The temperature from sensor')),
            ("temp_hum", make_field("temp_humidity", 'The temperature from humidity sensor')),
            ("temp_pre", make_field("temp_pressure", 'The temperature from pressure sensor')),
        ]

    def execute(self):
        # Sample all three on-board temperature sources and apply the offset.
        sense = SenseHat()
        raw = {
            "temp": sense.get_temperature(),
            "temp_hum": sense.get_temperature_from_humidity(),
            "temp_pre": sense.get_temperature_from_pressure(),
        }
        return {name: value - ADJUSTMENT for name, value in raw.items()}
if __name__ == "__main__":
    # Standard munin entry point: handles config/fetch dispatch.
    TempPlugin().run()
|
import csv
import pandas as pd
import io
# Columns shared by both output sections.
headers = ["name", "mass", "radius", "distance"]

# Load brown-dwarf records and drop rows missing mass or radius.
df = pd.read_csv("./brown-dwarfs.csv")
df = df[df['mass'].notna()]
df = df[df['radius'].notna()]
df['radius'] = df['radius'] * 0.102763 # Convert to Solar Radius
df['mass'] = df['mass'] * 0.000954588 # Convert to Solar Mass
print(df[headers])
df2 = pd.read_csv("./brightstars.csv")

# Append both datasets under a single header row.
# NOTE(review): iterating ``.iloc`` relies on pandas' integer-fallback
# iteration to yield one row (Series) at a time; ``itertuples(index=False)``
# would be the conventional spelling. Also, df2's rows are written under the
# brown-dwarf header regardless of its own column layout — confirm the two
# CSVs share column order.
with io.open("merged.csv", "w", encoding="utf-8", newline='') as f:
    csvW = csv.writer(f)
    csvW.writerow(headers)
    csvW.writerows(df[headers].iloc)
    csvW.writerows(df2.iloc)
|
import unittest
import mock
import os
from cumulus.queue import get_queue_adapter
from cumulus.queue.abstract import AbstractQueueAdapter
from cumulus.constants import QueueType
from cumulus.tasks import job
class PbsQueueAdapterTestCase(unittest.TestCase):
    """Unit tests for the PBS queue adapter (qsub/qdel/qstat handling).

    The cluster connection is mocked so the shell commands the adapter would
    run over SSH can be asserted on. The template tests read fixture scripts
    located via the ``CUMULUS_SOURCE_DIRECTORY`` environment variable.
    """

    def setUp(self):
        self._cluster_connection = mock.MagicMock()
        self._adapter = get_queue_adapter({
            'config': {
                'scheduler': {
                    'type': QueueType.PBS
                }
            },
            'type': 'trad'
        }, self._cluster_connection)

    def test_terminate_job(self):
        # Terminating a job must issue exactly one ``qdel <id>``.
        job_id = 123
        job = {
            self._adapter.QUEUE_JOB_ID: job_id
        }
        self._adapter.terminate_job(job)
        expected_call = [mock.call('qdel %d' % job_id)]
        self.assertEqual(self._cluster_connection.execute.call_args_list, expected_call)

    def test_submit_job(self):
        job_id = '123'
        test_output = ['%s.ulex.kitware.com' % job_id]
        job_script = 'script.sh'
        job = {
            AbstractQueueAdapter.QUEUE_JOB_ID: job_id,
            'dir': '/tmp'
        }
        # Submission runs qsub from the job directory and the adapter parses
        # the queue job id out of qsub's output.
        expected_calls = [mock.call('cd /tmp && qsub ./%s' % job_script)]
        self._cluster_connection.execute.return_value = test_output
        actual_job_id = self._adapter.submit_job(job, job_script)
        self.assertEqual(self._cluster_connection.execute.call_args_list, expected_calls)
        self.assertEqual(actual_job_id, job_id)
        # Unparseable qsub output should raise.
        # NOTE(review): submit_job is called with a single argument here
        # (missing job_script) — presumably any exception satisfies
        # assertRaises, but confirm this exercises the intended
        # parse-failure path rather than a TypeError.
        test_output = ['Your fook %s ("test.sh") has been submitted' % job_id]
        self._cluster_connection.execute.return_value = test_output
        with self.assertRaises(Exception) as cm:
            self._adapter.submit_job(test_output)
        self.assertIsNotNone(cm.exception)

    def test_job_statuses(self):
        job1_id = '1126'
        job1 = {
            AbstractQueueAdapter.QUEUE_JOB_ID: job1_id
        }
        job2_id = '1127'
        job2 = {
            AbstractQueueAdapter.QUEUE_JOB_ID: job2_id
        }
        # Canned qstat output: state column 'C' maps to 'complete'.
        job_status_output = [
            'Job id                    Name             User            Time Use S Queue',
            '------------------------- ---------------- --------------- -------- - -----',
            '%s.ulex                     sleep.sh         cjh             00:00:00 C batch' % job1_id,
            '%s.ulex                     sleep.sh         cjh             00:00:00 C batch' % job2_id
        ]
        expected_calls = [mock.call('qstat %s' % job1_id)]
        self._cluster_connection.execute.return_value = job_status_output
        status = self._adapter.job_statuses([job1])
        self.assertEqual(self._cluster_connection.execute.call_args_list, expected_calls)
        self.assertEqual(status[0][1], 'complete')
        # Now try two jobs
        self._cluster_connection.reset_mock()
        expected_calls = [mock.call('qstat %s %s' % (job1_id, job2_id))]
        self._cluster_connection.execute.return_value = job_status_output
        status = self._adapter.job_statuses([job1, job2])
        self.assertEqual(self._cluster_connection.execute.call_args_list, expected_calls)
        self.assertEqual(status[0][1], 'complete')
        self.assertEqual(status[1][1], 'complete')

    def test_submission_template_pbs(self):
        # Generated submission scripts must match the on-disk fixtures.
        cluster = {
            '_id': 'dummy',
            'type': 'trad',
            'name': 'dummy',
            'config': {
                'host': 'dummy',
                'ssh': {
                    'user': 'dummy',
                    'passphrase': 'its a secret'
                },
                'scheduler': {
                    'type': 'pbs'
                }
            }
        }
        job_id = '123432423'
        job_model = {
            '_id': job_id,
            'queueJobId': '1',
            'name': 'dummy',
            'commands': ['ls', 'sleep 20', 'mpirun -n 1000000 parallel'],
            'output': [{'tail': True, 'path': 'dummy/file/path'}]
        }
        # Default parameters.
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script1.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        script = job._generate_submission_script(job_model, cluster, {})
        self.assertEqual(script, expected)
        # Explicit slot count.
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script2.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        job_params = {
            'numberOfSlots': 12312312
        }
        script = job._generate_submission_script(job_model, cluster, job_params)
        self.assertEqual(script, expected)

    def test_submission_template_pbs_nodes(self):
        # Node/core/GPU combinations each have their own fixture script.
        cluster = {
            '_id': 'dummy',
            'type': 'trad',
            'name': 'dummy',
            'config': {
                'host': 'dummy',
                'ssh': {
                    'user': 'dummy',
                    'passphrase': 'its a secret'
                },
                'scheduler': {
                    'type': 'pbs'
                }
            }
        }
        job_id = '123432423'
        job_model = {
            '_id': job_id,
            'queueJobId': '1',
            'name': 'dummy',
            'commands': ['ls', 'sleep 20', 'mpirun -n 1000000 parallel'],
            'output': [{'tail': True, 'path': 'dummy/file/path'}]
        }
        # Just nodes specfied
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script_nodes.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        job_params = {
            'numberOfNodes': 12312312
        }
        script = job._generate_submission_script(job_model, cluster, job_params)
        self.assertEqual(script, expected)
        # Nodes with number of cores
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script_nodes_cores.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        job_params = {
            'numberOfNodes': 12312312,
            'numberOfCoresPerNode': 8
        }
        script = job._generate_submission_script(job_model, cluster, job_params)
        self.assertEqual(script, expected)
        # Nodes with number of gpus
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script_nodes_gpus.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        job_params = {
            'numberOfNodes': 12312312,
            'numberOfGpusPerNode': 8
        }
        script = job._generate_submission_script(job_model, cluster, job_params)
        self.assertEqual(script, expected)
        # Nodes with number of cores and gpus
        path = os.path.join(os.environ["CUMULUS_SOURCE_DIRECTORY"],
                            'tests', 'cases', 'fixtures', 'job',
                            'pbs_submission_script_nodes_cores_gpus.sh')
        with open(path, 'r') as fp:
            expected = fp.read()
        job_params = {
            'numberOfNodes': 12312312,
            'numberOfGpusPerNode': 8,
            'numberOfCoresPerNode': 8
        }
        script = job._generate_submission_script(job_model, cluster, job_params)
        self.assertEqual(script, expected)
|
from Instrucciones.Return import Return
from Expresiones.Arreglos import Arreglos
from Abstractas.NodoArbol import NodoArbol
from Expresiones.Rango import Rango
from Instrucciones.Continue import Continue
from TablaSimbolos.Errores import Errores
from Abstractas.Objeto import TipoObjeto
from Objetos.Primitivos import Primitivo
from Expresiones.Constante import Constante
from TablaSimbolos.Tipos import Tipo_Acceso
from Instrucciones.Asignacion import Asignacion
from Instrucciones.Break import Break
from TablaSimbolos.TablaSimbolos import TablaSimbolos
from Abstractas.NodoAST import NodoAST
class For(NodoAST):
    """AST node executing a ``for`` loop over a range, scalar, or iterable.

    NOTE(review): in every instruction loop below, ``Continue`` is handled
    exactly like ``Break`` (both ``return None``, ending the whole For) —
    presumably Continue should only skip to the next iteration; confirm.
    """

    def __init__(self, id, rango, instrucciones, fila, columna):
        self.id = id                      # loop-variable node (has .id)
        self.rango = rango                # range/iterable expression node
        self.instrucciones = instrucciones  # loop body, may be None
        self.fila = fila
        self.columna = columna

    def ejecutar(self, tree, table):
        """Run the loop in a fresh child scope; propagate Return/Errores."""
        nuevaTabla= TablaSimbolos("For",table)
        # Declare the loop variable in the new scope, then evaluate the range.
        self.id.ejecutar(tree,nuevaTabla)
        id = self.id.id
        rango = self.rango.ejecutar(tree,nuevaTabla)
        if isinstance(rango,Rango):
            rango1 = rango.izquierdo
            rango2 = rango.derecho
            if isinstance(rango1,int) and isinstance(rango2,int):
                # Integer range: iterate [rango1, rango2).
                for i in range(rango1,rango2):
                    #if isinstance(rango1,int):
                    nuevaTabla.actualizarValor(id,i)
                    nuevaConstante = Constante(Primitivo(TipoObjeto.ENTERO, i), self.fila, self.columna)
                    '''elif isinstance(rango1,float):
nuevaConstante = Constante(Primitivo(TipoObjeto.DECIMAL, i), self.fila, self.columna)
else:
return Errores((str(rango1)+","+str(rango2)),"Semántico","Rango no aceptado", self.fila,self.columna)'''
                    nuevaAsignacion = Asignacion(Tipo_Acceso.NONE,id,nuevaConstante,None, self.fila,self.columna)
                    nuevaAsignacion.ejecutar(tree,nuevaTabla)
                    for instruccion in self.instrucciones:
                        resp= instruccion.ejecutar(tree,nuevaTabla)
                        if isinstance(resp,Break):
                            return None
                        elif isinstance(resp,Continue):
                            return None
                        elif isinstance(resp, Return):
                            return resp
            elif isinstance(rango1,float) and isinstance(rango2,float):
                # Float range: step by 1.0 for int(rango2-rango1)+1 iterations.
                total = int(rango2-rango1)+1
                if total >0:
                    for i in range(0,total):
                        nuevaTabla.actualizarValor(id,i)
                        variable = rango1
                        nuevaConstante = Constante(Primitivo(TipoObjeto.DECIMAL, variable), self.fila, self.columna)
                        rango1 = rango1+1
                        nuevaAsignacion = Asignacion(Tipo_Acceso.NONE,id,nuevaConstante,None, self.fila,self.columna)
                        nuevaAsignacion.ejecutar(tree,nuevaTabla)
                        for instruccion in self.instrucciones:
                            resp= instruccion.ejecutar(tree,nuevaTabla)
                            if isinstance(resp,Break):
                                return None
                            elif isinstance(resp,Continue):
                                return None
                            elif isinstance(resp, Return):
                                return resp
                            elif isinstance(resp, Errores):
                                return resp
        else:
            if isinstance(rango,int) or isinstance(rango,float):
                # Scalar "range": the body runs once with that single value.
                if isinstance(rango, int):
                    nuevaConstante = Constante(Primitivo(TipoObjeto.ENTERO, rango), self.fila, self.columna)
                else:
                    nuevaConstante = Constante(Primitivo(TipoObjeto.DECIMAL, rango), self.fila, self.columna)
                nuevaAsignacion = Asignacion(Tipo_Acceso.NONE,id,nuevaConstante,None,self.fila,self.columna)
                nuevaAsignacion.ejecutar(tree,nuevaTabla)
                if self.instrucciones != None:
                    for instruccion in self.instrucciones:
                        resp=instruccion.ejecutar(tree,nuevaTabla)
                        if isinstance(resp,Break):
                            return None
                        elif isinstance(resp,Continue):
                            return None
                        elif isinstance(resp, Return):
                            return resp
                        elif isinstance(resp,Errores):
                            return resp
            else:
                # Generic iterable (string chars, ints, nested nodes, lists).
                try:
                    for i in rango:
                        if isinstance(i, str):
                            nuevaConstante = Constante(Primitivo(TipoObjeto.CADENA, i), self.fila, self.columna)
                            nuevaAsignacion = Asignacion(Tipo_Acceso.NONE,id,nuevaConstante,None,self.fila,self.columna)
                            nuevaAsignacion.ejecutar(tree,nuevaTabla)
                        elif isinstance(i,int):
                            nuevaConstante = Constante(Primitivo(TipoObjeto.ENTERO, i), self.fila, self.columna)
                            nuevaAsignacion = Asignacion(Tipo_Acceso.NONE,id,nuevaConstante,None,self.fila,self.columna)
                            nuevaAsignacion.ejecutar(tree,nuevaTabla)
                        elif isinstance(i, NodoAST):
                            #val = i.ejecutar(tree,nuevaTabla)
                            nuevaTabla.actualizarValor(id,i)
                        elif isinstance(i, list):
                            nuevaTabla.actualizarValor(id,i)
                        if self.instrucciones != None:
                            for instruccion in self.instrucciones:
                                resp=instruccion.ejecutar(tree,nuevaTabla)
                                if isinstance(resp,Break):
                                    return None
                                elif isinstance(resp, Continue):
                                    return None
                                elif isinstance(resp, Return):
                                    return resp
                                elif isinstance(resp, Errores):
                                    return resp
                except:
                    # Non-iterable range value: report a semantic error.
                    err = Errores("For","Semántico","Valor no permitido, debe ser una cadena", self.fila,self.columna)
                    tree.insertError(err)
                    return err

    def getNodo(self):
        """Build the graphviz/AST subtree for this For statement."""
        NodoNuevo = NodoArbol("For")
        NodoNuevo.agregarHijoNodo(self.id.getNodo())
        NodoNuevo.agregarHijoNodo(self.rango.getNodo())
        NodoInst = NodoArbol("Instrucciones")
        for instruccion in self.instrucciones:
            NodoInst.agregarHijoNodo(instruccion.getNodo())
        NodoNuevo.agregarHijoNodo(NodoInst)
        NodoNuevo.agregarHijo("end")
        NodoNuevo.agregarHijo(";")
        return NodoNuevo
|
from classproperty import classproperty, classproperty_support
from collections import OrderedDict
@classproperty_support
class LMPtrj(object):
    """
    A class that parses lammps trajectories and stores the data in the
    trj attribute as a dictionary
    attributes:
    clear() : clear the trj, called by the class constructor
    parse(trjname) : construct the trj, called by the class constructor
    to_file() : write trj to a file
    sort(key) : sort the atoms section
    sort_id() : sort the atoms section based on id
    sort_mol_type_id() : sort the atoms section based on mol, type, id
    subtrj(timesteps) : generate a copy of sub-trajectory
    valid_sections: the keys to the trj.values()
    data_types: atoms section, key to type dictionary
    trj : a dictionary, key = timestep, value = dictionary
    {timestep : {valsection : value, ...,
    dictsection : (args, [{ attr: value}, ...]), ...}, ...}
    if two sections have the same timestep, the latter one will overwrite
    the former one
    """

    def __init__(self, trjname=None):
        self.clear()
        self.parse(trjname)

    ########################## public methods ##########################
    def clear(self):
        # Reset the trajectory store; returns self for chaining.
        self.__trj = OrderedDict()
        return self

    def parse(self, trjname):
        # Parse a dump file section by section. Each "ITEM:" header is
        # dispatched to a _parse_<section> method which reports how many
        # data lines it consumed.
        if trjname is None:
            return self
        with open(trjname) as f:
            data = f.readlines()
        next_item_idx = 0
        timestep = None
        while len(data) > next_item_idx:
            line_idx = next_item_idx
            line = data[line_idx]
            if line[:5] != "ITEM:":
                raise RuntimeError("Section not starting with ITEM")
            section, args = self._parse_item(line[5:].strip())
            section_value, lines_parsed = getattr(self, "_parse_" + section)\
                (args, timestep, data, line_idx + 1)
            next_item_idx = line_idx + lines_parsed + 1
            # A TIMESTEP section opens a new frame; later sections attach
            # to the current frame.
            if section == "timestep":
                timestep = section_value
                self.__trj[timestep] = OrderedDict()
            self.__set_section_value(timestep, section, section_value)
        return self

    def to_file(self, filename=None):
        # Serialize the trajectory back to dump format; also returns the text.
        data = ""
        for timestep, frame in self.trj.items():
            for section, value in frame.items():
                try:
                    args, sectionlist = value
                except TypeError:
                    # Scalar sections (timestep, natoms) have no args.
                    args = []
                    sectionlist = [{None: value}]
                data += "ITEM: {:s}\n".format(' '.join(
                    [self.__valid_keys_rev[section]] + args))
                for line in sectionlist:
                    data += " ".join([str(x) for x in line.values()]) + "\n"
        if filename:
            with open(filename, "w") as f:
                f.write(data)
        return data

    def sort(self, key):
        # In-place sort of each frame's atoms by the given key function.
        for frame in self.trj.values():
            frame["atoms"][1].sort(key=key)
        return self

    def sort_id(self):
        return self.sort(lambda x : x["id"])

    def sort_mol_type_id(self):
        return self.sort(lambda x : [x["mol"], x["type"], x["id"]])

    def subtrj(self, timesteps):
        # Accept a single timestep or a list; frames are shared, not copied.
        try:
            timesteps[0]
        except:
            timesteps = [timesteps]
        rtn = LMPtrj()
        timesteps.sort()
        subtrj = OrderedDict((t, self.trj[t]) for t in timesteps)
        rtn.__set_trj(subtrj)
        return rtn

    ########################## public members ##########################
    @property
    def trj(self):
        return self.__trj

    @classproperty
    def valid_sections(cls):
        return cls.__valid_sections

    @classproperty
    def data_types(cls):
        return cls.__data_types

    ########################## private methods ##########################
    def __set_section_value(self, timestep, section, value):
        self.__trj[timestep][section] = value

    def __set_trj(self, trj):
        self.__trj = trj

    ########################## protected methods ##########################
    def _parse_atoms(self, args, timestep, data, start):
        # The atom count comes from the current frame's natoms section.
        natoms = self.trj[timestep]["natoms"]
        assert(len(data) >= start+natoms)
        return (args, self._parse_formatted_section(args,
            [self.data_types[arg] for arg in args],
            data[start:start+natoms])), natoms

    @classmethod
    def _parse_item(cls, line):
        # Match the header text against the known section names (prefix match)
        # and return (canonical_name, remaining_args).
        for key in cls.__valid_sections:
            if key == line[:len(key)]:
                return cls.__valid_keys[key], line[len(key):].split()
        raise ValueError(
            'Does not recognize the item "{}". Valid sections are '.\
            format(line) + ', '.join(cls.__valid_sections) + '.')
        return None, None

    @classmethod
    def _parse_bounds(cls, args, timestep, data, start):
        # Only periodic ("pp") boundaries are supported here.
        for arg in args:
            assert(arg == "pp")
        ndim = len(args)
        assert(len(data) >= start+ndim)
        return (args, cls._parse_formatted_section(["lbound", "hbound"],
            [float, float],
            data[start:start+ndim])), ndim

    @staticmethod
    def _parse_natoms(args, timestep, data, start):
        """ Must not use timestep, which is uninitialized, in this function """
        if args:
            raise RuntimeError("Non empty args")
        return int(data[start]), 1

    # TIMESTEP sections parse identically to NUMBER OF ATOMS.
    _parse_timestep = _parse_natoms

    @staticmethod
    def _parse_formatted_section(args, types, data):
        # Convert each whitespace-separated line into an {attr: typed value}
        # OrderedDict.
        rtn = []
        for line in data:
            elem = OrderedDict()
            for attr, type_, val in zip(args, types, line.split()):
                elem[attr] = type_(val)
            rtn.append(elem)
        return rtn

    ########################## private static members ##########################
    # could be overridden by derived class
    __valid_keys = OrderedDict([
        ("TIMESTEP" , "timestep"),
        ("NUMBER OF ATOMS", "natoms"),
        ("BOX BOUNDS" , "bounds"),
        ("ATOMS" , "atoms"),
    ])
    __valid_keys_rev = OrderedDict([
        (v, k) for k, v in __valid_keys.items()
    ])
    __valid_sections = __valid_keys.keys()
    __data_types = OrderedDict([
        ("id" , int),
        ("mol" , int),
        ("type" , int),
        ("q" , float),
        ("x" , float),
        ("y" , float),
        ("z" , float),
        ("vx" , float),
        ("vy" , float),
        ("vz" , float),
        ("fx" , float),
        ("fy" , float),
        ("fz" , float),
    ])
if __name__ == "__main__":
    # Smoke test: parse, round-trip, sort, and slice a sample trajectory.
    foo = LMPtrj()
    bar = LMPtrj("test/test.lammpstrj")
    foo.parse("test/test.lammpstrj")
    foo.to_file("result/rtn.lammpstrj")
    assert(foo.trj == bar.trj)
    foo.sort_id().to_file("result/sorted_id.lammpstrj")
    foo.sort_mol_type_id().to_file("result/sorted_moltypeid.lammpstrj")
    foo.subtrj(0).to_file("result/frame0.lammpstrj")
    #print(foo.trj)
    #print(foo.valid_sections)
|
import sys
import cloudinary
#---------------------------------------------------------------------------#
# Generic #
#---------------------------------------------------------------------------#
# SECURITY NOTE(review): this module hard-codes live credentials (Django
# SECRET_KEY, SMTP password, PayPal/Uploadcare/Cloudinary secrets). They
# should be rotated and loaded from environment variables or a secret store
# rather than committed to the repository.
SECRET_DEBUG = True
SECRET_KEY = 'xd#vc@mec1c0+wz^y&_i^-og&oy$mkn%_yky&xe^fo()mio$up'
# SECRET_ALLOWED_HOSTS = ['*']
#---------------------------------------------------------------------------#
# Database #
#---------------------------------------------------------------------------#
# SECRET_DB_USER = "sean"
# SECRET_DB_PASSWORD = "Aggreyomondi90"
#---------------------------------------------------------------------------#
# Email #
#---------------------------------------------------------------------------#
# Gmail SMTP over STARTTLS.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'leadershipacademyafrica@gmail.com'
EMAIL_HOST_PASSWORD = 'hrznsyzmgrrvlkhl'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'Leadership and development academy <info@leadershipanddevelopmentacademy.com>'
# EMAIL_HOST = 'smtp.sendgrid.net'
# EMAIL_PORT = 587
# EMAIL_HOST_USER = 'apikey'
# EMAIL_HOST_PASSWORD = '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'
# EMAIL_USE_TLS = True
# PayPal runs in live mode here (PAYPAL_TEST = False).
PAYPAL_RECEIVER_EMAIL = 'africaforsdgs2019@gmail.com'
PAYPAL_TEST = False
PAYPAL_CLIENT_ID = 'AYxM9ft3x-ODD8P60XP1fI7cljyjve2vY3Oaa2rzsyIMad7VcxhJABtlJBzhcP6-eztRghDfg3kn4Ato'
PAYPAL_CLIENT_SECRET = 'EDe6n1w19IPRTPdBwm10QSBxyF5o9ZEeH0R7mSJeI7yGLvhMS-7mOwMgcNPhUZg6zUpPUlnKsIDWE3Y3'
# PAYPAL_RECEIVER_EMAIL = 'rennyopascal@gmail.com'
# PAYPAL_TEST = False
# PAYPAL_CLIENT_ID = 'AXvkBngcxRDy96UenCWYPR5BPzGo-N40NT5wj1n7yeRiTtGq-U8qTbJQlznNY9uQ9lGb7pkPbImbCYzd'
# PAYPAL_CLIENT_SECRET = 'EFZ6RzSswSiZpM3AwZ_HVIjEfE0UP-SYKv_NMALWhe7Ula-tP_3kqJjI5UxqEF76S3ZD7AFlhug4eTb_'
UPLOADCARE = {
    # Don't forget to set real keys when it gets real :)
    'pub_key': 'd4d69f96f2e2dde353d1',
    'secret': 'a31559dd611e70c202f4',
}
# Configures the Cloudinary SDK globally at import time.
cloudinary.config(
    cloud_name ='lada' ,
    api_key = '991846289858872',
    api_secret = 'PxriDvQELG9426d-3KZ1_OtbsVE',
    secure = True
)
#---------------------------------------------------------------------------#
# Application Specific Settings #
#---------------------------------------------------------------------------#
# (Disable Ads when running Unit-Tests)
if 'test' in sys.argv:
    APPLICATION_HAS_ADVERTISMENT = False # (DO NOT MODIFY)
else:
    APPLICATION_HAS_ADVERTISMENT = False # (True = Yes I want advertisments)
APPLICATION_HAS_PUBLIC_ACCESS_TO_TEACHERS = True
import os
import yara
import time
import lief
import json
import pefile
import zipfile
import hashlib
import pythoncom
import win32com.client
from utils import db_comm
from utils import peparser
from utils import get_malicious
from utils.config import Config
from utils.yarascan import YaraScan
from utils.MSMQCustom import MSMQCustom
from utils.playbookSig import playbooksig
from utils.digicheck import DigitalSignatureCheck
from utils.graphity.graphity import get_behaviors
# Regex for embedded-file report rows: hex offset, size, description.
RE_EMBEDDED_FILE = r'0x([A-F0-9]+)\s+([0-9]+)\s+([^,:\(\.]+)'

opts = Config().read_config()

# Optional integrations are imported lazily, driven by config flags.
if opts["config"]["ENABLE_AV_OTX"] == 1:
    from utils import lookup
if opts["config"]["ENABLE_EMULATION"] == 1:
    from utils import binee
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open ZipFile *ziph*."""
    for dirpath, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            ziph.write(os.path.join(dirpath, filename))
def threat_intel_lookup_file(md5, sha1, sha2):
    """Look up a file's hashes on AlienVault OTX and merge the results.

    The sha1/sha2 pulse-merge loops were previously two identical 15-line
    copies; they are now a single helper. Behavior is unchanged.

    @param md5/sha1/sha2: hex digests of the same file.
    @return: tuple ``(final_alerts, exception_found)`` where ``final_alerts``
        may contain "AV Detections" (first non-empty of the three lookups)
        and "Pulse Alerts" (union of all three, de-duplicated by pulse id),
        and ``exception_found`` is True when a lookup raised.
    """
    final_alerts = {}
    exception_found = False
    try:
        md5_alert = lookup.alienvault_otx("hash", md5)
        sha1_alert = lookup.alienvault_otx("hash", sha1)
        sha2_alert = lookup.alienvault_otx("hash", sha2)

        # AV detections: take the first hash lookup that returned any.
        final_av_alerts = []
        for alert in (md5_alert, sha1_alert, sha2_alert):
            if "AV Detections" in alert:
                final_av_alerts = alert["AV Detections"]
                break
        if len(final_av_alerts) > 0:
            final_alerts["AV Detections"] = final_av_alerts

        # Pulse alerts: seed from the md5 result, then merge in any alert
        # whose pulse ids are all new (each alert is a {pulse_id: ...} dict).
        final_pulse_alerts = md5_alert.get("Pulse Alerts", [])

        def _merge_pulse_alerts(extra_alerts):
            # Append alerts sharing no pulse id with those already collected.
            for alert in extra_alerts:
                known = any(
                    pid == final_pid
                    for alert_final in final_pulse_alerts
                    for final_pid in alert_final.keys()
                    for pid in alert.keys()
                )
                if not known:
                    final_pulse_alerts.append(alert)

        _merge_pulse_alerts(sha1_alert.get("Pulse Alerts", []))
        _merge_pulse_alerts(sha2_alert.get("Pulse Alerts", []))

        if len(final_pulse_alerts) > 0:
            final_alerts["Pulse Alerts"] = final_pulse_alerts
    except Exception as e:
        print("Error: %s" %str(e))
        exception_found = True
    return (final_alerts, exception_found)
def tip_lookup(dst_file_static):
    """Cross-reference indicators from a static-analysis report with TI feeds.

    Reads the yara-match JSON at *dst_file_static*, looks up every
    URL/domain/IP indicator on AlienVault OTX and hPHosts/Spamhaus, and
    returns a tuple ``(tip_json, exception_found)``.
    """
    tip_json = {}
    exception_found = False
    try:
        with open(dst_file_static, 'rb') as fp:
            suspicious_strings = json.load(fp)
        if "Yara Matched" in suspicious_strings:
            for tag in list(suspicious_strings["Yara Matched"].keys()):
                # NOTE(review): "URL" is tested twice in this condition — one
                # occurrence was presumably meant to be a different indicator
                # type (e.g. hostname); confirm against the rule set.
                if tag == "URL" or tag == "domain" or tag == "IP" or tag == "URL":
                    for rule_name in list(suspicious_strings["Yara Matched"][tag].keys()):
                        if "indicators_matched" in suspicious_strings["Yara Matched"][tag][rule_name]:
                            for indicator in suspicious_strings["Yara Matched"][tag][rule_name]["indicators_matched"]:
                                # AlienVault OTX lookup per indicator.
                                otx_alerts = lookup.alienvault_otx(tag, indicator)
                                if len(otx_alerts) > 0:
                                    if tag not in tip_json.keys():
                                        tip_json[tag] = {}
                                    if indicator not in tip_json[tag].keys():
                                        tip_json[tag][indicator] = {}
                                    tip_json[tag][indicator]["AlienVault"] = otx_alerts
                                # hPHosts / Spamhaus blocklist lookup.
                                alerts_hphosts = lookup.hphosts_spamhaus(indicator)
                                if len(alerts_hphosts) > 0:
                                    for ti in alerts_hphosts:
                                        if tag not in tip_json.keys():
                                            tip_json[tag] = {}
                                        if indicator not in tip_json[tag].keys():
                                            tip_json[tag][indicator] = {}
                                        tip_json[tag][indicator][ti] = "Found malicious on " + ti
    except Exception as e:
        print("Error: %s" %str(e))
        exception_found = True
    return (tip_json, exception_found)
def process_file(yara_scan, yara_rules, yara_id_rules, yara_mitre_rules, input_file, output_file_static,
                 outputfile_mitre):
    """Run the MITRE, file-type, and generic yara rule sets over *input_file*.

    Writes two JSON reports: *outputfile_mitre* (MITRE rule matches) and
    *output_file_static* (file-type info plus generic yara matches).

    NOTE(review): match results accumulate on the shared ``yara_scan`` object
    (``yara_sig_matched`` / ``yara_idsig_matched``), so the order of the three
    ``match`` calls and the reset between them is significant.
    """
    try:
        with open(input_file, 'rb') as f:
            file_data = f.read()
        # 1) MITRE rules -> outputfile_mitre.
        yara_mitre_rules.match(data=file_data, callback=yara_scan.yara_callback_desc,
                               which_callbacks=yara.CALLBACK_MATCHES)
        json_data = yara_scan.yara_sig_matched
        with open(outputfile_mitre, 'w') as fw:
            json_report = json.dumps(json_data, sort_keys=True, indent=4)
            fw.write(json_report)
        json_data = {}
        # 2) File-type identification rules.
        yara_id_rules.match(data=file_data, callback=yara_scan.yara_callback, which_callbacks=yara.CALLBACK_MATCHES)
        json_data['File Type Information'] = yara_scan.yara_idsig_matched
        # Reset accumulated matches before the generic rule pass.
        yara_scan.yara_sig_matched = {}
        # 3) Generic yara rules -> output_file_static.
        yara_rules.match(data=file_data, callback=yara_scan.yara_callback_desc, which_callbacks=yara.CALLBACK_MATCHES)
        json_data['Yara Matched'] = yara_scan.yara_sig_matched
        with open(output_file_static, 'w') as fw:
            json_report = json.dumps(json_data, sort_keys=True, indent=4)
            fw.write(json_report)
    except Exception as e:
        print("Error while parsing for mitre and yara")
        print((str(e)))
def process_dir(src_dir, dst_dir, sample_type):
    """Run the full analysis pipeline over every file under src_dir.

    For each sample a set of JSON reports is written next to it in dst_dir
    (basic info, threat intel, static parse, yara/MITRE, emulation, behavior,
    playbook), and finally dst_dir is zipped into OUTPUT_DIR. sample_type
    selects the static parser ("PE" or "ELF"). Returns False as soon as any
    stage of any file failed, True otherwise.
    """
    print(("Processing: " + src_dir + " ..."))
    md5 = ""
    sha1 = ""
    sha2 = ""
    yara_scan = YaraScan()
    # Rule sets: generic signatures, file-type identification, MITRE techniques.
    yara_rules = yara.compile('./yara_sigs/index.yar')
    yara_idrules = yara.compile('./yara_sigs/index_id.yar')
    yara_mitre_rules = yara.compile('./yara_sigs/index_mitre.yar')
    for root_dir, dirs, files in os.walk(src_dir):
        for filename in files:
            failed = False
            src_file = os.path.join(root_dir, filename)
            # --- Stage 1: hashes and basic info -> .basic_info.json ---
            try:
                with open(src_file, 'rb') as f:
                    contents = f.read()
                file_size = len(contents)
                sha1 = hashlib.sha1(contents).hexdigest()
                sha2 = hashlib.sha256(contents).hexdigest()
                # MD5 fed in 8 KiB chunks (contents is already fully in memory,
                # so this is equivalent to hashing it whole).
                md5_obj = hashlib.md5()
                for i in range(0, len(contents), 8192):
                    md5_obj.update(contents[i:i + 8192])
                md5 = md5_obj.hexdigest()
                basic_info = {'MD5': md5, 'SHA1': sha1, 'SHA256': sha2, 'File Size': file_size}
                with open(os.path.join(dst_dir, filename) + ".basic_info.json", 'w') as fw:
                    json.dump(basic_info, fw)
                print("basic info done")
            except Exception as e:
                print(("Error: " + str(e)))
                failed = True
            # --- Stage 2: hash-based threat intel -> .threat_intel_file.json ---
            try:
                if md5 != "" and sha1 != "" and sha2 != "" and opts["config"]["ENABLE_AV_OTX"] == 1:
                    retrun_val = threat_intel_lookup_file(md5, sha1, sha2)  # [sic] variable-name typo preserved
                    final_alerts = retrun_val[0]
                    if retrun_val[1] == True:
                        failed = True
                    if len(final_alerts.keys()) > 0:
                        with open(os.path.join(dst_dir, filename) + ".threat_intel_file.json", 'w') as fw:
                            json.dump(final_alerts, fw)
                    else:
                        print("No, Threat Data found")
                    print("Threat Intel File done")
            except Exception as e:
                print(("Error: " + str(e)))
                failed = True
            # --- Stage 3: format-specific static parsing -> .static.json ---
            if sample_type == "PE":
                try:
                    peparsed = peparser.parse(src_file)
                    with open(os.path.join(dst_dir, filename) + ".static.json", 'w') as fp:
                        json.dump(peparsed, fp)
                    print("Static done")
                    with open(os.path.join(dst_dir, filename) + ".cert.json", 'w') as fp:
                        digiSig = DigitalSignatureCheck()
                        digiSig.run(src_file)
                        json.dump(digiSig._REQ_DATA_FIELD, fp)
                    print("Cert done")
                except Exception as e:
                    print((str(e)))
                    print("No static data.. !!")
                    failed = True
            elif sample_type == "ELF":
                try:
                    binary = lief.parse(src_file)
                    elfparsed = json.loads(lief.to_json(binary))
                    with open(os.path.join(dst_dir, filename) + ".static.json", 'w') as fp:
                        json.dump(elfparsed, fp)
                    print("Linux Static done")
                except Exception as e:
                    print((str(e)))
                    print("No static data.. !!")
                    failed = True
            # --- Stage 4: yara + MITRE scan -> .yara.json / .mitre.json ---
            try:
                dst_file_static = os.path.join(dst_dir, filename) + ".yara.json"
                dst_file_mitre = os.path.join(dst_dir, filename) + ".mitre.json"
                # run yara rules on file
                process_file(yara_scan, yara_rules, yara_idrules, yara_mitre_rules, src_file, dst_file_static,
                             dst_file_mitre)
            except Exception as e:
                print((str(e)))
                print("Yara Part did not run")
                failed = True
            # --- Stage 5: indicator lookup on yara output -> .tip.json ---
            try:
                tip_file = os.path.join(dst_dir, filename) + ".tip.json"
                tip_json = {}
                # dst_file_static is always bound: the assignment above is the
                # first statement of the previous try block.
                if opts["config"]["ENABLE_AV_OTX"] == 1 and os.path.exists(dst_file_static):
                    ret_val = tip_lookup(dst_file_static)
                    tip_json = ret_val[0]
                    if ret_val[1]:
                        failed = True
                if (len(tip_json.keys()) > 0):
                    with open(tip_file, 'w') as fw:
                        json.dump(tip_json, fw)
            except Exception as e:
                print((str(e)))
                print("Lookup Part did not run")
            # --- Stage 6: binee emulation (PE only) -> .emulation.json ---
            try:
                if opts["config"]["ENABLE_EMULATION"] == 1 and sample_type == "PE":
                    dst_binee_file = os.path.abspath(os.path.join(dst_dir, filename) + ".binee.json")
                    report_emulation_json = binee.emulate(os.path.abspath(src_file), dst_binee_file)
                    if len(report_emulation_json.keys()) > 0:
                        report_emulation_file = os.path.abspath(os.path.join(dst_dir, filename) + ".emulation.json")
                        with open(report_emulation_file, 'w') as fw:
                            json.dump(report_emulation_json, fw)
            except Exception as e:
                print((str(e)))
                print("Emulation part did not run")
            # --- Stage 7: behavior extraction -> .behav.json ---
            try:
                dst_file = os.path.join(dst_dir, filename) + ".behav.json"
                get_behaviors(src_file, dst_file, dst_dir)
            except Exception as e:
                print((str(e)))
                print("Behavior part did not run..!!")
                failed = True
            # --- Stage 8: merge API-based MITRE hits, then playbook matching ---
            try:
                if os.path.exists(os.path.join(dst_dir, filename) + ".behav.json"):
                    with open(os.path.join(dst_dir, filename) + ".behav.json", 'rb') as fp:
                        file_data = fp.read()
                    json_data = {}
                    # NOTE(review): Windows-style path, unlike the forward-slash
                    # yara paths above — confirm this is Windows-only on purpose.
                    yara_mitre_api = yara.compile('.\\yara_sigs\\mitre\\api_based.yar')
                    yara_scan.yara_sig_matched = {}
                    yara_mitre_api.match(data=file_data, callback=yara_scan.yara_callback_desc,
                                         which_callbacks=yara.CALLBACK_MATCHES)
                    json_data['API_MITRE'] = yara_scan.yara_sig_matched
                    dst_file_mitre = os.path.join(dst_dir, filename) + ".mitre.json"
                    try:
                        # Merge API-based technique hits into the static MITRE report.
                        with open(dst_file_mitre, 'rb') as fs:
                            mitre_matched_json = json.loads(fs.read())
                        dump_mitre = mitre_matched_json
                        for matched_tid in list(json_data['API_MITRE'].keys()):
                            if matched_tid in mitre_matched_json.keys():
                                dump_mitre[matched_tid].update(json_data['API_MITRE'][matched_tid])
                            else:
                                dump_mitre[matched_tid] = json_data['API_MITRE'][matched_tid]
                    except:
                        # Merge failed: fall back to the static MITRE report as-is.
                        dst_file_mitre = os.path.join(dst_dir, filename) + ".mitre.json"
                        with open(dst_file_mitre, 'rb') as fs:
                            dump_mitre = json.loads(fs.read())
                    with open(dst_file_mitre, 'wb') as fs:
                        fs.write(json.dumps(dump_mitre, sort_keys=True, indent=4).encode('utf-8'))
                    dst_campaign_file = os.path.join(dst_dir, filename) + ".campaign.json"
                    playbooksig(opts["config"]["PLAYBOOK_JSON"], dst_file_mitre, dst_campaign_file)
                    print("Playbook part done")
                else:
                    # No behavior report: just re-format the static MITRE report.
                    dst_file_mitre = os.path.join(dst_dir, filename) + ".mitre.json"
                    with open(dst_file_mitre, 'rb') as fs:
                        mitre_matched_json = json.loads(fs.read())
                    with open(dst_file_mitre, 'wb') as fs:
                        fs.write(json.dumps(mitre_matched_json, sort_keys=True, indent=4).encode('utf-8'))
                    dst_campaign_file = os.path.join(dst_dir, filename) + ".campaign.json"
                    playbooksig(opts["config"]["PLAYBOOK_JSON"], dst_file_mitre, dst_campaign_file)
                    print("Playbook part done")
            except Exception as e:
                print((str(e)))
                print("MITRE and Playbook part did not work properly")
                failed = True
            # --- Stage 9: zip the per-sample reports into OUTPUT_DIR ---
            try:
                report_folder_name = dst_dir.split("\\")[-1]  # Windows path separator assumed
                zipf = zipfile.ZipFile(os.path.join(opts["config"]["OUTPUT_DIR"], report_folder_name+'.zip'), 'w',
                                       zipfile.ZIP_DEFLATED)
                zipdir(dst_dir, zipf)
                zipf.close()
            except Exception as e:
                print((str(e)))
                failed = True
            # Early exit on the first file whose pipeline failed.
            if failed:
                return False
    return True
def check_queue():
    """Poll the MSMQ sample queue forever, dispatching each message to process_dir.

    Message Label carries the sample type ("PE"/"ELF"); Body carries the
    sub-directory name under INPUT_DIR/OUTPUT_DIR. The per-sample result is
    written back to the database as COMPLETED or FAILED.
    """
    try:
        print("Running..")
        print("Checking for New Sample..")
        queue = MSMQCustom(opts["config"]["QUEUE_NAME"])
        queue.open_queue(1, 0)  # open a ref to the queue to read (1)
        while True:
            if not queue.peek(1000):
                continue
            msg = queue.recv_from_queue()
            if not msg:
                continue
            print("Found new sample:")
            print("Label:", msg.Label)
            print("Body :", msg.Body)
            done = process_dir(os.path.join(opts["config"]["INPUT_DIR"], msg.Body),
                               os.path.join(opts["config"]["OUTPUT_DIR"], msg.Body), msg.Label)
            db_comm.update(msg.Body, "COMPLETED" if done else "FAILED")
    except Exception as e:
        print(str(e))
if __name__ == '__main__':
    # Entry point: block forever polling the MSMQ queue for new samples.
    check_queue()
|
from jetbotSim import Robot, Camera
import numpy as np
import cv2,math
frames = 0  # frames processed so far (incremented by execute)
objpoints = []  # accumulated 3-D board points, one entry per successful detection
# Camera intrinsic matrix (fx, fy, principal point) — presumably from a prior
# calibration run; TODO confirm it matches the simulator camera.
Matrix = np.array([[568.67291932, 0.00000000e+00, 518.70213251],
[0.00000000e+00, 567.49287398, 245.11856484],
[0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
# Lens distortion coefficients (k1, k2, p1, p2, k3) from the same calibration.
k = np.array([[ -0.30992158, 0.10084567, 0.00088568 ,-0.00114713, -0.01561677]])
def detect_chessboard(image):
    """Find a 6x3 chessboard pattern in an RGB image.

    Returns (found, object_points, corners). When found is True,
    object_points is the (18, 3) float32 grid of board coordinates
    (20-unit squares, z = 0) corresponding to the detected corners;
    otherwise (False, 0, 0) is returned. Successful detections are also
    appended to the global objpoints list.
    """
    # Board coordinates: x-y grid scaled by the 20-unit square size, z = 0.
    objp = np.zeros((3*6,3), np.float32)
    objp[:,:2] = np.mgrid[0:6,0:3].T.reshape(-1,2)*20
    # Removed unused locals from the original: 'criteria' (cornerSubPix was
    # never called), 'bytesPerLine' and the h/w/c unpack, and the 'img' alias.
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (6,3),None)
    if ret:
        objpoints.append(objp)
        print(corners)
        print(objp)
        print('find target%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
        return ret,objp,corners
    else:
        print('cant find target')
        return ret,0,0
def solve_dis(objectPoints,imagePoints,cameraMatrix,distCoeffs):
    """Estimate the board pose; returns (rotation_vector, translation_vector)."""
    # cv2.solvePnP also returns a success flag, which callers here ignore.
    success, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
    return rvec, tvec
def execute(change):
    """Per-frame camera callback: show the frame and, when a chessboard is
    visible, print its pose and distance from the camera."""
    global robot, frames
    print("\rFrames", frames, end="")
    frames += 1
    # Visualize
    img = cv2.resize(change["new"],(640,360))
    # NOTE(review): cv2.imshow without a cv2.waitKey(1) usually does not refresh
    # the window — confirm the jetbotSim event loop pumps GUI events.
    cv2.imshow("camera", img)
    ret,objp,corners = detect_chessboard(img)
    if ret !=0:
        R, T = solve_dis(objp,corners,Matrix,k)
        print(R)
        print(T)
        # Euclidean distance of the board origin from the camera, in the same
        # units as the 20-unit grid spacing used in detect_chessboard.
        print(math.sqrt(T[0]**2+T[1]**2+T[2]**2))
# Wire up the simulator: every camera frame is delivered to execute().
robot = Robot()
camera = Camera()
camera.observe(execute)
|
import logging
from os.path import exists
import unittest
from db import Db
from generic_dao import GenericDao
class TestGenericDao(unittest.TestCase):
    """End-to-end exercise of GenericDao CRUD operations on a scratch database."""

    class TestDao(GenericDao):
        """Minimal concrete DAO describing the fixture table."""

        @property
        def table_name(self):
            return "test_table"

        @property
        def columns(self):
            return ["id", "data"]

        @property
        def column_types(self):
            return ["integer", "text"]

        @property
        def primary_key_index(self):
            # Column 0 ("id") is the primary key.
            return 0

    # The database file is created as "<db_name>.db" in the working directory.
    db_name = "generic_dao_test"
    test_data = [
        {
            "id": 1,
            "data": "Mairzy doats and dozy doats"
        },
        {
            "id": 2,
            "data": "Four score and seven years ago"
        }
    ]

    def test_generic_dao(self):
        """Create a table, insert rows, read them back in several orders, clear, destroy."""
        # Expected rows as (id, data) tuples, matching the DAO's column order.
        expected_1 = (TestGenericDao.test_data[0]["id"], TestGenericDao.test_data[0]["data"])
        expected_2 = (TestGenericDao.test_data[1]["id"], TestGenericDao.test_data[1]["data"])
        # Make sure we're not overwriting a database
        if exists(f'{TestGenericDao.db_name}.db'):
            logging.fatal(f"Database test file '{TestGenericDao.db_name}.db' already exists. Aborting.")
            # Bug fix: the original bare 'return' made the test silently PASS
            # when the precondition failed; report it as skipped instead.
            self.skipTest(f"Database test file '{TestGenericDao.db_name}.db' already exists")
        # Create the database and dao
        db = Db(TestGenericDao.db_name)
        dao = TestGenericDao.TestDao()
        # Test non-existence
        self.assertFalse(db.does_table_exist(dao),
                         f"Database table '{dao.table_name}' already exists")
        # Test creation
        db.create_table(dao)
        self.assertTrue(db.does_table_exist(dao),
                        "Database table does not exist after creation")
        # Test empty count
        self.assertEqual(dao.get_count(db), 0,
                         "Database table contains rows when none entered")
        # Add one record
        try:
            dao.create(db, TestGenericDao.test_data[0])
        except Exception as e:
            self.fail(f"Create of first record failed with exception {e}")
        # Test single count
        self.assertEqual(dao.get_count(db), 1,
                         "Incorrect number of records after one create")
        # Add a second record
        try:
            dao.create(db, TestGenericDao.test_data[1])
        except Exception as e:
            self.fail(f"Create of second record failed with exception {e}")
        # Test second count
        self.assertEqual(dao.get_count(db), 2,
                         "Incorrect number of records after two creates")
        # Read all records (ascending by id)
        try:
            result = dao.read(db, 0, 2, "id", False)
        except Exception as e:
            self.fail(f"Read all record failed with exception {e}")
        self.assertEqual(result, [expected_1, expected_2],
                         f"All results {result} do not match expected {[expected_1, expected_2]}")
        # Test sorting 1: id descending
        try:
            result = dao.read(db, 0, 2, "id", True)
        except Exception as e:
            self.fail(f"Read sorting 1 failed with exception {e}")
        self.assertEqual(result, [expected_2, expected_1],
                         f"Sorting 1 results {result} do not match expected output")
        # Test sorting 2: data ascending ("Four..." sorts before "Mairzy...")
        try:
            result = dao.read(db, 0, 2, "data", False)
        except Exception as e:
            self.fail(f"Read sorting 2 failed with exception {e}")
        self.assertEqual(result, [expected_2, expected_1],
                         f"Sorting 2 results {result} do not match expected output")
        # Test sorting 3: data descending
        try:
            result = dao.read(db, 0, 2, "data", True)
        except Exception as e:
            self.fail(f"Read sorting 3 failed with exception {e}")
        self.assertEqual(result, [expected_1, expected_2],
                         f"Sorting 3 results {result} do not match expected output")
        # Test read limit
        try:
            result = dao.read(db, 0, 1, "id", False)
        except Exception as e:
            self.fail(f"Read limit failed with exception {e}")
        self.assertEqual(result, [expected_1],
                         f"Read limit results {result} do not match {[expected_1]}")
        # Test read offset
        try:
            result = dao.read(db, 1, 1, "id", False)
        except Exception as e:
            self.fail(f"Read offset failed with exception {e}")
        self.assertEqual(result, [expected_2],
                         f"Read offset results {result} do not match {[expected_2]}")
        # Test clear_table
        try:
            dao.clear_table(db)
        except Exception as e:
            self.fail(f"Clear table failed with exception {e}")
        self.assertEqual(dao.get_count(db), 0,
                         "Table is not empty after clear table is called")
        # Clean up
        db.delete_table(dao)
        db.destroy()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# Python implementation of post at
# https://www.topcoder.com/community/data-science/data-science-tutorials/assignment-problem-and-hungarian-algorithm/
import numpy as np
import pdb
__all__ = ['Hungarian']
class Hungarian:
    """Kuhn–Munkres (Hungarian) solver for the maximum-weight assignment problem.

    Usage: ok, matching = Hungarian()(cost), where cost is a square numpy
    array. matching is a 0/1 matrix with one 1 per row when ok is True.
    NOTE(review): negative costs are clamped to 0 *in place*, mutating the
    caller's array.
    """
    def __init__(self):
        self.max_match = 0   # number of matched vertices so far
        self.cost = None     # cost matrix (after clamping)
        self.n = 0           # problem size (cost is n x n)
        self.xy = None       # xy[x]: y matched with x, or -1
        self.yx = None       # yx[y]: x matched with y, or -1
        self.S = None        # S and T vertex sets of the alternating tree
        self.T = None
        self.slack = None    # slack[y]: min label surplus over S for y
        self.slackx = None   # slackx[y]: x attaining slack[y]
        self.prev = None     # alternating-path parent links
        self.lx = None       # vertex labels for the X (row) side
        self.ly = None       # vertex labels for the Y (column) side
        self.iters = 0       # safety counter used in __augment
    def __reset(self, cost):
        """Initialize all state for a new cost matrix; returns False on NaNs."""
        if np.isnan(np.sum(cost)):
            return False
        self.iters = 0
        # if any entries < 0, clamp to 0 (mutates the caller's array)
        cost[cost < 0] = 0.
        nx, ny = np.shape(cost)
        assert nx == ny
        self.n = nx  # n of workers
        self.max_match = 0  # n of jobs
        self.xy = [-1 for _ in range(self.n)]  # vertex that is matched with x
        self.yx = [-1 for _ in range(self.n)]  # vertex that is matched with y
        self.S = [False for _ in range(self.n)]  # sets S and T in algorithm
        self.T = [False for _ in range(self.n)]
        self.slack = np.zeros(self.n)  # amount by which sum of labels exceed edge weights
        self.slackx = np.zeros(self.n)  # such a vertex that l(slackx[y]) + l(y) - w(slackx[y],y) = slack[y]
        self.prev = -1 * np.ones(self.n)  # array for memorizing alternating paths
        self.cost = cost
        # init labels: lx[x] = max cost in row x (feasible labeling), ly = 0
        self.lx = np.zeros(self.n)
        self.ly = np.zeros(self.n)  # labels of Y parts
        for x in range(self.n):
            for y in range(self.n):
                self.lx[x] = max(self.lx[x], self.cost[x, y])
        return True
    def __update_labels(self):
        """Improve the labeling by the minimum slack over Y \\ T (standard KM step)."""
        delta = np.inf
        for y in range(self.n):  # calculate delta
            if not self.T[y]:
                delta = min(delta, self.slack[y])
        for x in range(self.n):  # update X labels
            if self.S[x]:
                self.lx[x] -= delta
        for y in range(self.n):  # update Y labels
            if self.T[y]:
                self.ly[y] += delta
        for y in range(self.n):  # update slack array
            if not self.T[y]:
                self.slack[y] -= delta
    def __add_to_tree(self, x, prevx):
        """
        args:
            x: current vertex
            prevx: vertex from X before x in the alternating path,
                   so we add edges (prevx, xy[x]), (xy[x], x)
        """
        x = int(x)
        prevx = int(prevx)
        self.S[x] = True
        self.prev[x] = prevx
        # Adding x to S may tighten the slack of every unmatched y.
        for y in range(self.n):
            if self.lx[x] + self.ly[y] - self.cost[x, y] < self.slack[y]:
                self.slack[y] = self.lx[x] + self.ly[y] - self.cost[x, y]
                self.slackx[y] = x
    def __augment(self):
        """Find one augmenting path and recurse until a perfect matching exists."""
        if self.max_match == self.n:
            return
        root = 0
        x = y = root
        q = [0 for _ in range(self.n)]  # BFS queue over X vertices
        wr = 0
        rd = 0
        # reset the alternating tree
        self.S = [False for _ in range(self.n)]
        self.T = [False for _ in range(self.n)]
        self.prev = [-1 for _ in range(self.n)]
        # finding root of the tree: first unmatched x
        for x in range(self.n):
            if self.xy[x] == -1:
                q[wr] = root = x
                wr += 1
                self.prev[x] = -2  # -2 marks the root
                self.S[x] = 1
                break
        for y in range(self.n):
            self.slack[y] = self.lx[root] + self.ly[y] - self.cost[root, y]
            self.slackx[y] = root
        while True:
            # NOTE(review): debugger breakpoint left in library code — this will
            # hang non-interactive runs if the iteration guard ever trips.
            self.iters += 1
            if self.iters > 10000:
                print(self.cost)
                pdb.set_trace()
            while rd < wr:  # building tree with bfs cycle
                x = q[rd]  # current vertex from X part
                rd += 1
                # NOTE(review): y is NOT reset to 0 for each x here (the
                # reference C implementation uses "for (y = 0; y < n; y++)"),
                # so only a suffix of Y is scanned per x — verify against the
                # topcoder original.
                while y < self.n:  # iterate through all edges in equality graph
                    if (self.cost[x, y] == self.lx[x] + self.ly[y]) and not self.T[y]:
                        if self.yx[y] == -1:  # an exposed vertex in Y found, so augmenting path exists!
                            break
                        self.T[y] = True  # else just add y to T,
                        q[wr] = self.yx[y]  # add vertex yx[y], which is matched
                        wr += 1
                        self.__add_to_tree(self.yx[y], x)  # add edges (x,y) and (y,yx[y]) to the tree
                    y += 1
                if y < self.n:  # augmenting path found
                    break
            if y < self.n:
                break
            self.__update_labels()  # augmenting path not found, improve labeling
            wr = rd = 0
            y = 0
            steps = 0
            while y < self.n:
                # in this cycle we add edges that were added to the equality graph as a
                # result of improving the labeling, we add edge (slackx[y], y) to the tree if
                # and only if !T[y] && slack[y] == 0, also with this edge we add another one
                # (y, yx[y]) or augment the matching, if y was exposed
                if not self.T[y] and self.slack[y] == 0:
                    if self.yx[y] == -1:  # exposed vertex in Y found - augmenting path exists
                        x = self.slackx[y]
                        break
                    else:  # else just add y to T
                        self.T[y] = True
                        if not self.S[int(self.yx[y])]:
                            q[wr] = self.yx[y]  # add vertex yx[y], which is matched with
                            wr += 1  # y, to the queue
                            self.__add_to_tree(self.yx[y], self.slackx[y])  # and add edges (x,y) and (y,
                            # yx[y]) to the tree
                y += 1
            if y < self.n:
                break
        if y < self.n:  # we found an augmenting path
            self.max_match += 1
            # invert edges along the augmenting path
            cx = int(x)
            cy = int(y)
            while cx != -2:
                ty = self.xy[cx]
                self.yx[cy] = cx
                self.xy[cx] = cy
                cx = self.prev[cx]
                cy = ty
            self.__augment()
    def __call__(self, cost):
        """Solve the assignment for 'cost'; returns (ok, assignment_matrix)."""
        if self.__reset(cost):
            self.__augment()
            # NOTE(review): 'ret' (the total reward) is computed but never
            # returned — callers wanting the reward must recompute it from m.
            ret = 0
            m = np.zeros((self.n, self.n), dtype=int)
            for i in range(self.n):
                ret += self.cost[i, self.xy[i]]
                m[i, self.xy[i]] = 1
            return True, m
        else:
            # Cost matrix contained NaNs; nothing solved.
            return False, None
if __name__ == '__main__':
    # Smoke-test / micro-benchmark: solve random assignments of growing size.
    import matplotlib.pyplot as plt
    import time
    times = []
    for i in range(5, 10):
        c = np.random.randn(i, i)
        hung = Hungarian()
        start = time.time()
        # Bug fix: Hungarian has no .solve() method — the solver is invoked via
        # __call__ and returns (ok, assignment_matrix).
        ok, matching = hung(c)
        diff = time.time() - start
        # __reset() clamps negative entries of c to 0 in place, so summing
        # c * matching reproduces the reward the solver maximized.
        max_reward = float((c * matching).sum()) if ok else float('nan')
        print('Maximum assignment reward: {} in {} sec'.format(max_reward, diff))
        print('Maximal matching: \n{}'.format(matching))
        times.append(diff)
    #plt.plot(times)
    #plt.show()
|
#!/usr/bin/env python
#
# Set the GPIO state of a specified pin. Return the pin with its new value.
# Package: gpio_msgs. Support for setting a single Raspberry Pi GPIO output.
#
import sys
import rospy
import RPi.GPIO as GPIO
from std_msgs.msg import String
from gpio_msgs.srv import GPIOSet, GPIOSetResponse
def set_GPIO(request):
    """Service handler: drive one GPIO output channel and report its new state.

    Returns a GPIOSetResponse with the channel, the value read back after the
    write, and a status message. Fails (value=False) when the channel is not
    configured as an output.
    """
    response = GPIOSetResponse()
    response.channel = request.channel
    # Bug fix: 'channel' was previously unbound (NameError on first use).
    channel = request.channel
    # Bug fix: RPi.GPIO exposes gpio_function(), which returns GPIO.IN/GPIO.OUT
    # constants (not strings); a pin can only be *driven* when set up as OUT —
    # the original compared against the string "IN".
    if GPIO.gpio_function(channel) == GPIO.OUT:
        GPIO.output(channel, request.value)
        response.value = GPIO.input(channel)
        response.msg = "Success"
    else:
        # Bug fix: the original assigned a tuple to msg via comma-separated
        # strings; build one string instead.
        response.msg = "GPIOSet error: channel %s not configured as an OUT" % channel
        response.value = False
    rospy.loginfo(response.msg)
    return response
if __name__ == "__main__":
    # Register the GPIO-set service and block until the node is shut down.
    rospy.init_node('sb_serve_gpio_set')
    serve = rospy.Service('/sb_serve_gpio_set',GPIOSet,set_GPIO)
    rospy.spin()
|
import re
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if re.search('From:', line):
print line
###############################################
#achieve above not using regular expression
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if line.find('From:') >=0:
print line
#################################################
# to instruct that 'From' appears in beginning of line
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if line.startswith('From:') >=0:
print line
####################################################
#to instruct same in re
import re
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
if re.search('^From:', line):
print line
#################################################
# . represents wildcard
# * represents any number of times
# \S represents any non blank character
# e.g. ^X-\S+: means starts with X- followed by any non blank character and then : |
from riotwatcher import RiotWatcher
import json
from time import sleep
# NOTE(review): API key hard-coded in source — it should be rotated and loaded
# from an environment variable instead of being committed.
w = RiotWatcher('21e6bb30-08e1-47ed-946f-cee514b740d8')
challenger = w.get_challenger()
def getPlayerIds(league):
    """Return the playerOrTeamId of every entry in a league dict.

    league is the payload returned by the Riot league API; its 'entries'
    list holds one dict per player/team.
    """
    # Removed the unused 'breakPoint' counter and the commented-out
    # early-exit debug code from the original.
    return [entry['playerOrTeamId'] for entry in league['entries']]
#returns the recent history of an ID
def getHistory(id):
    # Sleep first to stay under the Riot API rate limit.
    sleep(1)
    return w.get_recent_games(id)
def writeAllGames(ids):
    """Fetch each player's recent games and dump each game to Games/P<p>G<g>.txt.

    ids is a list of player ids; failures for individual games are logged
    (the offending game id is printed) and skipped.
    """
    playerNum = 0
    for player_id in ids:  # renamed from 'id' to avoid shadowing the builtin
        hist = getHistory(player_id)
        gameids = getGameIds(hist)
        gamenum = 0
        for gameid in gameids:
            try:
                game = getGame(int(gameid))
                fileName = "Games/P" + str(playerNum) + "G" + str(gamenum) + ".txt"
                gamenum += 1
                # Bug fix: use a context manager so the handle is closed even
                # if json.dumps raises; 'file' also shadowed the builtin.
                with open(fileName, 'w') as out:
                    out.write(json.dumps(game))
            except Exception:
                # Bug fix: narrowed the bare except (it swallowed even
                # KeyboardInterrupt); still best-effort — log and continue.
                print(gameid)
        playerNum += 1
#returns game object after sleeping
def getGame(gameid):
    # Sleep first to stay under the Riot API rate limit.
    sleep(1)
    game = w.get_match(gameid, w.default_region, True)
    return game
def getGameIds (hist):
    """Return the gameId of every game in a recent-history payload."""
    return [game["gameId"] for game in hist["games"]]
#print(w.get_match(2004556024, w.default_region, True))
#run
# Entry point: dump the recent games of every challenger-league player.
writeAllGames(getPlayerIds(challenger))
|
import os
# Files handed to PyInstaller's Analysis: the entry script plus all plugins.
anaFiles = ["start_lunchinator.py"]
# Bug fix: the original also did
#   anaFiles.extend(f for f in os.listdir("plugins") if os.path.isfile(f) ...)
# which tested bare names against the CWD (not the plugins directory) and
# duplicated the loop below, so it has been removed.
for aFile in os.listdir("plugins"):
    aFile = os.path.join("plugins", aFile)
    if aFile.endswith(".py"):
        anaFiles.append(aFile)
    if os.path.isdir(aFile):
        # Package plugins are represented by their __init__.py.
        if os.path.exists(os.path.join(aFile, "__init__.py")):
            anaFiles.append(os.path.join(aFile, "__init__.py"))
# -*- mode: python -*-
# PyInstaller spec section: Analysis, PYZ and EXE are injected into this
# file's namespace by PyInstaller when it executes the spec — they are not
# importable here.
a = Analysis(anaFiles,
             pathex=['.'],
             hiddenimports=['netrc','markdown.extensions.extra',
                            'markdown.extensions.smart_strong',
                            'markdown.extensions.fenced_code',
                            'markdown.extensions.footnotes',
                            'markdown.extensions.attr_list',
                            'markdown.extensions.def_list',
                            'markdown.extensions.tables',
                            'markdown.extensions.abbr'],
             hookspath=None,
             runtime_hooks=None)
#workaround for http://www.pyinstaller.org/ticket/783#comment:5
# (removing while iterating is safe here only because of the immediate break)
for d in a.datas:
    if 'pyconfig' in d[0]:
        a.datas.remove(d)
        break
#workaround not necessary with Pyinstaller > 2.2
pyz = PYZ(a.pure)
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='lunchinator.exe',
          debug=False,
          strip=None,
          upx=False,
          console = False,
          icon='images\\lunchinator.ico')
|
from CRABClient.UserUtilities import config
# Standard CRAB idiom: rebind the imported factory name to its instance.
config = config()
config.General.requestName = 'PRv4_monoX-SingleEl_resubmit'
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'tree.py'  # CMSSW configuration to run
# Dataset being (re)processed; alternative datasets kept below for reference.
config.Data.inputDataset = '/SingleElectron/Run2015D-PromptReco-v4/MINIAOD'
#'/DoubleEG/Run2015D-05Oct2015-v1/MINIAOD'
#'/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISpring15MiniAODv2-74X_mcRun2_asymptotic_v2-v1/MINIAODSIM'
#/MET/Run2015D-PromptReco-v4/MINIAOD' #'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 15
#50000 for EALB in MC, 40 for LB in Data # 20 for single el/mu/double eg
config.Data.lumiMask = 'Cert_256630-258750_13TeV_PromptReco_Collisions15_25ns_JSON.txt'
config.Site.storageSite = 'T2_CH_CERN'
|
# Decode the XOR-chained password bytes: each plaintext byte is the XOR of the
# stored byte with the two previously decoded bytes (seeded with 0xf9, 0xd4),
# then append the rolling checksum used by the target.
password = [0x0, 0x0, 0x0, 0xd2]
temp, temp2 = 0xf9, 0xd4
for i, encoded in enumerate(password):
    decoded = temp ^ temp2 ^ encoded
    temp, temp2 = temp2, decoded
    password[i] = decoded
# Checksum: add/xor/subtract chain over the decoded bytes, truncated to 8 bits.
checksum = ((((0x3b + password[0]) ^ password[1]) - password[2]) ^ password[3]) & 0xff
result = password + [checksum]
print('Result code: ' + ''.join('{:02x}'.format(b) for b in result))
|
# coding: utf-8
# In[1]:
""" Process flood risk data and store on BigQuery.
-------------------------------------------------------------------------------
Author: Rutger Hofste
Date: 20181204
Kernel: python35
Docker: rutgerhofste/gisdocker:ubuntu16.04
"""
# Script identity; bump OUTPUT_VERSION when the output content/schema changes.
SCRIPT_NAME = "Y2018M12D04_RH_RFR_CFR_BQ_V01"
OUTPUT_VERSION = 3
# Source data on S3 and BigQuery destination coordinates.
S3_INPUT_PATH = "s3://wri-projects/Aqueduct30/finalData/Floods"
INPUT_FILE_NAME = "flood_results.csv"
BQ_PROJECT_ID = "aqueduct30"
BQ_OUTPUT_DATASET_NAME = "aqueduct30v01"
# BigQuery table names must be lowercase, e.g. "y2018m12d04_..._v03".
BQ_OUTPUT_TABLE_NAME = "{}_v{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION).lower()
# Local scratch paths on the worker instance.
ec2_input_path = "/volumes/data/{}/input_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
ec2_output_path = "/volumes/data/{}/output_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
print("S3_INPUT_PATH: ",S3_INPUT_PATH,
      "\nec2_input_path: ",ec2_input_path,
      "\nec2_output_path: ",ec2_output_path,
      "\nBQ_OUTPUT_DATASET_NAME: ", BQ_OUTPUT_DATASET_NAME,
      "\nBQ_OUTPUT_TABLE_NAME: ",BQ_OUTPUT_TABLE_NAME
      )
# In[2]:
# Record the start time so the total runtime can be reported at the end.
import time, datetime, sys
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version
# In[3]:
# Recreate the local scratch directories from a clean slate (notebook shell).
get_ipython().system('rm -r {ec2_input_path}')
get_ipython().system('rm -r {ec2_output_path}')
get_ipython().system('mkdir -p {ec2_input_path}')
get_ipython().system('mkdir -p {ec2_output_path}')
# In[4]:
# Pull the flood results from S3, skipping the large inundation rasters.
get_ipython().system("aws s3 cp {S3_INPUT_PATH} {ec2_input_path} --recursive --exclude 'inundationMaps/*'")
# In[5]:
import os
import pandas as pd
import numpy as np
from google.cloud import bigquery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/.google.json"
os.environ["GOOGLE_CLOUD_PROJECT"] = "aqueduct30"
client = bigquery.Client(project=BQ_PROJECT_ID)
# In[6]:
files = os.listdir(ec2_input_path)
# In[7]:
input_path = "{}/{}".format(ec2_input_path,INPUT_FILE_NAME)
# In[8]:
df = pd.read_csv(input_path)
# In[9]:
df.dtypes
# In[10]:
df.head()
# In[11]:
# Column renaming legend:
# RVR -> RFR
# CST -> CFR.
# raw -> raw.
# s -> score.
# None -> cat.
# cat -> label.
# In[12]:
# Rename to the framework's riverine (rfr) / coastal (cfr) naming scheme.
df_out = df.rename(columns={"PFAF_ID":"pfaf_id",
                            "RVR_raw":"rfr_raw",
                            "CST_raw":"cfr_raw",
                            "RVR_s":"rfr_score",
                            "CST_s":"cfr_score",
                            "RVR_cat":"rfr_label",
                            "CST_cat":"cfr_label"})
# In[13]:
# Population-impact columns are not part of the BigQuery output schema.
df_out.drop(columns=["River_pop_impacted","Coast_pop_impacted","pop_total"],inplace=True)
# In[14]:
df_out["cfr_label"].unique()
def update_labels_rfr(label):
    """Map raw riverine flood-risk labels onto the framework's canonical wording."""
    relabel = {
        "Low (0 to 1 in 1,000)": "Low (0 to 1 in 1,000)",
        "Low to medium (1 in 1,000 to 2 in 1,000)": "Low - Medium (1 in 1,000 to 2 in 1,000)",
        "Medium to high (2 in 1,000 to 6 in 1,000)": "Medium - High (2 in 1,000 to 6 in 1,000)",
        "High (6 in 1,000 to 1 in 100)": "High (6 in 1,000 to 1 in 100)",
        "Extremely High (more than 1 in 100)": "Extremely High (more than 1 in 100)",
    }
    # Unknown labels surface as an explicit error string, as before.
    return relabel.get(label, "error, check script")
def category_from_labels_rfr(label):
    """Convert a raw riverine flood-risk label into its ordinal category (0-4)."""
    categories = {
        "Low (0 to 1 in 1,000)": 0,
        "Low to medium (1 in 1,000 to 2 in 1,000)": 1,
        "Medium to high (2 in 1,000 to 6 in 1,000)": 2,
        "High (6 in 1,000 to 1 in 100)": 3,
        "Extremely High (more than 1 in 100)": 4,
    }
    # Unknown labels map to the sentinel -9999, as before.
    return categories.get(label, -9999)
def update_labels_cfr(label):
    """Map raw coastal flood-risk labels onto the framework's canonical wording."""
    relabel = {
        "Low (0 to 9 in 1,000,000)": "Low (0 to 9 in 1,000,000)",
        "Low to medium (9 in 1,000,000 to 7 in 100,000)": "Low - Medium (9 in 1,000,000 to 7 in 100,000)",
        "Medium to high (7 in 100,000 to 3 in 10,000)": "Medium - High (7 in 100,000 to 3 in 10,000)",
        "High (3 in 10,000 to 2 in 1,000)": "High (3 in 10,000 to 2 in 1,000)",
        "Extremely High (more than 2 in 1,000)": "Extremely High (more than 2 in 1,000)",
    }
    try:
        return relabel[label]
    except KeyError:
        # Unknown label: print it for debugging and flag the row, as before.
        print(label)
        return "error"
def category_from_labels_cfr(label):
    """Convert a raw coastal flood-risk label into its ordinal category (0-4)."""
    categories = {
        "Low (0 to 9 in 1,000,000)": 0,
        "Low to medium (9 in 1,000,000 to 7 in 100,000)": 1,
        "Medium to high (7 in 100,000 to 3 in 10,000)": 2,
        "High (3 in 10,000 to 2 in 1,000)": 3,
        "Extremely High (more than 2 in 1,000)": 4,
    }
    # Unknown labels map to the sentinel -9999, as before.
    return categories.get(label, -9999)
# In[16]:
# Derive the numeric category first, THEN rewrite the label text — the
# category_from_labels_* keys match the ORIGINAL wording, so this order matters.
df_out["rfr_cat"] = df_out["rfr_label"].apply(category_from_labels_rfr)
df_out["rfr_label"] = df_out["rfr_label"].apply(update_labels_rfr)
# In[17]:
df_out["cfr_cat"] = df_out["cfr_label"].apply(category_from_labels_cfr)
df_out["cfr_label"] = df_out["cfr_label"].apply(update_labels_cfr)
# In[18]:
# Alphabetical column order for a stable BigQuery schema.
df_out = df_out.reindex(sorted(df_out.columns), axis=1)
# In[19]:
df_out["rfr_label"].unique()
# In[20]:
df_out["cfr_label"].unique()
# In[21]:
df_out["cfr_cat"].unique()
# In[22]:
destination_table = "{}.{}".format(BQ_OUTPUT_DATASET_NAME,BQ_OUTPUT_TABLE_NAME)
# In[23]:
# Upload to BigQuery, replacing any previous version of the table.
df_out.to_gbq(destination_table=destination_table,
              project_id=BQ_PROJECT_ID,
              chunksize=10000,
              if_exists="replace")
# In[24]:
end = datetime.datetime.now()
elapsed = end - start
print(elapsed)
# Previous runs:
# 0:00:18.766466
#
|
from flask import request
from flask_restful import Resource
from sqlalchemy.orm import joinedload
from sqlalchemy import exists
from werkzeug.exceptions import NotFound, Conflict, Forbidden
from model import db, Group, User
from core.capabilities import Capabilities
from core.schema import GroupSchema, MultiGroupShowSchema, GroupNameSchemaBase, UserLoginSchemaBase
from . import logger, requires_capabilities, requires_authorization
class GroupListResource(Resource):
    # Collection endpoint: GET /group — admin-only listing of all groups.
    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def get(self):
        """
        ---
        summary: List of groups
        description: |
            Returns list of all groups and members.
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        responses:
            200:
                description: List of groups
                content:
                    application/json:
                        schema: MultiGroupShowSchema
            403:
                description: When user doesn't have `manage_users` capability.
        """
        # joinedload eager-loads members so serializing each group does not
        # trigger an extra query per group.
        objs = db.session.query(Group).options(joinedload(Group.users)).all()
        schema = MultiGroupShowSchema()
        return schema.dump({"groups": objs})
class GroupResource(Resource):
    # Single-group endpoint: GET/POST/PUT /group/<name>.
    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def get(self, name):
        """
        ---
        summary: Get group
        description: |
            Returns information about group.
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        parameters:
            - in: path
              name: name
              schema:
                type: string
              description: Group name
        responses:
            200:
                description: List of groups
                content:
                    application/json:
                        schema: MultiGroupShowSchema
            403:
                description: When user doesn't have `manage_users` capability.
            404:
                description: When group doesn't exist
        """
        # Eager-load members so serialization does not issue extra queries.
        obj = db.session.query(Group).options(joinedload(Group.users)).filter(Group.name == name).first()
        if obj is None:
            raise NotFound("No such group")
        schema = GroupSchema()
        return schema.dump(obj)
    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def post(self, name):
        """
        ---
        summary: Create a new group
        description: |
            Creates a new group.
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        parameters:
            - in: path
              name: name
              schema:
                type: string
              description: Group name
        responses:
            200:
                description: When group was created successfully
            400:
                description: When group name or request body is invalid
            403:
                description: When user doesn't have `manage_users` capability
            409:
                description: When group exists yet
        """
        schema = GroupSchema()
        # marshmallow v2 style: loads() returns a result with .data/.errors.
        obj = schema.loads(request.get_data(as_text=True))
        if obj.errors:
            return {"errors": obj.errors}, 400
        # Validate the path segment against the group-name rules.
        group_name_obj = GroupNameSchemaBase().load({"name": name})
        if group_name_obj.errors:
            return {"errors": group_name_obj.errors}, 400
        if db.session.query(exists().where(Group.name == name)).scalar():
            raise Conflict("Group exists yet")
        group = Group()
        group.name = name
        group.capabilities = obj.data.get("capabilities") or []
        db.session.add(group)
        db.session.commit()
        logger.info('group created', extra={
            'group': group.name,
            'capabilities': group.capabilities
        })
        schema = GroupSchema()
        # NOTE(review): this echoes the name from the request BODY (may be
        # None) while the group was created under the URL path 'name' — the
        # put() handler below echoes the effective name instead; confirm intent.
        return schema.dump({"name": obj.data.get("name")})
    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def put(self, name):
        """
        ---
        summary: Update group name and capabilities
        description: |
            Updates group name and capabilities.
            Works only for user-defined groups (excluding private and 'public')
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        parameters:
            - in: path
              name: name
              schema:
                type: string
              description: Group name
        responses:
            200:
                description: When group was updated successfully
            400:
                description: When group name or request body is invalid
            403:
                description: When user doesn't have `manage_users` capability or group is immutable
            404:
                description: When group doesn't exist
        """
        obj = GroupSchema().loads(request.get_data(as_text=True))
        if obj.errors:
            return {"errors": obj.errors}, 400
        group_name_obj = GroupNameSchemaBase().load({"name": name})
        if group_name_obj.errors:
            return {"errors": group_name_obj.errors}, 400
        group = db.session.query(Group).filter(Group.name == name).first()
        if group is None:
            raise NotFound("No such group")
        # Fields absent from the body keep their current values.
        params = dict(capabilities=obj.data.get("capabilities", group.capabilities),
                      name=obj.data.get("name", name))
        if params["name"] != name and group.immutable:
            raise Forbidden("Renaming group not allowed - group is immutable")
        db.session.query(Group) \
            .filter(Group.name == name) \
            .update(params)
        # Invalidate all sessions due to potentially changed capabilities
        for member in group.users:
            member.reset_sessions()
            db.session.add(member)
        db.session.commit()
        logger.info('group updated', extra={"group": params["name"]})
        schema = GroupSchema()
        return schema.dump({"name": params["name"]})
class GroupMemberResource(Resource):
    """REST resource managing membership of a single (group, user) pair."""

    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def put(self, name, login):
        """
        ---
        summary: Add a member to the specified group
        description: |
            Adds new member to existing group.
            Works only for user-defined groups (excluding private and 'public')
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        parameters:
            - in: path
              name: name
              schema:
                type: string
              description: Group name
            - in: path
              name: login
              schema:
                type: string
              description: Member login
        responses:
            200:
                description: When member was added successfully
            400:
                description: When request body is invalid
            403:
                description: When user doesn't have `manage_users` capability, group is immutable or user is pending
            404:
                description: When user or group doesn't exist
        """
        # Validate both path parameters before any DB access.
        group_name_obj = GroupNameSchemaBase().load({"name": name})
        if group_name_obj.errors:
            return {"errors": group_name_obj.errors}, 400
        user_login_obj = UserLoginSchemaBase().load({"login": login})
        if user_login_obj.errors:
            return {"errors": user_login_obj.errors}, 400
        member = db.session.query(User).filter(User.login == login).first()
        if member is None:
            raise NotFound("No such user")
        if member.pending:
            raise Forbidden("User is pending and need to be accepted first")
        # joinedload avoids a lazy-load per member when mutating group.users.
        group = db.session.query(Group).options(joinedload(Group.users)).filter(Group.name == name).first()
        if group is None:
            raise NotFound("No such group")
        if group.immutable:
            raise Forbidden("Adding members to private or public group is not allowed")
        group.users.append(member)
        # Invalidate sessions so the new group's capabilities take effect.
        member.reset_sessions()
        db.session.add(member)
        db.session.add(group)
        db.session.commit()
        logger.info('Group member added', extra={'user': member.login, 'group': group.name})
        schema = GroupSchema()
        return schema.dump({"name": name})

    @requires_authorization
    @requires_capabilities(Capabilities.manage_users)
    def delete(self, name, login):
        """
        ---
        summary: Delete member from group
        description: |
            Removes member from existing group.
            Works only for user-defined groups (excluding private and 'public')
            Requires `manage_users` capability.
        security:
            - bearerAuth: []
        tags:
            - group
        parameters:
            - in: path
              name: name
              schema:
                type: string
              description: Group name
            - in: path
              name: login
              schema:
                type: string
              description: Member login
        responses:
            200:
                description: When member was removed successfully
            400:
                description: When request body is invalid
            403:
                description: When user doesn't have `manage_users` capability, group is immutable or user is pending
            404:
                description: When user or group doesn't exist
        """
        group_name_obj = GroupNameSchemaBase().load({"name": name})
        if group_name_obj.errors:
            return {"errors": group_name_obj.errors}, 400
        user_login_obj = UserLoginSchemaBase().load({"login": login})
        if user_login_obj.errors:
            return {"errors": user_login_obj.errors}, 400
        member = db.session.query(User).filter(User.login == login).first()
        if member is None:
            raise NotFound("No such user")
        if member.pending:
            raise Forbidden("User is pending and need to be accepted first")
        group = db.session.query(Group).options(joinedload(Group.users)).filter(Group.name == name).first()
        if group is None:
            raise NotFound("No such group")
        if group.immutable:
            raise Forbidden("Removing members from private or public group is not allowed")
        # NOTE(review): list.remove raises ValueError if the user is not a
        # member of the group — confirm a 500 is acceptable in that case.
        group.users.remove(member)
        # Drop the member's sessions so the lost capabilities stop applying.
        member.reset_sessions()
        db.session.add(member)
        db.session.add(group)
        db.session.commit()
        logger.info('Group member deleted', extra={'user': member.login, 'group': group.name})
        schema = GroupSchema()
        return schema.dump({"name": name})
|
# Interactive questionnaire (Python 2: `print` statement and raw_input).
# The trailing comma after each print keeps the prompt and the typed answer
# on the same line.
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
# %r deliberately shows the raw repr (quotes included) of each answer.
print "So, you're %r old, %r tall and %r heavy." % (
    age, height, weight)
print "Who is your daddy?",
daddy = raw_input()
print "What does he do?",
do = raw_input()
# NOTE(review): "you're daddy" should read "your daddy" — left unchanged here
# because the string is runtime output.
print "So, you're daddy is %r and he is a %r." % (
    daddy, do
)
import geocoder
from django.db import models
from django.contrib.auth.models import (AbstractUser, User)
# SECURITY(review): API token hard-coded in source control — move it to
# settings/an environment variable and rotate it.
mapbox_token = 'pk.eyJ1Ijoid2F6b3dza2lkZXZlbG9wIiwiYSI6ImNrcTdneXZ4ejA2M2Uyd3VoY29hZTVjYXYifQ.wUjItHT_F5ZCMXUcwx5_xA'
class Place(models.Model):
    """A street address geocoded to a lat/long pair via the Mapbox API."""
    address = models.CharField(max_length=100)
    lat = models.FloatField(blank=True, null=True)
    long = models.FloatField(blank=True, null=True)

    def save(self, *args, **kwargs):
        """Geocode ``address`` before saving.

        BUGFIX: ``g.latlng`` is None when Mapbox cannot resolve the address
        (or the request fails); the original then crashed on ``g[0]``. The
        coordinate fields allow null, so we leave them unset instead.
        """
        g = geocoder.mapbox(self.address, key=mapbox_token)
        latlng = g.latlng  # returns => [lat, long], or None on failure
        if latlng:
            self.lat = latlng[0]
            self.long = latlng[1]
        return super(Place, self).save(*args, **kwargs)

    def __str__(self):
        return self.address
class Vehiculo(models.Model):
    """A vehicle identified by its license plate (patente)."""
    # NOTE(review): max_length=8 looks tight for marca (brand) and color —
    # confirm against real data before deploying.
    patente = models.CharField(max_length=8,null=False, blank=False, help_text='Ingrese la patente de su automovil')
    marca = models.CharField(max_length=8,null=False, blank=False, help_text='Ingrese la marca de su automovil')
    color = models.CharField(max_length=8,null=False, blank=False, help_text='ingrese el color del vehiculo')
    año = models.CharField(max_length=8,null=False, blank=False, help_text='Ingrese el año de su vehiculo')

    class Meta:
        ordering = ['patente']

    def __str__(self):
        return f'{self.patente}'

    def get_absolute_url(self):
        # Placeholder — no detail URL defined yet.
        pass
class Usuario(AbstractUser):
    """Custom user model extending Django's AbstractUser with a Chilean RUT,
    owned vehicles and a geocoded address."""
    rut_user = models.CharField(max_length=10, null=False, blank=False,help_text="Indique su rut")
    name = models.CharField(max_length=200, null=True)
    email = models.EmailField(max_length=200, null=True)
    vehiculo = models.ManyToManyField(Vehiculo, blank=True)
    # SET_NULL keeps the user when the referenced Place is deleted.
    address = models.ForeignKey(Place, on_delete=models.SET_NULL, null=True, blank=True)

    def __str__(self):
        return f'{self.name},{self.rut_user}'
|
################################################################################
# -*- coding: utf-8 -*-
# author : Jinwon Oh
# file name : cxFile.py
# date : 2012-09-06 14:27:08
# ver :
# desc. :
# tab size : set sw=4, ts=4
# python ver. : 2.7.1 Stackless 3.1b3 060516 (release27-maint, Jan 1 2011, 13:04:37) [MSC v.1500 32 bit (Intel)]
# ADD CODES FROM HERE
import time
class cxFile :
def __init__(self, fileName = u'result_%s.txt'%(unicode(time.strftime('%Y%m%d'))) ) :
self.hFile = None
self.fileName = fileName
def __del__(self) :
pass
def open(self, mode = 'a' ) :
import codecs
try :
self.hFile = codecs.open( self.fileName, mode, 'utf-8')
except BaseException as e :
print e
raise e
except :
print 'open : unknown error'
return False
return True
def close(self) :
if self.hFile != None :
self.hFile.close()
del self.hFile
self.hFile = None
def write(self, context ) :
writeString = context
if self.hFile == None :
try :
self.open()
except BaseException as e :
return False
elif self.hFile.closed == True :
try : self.open()
except BaseException as e :
return False
elif self.hFile.closed == False and self.hFile.mode[0] == 'r' :
self.close()
try : self.open()
except BaseException as e :
return False
self.hFile.write( unicode(context) )
return True
def readlines(self) :
if self.hFile == None :
self.open(mode = 'r')
elif self.hFile.closed == True :
try : self.open(mode = 'r')
except : return []
elif self.hFile.closed == False and self.hFile.mode[0] != 'r' :
self.close()
try : self.open(mode = 'r')
except : return []
self.hFile.seek(0,0)
return self.hFile.readlines()
def readline(self) :
if self.hFile == None :
self.open(mode = 'r')
elif self.hFile.closed == True :
try : self.open(mode = 'r')
except : return []
elif self.hFile.closed == False and self.hFile.mode[0] != 'r' :
self.close()
try : self.open(mode = 'r')
except : return []
return self.hFile.readline()
def delete(self, bForce=True) :
import os
if self.hFile != None :
if self.hFile.closed == False :
self.close()
if bForce == True :
os.remove(self.fileName)
else :
if bForce == True :
os.remove(self.fileName)
def getLastLine(self) :
if self.hFile == None :
self.open(mode='r')
elif self.hFile.closed == True :
try : self.open(mode='r')
except : return None
elif self.hFile.closed == False and self.hFile.mode[0] != 'r' :
self.close()
try : self.open(mode='r')
except : return None
result = list(self.hFile)
if result != [] :
for i in range(len(result)-1,-1,-1) :
if result[i] != u'' and result[i] != u'\n' and \
result[i] != u'\t' and result[i] != u' ' and \
result[i] != u'\r' and result[i] != u'\n\r':
return result[i]
return u''
def isEmpty(self) :
import os
if not os.path.exists(self.fileName) :
return True
if os.path.getsize(self.fileName) > 0 :
return False
return True
def isExist(self) :
import os
return os.path.exists(self.fileName)
def dump(self) :
for line in self.readlines() :
print line,
def close(self) :
if self.hFile != None :
self.hFile.close()
def test_cxFile() :
    """Manual smoke test: write two lines, then delete and close the file."""
    resultFile = cxFile()
    #resultFile.dump()
    #print
    resultFile.write(u'하이\n')
    #resultFile.dump()
    #print
    #resultFile.close()
    #for line in resultFile.readlines() :
    #    print line
    #print
    resultFile.write('머지\n')
    #resultFile.dump()
    resultFile.delete()
    resultFile.close()
def test_read_file( fileName ) :
import os, errno
import codecs
#fileName = u'A000050_경방_T'
try :
#historyFile = open( fileName, 'r')
historyFile = codecs.open( fileName, 'r', 'utf-8' )
except BaseException as e :
print 'BaseException'
print e.errno
print e.strerror
print e
return
except IOError as e :
print 'IOError'
print 'e : ', e
print 'errno : ', e.errno
print 'err code : ', errno.errorcode[e.errno]
print 'err message : ', e.strerror
print 'err message : ', os.strerror(e.errno)
print 'failed to open \'%s\' file.'%(fileName)
return
print historyFile.name
print historyFile.closed
print historyFile.mode
print historyFile.softspace
lines = historyFile.readlines()
for line in lines :
print line,
historyFile.close()
def test() :
    """Entry point used by the __main__ block below."""
    # NOTE(review): `common` is imported but never used here.
    import common
    #test_read_file(u'A000050_경방_T')
    test_cxFile()
def collect_and_show_garbage() :
    "Show what garbage is present."
    # Relies on the module-level `gc` imported by the __main__ block below.
    print "Collecting..."
    n = gc.collect()
    print "Unreachable objects:", n
    if n > 0 : print "Garbage:"
    for i in range(n):
        print "[%d] %s" % (i, gc.garbage[i])
if __name__ == "__main__" :
    # Run the smoke tests with GC leak debugging, reporting unreachable
    # objects before and after.
    import gc
    gc.set_debug(gc.DEBUG_LEAK)
    print "before"
    collect_and_show_garbage()
    print "testing..."
    print "-"*79
    test()
    print "-"*79
    print "after"
    collect_and_show_garbage()
    raw_input("Hit any key to close this window...")
|
"""
1. Have accumulator to check largest
2. Iterator should reduce by 1 every iteration
3. once end of iterator reached, swap index of number with last index
"""
def selection_sort(lst):
    """Sort lst in place (ascending) by repeated selection of the maximum.

    On each pass, find the index of the largest element in lst[0..i] and
    swap it into position i. Returns the same list for convenience.
    """
    for i in range(len(lst) - 1, 0, -1):
        # BUGFIX: the original only compared adjacent elements and never
        # reset largest_index between passes, so it mis-sorted many inputs
        # (e.g. [3, 1, 2] -> [3, 2, 1]).
        largest_index = 0
        for j in range(1, i + 1):
            if lst[j] > lst[largest_index]:
                largest_index = j
        lst[largest_index], lst[i] = lst[i], lst[largest_index]
    return lst
# Demo: sorts the sample list in place (the function also returns it).
a_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
selection_sort(a_list)
import unittest
from parse import parse
def input_file():
    """Return the full contents of the local 'input' file as one string."""
    # `with` guarantees the handle is closed even if read() raises (the
    # original leaked the handle on error).
    with open('input', 'r') as file:
        return file.read()
def output_file():
    """Return the lines of the local 'output' file, trailing newlines stripped."""
    # `with` closes the handle even if iteration raises.
    with open('output', 'r') as file:
        return [line.rstrip('\n') for line in file]
def day_5_part_2(text):
    # NOTE(review): parses '[Y-M-D H:M] message' records (the AoC 2018 day-4
    # guard-log format) despite the day-5 name — confirm which puzzle this
    # helper is meant for. Sorting tuples orders events chronologically.
    raw_events = sorted(tuple(parse("[{:d}-{:d}-{:d} {:d}:{:d}] {}", l)) for l in text.split('\n')) # @source https://github.com/ngilles/adventofcode-2018/blob/master/day-04/day-04.py
    return raw_events
def work_way(lines):
    """Trace two wire paths on a grid and print the smallest Manhattan
    distance from the origin to an intersection (crossed-wires puzzle).

    `lines` is expected to be two sequences of (direction, distance) moves —
    TODO confirm against the caller.
    """
    # data transformation
    # max R, L, U, D
    def get_max(card, line):
        # NOTE(review): the `line` parameter is unused — the loop below
        # shadows it with the closed-over `lines`; consider removing it.
        max = 0
        for line in lines:
            cur_max = 0
            for i in line:
                if i[0] == card:
                    cur_max += i[1]
            if cur_max > max:
                max = cur_max
        return max
    # we found that it will be a matrix like the example
    R_max = get_max("R", lines)
    L_max = get_max("L", lines)
    U_max = get_max("U", lines)
    D_max = get_max("D", lines)
    # we build the matrix; the origin cell starts at 2 so each wire's visit
    # pushes an intersection to 4 ("o") below.
    width = R_max + L_max + 1
    height = U_max + D_max + 1
    o_point = (D_max, L_max)
    print("width", width)
    print("height", height)
    print("Build begin")
    matrix = []
    for i in range(height):
        line = []
        for j in range(width):
            if (i, j) == o_point:
                line.append(2)
            else:
                line.append(0)
        matrix.append(line)
    print("Build end")
    # move function: marks every cell along the move and returns the end
    # position. "U" increases the row index ("D" decreases it), i.e. the grid
    # is stored bottom-up — consistent with the reversed print at the bottom.
    def move(cur_pos, the_move, matrix):
        if the_move[0] == "R":
            for i in range(the_move[1]):
                matrix[cur_pos[0]][cur_pos[1] + i] += 1
            return cur_pos[0], cur_pos[1] + the_move[1]
        if the_move[0] == "L":
            for i in range(the_move[1]):
                matrix[cur_pos[0]][cur_pos[1] - i] += 1
            return cur_pos[0], cur_pos[1] - the_move[1]
        if the_move[0] == "U":
            for i in range(the_move[1]):
                matrix[cur_pos[0] + i][cur_pos[1]] += 1
            return cur_pos[0] + the_move[1], cur_pos[1]
        if the_move[0] == "D":
            for i in range(the_move[1]):
                matrix[cur_pos[0] - i][cur_pos[1]] += 1
            return cur_pos[0] - the_move[1], cur_pos[1]
    # we add the first line one the matrix
    move_line_1 = o_point
    for the_move in lines[0]:
        move_line_1 = move(move_line_1, the_move, matrix)
    # we add the second line on the matrix
    move_line_2 = o_point
    for the_move in lines[1]:
        move_line_2 = move(move_line_2, the_move, matrix)
    # data encoding: 2 = visited by both wires (intersection), 4 = origin
    # crossed twice; everything else becomes background.
    intersec = []
    for i in range(height):
        for j in range(width):
            if matrix[i][j] == 2:
                intersec.append((i, j))
                matrix[i][j] = "x"
            elif matrix[i][j] == 4:
                matrix[i][j] = "o"
            else:
                matrix[i][j] = " "
    # data modeling
    # find the min of distance to 'o'
    def md(o, p):
        # Manhattan distance between the origin and point p.
        return abs(o[1] - p[1]) + abs(o[0] - p[0])
    md_inter = [md(o_point, p) for p in intersec]
    print(min(md_inter))
    # data visualisation
    # for line in matrix[::-1]:
    #     print(line)
class TestDay5part1(unittest.TestCase):
    """Runs the Intcode interpreter (AoC 2019 day 5) on the local 'input'
    file with system id 5 and prints the diagnostic code."""

    def test_day_5_part_2(self):
        text = input_file()

        def process(text, value):
            # Minimal Intcode VM: `value` is both the program's input and
            # the last value emitted by opcode 4.
            i = 0
            code = [int(val) for val in text.split(",")]
            length = len(code)
            try:
                while i < length:
                    if code[i] == 99:
                        break
                    # TODO add mode on opcode
                    # Left-pad the instruction to 5 digits: ABCDE where DE is
                    # the opcode and C/B/A are the parameter modes.
                    tmp_cur = code[i]
                    str_opcode = str(tmp_cur)
                    while len(str_opcode) < 5:
                        str_opcode = "0" + str_opcode
                    opcode = int(str_opcode[3:])
                    # Resolve each parameter as an index into `code`:
                    # mode 0 = position (indirect), mode 1 = immediate.
                    # case param 1
                    if int(str_opcode[2]) == 0:  # position
                        first_param = code[i + 1]
                    else:
                        first_param = i + 1
                    # case param 2
                    if int(str_opcode[1]) == 0:  # position
                        second_param = code[i + 2]
                    else:
                        second_param = i + 2
                    # case param 2
                    if int(str_opcode[0]) == 0:  # position
                        third_param = code[i + 3]
                    else:
                        third_param = i + 3
                    if opcode == 1:  # addition
                        # case param 3
                        code[third_param] = code[first_param] + code[second_param]
                        i += 4
                    elif opcode == 2:  # multiplication
                        # case param 3
                        code[third_param] = code[first_param] * code[second_param]
                        i += 4
                    # TODO code 3 and code 4
                    elif opcode == 3:  # as input_test 3, 50 => 3 take input_test and save to address 50
                        code[first_param] = value
                        i += 2
                    elif opcode == 4:  # as output_1 4, 50 => 4 output_1 the value at address 50
                        value = code[first_param]
                        #print("as input_test 3, 50 => 3 take input_test and save to address 50")
                        i += 2
                    elif opcode == 5:  # jump-if-true
                        if code[first_param] != 0:
                            i = code[second_param]
                        else:
                            i += 3
                    elif opcode == 6:  # jump-if-false
                        if code[first_param] == 0:
                            i = code[second_param]
                        else:
                            i += 3
                    elif opcode == 7:  # less than
                        if code[first_param] < code[second_param]:
                            code[third_param] = 1
                        else:
                            code[third_param] = 0
                        i += 4
                    elif opcode == 8:  # equals
                        if code[first_param] == code[second_param]:
                            code[third_param] = 1
                        else:
                            code[third_param] = 0
                        i += 4
                    #i += 1
                    #print(i, code)
            except:
                # NOTE(review): bare except hides real errors (index out of
                # range, unknown opcode) and silently returns the current
                # value — narrow this to the expected exception types.
                print("except")
                return value
            #print(i, code)
            #print(code[i], i, length)
            #print(code)
            return value
        # @source : https://github.com/dan144/aoc-2019/blob/master/5.py because value
        # be careful with i += ?
        print(process(text, 5))
if __name__ == '__main__':
    # Run the unittest entry point when executed directly.
    unittest.main()
|
from dsa_util import *
from subprocess import Popen
# Compare workers per work-place TAZ between two model runs ('before' and
# 'after') and open the resulting CSV.
before_path = r'Y:\TIM_3.1\TIM31_HigherTransitCoefficients\scenario\Output'
after_path = r'Y:\TIM_3.1\DVRPC_ABM_Github\scenario\Output'
outfile = r'D:\TIM3\BeforeAfterSPEmp.csv'
names = ['before', 'after']
fps = [os.path.join(before_path, '_person_2.dat'), os.path.join(after_path, '_person_2.dat')]
# ReadTables / pd / os come from dsa_util's star import — TODO confirm.
tables = ReadTables(names, fps, 2*['\t'])
output = {}
for table in tables:
    # Workers only (pwtaz > 0): sum expansion factors by work-place TAZ.
    workers = tables[table].query('pwtaz > 0')[['pwtaz', 'psexpfac']]
    output[table] = workers.groupby('pwtaz').sum()['psexpfac']
pd.DataFrame(output).to_csv(outfile)
# Opens the CSV in its associated application (Windows); shell=True is what
# makes that work but would be unsafe with an untrusted path.
Popen(outfile, shell = True)
"""
Django settings for mail_server project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY(review): the secret key is committed to source control — rotate it
# and load it from an environment variable before any real deployment.
SECRET_KEY = '15lb$*d+mfbb+c+wzdf-9z*^^j-01zeo=^3vse(1&ih&9mc)g9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = [('Martin Stevko', 'mstevko10@gmail.com')]
MANAGERS = ADMINS
ALLOWED_HOSTS = ['slu.pythonanywhere.com', 'localhost']
APPEND_SLASH = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'mailing',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mail_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mail_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'Sk-sk'
TIME_ZONE = 'Europe/Bratislava'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
'''
### Deployment ### + set DEBUG = False
STATIC_ROOT = '/var/www/mailing.pythonanywhere.com/static/'
# for collectstatic command - '/home/mailing/mailing/static/'
MEDIA_ROOT = '/var/www/mailing.pythonanywhere.com/media/'
##################
'''
# Development #
STATIC_ROOT = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
###############
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
#'/var/www/static/',
)
MEDIA_URL = '/media/'
# AUTH_USER_MODEL = 'users.User'
LOGIN_URL = 'admin:login'
# EMAIL_LOGIN_URL = '/ucet/prihlasenie_emailom/'
LOGOUT_URL = 'admin:logout'
UPLOAD_FILE_MAX_SIZE = 2 * 1024 * 1024 # 2MB
TINYMCE_DEFAULT_CONFIG = {
'selector': 'textarea',
'theme': 'modern',
'plugins': 'link image preview codesample contextmenu table code lists '
'charmap visualchars fullscreen textcolor colorpicker emoticons hr wordcount',
'toolbar1':
'fullscreen | styleselect formatselect | bold italic underline strikethrough subscript superscript removeformat |'
' forecolor backcolor | alignleft aligncenter alignright alignjustify |'
' bullist numlist | outdent indent | table | link image | emoticons charmap hr visualchars | preview code',
'content_css': STATIC_URL+'css/bootstrap.css',
'contextmenu': 'copy paste cut | formats removeformat | link image | table',
'menubar': False,
'inline': False,
'statusbar': True,
'branding': False,
'width': 'auto',
'max-width': '100vw',
'height': 360,
'plugin_preview_width': '350',
'plugin_preview_height': '350',
'code_dialog_width': '350',
'code_dialog_height': '350'
}
# Mail configuration
# Enable Captcha on gmail account
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'riesky.web.testing@gmail.com'
# SECURITY(review): supply the password via an environment variable or a
# non-committed local settings file; never commit it to the repo.
EMAIL_HOST_PASSWORD = ''  # password for the riesky test account
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
"""
统计生成所有不同的二叉树
题目: 给定一个整数N,如果N<1,代表空树结构,否则代表中序遍历的结果为{1,2,3.。。N}
请返回可能的二叉树结构由多少。
例如,N = -1时,代表空树结构,返回1;N=2时,满足中序遍历为{1,2}的二叉树结构只有图3-49所示的两种
所以返回结果为2.
进阶:N的含义不变,假设可能的二叉树结构由M种,请返回M个二叉树的头节点,每一颗二叉树代表一种可能的结构
"""
from question.chapter1_stack_queue_question.question9 import Node
def num_tree(n):
    """Count the distinct BST shapes whose in-order walk is 1..n.

    This is the Catalan number C(n): C(0) = 1 and, picking each j as the
    root, C(i) = sum over j in 1..i of C(j - 1) * C(i - j).

    n < 1 denotes the empty tree, for which there is exactly one structure.
    """
    if n < 2:
        return 1
    num = [0] * (n + 1)
    num[0] = 1
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            # BUGFIX: left/right subtree counts multiply (the original added
            # them, and let j reach 0 so num[-1] read the wrong cell).
            num[i] += num[j - 1] * num[i - j]
    return num[n]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 14:40:03 2017
@author: jason
"""
import shapefile
import numpy as np
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io.img_tiles import OSM
# Cluster demolition-permit points with DBSCAN and plot them on OSM tiles.
osm_tiles = OSM()
proj = osm_tiles.crs
sf = shapefile.Reader("../GIS/Demolitions_3857.shp")
shapes = sf.shapes()
coord = []
# Keep only points inside a bounding box (Web Mercator metres) roughly
# covering the Calgary area; shapes with no points are skipped.
for shape in shapes:
    try:
        if shape.points[0][0]< -1.26*10**7 and shape.points[0][0]> -1.274*10**7 and shape.points[0][1]< 6.67*10**6 and shape.points[0][1] > 6.55*10**6:
            coord.append(shape.points[0])
    except IndexError:
        pass
X = np.array(coord)
# NOTE(review): eps is in the data's units (metres in EPSG:3857) — a 0.3 m
# neighbourhood is almost certainly too small; confirm the intended scale.
db = DBSCAN(eps=0.3, min_samples=15).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
plt.figure(figsize=(50, 50))
# Setup cartographic data for plot
ax = plt.axes(projection=proj)
# Specify a region of interest, in this case, Calgary.
ax.set_extent([-114.3697, -113.8523, 50.8567, 51.2447],
              ccrs.PlateCarree())
# Add the tiles at zoom level 11.
ax.add_image(osm_tiles, 11)
# Core samples are drawn large; border samples small; noise (-1) in black.
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = 'k'
    class_member_mask = (labels == k)
    xy = X[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=15)
    xy = X[class_member_mask & ~core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=0.2)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.savefig('../Outputs/Cluster.png', transparent=True)
class DisjointSet(object):
    """A single node in a disjoint-set forest."""

    def __init__(self, value):
        # A fresh node is its own one-element set: no parent, rank zero.
        self.parent = None
        self.rank = 0
        self.representedBy = value


class DisjoinSets(object):
    """Union-find over arbitrary hashable values, using union by rank and a
    single path-shortening step on each find."""

    def __init__(self):
        # Hash table mapping value -> node for O(1) access to any element.
        self.dictionarySets = {}

    def makeSet(self, value):
        """Create a new singleton set containing `value`."""
        self.dictionarySets[value] = DisjointSet(value)

    def findSet(self, val):
        """Return the representative value of val's set, or False when val
        was never added via makeSet."""
        if val not in self.dictionarySets:
            return False
        start = self.dictionarySets[val]
        root = start
        while root.parent is not None:
            root = root.parent
        # Path shortening: hang the starting node directly off the root so
        # the next findSet(val) is faster.
        if start != root:
            start.parent = root
        return root.representedBy

    def union(self, a, b):
        """Merge the sets containing a and b.

        A no-op when either element is missing or both already share a
        representative.
        """
        repA = self.findSet(a)
        repB = self.findSet(b)
        if (repA == False or repB == False) or (repA == repB):
            return
        rootA = self.dictionarySets[repA]
        rootB = self.dictionarySets[repB]
        # Attach the lower-rank root beneath the higher-rank one; on a tie
        # the surviving root's rank grows by one.
        if rootA.rank >= rootB.rank:
            rootB.parent = rootA
            if rootA.rank == rootB.rank:
                rootA.rank += 1
        else:
            rootA.parent = rootB
# Demo driver: build seven singleton sets, then merge them into two groups
# ({1,2,3} and {4,5,6,7}). Python 2 (`print` statement at the end).
# Create disjointSet and do makesets
dSet1 = DisjoinSets()
dSet1.makeSet(1)
dSet1.makeSet(2)
dSet1.makeSet(3)
dSet1.makeSet(4)
dSet1.makeSet(5)
dSet1.makeSet(6)
dSet1.makeSet(7)
# Do union on sets
dSet1.union(1,2)
dSet1.union(3,2)
dSet1.union(4,5)
dSet1.union(6,7)
dSet1.union(5,6)
dSet1.union(3,7)
print 'yo'
|
# Compare two boxes by their sorted dimensions, so orientation is irrelevant.
height1, width1, deepth1 = sorted(map(int, input().split()))
height2, width2, deepth2 = sorted(map(int, input().split()))
first = (height1, width1, deepth1)
second = (height2, width2, deepth2)
if first == second:
    print('Boxes are equal')
elif all(a <= b for a, b in zip(first, second)):
    # Every dimension fits inside the second box (and they are not equal).
    print('The first box is smaller than the second one')
elif all(a >= b for a, b in zip(first, second)):
    print('The first box is larger than the second one')
else:
    # Mixed comparison results: neither box contains the other.
    print('Boxes are incomparable')
|
import rasa.utils.io as io_utils
from rasa.cli import x
def test_x_help(run):
    """`rasa x --help` prints the expected usage banner, line by line."""
    output = run("x", "--help")
    help_text = """usage: rasa x [-h] [-v] [-vv] [--quiet] [-m MODEL] [--no-prompt]
              [--production] [--data DATA] [--log-file LOG_FILE]
              [--endpoints ENDPOINTS] [-p PORT] [-t AUTH_TOKEN]
              [--cors [CORS [CORS ...]]] [--enable-api]
              [--remote-storage REMOTE_STORAGE] [--credentials CREDENTIALS]
              [--connector CONNECTOR] [--jwt-secret JWT_SECRET]
              [--jwt-method JWT_METHOD]"""
    lines = help_text.split("\n")
    for i, line in enumerate(lines):
        assert output.outlines[i] == line


def test_prepare_credentials_for_rasa_x_if_rasa_channel_not_given(tmpdir_factory):
    """An empty credentials file gets the Rasa X URL injected."""
    directory = tmpdir_factory.mktemp("directory")
    credentials_path = str(directory / "credentials.yml")
    io_utils.write_yaml_file({}, credentials_path)
    tmp_credentials = x._prepare_credentials_for_rasa_x(
        credentials_path, "http://localhost:5002"
    )
    actual = io_utils.read_yaml_file(tmp_credentials)
    assert actual["rasa"]["url"] == "http://localhost:5002"


def test_prepare_credentials_if_already_valid(tmpdir_factory):
    """Credentials that already define a rasa URL are left untouched."""
    directory = tmpdir_factory.mktemp("directory")
    credentials_path = str(directory / "credentials.yml")
    credentials = {
        "rasa": {"url": "my-custom-url"},
        "another-channel": {"url": "some-url"},
    }
    io_utils.write_yaml_file(credentials, credentials_path)
    x._prepare_credentials_for_rasa_x(credentials_path)
    actual = io_utils.read_yaml_file(credentials_path)
    assert actual == credentials
|
def buildMethodTree(classesList, classesQueryDic):
    """Wire up parent/child links between classes and return the roots.

    Args:
        classesList: class descriptors exposing `inheritedList` (names of
            base classes), `addParentClass` and `addChildClass`.
        classesQueryDic: maps class name -> class descriptor.

    Returns:
        The classes with no bases (roots of the inheritance forest).

    Raises:
        Exception: if a listed base class is not in classesQueryDic.
    """
    for cls in classesList:
        for inherited in cls.inheritedList:
            # The redundant pre-raise print was dropped — the exception
            # already carries the message.
            if inherited not in classesQueryDic:
                raise Exception("inherited classes not found")
            cls.addParentClass(classesQueryDic[inherited])
            classesQueryDic[inherited].addChildClass(cls)
    return [cls for cls in classesList if not cls.inheritedList]
|
from models.expense import Expense
class PercentExpense(Expense):
    """An expense divided by percentage shares that must total 100."""

    def __init__(self, paid_by, amount, splits, expense_metadata):
        super().__init__(paid_by, amount, splits, expense_metadata)
        # Fail fast on construction if the shares are inconsistent.
        self.validate()

    def validate(self):
        """Raise when the split percentages do not sum to exactly 100."""
        total = sum(part.percent for part in self.splits)
        if total != 100:
            raise Exception("Total percent of splits not 100.")
|
import pymysql
from pandas import DataFrame
def connect_db():
    """Fetch all rows from `topic_news` and return them as a DataFrame.

    Prints the title column as a side effect.

    SECURITY(review): DB credentials are hard-coded in source — move them to
    environment variables / a config file and rotate the password.
    """
    sharenote_db = pymysql.connect(
        user='root',
        passwd='sharenotedev1!',
        host='52.79.246.196',
        port=3306,
        db='share_note',
        charset='utf8'
    )
    # data read -> TODO: needs a query selecting the previous day's data
    # relative to today
    try:
        cursor = sharenote_db.cursor(pymysql.cursors.DictCursor)
        sql = "SELECT * FROM `topic_news`;"
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        # BUGFIX: the original never closed the connection.
        sharenote_db.close()
    df = DataFrame(result)
    print(df['topic_news_title'])
    return df
if __name__ == "__main__":
    # Ad-hoc manual check: hits the live DB and prints the titles.
    connect_db()
# Reverse Cipher: print the message backwards.
message = "Three can keep a secret ,if thwo of them are dead."
# Slicing with a step of -1 walks the string from last character to first.
translated = message[::-1]
print(translated)
|
from django.shortcuts import render
from django.views.generic import(
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,)
from .models import Post, Reply
from .forms import PostForm, ReplyForm
from django.urls import reverse_lazy
# Create your views here.
# def home(request):
#     return render(request, 'forumapp/home.html', {})
def home(request):
    """Render the static forum landing page."""
    return render(request, 'forum/home.html')


class ForumStartView(ListView):
    """List all posts, newest first."""
    model = Post
    template_name = 'forum/forum_start.html'
    ordering = ['-date_created']
    # ordering = ['-id']# post in order new to old with out date


class ForumDetailView(DetailView):
    """Show a single post."""
    model = Post
    template_name = 'forum/post_detail.html'


class CreatePostView(CreateView):
    """Create a post through PostForm."""
    model = Post
    form_class = PostForm
    template_name = 'forum/create_post.html'
    # fields = '__all__'
    # fields = ('title', 'body')


class CreateReplyView(CreateView):
    """Create a reply attached to the post named in the URL."""
    model = Reply
    form_class = ReplyForm
    template_name = 'forum/create_reply.html'
    # fields = '__all__'
    # success_url = reverse_lazy('home')
    # associates reply with a post.
    def form_valid(self, form):
        # The target post's primary key comes from the URLconf kwarg 'pk'.
        form.instance.post_id = self.kwargs['pk']
        return super().form_valid(form)


class EditPostView(UpdateView):
    """Edit a post's title and body."""
    model = Post
    template_name = 'forum/edit_post.html'
    fields = ['title', 'body']


class DeletePostView(DeleteView):
    """Delete a post, then return to the forum list."""
    model = Post
    template_name = 'forum/delete_post.html'
    success_url = reverse_lazy('forum-start')
|
# Read n, then count how many of the next n integers are zero.
n = int(input())
zero_count = 0
for _ in range(n):
    if int(input()) == 0:
        zero_count += 1
print(zero_count)
from django.apps import AppConfig
class mdbConfig(AppConfig):
    """App configuration for the movie-database app."""
    name = 'mdb'
    verbose_name = 'Django Movie Database'

    def ready(self):
        # Imported only for its side effects: registers the app's signal
        # handlers once the app registry is ready.
        import mdb.signals
|
def f1(arg):
    """Toy function for the decorator notes below: returns arg + 2."""
    return arg + 2


def f2():
    """Toy function returning 3 (the original `def f2:` was a syntax error)."""
    return 3


# Decorator-stacking notes. Stacking
#
#     @f1(arg)
#     @f2
#     def func(): pass
#
# is equivalent to:
#
#     def func(): pass
#     func = f1(arg)(f2(func))
#
# The application is left in comments: `arg` is undefined at module scope and
# f1/f2 do not return callables, so executing it would raise at import time.
|
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
import os
import random
import cv2
class CLSDataPrepare(Dataset):
    """Image-classification dataset backed by a listing file.

    Each line of `txt_path` is expected to be '<image_path> <label>'
    (whitespace-separated) — TODO confirm against the data-prep script.
    """

    def __init__(self, txt_path, img_transform = None):
        self.img_list = []
        self.label_list = []
        with open(txt_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                self.img_list.append(line.split()[0])
                self.label_list.append(line.split()[1])
        self.img_transform = img_transform

    def __getitem__(self, index):
        im, gt = self.pull_item(index)
        return im, gt

    def pull_item(self, index):
        # Returns (image tensor, int label). cv2 loads BGR; the no-transform
        # path below converts to CHW float RGB — a supplied transform is
        # presumed to handle its own conversion (TODO confirm).
        img_path = self.img_list[index]
        label = self.label_list[index]
        img = cv2.imread(img_path)
        #################### RandomRotation #######################
        # N_45 = int(random.random()/0.125)
        # if N_45 != 0:
        #     img = rotate(img, N_45 * 45)
        #
        # img = img.astype(np.float32)
        ###########################################################
        if self.img_transform is not None:
            img = self.img_transform(img)
        else:
            img = torch.from_numpy(img.astype(np.float32)).permute(2, 0, 1)
            # to rgb
            img = img[(2, 1, 0), :, :]
        label = int(label)
        return img, label

    def __len__(self):
        return len(self.label_list)
def classifier_collate(batch):
    """Collate (image, label) samples into a batch.

    Arguments:
        batch: (tuple) A tuple of (tensor image, label) pairs

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (LongTensor) the labels for the batch
    """
    images = [sample[0] for sample in batch]
    targets = [sample[1] for sample in batch]
    return torch.stack(images, 0), torch.LongTensor(targets)
def rotate(image, angle, center=None, scale=1.0):
    """Rotate *image* by *angle* degrees around *center*, scaling by *scale*.

    The output keeps the original (w, h) size, so corners may be clipped.
    """
    # Image dimensions
    (h, w) = image.shape[:2]
    # Default the rotation center to the image center
    if center is None:
        center = (w / 2, h / 2)
    # Build the affine matrix and apply the rotation
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    # Return the rotated image
    return rotated
#!/usr/bin/env python3
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from model import eng_tokenizer,encode,rus_tokenizer,eng_max_sentence_length
def mapping(number, tokenizer):
    """Map a predicted integer back to its vocabulary word.

    Args:
        number(int): token id produced by the seq2seq model.
        tokenizer(class): tokenizer whose ``word_index`` maps word -> id.

    Returns:
        vocab_word(str): the word whose id equals *number*, or None when
        no vocabulary entry matches.
    """
    return next(
        (word for word, idx in tokenizer.word_index.items() if idx == number),
        None,
    )
def to_word(prediction, tokenizer):
    """Decode a sequence of token ids into a sentence.

    Args:
        prediction(np.ndarray/list): integer ids of the predicted sentence.
        tokenizer(class): language tokenizer used to map ids to words.

    Returns:
        returns(str): the decoded sentence; decoding stops at the first id
        with no vocabulary entry (e.g. padding zeros).
    """
    words = []
    for token_id in prediction:
        decoded = mapping(token_id, tokenizer)
        if decoded is None:
            break
        words.append(decoded)
    return ' '.join(words)
# Silence TensorFlow's C++ and Python loggers.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Workaround for the Keras GlorotUniform deserialization bug when loading
# models saved with a different Keras version.
with CustomObjectScope({'GlorotUniform':glorot_uniform()}):
    model = load_model('./models/seq2seq.h5')
# Source is an English sentence typed by the user.
source = input('Type here:')
# Encode and pad the input to the model's expected length.
source = encode(eng_tokenizer,eng_max_sentence_length,source)
prediction = model.predict(source,verbose=0)[0]
# Take the most probable token at each output position (greedy decoding).
prediction = [np.argmax(vector) for vector in prediction]
#print(prediction)
# Map the token ids back to Russian words.
prediction = to_word(prediction,rus_tokenizer)
print('Translation: %s' % prediction)
|
from flask import Flask, request, jsonify, abort
import socket
import json
from flask_cors import CORS, cross_origin
#챗봇 엔진 서버 접속 정보
host = "127.0.0.1"
port = 5050
app = Flask(__name__)
#챗봇 엔진 서버와 통신
def get_answer_from_engine(bottype, query):
    """Forward *query* to the chatbot engine over TCP and return its JSON reply.

    Args:
        bottype: bot type requested by the caller.
            NOTE(review): the request always sends the literal "MyService"
            and ignores this argument — confirm intent before wiring it in.
        query: user utterance to send to the engine.

    Returns:
        dict: decoded JSON answer from the engine.
    """
    # Context manager guarantees the socket is closed even when send/recv
    # raises (the original leaked the socket on any error path).
    with socket.socket() as engine_socket:
        # Connect to the chatbot engine server
        engine_socket.connect((host, port))
        # Send the query request
        json_data = {
            'Query': query,
            'BotType': "MyService"
        }
        engine_socket.send(json.dumps(json_data).encode())
        # Receive and decode the engine's answer
        data = engine_socket.recv(2048).decode()
        ret_data = json.loads(data)
        print("socket 확인")
    return ret_data
@app.route('/query/<bot_type>', methods=['GET','POST'])
@cross_origin(origin='*',headers=['Content-Type'])
def query(bot_type):
    """Proxy the JSON body's 'query' to the chatbot engine for *bot_type*.

    Returns the engine's JSON answer; 404 for unknown bot types; 500 when
    the engine call fails.
    """
    body = request.get_json()
    # Reject unknown bot types before the try block. In the original the
    # abort(404) sat inside it, so the HTTPException it raises was caught by
    # `except Exception` and re-raised as a 500.
    if bot_type != 'TEST':
        abort(404)
    try:
        ret = get_answer_from_engine(bottype=bot_type, query=body['query'])
        return jsonify(ret)
    except Exception as ex:
        print(ex)
        abort(500)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) |
from flask import Blueprint, jsonify, request, send_file, render_template
import imageio
# imageio.plugins.ffmpeg.download()
from moviepy.editor import VideoFileClip
import os
import random
import json
import cv2
from ml_prod.image2text import model
from ml_prod.match import nli_predict
recommend_controller = Blueprint('recommend_controller', __name__)
# Global Cache
memory = {}
# load songs.json
with open('./songs/songs.json', 'r') as f:
Catalog = json.load(f)
def validate_media_path(path):
    """Return True when *path* refers to an existing regular file."""
    exists = os.path.isfile(path)
    return exists
def get_video_duration(path):
    """Return the duration in seconds (float) of the video at *path*."""
    duration = VideoFileClip(path).duration
    return duration
def extract_frames(movie_path, times, duration=0):
    """Save one PNG frame per timestamp in *times* from the given movie.

    Frames are written next to the movie as '<stem>-<t>.png'.

    NOTE(review): *times* is assumed ascending, since the loop `break`s (not
    `continue`s) at the first timestamp past *duration*; and with the default
    duration=0 every positive timestamp is skipped — callers must pass the
    real clip duration.

    Returns the list of frame paths actually written.
    """
    clip = VideoFileClip(movie_path)
    res_paths = []
    for t in times:
        if t > duration:
            break
        imgpath = '{}-{}.png'.format(movie_path[0:-4], t)
        res_paths.append(imgpath)
        clip.save_frame(imgpath, t)
    return res_paths
@recommend_controller.route('/<string:file_path>')
def recommend_given_file(file_path):
    """Recommend songs for an uploaded image/video under ./uploads.

    Pipeline: (optional frame extraction for .mov) -> image captioning
    (model.inference) -> NLI song matching -> JSON playlist. Responses are
    cached per file_path in the module-level `memory` dict.
    """
    global memory
    # Serve from the in-process cache when we have already answered.
    if file_path in memory:
        print('[Recommend][Cache Hit]', file_path)
        return memory[file_path]
    else:
        print('[Recommend][Cache Miss]', file_path)
    full_path = os.path.join('uploads', file_path)
    file_exists = validate_media_path(full_path)
    print('[Recommend][file_path][exists]', file_path, file_exists)
    if file_exists is False:
        return jsonify({
            'message': "Invalid Media Path",
            'exists': file_exists,
            'path': file_path
        }), 400
    # isVideo -> boolean
    # NOTE(review): only '.mov' is treated as video; other video extensions
    # fall through to the image path.
    isVideo = file_path[-4:] in {'.mov'}
    # extracted_frames_path = [full_path]
    # Extract Frames from Video
    # if isVideo:
    #     duration = get_video_duration(full_path)
    #     times = []
    #     time = 0
    #     while time <= duration:
    #         times.append(time)
    #         time += 0.5
    #     extracted_frames_path = extract_frames(full_path, times, duration)
    if isVideo:
        duration = get_video_duration(full_path)
        extracted_frames_path = extract_frames(full_path, [0.5], duration)
    # Image2Text: caption the single representative frame (or the image).
    cv_path = extracted_frames_path[0] if isVideo else full_path
    print('Started [model].[inference]', cv_path)
    image2text = model.inference(cv2.imread(cv_path), plot=False)
    print('Done [model].[inference]', image2text)
    # Song Matching: pick the top-N catalog songs by NLI score.
    print("Start NLP")
    num_songs = 3
    song_indices, song_matches = nli_predict(image2text, num_songs)
    print("song_indices: ", song_indices)
    print("song_matches: ", song_matches)
    # Build the playlist payload, attaching each song's match score.
    playlist = []
    for k, i in enumerate(song_indices):
        item = Catalog['songs'][i]
        item['match'] = song_matches[k]
        playlist.append(item)
    # Cache and Return
    res = jsonify({
        'playlist': playlist,
        'image2text': image2text,
        'img_url': request.base_url.replace('/recommend/', '/uploads/')
    })
    memory[file_path] = res
    return res
@recommend_controller.route('/show/<string:file_path>')
def recommend_show(file_path):
    """Serve an uploaded image directly, or render extracted video frames.

    For non-image files, collects the frame files previously written by
    extract_frames ('<stem>-<t>.png', t in 0.5 s steps) until the first gap,
    and renders them via show.html.
    """
    full_path = os.path.join('uploads', file_path)
    file_exists = validate_media_path(full_path)
    print('[Recommend][file_path][exists]', file_path, file_exists)
    if file_exists is False:
        return jsonify({
            'message': "Invalid Media Path",
            'exists': file_exists,
            'path': file_path
        }), 400
    if file_path[-4:] in {'.png', '.jpg'}:
        return send_file(full_path, mimetype='image/png')
    else:
        file_name = file_path[:-4]
        time = 0
        img_to_display = []
        # Walk half-second timestamps; stop at the first missing frame file.
        # NOTE(review): after the first step `time` is a float, so frames are
        # found only if they were named with the same float formatting.
        while True:
            check_path = 'uploads/' + file_name + '-' + str(time) + '.png'
            if os.path.exists(check_path):
                img_to_display.append(check_path)
            else:
                break
            time += 0.5
        img_to_display = ['/' + path for path in img_to_display]
        return render_template(
            'show.html',
            img_to_display=img_to_display,
        )
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib import rc, rcParams
rcParams.update({'font.size': 12})
import numpy as np
import os
import sys
import time
import yaml
import h5py
import argparse
from pathlib import Path
from Corrfunc.theory import wp, xi
# Projected correlation-function comparison: summit vs cosmos galaxy mocks.
# 21 log-spaced r_p bin edges from 0.1 to 30 Mpc/h.
rpbins = np.logspace(-1, np.log10(30), 21)
# summit auto corr (2000 Mpc/h box; positions wrapped into the box with %)
pos_summit = np.load("../summit/data/data_gal_am_ph000_cleaned.npz")['x'] % 2000
wp_results = wp(2000, 30, 1, rpbins, pos_summit[:, 0], pos_summit[:, 1], pos_summit[:, 2],
                verbose=False, output_rpavg=False) # this is all done in mpc / h
# Column 3 of each Corrfunc result row is the wp value for that bin.
wp_summit = np.array([row[3] for row in wp_results])
# cosmos auto corr, averaged over Ncosmos simulation phases (1100 Mpc/h box)
wps_cosmos = 0
Ncosmos = 10
for whichsim in range(Ncosmos):
    pos_gals = np.load("../s3PCF_fenv/data/data_gal_am_"+str(whichsim)+".npz")['x']
    wp_results = wp(1100, 30, 1, rpbins, pos_gals[:, 0], pos_gals[:, 1], pos_gals[:, 2],
                    verbose=False, output_rpavg=False) # this is all done in mpc / h
    wps_cosmos += np.array([row[3] for row in wp_results])
wp_cosmos = wps_cosmos / Ncosmos
# Plot r_p * w_p for both samples on a log-x axis.
rmids = 0.5*(rpbins[1:] + rpbins[:-1])
fig = pl.figure(figsize = (4.3, 4))
pl.xlabel('$r_p$ ($h^{-1}$Mpc)')
pl.ylabel('$r_p w_p$ ($h^{-2}$Mpc$^2$)')
pl.plot(rmids, rmids * wp_summit, label = 'summit')
pl.plot(rmids, rmids * wp_cosmos, label = 'cosmos')
pl.xscale('log')
pl.legend(loc = 1, fontsize = 12)
pl.tight_layout()
fig.savefig("./plots/plot_autocorr_am.pdf", dpi = 200)
# cosmos particle loader
# get the particle positions in the sim
def part_pos(whichsim, params):
    """Load the particle subsample table for one AbacusCosmos simulation phase.

    Args:
        whichsim: simulation phase number.
        params: dict containing at least 'z', the snapshot redshift.

    Returns:
        The halotools particle table (``cats.ptcl_table``).

    NOTE(review): ``Halotools`` is not imported anywhere in this file —
    confirm the intended import (the AbacusCosmos Halotools wrapper).
    """
    # Simulation product location and base name. The original assigned these
    # to `mydir`/`mysim` but then referenced the undefined names
    # `products_dir`/`sim_name`, raising NameError on first call.
    products_dir = '/mnt/gosling1/bigsim_products/AbacusCosmos_1100box_planck_products/'
    sim_name = 'AbacusCosmos_1100box_planck_00'
    # halotools expects the simulation name without the phase suffix
    if sim_name.endswith("_00"):
        sim_name = sim_name[:-3]
    # load a big catalog of halos and particles
    cats = Halotools.make_catalogs(sim_name=sim_name, phases = whichsim,
                                   cosmologies=0, redshifts=params['z'],
                                   products_dir=products_dir,
                                   halo_type='FoF', load_ptcl_catalog=True)
    # pull out the particle subsample
    return cats.ptcl_table
import numpy as np
import preprocessing
import random_transformer
def test_filegen():
    """Smoke-test the WAV -> FFT -> WAV round trip on one source file."""
    wav_files = preprocessing.get_wavs_from_dir("Data/Speaker_A/WAV")
    wav = next(wav_files)
    raw_gen = preprocessing.MonoRawGenerator().gen(wav)
    fft_gen = preprocessing.FFTGenerator().gen(raw_gen)
    data = next(fft_gen)
    data = np.concatenate([data, next(fft_gen)], axis=1)
    print(data.dtype)
    print(data.shape)
    wfd = preprocessing.WavFFTData(data)
    wav = wfd.to_raw()
    print(wav)
    print(wav.data)
    wav.create_wav_file("out.wav")
def test_2():
    """Split one WAV into sentences and write each sentence back as a WAV."""
    wav_files = preprocessing.get_wavs_from_dir("Data/Speaker_A/WAV")
    wav = next(wav_files)
    raw_gen = preprocessing.MonoRawGenerator().gen(wav)
    fft_gen = preprocessing.FFTGenerator().gen(raw_gen)
    sentence_gen = preprocessing.SentenceSeperator().gen(fft_gen)
    for i in range(20):
        sample = next(sentence_gen)
        preprocessing.WavFFTData(sample).to_raw().create_wav_file("out_%d.wav" % i)
def main():
    """Randomly transform the first ten sentences and write them to disk."""
    sentence_gen = preprocessing.DataSource().gen("Data/Speaker_A/WAV")
    transformer = random_transformer.RandomTransformer()
    for idx, sentence in enumerate(sentence_gen):
        if idx > 9:
            break
        transformed = transformer.transform(sentence)
        preprocessing.WavFFTData(transformed).to_raw().create_wav_file("out_%d.wav" % idx)
if __name__ == "__main__":
main()
|
'''
Stepik001132ITclassPyсh01p03st03TASK02__20200610.py
Даны три переменные, напиши такую программу, которая используя эти
переменные выведет текст: "Дважды два = четыре" без кавычек.
'''
# The three words, assigned out of order as given by the exercise statement.
b = 'два'
a = 'Дважды'
c = 'четыре'
# Prints: "Дважды два = четыре"
print("{} {} = {}".format(a, b, c))
from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
class Post(models.Model):
    """A blog post: title, body, creation date, visibility flag and photo.

    NOTE(review): field names are capitalized (Titulo, Contenido, ...) —
    unconventional for Django/PEP 8, but renaming would change DB columns.
    """
    Titulo = models.CharField(max_length=200)
    Contenido = models.TextField()
    Fecha_Creacion = models.DateTimeField('Fecha de creacion')
    # Visibility choice constants: public ('pub') / private ('pri').
    PUBLICO = 'pub'
    PRIVADO = 'pri'
    pORp_choices = (
        (PUBLICO, 'publico'),
        (PRIVADO, 'privado'),
    )
    Publico_Privado = models.CharField(
        max_length=3,
        choices=pORp_choices,
        default=PUBLICO,
    )
    # NOTE(review): default='SOMETHING' looks like a placeholder path —
    # confirm a real default image exists under MEDIA_ROOT.
    Foto = models.ImageField(upload_to='postPhotos', default = 'SOMETHING')
    def __str__(self):
        return self.Titulo
|
from skbio import DNA, RNA, Sequence

# Read the raw DNA sequence and transcribe to RNA (T -> U).
# The original leaked the open() handle for raw.txt and printed the return
# value of write() — which is just the file handle, not useful output.
with open('raw.txt') as raw:
    rna_seq = RNA(raw.read().replace('T', 'U'))
# Write the sequence out in GenBank format.
with open('moderna.gb', 'w+') as fh:
    rna_seq.write(fh, format='genbank')
|
import os
import csv
from datetime import datetime
import cv2
import face_recognition
import numpy as np
from datetime import date
#read chinese path name
def cv_imread(filePath):
    """Read an image whose path may contain non-ASCII (e.g. Chinese) characters.

    cv2.imread cannot open such paths on some platforms, so the raw bytes are
    read with numpy and decoded via cv2.imdecode (flag -1 keeps the image
    unchanged, including any alpha channel).
    """
    cv_img=cv2.imdecode(np.fromfile(filePath,dtype=np.uint8),-1)
    return cv_img
def encodeFaces(images):
    """Return one 128-d face encoding per BGR image in *images*.

    NOTE(review): face_encodings(...)[0] assumes every image contains at
    least one detectable face — an image with none raises IndexError, and a
    multi-face image silently uses the first detection.
    """
    encodelist = []
    for img in images:
        # face_recognition expects RGB; OpenCV loads BGR.
        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        # print(face_recognition.face_encodings(img))
        encode_img = face_recognition.face_encodings(img)[0]
        # print(type(encode_img))
        encodelist.append(encode_img)
    return encodelist
def markAttendance(name):
    """Append *name* with the current time to attendance.csv, once per name.

    The file is opened 'r+' so that after the csv.reader pass the file
    position sits at the end, making writer.writerow an append.
    NOTE(review): line[0] raises IndexError on blank rows — confirm the CSV
    never contains empty lines.
    """
    with open('attendance.csv','r+',newline='')as file:
        writer = csv.writer(file)
        namelist = []
        datalist = csv.reader(file)
        # Collect already-recorded names (first column of each row).
        for line in datalist:
            # print(line)
            namelist.append(line[0])
        if name not in namelist:
            time = datetime.now().strftime('%H:%M:%S')
            print(time)
            writer.writerow([name] + [time])
def main():
    """Webcam attendance loop: encode known faces, then match live frames.

    Known faces come from ./Images (file stem = student name); matches are
    boxed on screen and logged via markAttendance().
    """
    #prep
    # today = date.today().strftime('%d/%m/%Y')
    # newfile = f'attendance {today}.csv'
    # NOTE(review): this truncates attendance.csv on every run and leaks the
    # handle (no close / with-block) — confirm the reset is intentional.
    open( 'attendance.csv' , 'wb')
    path = 'Images'
    images = []
    stu_names = []
    # NOTE(review): hard-coded Windows working directory; backslashes in a
    # non-raw string ('\A' happens to be harmless here).
    os.chdir('D:\python_projects\Attendance project')
    print(os.getcwd())
    imglist = os.listdir(path)
    # Load each reference image; the file stem becomes the student's name.
    for stu in imglist:
        curImg = cv_imread(f'{path}/{stu}')
        images.append(curImg)
        stu_names.append(os.path.splitext(stu)[0])
        print(os.path.splitext(stu)[0])
        # print(curImg)
    # encode known faces
    known_encoded = encodeFaces(images)
    print(len(known_encoded))
    print('encoding complete')
    # webcam feed
    cap = cv2.VideoCapture(0)
    while True:
        read,imgF = cap.read()
        # shrink image to speed up process (1/4 scale; boxes are scaled
        # back up by 4 below)
        imgS = cv2.resize(imgF,(0,0),None,0.25,0.25)
        imgS = cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)
        facelocs = face_recognition.face_locations(imgS)
        curFaces = face_recognition.face_encodings(imgS,facelocs)
        for face,loc in zip(curFaces,facelocs):
            # 0.4 = match tolerance (stricter than the 0.6 default)
            matches = face_recognition.compare_faces(known_encoded,face,0.4)
            dist = face_recognition.face_distance(known_encoded,face)
            print(stu_names)
            print(dist)
            print(matches)
            # Take the closest known face; accept only if within tolerance.
            min_index = np.argmin(dist)
            print(min_index)
            if matches[min_index]:
                name = stu_names[min_index]
                y1,x2,y2,x1 = loc
                y1,x2,y2,x1 = y1*4,x2*4,y2*4,x1*4
                cv2.rectangle(imgF,(x1,y1),(x2,y2),(255,255,0),2)
                cv2.rectangle(imgF,(x1,y2),(x2,y2-30),(255,255,0),cv2.FILLED)
                cv2.putText(imgF,name,(x1+5,y2-5),cv2.FONT_HERSHEY_SIMPLEX,
                    1,(255,255,255),1)
                markAttendance(name)
        cv2.imshow('Webcam',imgF)
        cv2.waitKey(1)
if __name__ == '__main__':
main() |
"""Consensus Representation
@author: Soufiane Mourragui
This module computes the consensus representation between two datasets, by:
- Computing the domain-specific factors.
- Computing the principal vectors from source and target.
- Interpolating between the sets of principal vectors.
- Using KS statistics, finds the intermediate point where source and target are best balanced.
Example
-------
Examples are given in the vignettes.
Notes
-------
Examples are given in the vignette
References
-------
[1] Mourragui, S., Loog, M., Reinders, M.J.T., Wessels, L.F.A. (2019)
PRECISE: A domain adaptation approach to transfer predictors of drug response
from pre-clinical models to tumors
"""
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.externals.joblib import Parallel, delayed
from scipy.stats import ks_2samp
from precise.principal_vectors import PVComputation
from precise.intermediate_factors import IntermediateFactors
class ConsensusRepresentation:
    """Computes a consensus representation between a source and target dataset.

    NOTE(review): fit() has several unresolved references (see inline notes);
    this class does not run as written.
    """
    def __init__(self, n_factors,
                n_pv,
                n_representations=100,
                dim_reduction='pca',
                dim_reduction_target=None,
                total_variance=10**3,
                n_jobs=1):
        """
        Parameters
        -------
        n_factors: int
            Number of domain-specific factors.
        n_pv: int
            Number of principal vectors.
        n_representations: int, optional, default to 100
            Number of interpolated features between source and target principal vectors.
        dim_reduction : str, default to 'pca'
            Dimensionality reduction method for the source data,
            i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'.
        dim_reduction_target : str, default to None
            Dimensionality reduction method for the target data.
        total_variance: float, default to 10^3
            Total variance in both source and target after total variance normalization.
        n_jobs: int (optional, default to 1)
            Number of jobs for computation.
        """
        self.n_factors = n_factors
        self.n_pv = n_pv
        self.n_representations = n_representations
        self.dim_reduction = dim_reduction
        self.dim_reduction_target = dim_reduction_target
        self.total_variance = total_variance
        self.source_data = None
        self.target_data = None
        self.source_components_ = None
        self.target_components_ = None
        self.intermediate_factors_ = None
        self.consensus_components_ = None
        # NOTE(review): hard-codes 1 and ignores the n_jobs argument —
        # almost certainly should be `self.n_jobs = n_jobs`.
        self.n_jobs = 1
    def fit(self, source_data, target_data):
        """
        Compute the consensus representation between two set of data.
        IMPORTANT: Same genes have to be given for source and target, and in same order
        Parameters
        -------
        source_data : np.ndarray, shape (n_components, n_genes)
            Source dataset
        target_data : np.ndarray, shape (n_components, n_genes)
            Target dataset
        Return values
        -------
        self: returns an instance of self.
        """
        # Low-rank representation
        # NOTE(review): X_source/y_source/X_target are undefined (parameters
        # are source_data/target_data), self.dim_reduction_source /
        # self.dim_reduction_target are never constructed (only the strings
        # self.dim_reduction / dim_reduction_target exist), and `scipy` is
        # not imported — every line below up to the orth() calls raises.
        Ps = self.dim_reduction_source.fit(X_source, y_source).components_
        self.source_components_ = scipy.linalg.orth(Ps.transpose()).transpose()
        Pt = self.dim_reduction_target.fit(X_target, y_source).components_
        self.target_components_ = scipy.linalg.orth(Pt.transpose()).transpose()
        # Compute intermediate factors
        self.intermediate_factors_ = IntermediateFactors(self.n_representations)\
                                    .sample_flow(self.source_components_, self.target_components_)
        self.intermediate_factors_ = self.intermediate_factors_.transpose(1,0,2)
        # Normalize for total variance
        target_total_variance = np.sqrt(np.sum(np.var(target_data, 0)))
        normalized_target_data = target_data / target_total_variance
        normalized_target_data *= self.total_variance
        source_total_variance = np.sqrt(np.sum(np.var(source_data, 0)))
        normalized_source_data = source_data / source_total_variance
        normalized_source_data *= self.total_variance
        # Compute consensus representation: for each PV, pick the
        # interpolation step where source/target projections are most
        # similar (minimum KS statistic).
        self.consensus_components_ = []
        for i in range(self.n_pv):
            # NOTE(review): bare `intermediate_factors_` (here and below)
            # is undefined — should be `self.intermediate_factors_`.
            source_projected = intermediate_factors_[i].dot(normalized_source_data.transpose())
            target_projected = intermediate_factors_[i].dot(normalized_target_data.transpose())
            ks_stats = [
                ks_2samp(s,t)[0]
                for (s,t) in zip(source_projected, target_projected)
            ]
            self.consensus_components_.append(intermediate_factors_[i, np.argmin(ks_stats)])
        self.consensus_components_ = np.array(self.consensus_components_).transpose()
        # NOTE(review): docstring promises `self` but the array is returned.
        return self.consensus_components_
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from app.models import Destination, UserProfile, Rating, Trip, BlogPost, Comment, PostImage
# Register your models here.
# Register every app model with the default admin site in one pass.
for model in (Destination, UserProfile, Rating, Trip, BlogPost, Comment, PostImage):
    admin.site.register(model)
import logging
from django.shortcuts import render
from django.views import View
from django.http.response import JsonResponse
from .models import Meiju, MeijuTag, TagMeiju
from .models import model
class MeijuView(View):
    """Return one meiju, looked up by id, as JSON."""
    def get(self, request, meiju_id):
        record = model.get_meiju(meiju_id)
        return JsonResponse({'meiju': record})
class MeijusView(View):
    """Paginated meiju listing, filterable by author/source/tag id."""
    def get(self, request):
        # Defaults used when a query parameter is missing or malformed.
        # (The original parsed inside try/except but only logged the error,
        # then used the still-unbound names and crashed with NameError.)
        page, page_size = 1, 15
        author_id = source_id = tag_id = 0
        try:
            page = int(request.GET.get('page', 1))
            page_size = int(request.GET.get('page_size', 15))
            author_id = int(request.GET.get('author_id', 0))
            source_id = int(request.GET.get('source_id', 0))
            tag_id = int(request.GET.get('tag_id', 0))
        except Exception as e:
            logging.error(e)
        kw = dict(page_size=page_size, author_id=author_id, source_id=source_id, tag_id=tag_id)
        meijus = model.get_meijus(page, **kw)
        count = model.get_meiju_count(author_id, source_id, tag_id)
        # Ceiling division for the page count.
        total_page = (count + page_size - 1) // page_size
        return JsonResponse(dict(meijus=meijus, page=page, count=count, total_page=total_page))
class TagsView(View):
    """Paginated tag listing for one tag type: 'author', 'source' or 'tag'."""
    def get(self, request):
        # Defaults guard against malformed parameters (the original logged
        # the error but then used unbound names, raising NameError; it also
        # validated with `assert`, which -O strips).
        page, page_size, tp_str = 1, 50, 'author'
        try:
            page = int(request.GET.get('page', 1))
            page_size = int(request.GET.get('page_size', 50))
            tp_str = request.GET.get('tp_str', 'author')
            if tp_str not in ('author', 'source', 'tag'):
                raise ValueError('unsupported tp_str: %r' % tp_str)
        except Exception as e:
            logging.error(e)
            tp_str = 'author'
        tags = model.get_meiju_tp_tags(tp_str, page, page_size)
        count = model.get_meiju_tp_tags_count(tp_str)
        # Ceiling division for the page count.
        total_page = (count + page_size - 1) // page_size
        return JsonResponse(dict(tags=tags, page=page, count=count, total_page=total_page))
|
class ListNode:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove consecutive duplicates from a sorted linked list in place."""
        node = head
        while node and node.next:
            if node.val == node.next.val:
                # Duplicate: splice out the next node, keep `node` in place
                # so runs longer than two are also collapsed.
                node.next = node.next.next
            else:
                node = node.next
        return head
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 17:19:08 2018
@author: yiyuezhuo
"""
import json
import numpy as np
import os
def json2points(fname, verbose = True, detect=False):
    """Convert a JSON file of integer point pairs into a whitespace .txt file.

    Args:
        fname: path to the .json file (a JSON array of [x, y] integers).
        verbose: when True, print the 'source => destination' mapping.
        detect: when True, name the output after an existing sibling
            .jpg/.png image (producing '<image>.txt') instead of the stem.
    """
    with open(fname) as f:
        obj = json.load(f)
    # np.int was a deprecated alias of the builtin and was removed in
    # NumPy 1.24 (AttributeError there); use the builtin int dtype.
    arr = np.array(obj, dtype=int)
    _fname, _ = os.path.splitext(fname)
    if detect:
        if os.path.exists(_fname+'.jpg'):
            _fname = _fname+'.jpg'
        elif os.path.exists(_fname+'.png'):
            _fname = _fname+'.png'
    np.savetxt(_fname+'.txt', arr, fmt='%i')
    if verbose:
        print('{} => {}'.format(fname, _fname+'.txt'))
import argparse

# CLI: convert one or more point-JSON files for faceSwapCLI.py.
parser = argparse.ArgumentParser('make points for faceSwapCLI.py')
parser.add_argument('fpath', nargs='+')
parser.add_argument('--model', default='models/shape_predictor_68_face_landmarks.dat')
# store_true means the flag ENABLES detection; the original help text read
# "turn off suffix detection", contradicting the actual behavior.
parser.add_argument('--detect', action='store_true', help="turn on image suffix detection")
args = parser.parse_args()
for fname in args.fpath:
    json2points(fname, detect = args.detect)
#!/usr/bin/env python
# encoding: utf-8
"""
This script is called by a bash completion function to help complete
the options for the diskutil os x command
Created by Preston Holmes on 2010-03-11.
preston@ptone.com
Copyright (c) 2010
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
from subprocess import Popen, call, STDOUT, PIPE
import plistlib
import re
cache = '/tmp/completiondiskcache'
DEBUG = False
USE_CACHE = False
def iter_all_filesystem_personalities():
    """Yield each filesystem "Personality" name known to diskutil.

    NOTE(review): plistlib.readPlistFromString is the legacy (Python 2 /
    pre-3.9) API; on modern Python this would be plistlib.loads on bytes.
    """
    fsinfo = plistlib.readPlistFromString(sh("diskutil listFilesystems -plist"))
    for fs in fsinfo:
        pers = fs.get("Personality")
        if pers:
            yield pers
def iter_all_named_volumes():
    """Yield the VolumeName of every named partition reported by diskutil.

    Partitions without a "VolumeName" key (e.g. EFI) are skipped.
    """
    diskinfo = plistlib.readPlistFromString(sh("diskutil list -plist"))
    for disk in diskinfo['AllDisksAndPartitions']:
        for part in disk['Partitions']:
            vn = part.get("VolumeName")
            if vn:
                yield vn
def sh(cmd):
    """Run *cmd* through the shell and return its captured stdout."""
    proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, _err = proc.communicate()
    return out
def debug(msg):
    """Append *msg* to the debug log file when the DEBUG flag is set."""
    if DEBUG:
        with open('/tmp/completedebug', 'a') as log:
            log.write(str(msg) + '\n')
def get_disks(curr=''):
    """Return completion candidates for disk/volume arguments.

    For a *curr* starting with '/', offers /dev/<disk> and /Volumes/<name>
    paths; otherwise volume names plus bare disk identifiers.
    """
    # Optionally reuse a cached plist of `diskutil list` output.
    if USE_CACHE and os.path.exists(cache):
        diskinfo = plistlib.readPlist(cache)
    else:
        diskinfo = plistlib.readPlistFromString(sh("diskutil list -plist"))
        if USE_CACHE:
            plistlib.writePlist(diskinfo,cache)
    if curr.startswith("/"):
        m = diskinfo["AllDisks"]
        m = ["/dev/"+d for d in m]
        v = diskinfo["VolumesFromDisks"]
        v = ["/Volumes/"+d for d in v]
        return m + v
    return list(iter_all_named_volumes()) + diskinfo['AllDisks']
    # return diskinfo['VolumesFromDisks'] + diskinfo['AllDisks']
    # NOTE(review): everything below is unreachable — the return above always
    # fires first. It looks like an older "/dev/diskN(Name)" formatting
    # strategy kept around; delete or re-enable deliberately.
    named_disk_ct = len(diskinfo['VolumesFromDisks'])
    opts = []
    for i,d in enumerate(diskinfo['WholeDisks']):
        o = "/dev/%s" % d
        if i < named_disk_ct:
            o += "(%s)" % diskinfo['VolumesFromDisks'][i].replace(' ','_')
        if curr:
            if o.startswith(curr):
                opts.append(o)
        else:
            opts.append(o)
    if len(opts) == 1 and '(' in opts[0]:
        opts[0] = opts[0].split('(')[0]
    debug ("disks:");debug(opts)
    return opts
def re_in(pat,l):
    """Return how many strings in *l* match regex *pat* (0 means none)."""
    matcher = re.compile(pat)
    return sum(1 for item in l if matcher.search(item))
def complete():
    """Emit diskutil completion candidates for the current bash command line.

    Reads COMP_WORDS / COMP_CWORD from the environment (exported by the bash
    completion function) and writes newline-separated candidates to stdout.
    Note: if COMPREPLY receives only one word, bash completes it fully.
    """
    # All diskutil verbs offered as the first argument.
    verbs = """
        list
        info
        activity
        listFilesystems
        unmount
        umount
        unmountDisk
        eject
        mount
        mountDisk
        renameVolume
        enableJournal
        disableJournal
        enableOwnership
        disableOwnership
        verifyVolume
        repairVolume
        verifyPermissions
        repairPermissions
        eraseDisk
        eraseVolume
        reformat
        eraseOptical
        zeroDisk
        randomDisk
        secureErase
        partitionDisk
        resizeVolume
        splitPartition
        mergePartition
        """.split()
    # Verbs that take a device/volume argument.
    verbs_for_device = """
        list
        info
        unmount
        umount
        unmountDisk
        eject
        mount
        mountDisk
        renameVolume
        enableJournal
        disableJournal
        enableOwnership
        disableOwnership
        verifyVolume
        repairVolume
        verifyPermissions
        repairPermissions
        eraseDisk
        eraseVolume
        reformat
        eraseOptical
        zeroDisk
        randomDisk
        secureErase
        partitionDisk
        resizeVolume
        splitPartition
        mergePartition
        """.split()
    # Verbs whose device argument comes last (currently unused below).
    device_final = """
        list
        info
        unmount
        umount
        unmountDisk
        eject
        mount
        mountDisk
        enableJournal
        disableJournal
        verifyVolume
        repairVolume
        verifyPermissions
        repairPermissions
        eraseDisk
        eraseVolume
        eraseOptical
        zeroDisk
        randomDisk
        secureErase
        """.split()
    filesystem_nicknames = (
        "free", "fat32",
        "hfsx", "jhfsx", "jhfs+",
        "NTFS")
    partition_types = ( "APM", "MBR", "GPT" )
    # Per-verb extra flags/placeholders offered alongside devices.
    verb_options = {
        "list":('-plist', ),
        "info":('-plist', '-all', ),
        "listFilesystems":('-plist', ),
        "unmount":('force', ),
        "umount":('force', ),
        "unmountDisk":('force', ),
        "eject":( ),
        "mount":('readOnly', '-mountPoint', ),
        "mountDisk":( ),
        "renameVolume":('<name>', ),
        "enableJournal":( ),
        "disableJournal":('force', ),
        "verifyVolume":( ),
        "repairVolume":( ),
        "verifyPermissions":('-plist', ),
        "repairPermissions":('-plist', ),
        "eraseDisk": ('<name>', ) + partition_types,
        "eraseVolume": ('<name>', ),
        "eraseOptical":('quick', ),
        "zeroDisk":( ),
        "randomDisk":('<times>', ),
        "secureErase":( ),
        "partitionDisk":( ),
        "resizeVolume":( ),
        "splitPartition":( ),
        "mergePartition":( )
    }
    # COMP_WORDS arrives newline-joined; the first entry is the command name.
    cwords = os.environ['COMP_WORDS'].split('\n')[1:]
    cword = int(os.environ['COMP_CWORD'])
    debug(cword)
    try:
        curr = cwords[cword-1]
    except IndexError:
        curr = ''
    debug("current: " + curr)
    if cword == 1:
        # First argument: offer verbs and drop any stale disk cache.
        if os.path.exists(cache):
            os.remove(cache)
        opts = verbs
    elif cwords[0] in verbs:
        opts = []
        if cwords[0] in verbs_for_device:
            # if verb has device as last param - and dev is last word, exit
            #if cword != len(cwords) and '/dev' in cwords[-1]:
            #    sys.exit(0)
            #if not re_in('/dev',cwords) or '/dev' in curr:
            #    opts.extend(get_disks(curr))
            opts.extend(get_disks(cwords[-1]))
        opts.extend(verb_options[cwords[0]])
        if cwords[0] == "eraseDisk" or cwords[0] == "eraseVolume":
            opts.extend(iter_all_filesystem_personalities())
            opts.extend(filesystem_nicknames)
        # Drop options already present earlier on the command line.
        opts = [x for x in opts if x not in cwords[:-2]]
    debug(opts)
    debug (cwords)
    # Case-insensitive prefix filter against the word being completed.
    sys.stdout.write('\n'.join(filter(lambda x: x.lower().startswith(curr.lower()), opts)))
    debug ("final |%s|" % ' '.join(filter(lambda x: x.startswith(curr), opts)))
    sys.exit(0)
def main():
    """Entry point: emit completion candidates and exit."""
    complete()
if __name__ == '__main__':
main()
|
from email.message import EmailMessage
from smtplib import SMTP
from abc import ABCMeta, abstractmethod
import os
class EmailSender(metaclass=ABCMeta):
    """Abstract transport capable of delivering an EmailMessage."""
    @abstractmethod
    def send(self,msg: EmailMessage):
        """Deliver *msg*; concrete subclasses define the transport."""
        pass
class SimpleEmailSender(EmailSender):
    """EmailSender that delivers via SMTP using the SMTP_SERVER_PORT env var.

    The variable must hold "host:port", e.g. "smtp.example.com:587".
    """
    # Name of the environment variable holding the SMTP endpoint.
    SMTP_SERVER_PORT:str="SMTP_SERVER_PORT"
    @classmethod
    def send(cls,msg: EmailMessage):
        server_port = os.environ[cls.SMTP_SERVER_PORT]
        # maxsplit=1 tolerates any extra ':' after the port field.
        host, port = server_port.split(':', 1)
        with SMTP(host, int(port)) as s:
            # BUG FIX: the original called s.send_messagatsuogae(msg), a
            # garbled name that raised AttributeError; smtplib's API is
            # send_message.
            s.send_message(msg)
class EmailBuilder:
    """Builds and sends HTML EmailMessages via a pluggable EmailSender."""
    # Sender used for delivery; falls back to SimpleEmailSender when None.
    _registered_email_sender: EmailSender = None
    @classmethod
    def send_html_message(cls, from_address: str,subject: str, to_address: list[str]=None, cc_address: list[str]=None,
                            body: str=None, attachment_files: list[str] = None) -> EmailMessage:
        """Assemble an HTML email, send it, and return the built message.

        Args:
            from_address: sender address.
            subject: message subject line.
            to_address: list of recipient addresses (optional).
            cc_address: list of CC addresses (optional).
            body: HTML body content (optional).
            attachment_files: file paths to attach (optional).
        """
        email = EmailMessage()
        email['Subject'] = subject
        email['From'] = from_address
        # Address headers are comma-separated per RFC 5322; the original
        # joined with '.', producing one malformed address.
        if to_address is not None and len(to_address) > 0 :
            email['To'] = ', '.join(to_address)
        # BUG FIX: the original filled Cc from to_address.
        if cc_address is not None and len(cc_address) > 0:
            email['Cc'] = ', '.join(cc_address)
        if body is not None:
            email.set_content(body, subtype='html')
        # BUG FIX: the original required len(attachment_files) > 1, which
        # silently dropped a single attachment.
        if attachment_files is not None and len(attachment_files) > 0:
            for filepath in attachment_files:
                mime_type= cls.__get_mime_type(filepath)
                with open(filepath, 'rb') as content_file:
                    content = content_file.read()
                email.add_attachment(content,
                                     maintype=mime_type['maintype'], subtype=mime_type['subtype'],
                                     filename=filepath)
        if cls._registered_email_sender is None:
            SimpleEmailSender().send(email)
        else:
            cls._registered_email_sender.send(email)
        return email
    @classmethod
    def register_mail_sender(cls, custom_mail_sender: EmailSender):
        """Install a custom EmailSender used for all subsequent sends."""
        cls._registered_email_sender=custom_mail_sender
    @classmethod
    def __get_mime_type(cls, path: str) -> dict:
        """Guess {'maintype', 'subtype'} from the file extension.

        Returns None for empty or extension-less paths; unknown extensions
        map to application/octet-stream.
        """
        if path == '':
            return None
        parts = path.split('.')
        if len(parts)<2:
            return None
        extension = parts[len(parts)-1]
        applications = ['zip','pdf']
        texts=['txt']
        images=['png','jpeg','gif']
        if extension in applications:
            return dict(maintype='application',subtype=extension)
        elif extension in images:
            return dict(maintype='image',subtype=extension)
        elif extension=='jpg':
            # jpg files use the 'jpeg' MIME subtype.
            return dict(maintype='image', subtype='jpeg')
        elif extension=='html':
            return dict(maintype='text',subtype='html')
        elif extension in texts:
            return dict(maintype='text',subtype='plain')
        else:
            return dict(maintype='application',subtype='octet-stream')
import socket
socket_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Connect to the server's address and port.
host = "127.0.0.1"
port = 8001
socket_client.connect((host,port))
# Chat loop: send user input, print replies until the server says 'adios'.
var=True
while var==True:
    mensaje = input("Cliente: ")
    socket_client.send(mensaje.encode())
    respuesta = socket_client.recv(1024) # recv buffer size is in bytes
    respuesta = respuesta.decode()
    if respuesta == 'adios':
        print("\n")
        var=False
    else:
        print(f"Server: {respuesta}")
socket_client.close()
|
#!/usr/bin/python
import threading
import time
# NOTE: Python 2 code (print statements) — kept as-is.
class MyThread(threading.Thread):
    """Thread that counts to `param`, one tick per second, until stopped."""
    def __init__(self,name,param):
        threading.Thread.__init__(self)
        self.setName(name)
        # Cooperative stop flag checked each loop iteration.
        self.thread_stop=False
        self.param = param
    def run(self):
        for i in range(0,self.param):
            if not self.thread_stop:
                print self.getName(),
                print ":",i
                time.sleep(1)
        # Demonstrate thread introspection once counting finishes.
        print "currentid:",threading.current_thread().ident
        print "currentthread:",threading.currentThread().__dict__
    def stop(self):
        """Request the thread to stop counting (does not interrupt sleep)."""
        self.thread_stop = True
if "__main__" == __name__:
    # Launch two counting threads and wait for both before exiting.
    t1 = MyThread("thread1",5)
    t2 = MyThread("thread2",5)
    t1.start()
    t2.start()
    t1.join() #wait t1 end.
    t2.join() #wait t2 end.
    print "main thread end!"
|
# coding=utf-8
import pandas as pd
import numpy as np
import os
import math
import pickle
# 作图相关
import matplotlib.pyplot as pplt
# 分词
import jieba.posseg as pseg
# 文本特征提取:计数向量 / tf-idf向量
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# 随机森林分类器
from sklearn.ensemble import RandomForestClassifier
# 逻辑回归
from sklearn.linear_model import LogisticRegression
# 非负矩阵分解:大矩阵分成两个小矩阵,降维,压缩
from sklearn.decomposition import NMF
# 数据集上随机划分出一定比例的训练集和测试集 / K折交叉验证
from sklearn.model_selection import train_test_split, cross_val_score
# 多标签二值化
from sklearn.preprocessing import MultiLabelBinarizer
# 多分类转化成二分类,用一个分类器对应一个类别, 每个分类器都把其他全部的类别作为相反类别看待
from sklearn.multiclass import OneVsRestClassifier
# 朴素贝叶斯(伯努利分布,高斯分布,多项式分布)
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
# 支持向量机做二分类
from sklearn.svm import SVC
# 评价分类模型的好坏,ROC灵敏度曲线
from sklearn.metrics import roc_curve
import sys
import importlib
importlib.reload(sys)
# Load the Chinese stopword list, one word per line (consumed by cut()).
stopwords = [line.rstrip() for line in open('chineseStopWords.txt')]
# No-op outside a notebook: evaluates (but discards) the first ten words.
stopwords[:10]
def cut(txt):
    """Segment *txt* with jieba and return the kept words joined by spaces.

    Words present in the global ``stopwords`` list are dropped. Returns ''
    (or a partial result) when jieba cannot process the input, e.g. NaN
    values coming from pandas.
    """
    result = ''
    try:
        for w in pseg.cut(txt):
            seg = w.word
            if seg not in stopwords:
                result += seg + ' '
    # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; the best-effort behavior is otherwise preserved.
    except Exception:
        pass
    return result
# Load the cleaned training data (requires: pip3.7 install xlrd).
DataSet_train = pd.read_csv('粤语.csv', encoding = "utf-8",header=0)
#DataSet_train = pd.read_csv('情感.csv', encoding = "utf-8",header=0)
#DataSet_train = pd.read_csv('风格.csv', encoding = "utf-8",header=0)
#DataSet_train = pd.read_csv('听众.csv', encoding = "utf-8",header=0)
DataSet_lyric = DataSet_train['歌词']
# Build the 'availble' column (segmented lyrics) only if it is missing.
try:
    DataSet_train['availble']
except:
    DataSet_train['availble'] = DataSet_train.歌词.apply(cut)
    # NOTE(review): this line unconditionally OVERWRITES the segmented text
    # above with the raw lyrics — it probably should be
    # DataSet_train['availble'].fillna('') instead.
    DataSet_train['availble'] = DataSet_train.歌词.fillna('')
# Extract TF-IDF features (unigrams + bigrams) from the training text.
Feature_train = TfidfVectorizer(ngram_range=(1, 2))
Vector_feature_train = Feature_train.fit_transform(DataSet_train['availble'])
# One-vs-rest random forest classifier.
model = OneVsRestClassifier(RandomForestClassifier(), n_jobs=2)
# Train on the Cantonese label and persist model + vectorizer with pickle.
Vector_TargetLabel_train = DataSet_train['是否粤语']
model.fit(Vector_feature_train, Vector_TargetLabel_train)
with open('Yueyu.pkl','wb')as f:
    pickle.dump(model, f)
with open('FeaTrain_Yueyu.pkl','wb')as f1:
    pickle.dump(Feature_train,f1)
'''
# 学习标签 情感
Vector_TargetLabel_train = DataSet_train['情感']
model.fit(Vector_feature_train, Vector_TargetLabel_train)
with open('Motion.pkl','wb')as f:
    pickle.dump(model, f)
with open('FeaTrain_Motion.pkl','wb')as f1:
    pickle.dump(Feature_train,f1)
'''
'''
# 学习标签 听众
Vector_TargetLabel_train = DataSet_train['听众']
model.fit(Vector_feature_train, Vector_TargetLabel_train)
with open('Audience.pkl','wb')as f:
    pickle.dump(model, f)
with open('FeaTrain_Audience.pkl','wb')as f1:
    pickle.dump(Feature_train,f1)
'''
'''
# 学习标签 风格
Vector_TargetLabel_train = DataSet_train['风格']
model.fit(Vector_feature_train, Vector_TargetLabel_train)
with open('Style.pkl','wb')as f:
    pickle.dump(model, f)
with open('FeaTrain_Style.pkl','wb')as f1:
    pickle.dump(Feature_train,f1)
'''
# Cross-validate several classifier families on the same features.
for Model_ in [RandomForestClassifier(), LogisticRegression(), SVC(), BernoulliNB()]:
    print(cross_val_score(OneVsRestClassifier(Model_), Vector_feature_train, Vector_TargetLabel_train))
DataSet_train.to_csv('LyricTrain.csv', index=None, encoding='gb18030')
|
"""
This file is part of Linspector (https://linspector.org/)
Copyright (c) 2013-2023 Johannes Findeisen <you@hanez.org>. All Rights Reserved.
See LICENSE.
"""
class Database:
    """Thin container bundling the configuration, environment and logger handles."""

    def __init__(self, configuration, environment, log):
        # Stash the collaborators for later use by consumers of this object.
        self._log = log
        self._environment = environment
        self._configuration = configuration
|
# Generated by Django 2.0.13 on 2020-09-25 15:19
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
    """Initial migration: creates the LoginInstance and ViewPositionInstance tables."""
    initial = True
    dependencies = [
        ('user_profile', '0001_initial'),
    ]
    operations = [
        # One row per user login; browser details are stored as JSON.
        migrations.CreateModel(
            name='LoginInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_login', models.DateTimeField(null=True)),
                ('details', jsonfield.fields.JSONField(default=dict, help_text='JSON object containing browser browser information')),
                ('user', models.ForeignKey(help_text='The user logging in', on_delete=django.db.models.deletion.CASCADE, to='user_profile.UserProfile')),
            ],
            options={
                'ordering': ['id'],
                'managed': True,
            },
        ),
        # One row per position view, with day/week buckets for aggregation.
        migrations.CreateModel(
            name='ViewPositionInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position_id', models.TextField()),
                ('position_type', models.CharField(default='AP', max_length=3)),
                ('date_of_view', models.DateTimeField()),
                ('date_of_view_day', models.TextField()),
                ('date_of_view_week', models.TextField()),
                ('user', models.ForeignKey(help_text='The user viewing the position', on_delete=django.db.models.deletion.CASCADE, to='user_profile.UserProfile')),
            ],
            options={
                'ordering': ['id'],
                'managed': True,
            },
        ),
    ]
|
def f(x, y, l, a):
    """Quadtree-compress the l*l sub-grid of ``a`` anchored at (x, y).

    Returns the string of leaf values: a single cell value when the region
    is uniform, otherwise the concatenation of the four recursively
    compressed quadrants (top-left, top-right, bottom-left, bottom-right).
    """
    first = a[x][y]
    uniform = all(a[x + i][y + j] == first
                  for i in range(l) for j in range(l))
    if uniform:
        return str(first)
    half = l // 2
    return (f(x, y, half, a) + f(x, y + half, half, a)
            + f(x + half, y, half, a) + f(x + half, y + half, half, a))
def solution(arr):k=f(0,0,len(arr),arr);return [k.count('0'),k.count('1')] |
from config import *
import time  # sleep() paces the blink loop
# The real GPIO implementation is only available on the Raspberry Pi, so the
# implementation of LightBreath is selected at import time via `os_platform`
# (expected to come from config).
if os_platform == 'linux' or os_platform == 'Linux':
    import RPi.GPIO as GPIO  # Raspberry Pi GPIO library
    GPIO.setmode(GPIO.BOARD)  # address pins by physical board numbering
    GPIO.setup(11, GPIO.OUT)  # physical pin 11 drives the output voltage
    def LightBreath(light_code):
        # Blink the pin forever: 1 s high, 1 s low.
        # NOTE(review): this loop never returns, so the cleanup call below
        # is unreachable on Linux.
        while True:
            time.sleep(1)
            GPIO.output(light_code, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(light_code, GPIO.LOW)
else:
    def LightBreath(light_code):
        # Stub used on non-Pi platforms.
        print('It is fake.')
if __name__ == '__main__':
    LightBreath(11)
    # Release the GPIO pins when the program finishes.
    # NOTE(review): on non-Linux platforms `GPIO` was never imported, so this
    # line would raise NameError if ever reached.
    GPIO.cleanup()
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from store.models import Group, Category, \
Product, Article, Customer, Order, Parameter
class TestView(TestCase):
    """Smoke-tests every store view for a logged-in superuser.

    ``setUp`` seeds the minimal catalogue (group, category, product,
    article), the shop pricing parameters and an open cart, so that each
    view under test can render.
    """

    def setUp(self):
        admin = User.objects.create_superuser(
            username='test_admin', password='12345'
        )
        self.client.force_login(admin)
        # Shop-wide pricing parameters read by the views.
        Parameter.objects.create(name='FREE_GIFT', value='800')
        Parameter.objects.create(name='FREE_DELIVERY', value='2500')
        Parameter.objects.create(name='DELIVERY_COURIER_COST', value='450')
        group = Group.objects.create(name='Category1', slug='cat1')
        subcategory = Category.objects.create(
            name='Subcategory1', category=group, slug='subcat1')
        Product.objects.create(title='Product1',
                               category=group,
                               subcategory=subcategory,
                               image='test_image.jpg',
                               price=100,
                               slug='pro1')
        Article.objects.create(title='Test', name='Test', slug='test')
        customer = Customer.objects.create(user=admin)
        self.order = Order.carts.create(owner=customer,
                                        delivery_type='delivery_spb')

    def _assert_get_ok(self, url_name, kwargs=None, data=None):
        """GET the named URL and check that it renders with HTTP 200."""
        response = self.client.get(reverse(url_name, kwargs=kwargs), data=data)
        self.assertEqual(response.status_code, 200)

    def test_index(self):
        self._assert_get_ok('welcome')

    def test_store(self):
        self._assert_get_ok('store')

    def test_article(self):
        self._assert_get_ok('article', kwargs={'slug': 'test'})

    def test_search(self):
        self._assert_get_ok('search', data={'p': '1'})

    def test_product_detail(self):
        self._assert_get_ok('product_detail', kwargs={'slug': 'pro1'})

    def test_subcategory(self):
        self._assert_get_ok('subcategory_detail', kwargs={'slug': 'subcat1'})

    def test_category(self):
        self._assert_get_ok('category_detail', kwargs={'slug': 'cat1'})

    def test_gifts(self):
        self._assert_get_ok('gifts')

    def test_cart(self):
        self._assert_get_ok('cart')

    def test_login(self):
        self._assert_get_ok('login')

    def test_registration(self):
        self._assert_get_ok('registration')

    def test_profile(self):
        self._assert_get_ok('profile')

    # Views below involve redirects / query parameters.

    def test_logout(self):
        response = self.client.get(reverse('logout'))
        self.assertRedirects(response, "/")

    def test_add_to_cart(self):
        response = self.client.get(reverse('add_to_cart',
                                           kwargs={'slug': 'pro1'}))
        self.assertRedirects(response, '/product/pro1/')

    # def test_checkout(self):
    #     """ не срабатывает order не передается """
    #
    #     order = self.order
    #     print(order)
    #     data = {'delivery_type': 'delivery_spb'}
    #     response = self.client.post(reverse('checkout'), data=data)
    #     self.assertEqual(response.status_code, 200)
|
from flask import Flask
app = Flask(__name__)
# Show the import name Flask was constructed with (e.g. "__main__" when run directly).
print(__name__)
#1)localhost:5000 - have it say "Hello World!" - Hint: If you have only one route that your server is listening for, it must be your root route ("/")
@app.route('/')
def hello_world():
    """Task #1: the root route replies with the classic greeting."""
    greeting = 'Hello World!'
    return greeting
#2)localhost:5000/dojo - have it say "Dojo!"
@app.route('/dojo')
def dojo():
    """Task #2: the /dojo route."""
    message = 'Dojo!'
    return message
@app.route('/say/<name>')
def say(name):
    """Tasks #3-#5: greet flask, michael and john by name.

    Unknown names fall through to None (the original behaviour), which
    Flask reports as an error.
    """
    greetings = {
        "flask": "Hi Flask",
        "michael": "Hi Michael",
        "john": "Hi John!",
    }
    return greetings.get(name)
#6)localhost:5000/repeat/35/hello - have it say "hello" 35 times! - You will need to convert a string "35" to an integer 35.
# To do this use int(). For example int("35") returns 35. If the user request localhost:5000/repeat/80/hello,
# it should say "hello" 80 times.
#7)localhost:5000/repeat/99/dogs - have it say "dogs" 99 times! (have this be handled by the same route function as #6)
# BUG FIX: the route was registered as '/users/<num>/<name>', which does not
# match the '/repeat/...' URLs that tasks #6 and #7 above require.
@app.route('/repeat/<num>/<name>')
def repeat(num, name):
    """Tasks #6-#7: repeat *name* int(*num*) times."""
    num = int(num)
    return num * name
if __name__=="__main__":
    # Start the development server with the interactive debugger enabled.
    app.run(debug=True)
import datetime
import re

# Sample "combined" access-log line used as the fixture input.
log_line = '''183.60.212.153 - - [19/Feb/2013:10:23:29 +0800] "GET /o2o/media.html?menu=3 HTTP/1.1" 200 16691 "-" "Mozilla/5.0 (compatible; EasouSpider; +http://www.easou.com/search/spider.html)"'''

# --- Tokenising approach: re-join bracketed [..] / quoted ".." fields that
# whitespace splitting broke apart. ---
lst = []
tmp = ''
flag = False  # True while inside a multi-word [..] or ".." field
for word in log_line.split():
    if not flag and (word.startswith('[') or word.startswith('"')):
        if word.endswith("]") or word.endswith('"'):
            # Single-word delimited field: strip the delimiters directly.
            lst.append(word.strip('"[]'))
        else:
            # Open a multi-word field; drop the opening delimiter.
            tmp += ' ' + word[1:]
            flag = True
        continue
    if flag:
        if word.endswith("]") or word.endswith('"'):
            # Close the field; drop the trailing delimiter.
            tmp += ' ' + word[:-1]
            flag = False
            lst.append(tmp)
            tmp = ''
        else:
            tmp += ' ' + word
        continue
    lst.append(word)
# Field names matching the token positions in `lst` (blank = ignored).
names = ["remote", "", "", "datetime", "request", "status", "length", "", "useragent"]
# --- Regex approach: named groups, with per-field conversion via `ops`. ---
# FIX: raw string (the original contained invalid escape sequences such as
# '\d' in a plain string) and the group name typo 'userangent' -> 'useragent'.
pattern_str = r'''(?P<remote>[\d.]{7,}) - - \[(?P<datetime>[/\w +:]+)\] "(?P<method>\w+) (?P<url>\S+) (?P<protocol>[\w/.]+)" (?P<status>\d+) (?P<length>\d+) "[^"]+" "(?P<useragent>[^"]+)"'''
regex = re.compile(pattern_str)
# Converters for fields that are not plain strings.
ops = {'datetime': lambda timestr: datetime.datetime.strptime(timestr, "%d/%b/%Y:%H:%M:%S %z"),
       'status': int,
       'length': int}
matcher = regex.match(log_line)  # FIX: renamed from the 'matcger' typo
info = {k: ops.get(k, lambda x: x)(v) for k, v in matcher.groupdict().items()}
print(info)
|
# Generated by Django 2.2 on 2020-12-05 11:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Update_Log table tracking file updates."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # One row per file-update action, recording the line range touched,
        # the timestamp and the administrator who performed it.
        migrations.CreateModel(
            name='Update_Log',
            fields=[
                ('ulid', models.AutoField(primary_key=True, serialize=False)),
                ('filename', models.CharField(max_length=50)),
                ('startlinenum', models.PositiveIntegerField()),
                ('finishlinenum', models.PositiveIntegerField()),
                ('updatetime', models.DateTimeField(auto_now_add=True)),
                ('updateadministrator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from mongoengine import Document, fields
from mongoengine import connect
def init_db():
    """Open the default mongoengine connection to the 'madelyn-db' database."""
    connect('madelyn-db')
class Product(Document):
    """Base catalogue item; subclasses share its collection via inheritance."""
    price = fields.DecimalField(precision=2, required=True)
    name = fields.StringField(required=True)
    # Allow Painting/Brush documents to live in the Product collection.
    meta = {'allow_inheritance': True}
class Painting(Product):
    """A painting with physical dimensions; *_uom fields hold the units."""
    material = fields.StringField()
    length = fields.DecimalField(precision=5)
    length_uom = fields.StringField()
    width = fields.DecimalField(precision=5)
    width_uom = fields.StringField()
class Brush(Product):
    """A paint brush described by its bristle geometry."""
    bristle_length = fields.DecimalField(precision=5)
    bristle_length_uom = fields.StringField()
    bristle_density = fields.StringField()
class User(Document):
    """Application user identified by e-mail."""
    email = fields.EmailField()
    name = fields.StringField()
import os
import json
import requests
import py7zr
# Archive published alongside the SAMSum paper (arXiv 1911.12237).
url = 'https://arxiv.org/src/1911.12237v2/anc/corpus.7z'
# Destination directory for the extracted corpus.
corpus_dir = './data/samsum_corpus'
# Temporary download location for the 7z archive.
zip_fn = './data/corpus.7z'
def main():
    """Download the SAMSum corpus archive and unpack it into ``corpus_dir``.

    Creates the target directory if needed and deletes the downloaded
    archive after extraction.
    """
    if not os.path.isdir(corpus_dir):
        os.makedirs(corpus_dir)
    r = requests.get(url, allow_redirects=True)
    # Context managers guarantee the handles are closed even on error
    # (the original leaked both the open file object and the archive).
    with open(zip_fn, 'wb') as f:
        f.write(r.content)
    with py7zr.SevenZipFile(zip_fn, mode='r') as archive:
        archive.extractall(path=corpus_dir)
    os.remove(zip_fn)
# Script entry point: download and unpack the corpus.
if __name__ == '__main__':
    main()
|
# scope
# what variables do we have access to
a = 1
def my_func():
    # This `a` is a new local; it shadows the module-level `a`.
    a = 5
    return a
print(a) # -> 1
print(my_func()) # -> 5 scope of a = 5 is limited to my_func
print(a) # -> 1 the global `a` is untouched
# order of scope resolution (LEGB):
# 1 - local scope
# 2 - parent (enclosing) scope
# 3 - global scope
# 4 - built in python functions
# global variables
total = 0
def count():
    global total # `global` makes assignments target the module-level name
    total += 1
    return total
print(count())
# this also injects the global variable
def count1(total):
    # `total` here is a parameter (local); the global value is passed in.
    total += 1
    return total
print(count1(total))
from django.shortcuts import render
from .models import Comunidad,Evento,Solicitud,AgentesPatorales,Login
# Create your views here.
def Home(request):
    """Render the public landing page."""
    return render(request, 'core/index.html')
def ListadoComunidad(request):
    """List every Comunidad record."""
    comunidades = Comunidad.objects.all()
    return render(request, 'core/listarComunidad.html',
                  {'listaComunidad': comunidades})
def ListadoEvento(request):
    """List every Evento record."""
    eventos = Evento.objects.all()
    return render(request, 'core/listarEvento.html', {'listaEvento': eventos})
def ListadoSolicitud(request):
    """List every Solicitud record."""
    solicitudes = Solicitud.objects.all()
    return render(request, 'core/listarSolicitud.html',
                  {'listaSolicitud': solicitudes})
def ListadoAgente(request):
    """List every AgentesPatorales record."""
    agentes = AgentesPatorales.objects.all()
    return render(request, 'core/listarAgente.html', {'listaAgente': agentes})
def Formulario(request):
    """Display the Comunidad creation form; persist a new record on POST."""
    comunidades = Comunidad.objects.all()
    context = {'tipos': comunidades}
    if request.POST:
        nueva = Comunidad(
            nombre=request.POST["nombre"],
            ubicacion=request.POST["ubicacion"],
            nombreCordinadores=request.POST["nombreCordinadores"],
            nombresAgentesPastorales=request.POST["nombresAgentesPastorales"],
            nombresMisnistroComunion=request.POST["nombresMisnistroComunion"],
        )
        nueva.save()
        context['mensaje'] = 'grabo'
    return render(request, 'core/formulario_ingreso.html', context)
def FormularioSolicitud(request):
    """Display the sacrament request form; create a Solicitud on POST."""
    solicitudes = Solicitud.objects.all()
    context = {'tipos': solicitudes}
    if request.POST:
        nueva = Solicitud(
            sacramento=request.POST["sacramento"],
            nombreP=request.POST["nombreP"],
            apellidoP=request.POST["apellidoP"],
            nombreH=request.POST["nombreH"],
            apellidoH=request.POST["apellidoH"],
        )
        nueva.save()
        context['mensaje'] = 'grabo'
    return render(request, 'core/formulario_solicitud.html', context)
def FormularioEvento(request):
    """Display the event form; create an Evento linked to its Comunidad on POST."""
    comunidades = Comunidad.objects.all()
    context = {'tipos': comunidades}
    if request.POST:
        # Resolve the selected community id into a model instance.
        comunidad = Comunidad.objects.get(id=request.POST["comunidad"])
        nuevo = Evento(
            nombre=request.POST["nombre"],
            tipo=request.POST["tipo"],
            fecha=request.POST["fecha"],
            hora=request.POST["hora"],
            comunidad=comunidad,
        )
        nuevo.save()
        context['mensaje'] = 'grabo'
    return render(request, 'core/formulario_ingresoE.html', context)
def FormularioAgente(request):
    """Display the pastoral-agent form; create an AgentesPatorales on POST."""
    comunidades = Comunidad.objects.all()
    context = {'tipos': comunidades}
    if request.POST:
        # Resolve the selected community id into a model instance.
        comunidad = Comunidad.objects.get(id=request.POST["comu"])
        nuevo = AgentesPatorales(
            nombre=request.POST["nombre"],
            apellido=request.POST["apellido"],
            edad=request.POST["edad"],
            tipoPersona=request.POST["tipoPersona"],
            comu=comunidad,
        )
        nuevo.save()
        context['mensaje'] = 'grabo'
    return render(request, 'core/formulario_Agente.html', context)
def FormularioLogin(request):
    """Authenticate against the Login table and start a session on success.

    NOTE(review): credentials are matched in plain text, so passwords appear
    to be stored unhashed — consider Django's auth framework instead.
    """
    if request.POST:
        usuario = request.POST["usuario"]
        password = request.POST["password"]
        try:
            me = Login.objects.get(usuario=usuario, password=password)
            if me.usuario is not None:
                request.session["usuario"] = usuario
                return render(request, 'core/home.html', {'mensaje': 'encontro'})
        except Exception:
            # No matching credentials (or lookup error): report failure.
            return render(request, 'core/login.html', {'mensaje': 'no ta'})
    return render(request, 'core/login.html', {'mensaje': 'ingrese'})
def Home1(request):
    """Render the authenticated home page."""
    return render(request, 'core/home.html')
|
from keras.models import load_model
import paths
# Load the trained intent-classification model from disk.
model = load_model(paths.modelpath)
import json
import numpy
import random
import yorumlayici
import pickle
import dusukprobability
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# All intents (training sentences + responses) from the JSON file.
intents = json.loads(open(paths.jsonpath,encoding="utf8").read())
# Every unique vocabulary word seen during training.
words = pickle.load(open(paths.wordspath,"rb"))
# All class (intent group) names.
classes = pickle.load(open(paths.classespath,"rb"))
# Last normalised user sentence (set by clean_sentence, reused for fallback).
sentencee = None
def clean_sentence(sentence):
    """Tokenise *sentence*, then lower-case and lemmatise every token.

    Side effect: stores the normalised sentence in the module-level
    ``sentencee`` so it can be reused for fallback handling.
    """
    global sentencee
    tokens = nltk.word_tokenize(sentence)
    print(tokens)
    tokens = [lemmatizer.lemmatize(token.lower()) for token in tokens]
    sentencee = " ".join(tokens)
    return tokens
def bow(sentence, words):
    """Build a binary bag-of-words vector for *sentence* over the vocabulary *words*."""
    tokens = clean_sentence(sentence)
    print("sentence = ", tokens)
    bag = [0] * len(words)
    # Mark every vocabulary word that appears among the sentence tokens.
    for index, vocab_word in enumerate(words):
        if vocab_word in tokens:
            bag[index] = 1
    print(bag)
    return (numpy.array(bag))
def predict_class(sentence, model):
    """Predict intent classes for *sentence*, sorted by descending probability.

    Only classes whose probability exceeds ERROR_THRESHOLD are kept; each
    entry is ``{"intent": name, "probability": str}``.
    """
    ERROR_THRESHOLD = 0.25
    features = bow(sentence, words)
    probabilities = model.predict(numpy.array([features]))[0]
    candidates = [[index, prob]
                  for index, prob in enumerate(probabilities)
                  if prob > ERROR_THRESHOLD]
    # Highest-probability intents first.
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return_list = [{"intent": classes[index], "probability": str(prob)}
                   for index, prob in candidates]
    print(return_list[0]["probability"])
    return return_list
def getResponse(ints, intents_json):
    """Pick a reply for the top-ranked intent in *ints*.

    When the top probability is below 0.9, hand the original sentence to
    ``yorumlayici`` and offer a Google search instead.
    """
    tag = ints[0]['intent']
    probability = ints[0]["probability"]
    if float(probability) < 0.9:
        print("sentence= ", sentencee)
        yorumlayici.quest = sentencee
        return "Sorduğun sorunun cevabını bilmiyorum,\n\nGoogle'de aratmamı ister misin? (E/H)"
    # Find the matching intent and answer with one of its responses at random.
    for intent in intents_json['veriler']:
        if intent['tag'] == tag:
            result = random.choice(intent['responses'])
            break
    return result
def chatbot_response(msg):
    """Return the chatbot's reply for the user message *msg*."""
    ranked_intents = predict_class(msg, model)
    return getResponse(ranked_intents, intents)
|
"""
Architectures for sequence tagging task
"""
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy
from keras.models import Model, load_model
from keras.layers import Input, Bidirectional, LSTM, TimeDistributed, Dense
from keras.utils import Sequence
import numpy as np
class BaseTagger:
    """
    Base for all tagger model

    Thin delegating wrapper around a compiled Keras model; subclasses build
    ``self.model`` and hand it to this constructor.
    """
    def __init__(self, model):
        self.model = model
    def summary(self, *args, **kwargs):
        return self.model.summary(*args, **kwargs)
    def fit(self, *args, **kwargs):
        return self.model.fit(*args, **kwargs)
    def fit_generator(self, *args, **kwargs):
        return self.model.fit_generator(*args, **kwargs)
    def predict(self, *args, **kwargs):
        return self.model.predict(*args, **kwargs)
    def predict_generator(self, *args, **kwargs):
        return self.model.predict_generator(*args, **kwargs)
    def save(self, filepath):
        # Optimizer state is dropped on purpose: saved models are reloaded
        # via load() for inference only.
        self.model.save(filepath=filepath, include_optimizer=False)
    def load(self, filepath):
        self.model = load_model(filepath=filepath, custom_objects=self.create_custom_objects())
    @classmethod
    def create_custom_objects(cls):
        # Keras needs the CRF layer plus its loss/accuracy callables to
        # deserialize a saved model.  ClassWrapper records the CRF instance
        # created during loading so the "loss"/"accuracy" entries can be
        # resolved against that exact instance afterwards.
        instance_holder = {"instance": None}
        class ClassWrapper(CRF):
            def __init__(self, *args, **kwargs):
                instance_holder["instance"] = self
                super(ClassWrapper, self).__init__(*args, **kwargs)
        def loss(*args):
            # Delegate to the loaded CRF layer's own loss function.
            method = getattr(instance_holder["instance"], "loss_function")
            return method(*args)
        def accuracy(*args):
            # Delegate to the loaded CRF layer's own accuracy metric.
            method = getattr(instance_holder["instance"], "accuracy")
            return method(*args)
        return {"ClassWrapper": ClassWrapper, "CRF": ClassWrapper, "loss": loss, "accuracy": accuracy}
class BiLstmCrfTagger(BaseTagger):
    """
    Bi-directional LSTM + CRF Architecture

    Consumes (batch, time, n_features) sequences and emits per-timestep
    scores over n_tags classes from a CRF output layer.
    """
    def __init__(self, n_features, n_lstm_unit, n_distributed_dense, n_tags):
        # Time dimension is None so sequences of any length are accepted.
        inputs = Input(shape=(None, n_features))
        outputs = Bidirectional(LSTM(units=n_lstm_unit, return_sequences=True))(inputs)
        outputs = TimeDistributed(Dense(n_distributed_dense))(outputs)
        outputs = CRF(n_tags)(outputs)
        self.model = Model(inputs=inputs, outputs=outputs)
        # CRF-specific loss/metric from keras_contrib.
        self.model.compile(optimizer="rmsprop", loss=crf_loss, metrics=[crf_accuracy])
        super().__init__(self.model)
class SingularBatchGenerator(Sequence):
    """
    Generate batch with size=1 for training

    Each batch wraps a single sample (and its label, if given) in a leading
    batch axis, which permits sequences of differing lengths.
    """
    def __init__(self, X, y=None, shuffle=False):
        """
        Constructor
        """
        self.X = X
        self.y = y
        self.shuffle = shuffle
        self.indexes = np.arange(len(self.X))
        # Apply the first (optional) shuffle up front.
        self.on_epoch_end()
    def __len__(self):
        """
        Number of batch per epoch
        """
        return len(self.X)
    def __getitem__(self, batch_id):
        """
        Get batch_id th batch
        """
        if self.y is None:
            return np.array([self.X[batch_id]])
        return np.array([self.X[batch_id]]), np.array([self.y[batch_id]])
    def on_epoch_end(self):
        """
        Shuffles indexes after each epoch

        NOTE(review): X (and y) are re-ordered with fancy indexing, which
        assumes they are numpy arrays rather than plain lists — confirm callers.
        """
        if self.shuffle:
            np.random.shuffle(self.indexes)
            self.X = self.X[self.indexes]
            if self.y is not None:
                self.y = self.y[self.indexes]
if __name__ == '__main__':
    # Tiny smoke test: two variable-length sequences with 3 features each.
    model = BiLstmCrfTagger(3, 50, 20, 2)
    sample_data = np.array([[[1.0, 2.0, 0.0], [0.5, 1.2, 0.2]], [[1.0, 2.0, 0.1]]])
    sample_label = np.array([[[1.0, 0.0], [1.0, 0.0]], [[0.0, 1.0]]])
    generator = SingularBatchGenerator(sample_data, sample_label)
    model.fit_generator(generator, epochs=5, verbose=2)
    pred = []
    # Predict one sequence at a time (lengths differ, so no common batch).
    for item in sample_data:
        pred.append((model.predict(np.array([item]))[0]))
    print(pred)
    print(model.summary())
|
import os
import shutil
import unittest
import tempfile
from bento._config \
import \
IPKG_PATH
from bento.core.node \
import \
create_root_with_source_tree
from bento.core.package \
import \
PackageDescription
from bento.core \
import \
PackageMetadata
from bento.core.pkg_objects \
import \
Extension
from bento.installed_package_description \
import \
InstalledPkgDescription, ipkg_meta_from_pkg
from bento.conv \
import \
to_distutils_meta
from bento.commands.egg_utils \
import \
EggInfo
# bento.info-style package description used as a fixture by the tests below.
DESCR = """\
Name: Sphinx
Version: 0.6.3
Summary: Python documentation generator
Url: http://sphinx.pocoo.org/
DownloadUrl: http://pypi.python.org/pypi/Sphinx
Description: Some long description.
Author: Georg Brandl
AuthorEmail: georg@python.org
Maintainer: Georg Brandl
MaintainerEmail: georg@python.org
License: BSD
Library:
Packages:
sphinx,
sphinx.builders
Modules:
cat.py
Extension: _dog
Sources: src/dog.c
Executable: sphinx-build
Module: sphinx
Function: main
"""
# Template for a minimal Python C extension module; %(name)s is substituted
# with the extension name by create_fake_package().
DUMMY_C = r"""\
#include <Python.h>
#include <stdio.h>
static PyObject*
hello(PyObject *self, PyObject *args)
{
printf("Hello from C\n");
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef HelloMethods[] = {
{"hello", hello, METH_VARARGS, "Print a hello world."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
PyMODINIT_FUNC
init%(name)s(void)
{
(void) Py_InitModule("%(name)s", HelloMethods);
}
"""
def create_fake_package(top_node, packages=None, modules=None, extensions=None):
    """Materialise a throw-away Python package tree under *top_node*.

    packages   : dotted package names — a directory plus __init__.py each.
    modules    : dotted module names — one .py file each.
    extensions : Extension objects — the first source gets the DUMMY_C
                 template, remaining sources are created empty.

    FIX: `extensions` now defaults to None like the other parameters (the
    mutable `[]` default was inconsistent with the `is None` check below).
    """
    if packages is None:
        packages = []
    if modules is None:
        modules = []
    if extensions is None:
        extensions = []
    for p in packages:
        d = p.replace(".", os.sep)
        n = top_node.make_node(d)
        n.mkdir()
        init = n.make_node("__init__.py")
        init.write("")
    for m in modules:
        d = m.replace(".", os.sep)
        n = top_node.make_node("%s.py" % d)
        # BUG FIX: actually create the module file; the node was built but
        # never written, unlike the package and extension branches.
        n.write("")
    for extension in extensions:
        main = extension.sources[0]
        n = top_node.make_node(main)
        n.parent.mkdir()
        n.write(DUMMY_C % {"name": extension.name})
        for s in extension.sources[1:]:
            n = top_node.make_node(s)
            n.write("")
class TestEggInfo(unittest.TestCase):
    """End-to-end checks of EggInfo metadata generated from a fake package tree."""
    def setUp(self):
        # Work inside a throw-away directory so the fake tree never pollutes
        # the real working directory.
        self.old_dir = None
        self.tmpdir = None
        self.old_dir = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        os.chdir(self.tmpdir)
        root = create_root_with_source_tree(self.tmpdir, os.path.join(self.tmpdir, "build"))
        self.run_node = root.find_node(self.tmpdir)
        self.top_node = self.run_node._ctx.srcnode
        self.build_node = self.run_node._ctx.bldnode
    def tearDown(self):
        # Restore the previous cwd first, then delete the temporary tree.
        if self.old_dir:
            os.chdir(self.old_dir)
        if os.path.exists(self.tmpdir):
            shutil.rmtree(self.tmpdir)
    def _prepare_egg_info(self):
        # Build an EggInfo for the DESCR fixture over a materialised fake
        # package: two packages, one module and one C extension.
        create_fake_package(self.top_node, ["sphinx", "sphinx.builders"],
                            ["cat.py"], [Extension("_dog", [os.path.join("src", "dog.c")])])
        ipkg_file = self.build_node.make_node(IPKG_PATH)
        ipkg_file.parent.mkdir()
        ipkg_file.write("")
        files = [os.path.join("sphinx", "builders", "__init__.py"),
                 os.path.join("sphinx", "__init__.py"),
                 os.path.join("src", "dog.c"),
                 os.path.join("cat.py")]
        pkg = PackageDescription.from_string(DESCR)
        meta = PackageMetadata.from_package(pkg)
        executables = pkg.executables
        return EggInfo(meta, executables, files)
    def test_pkg_info(self):
        # The generated PKG-INFO must match the metadata declared in DESCR.
        egg_info = self._prepare_egg_info()
        res = egg_info.get_pkg_info()
        ref = """\
Metadata-Version: 1.0
Name: Sphinx
Version: 0.6.3
Summary: Python documentation generator
Home-page: http://sphinx.pocoo.org/
Author: Georg Brandl
Author-email: georg@python.org
License: BSD
Download-URL: http://pypi.python.org/pypi/Sphinx
Description: Some long description.
Platform: UNKNOWN
"""
        self.assertEqual(res, ref)
    def test_iter_meta(self):
        # Smoke test: iterating the metadata files must not raise.
        egg_info = self._prepare_egg_info()
        for name, content in egg_info.iter_meta(self.build_node):
            pass
|
import math as mp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import scipy.interpolate as sp
import time
class Computations(object):
def __init__(self):
self.weights = []
self.thita = [[1, 1, 1]]
self.flag = True
    def read_fl(self, filename):
        """
        Function to read the geographic coordinates φ,λ of the points
        from a comma-separated file into ``self.fl`` (one row per point).
        """
        self.fl = np.genfromtxt(filename, delimiter=',')
    def read_H(self, filename):
        """
        Function to read the orthometric height H of the points
        from a comma-separated file into ``self.H`` (one row per point).
        """
        self.H = np.genfromtxt(filename, delimiter=',')
    def read_h(self, filename):
        """
        Function to read the geometric height h of the points
        from a comma-separated file into ``self.h`` (one row per point).
        """
        self.h = np.genfromtxt(filename, delimiter=',')
    def read_N(self, filename):
        """
        Function to read the geoid height N of the points
        from a comma-separated file into ``self.N`` (one row per point).
        """
        self.N = np.genfromtxt(filename, delimiter=',')
    def estimation(self, method, cut_off=0):
        """
        Function to compute the parameters of the corrections model with
        Least Squares Estimation

        method : int selecting the parametric model (see the state-matrix
            construction below): 1 -> bias + H + N, 2 -> bias + N,
            3 -> bias + H, 4 -> bias + latitude + N.
        cut_off : its square is added to the geoid-height variances when
            ``self.N`` carries a variance column.

        Returns a DataFrame with the estimated parameters and their sigmas.
        """
        self.method = method
        self.cut_off = cut_off
        # Create the measurements vector h - H - N
        measurements = np.zeros((len(self.H), 1))
        for i in range(0, len(self.H)):
            measurements[i, 0] = self.h[i, 0] - self.H[i, 0] - self.N[i, 0]
        # NOTE(review): for ndarrays `[:]` yields a view, not a copy; safe
        # here only because `measurements` is never mutated afterwards.
        self.initial = measurements[:]
        # Choose the right error for the geoid heights based on the model
        # (best-effort: skipped when N has no variance column).
        # NOTE(review): this mutates self.N in place, so calling estimation()
        # repeatedly keeps adding cut_off**2 — confirm single-call usage.
        try:
            self.N[:, 1] = self.N[:, 1] + cut_off**2
        except:
            pass
        # Get the variances - errors for each point: inverse of the combined
        # variance, scaled by the current variance components self.thita.
        measur_errors = np.zeros((len(self.H), 1))
        for i in range(0, len(self.H)):
            measur_errors[i, 0] = 1/(self.h[i, 1]**2 * np.ravel(self.thita[-1][0])
                                    + self.H[i, 1]**2 * np.ravel(self.thita[-1][1])
                                    + self.N[i, 1]**2 * np.ravel(self.thita[-1][2]))
        # Diagonal weight matrix P = diag(1/sigma^2).
        weights = np.eye((len(self.H)))
        self.weights = weights * measur_errors
        if self.flag == True:
            # Keep the initial weights to restore it later if needed
            self.initial_weights = self.weights
        # Create the state matrix based on the user's preference about the model
        if method == 1:
            A = np.ones((len(self.H), 3))
            A[:, 1] = self.H[:, 0]
            A[:, 2] = self.N[:, 0]
        elif method == 2:
            A = np.ones((len(self.H), 2))
            A[:, 1] = self.N[:, 0]
        elif method == 3:
            A = np.ones((len(self.H), 2))
            A[:, 1] = self.H[:, 0]
        elif method == 4:
            A = np.ones((len(self.H), 3))
            A[:, 1] = self.fl[:, 0]
            A[:, 2] = self.N[:, 0]
        # Compute the apriori variance estimation: Cx = inv(A' P A)
        Cx_pre = np.matmul(np.transpose(A), self.weights)
        Cx = np.linalg.inv(np.matmul(Cx_pre, A))
        # Compute the estimation for the parameters of the model
        x_pre = np.matmul(Cx_pre, measurements)
        self.x = np.matmul(Cx, x_pre)
        # Create a Pandas Dataframe to hold the results (parameter values
        # plus their standard deviations from the diagonal of Cx)
        if method == 1 or method == 4:
            val_pass = np.zeros((3, 2))
            val_pass[:, 0] = self.x[:, 0]
            val_pass[0, 1] = mp.sqrt(Cx[0, 0])
            val_pass[1, 1] = mp.sqrt(Cx[1, 1])
            val_pass[2, 1] = mp.sqrt(Cx[2, 2])
        elif method == 2 or method == 3:
            val_pass = np.zeros((2, 2))
            val_pass[:, 0] = self.x[:, 0]
            val_pass[0, 1] = mp.sqrt(Cx[0, 0])
            val_pass[1, 1] = mp.sqrt(Cx[1, 1])
        columns = ['Results', 'σx']
        if method == 1:
            rows = ['m', 'σΔΗ', 'σΔΝ']
        elif method == 2:
            rows = ['m', 'σΔΝ']
        elif method == 3:
            rows = ['m', 'σΔΗ']
        elif method == 4:
            rows = ['m', 'σΔφ', 'σΔΝ']
        self.val_pass = pd.DataFrame(val_pass, index=rows, columns=columns)
        # Compute measurements estimation
        self.measurements_estimation = (np.matmul(A, self.x))
        # Compute the error of the estimation
        self.error_estimation = measurements - self.measurements_estimation
        return self.val_pass
    def create_map(self):
        """
        Function to create a grid - contour map of the correction surface of the model

        Interpolates the model estimates onto a regular lat/lon grid and
        shows a filled contour plot (non-blocking).
        """
        x, y = self.fl[:, 0], self.fl[:, 1]
        z = self.measurements_estimation
        # np.linspace defaults to 50 samples, matching reshape(50, 50) below.
        X = np.linspace(np.min(x), np.max(x))
        Y = np.linspace(np.min(y), np.max(y))
        X, Y = np.meshgrid(X, Y)
        Z = sp.griddata((x, y), z, (X, Y)).reshape(50, 50)
        plt.contourf(Y, X, Z)
        plt.colorbar()
        plt.xlabel("Lon")
        plt.ylabel("Lat")
        plt.title("Correction Surface (m)")
        plt.show(block=False)
        # Give the non-blocking window a moment to render.
        time.sleep(1)
    def plot(self):
        """
        Function to create two plots, one for the initial and after LSE measurements
        and one for the estimation errors of the model

        NOTE(review): relies on self.cross_validation(), defined outside this
        excerpt — confirm it returns a per-point error sequence.
        """
        f, axarr = plt.subplots(2, figsize=(7,10))
        f.subplots_adjust(hspace=0.5)
        axarr[0].plot(self.initial, color='b', label='Initial')
        axarr[0].plot(self.measurements_estimation, color='r', label='After LSE')
        axarr[0].set_title("Initial differences - After LSE differences")
        axarr[0].set_ylabel("h - H - N (m)")
        axarr[0].legend()
        accuracy = self.cross_validation()
        axarr[1].plot(accuracy, color='b', label='Cross Validation Error')
        axarr[1].set_title("Cross Validation Error")
        axarr[1].set_ylabel("Error (m)")
        axarr[1].legend()
        plt.show()
    def save_all_to_csv(self):
        """
        Function to output results to a .csv file

        Writes Results.csv (tab-separated): the parameter table, the
        per-point differences, and summary statistics for each column.
        """
        df = pd.DataFrame(self.initial, columns=["Initial_Dif"])
        df = df.assign(After_LSE_Dif=self.measurements_estimation)
        df = df.assign(Estimation_Errors=self.error_estimation)
        df.index.name = "Points"
        # Parameter table first, then append the per-point differences.
        self.val_pass.to_csv("Results.csv", sep="\t")
        with open('Results.csv', 'a') as f:
            df.to_csv(f, header=True, sep="\t")
        # Summary statistics (mean/std/max/min) for each of the three columns.
        statistics = np.zeros((4, 3))
        statistics[0, 0] = np.mean(self.initial)
        statistics[1, 0] = np.std(self.initial)
        statistics[2, 0] = np.max(self.initial)
        statistics[3, 0] = np.min(self.initial)
        statistics[0, 1] = np.mean(self.measurements_estimation)
        statistics[1, 1] = np.std(self.measurements_estimation)
        statistics[2, 1] = np.max(self.measurements_estimation)
        statistics[3, 1] = np.min(self.measurements_estimation)
        statistics[0, 2] = np.mean(self.error_estimation)
        statistics[1, 2] = np.std(self.error_estimation)
        statistics[2, 2] = np.max(self.error_estimation)
        statistics[3, 2] = np.min(self.error_estimation)
        df_1 = pd.DataFrame(statistics, index=["Mean", "STD", "Max", 'Min'],
                            columns=["Initial_Dif", "After_LSE_Dif", "Estimation_errors"])
        with open('Results.csv', 'a') as f:
            df_1.to_csv(f, header=True, sep="\t")
def variance_component(self, method, cut_off=0):
    """Estimate variance components for the h/H/N observation groups with
    an iterative MINQUE-style scheme.

    method: 1-4, selects the same design matrices as estimation().
    cut_off: added (squared) to the geoid-height variances when a second
        column is present.
    Returns a DataFrame with one component per observation group.
    """
    # Create the state matrix based on the user's preference about the model
    if method == 1:
        A = np.ones((len(self.H), 3))
        A[:, 1] = self.H[:, 0]
        A[:, 2] = self.N[:, 0]
    elif method == 2:
        A = np.ones((len(self.H), 2))
        A[:, 1] = self.N[:, 0]
    elif method == 3:
        A = np.ones((len(self.H), 2))
        A[:, 1] = self.H[:, 0]
    elif method == 4:
        A = np.ones((len(self.H), 3))
        A[:, 1] = self.fl[:, 0]
        A[:, 2] = self.N[:, 0]
    # Choose the right error for the geoid heights based on the model.
    # NOTE(review): the bare except silently skips the variance inflation
    # when self.N lacks a second column -- confirm that is intended.
    try:
        self.N[:, 1] = self.N[:, 1] + cut_off**2
    except:
        pass
    # Cofactor matrices: one diagonal matrix of squared sigmas per group.
    v0 = []
    v0.append(np.eye(len(self.H)) * self.h[:, 1] ** 2)
    v0.append(np.eye(len(self.H)) * self.H[:, 1] ** 2)
    v0.append(np.eye(len(self.H)) * self.N[:, 1] ** 2)
    # Create the measurements vector h - H - N
    measurements = np.zeros((len(self.H), 1))
    for i in range(0, len(self.H)):
        measurements[i, 0] = self.h[i, 0] - self.H[i, 0] - self.N[i, 0]
    e = 10**3*np.ones((3, 1))
    # thita holds the component estimates of every iteration; start from
    # the neutral guess [1, 1, 1].
    thita = []
    thita_1 = np.ones((3, 1))
    thita.append(thita_1)
    n = 0
    v = np.eye(len(self.H))
    while True:
        # Combined weight matrix from the current component estimates.
        weights = np.ravel(thita[n][0]) * v0[0] + np.ravel(thita[n][1]) * v0[1] + np.ravel(thita[n][2]) * v0[2]
        n = n + 1
        # v is the identity, so the elementwise product keeps the diagonal.
        p = np.linalg.inv(v * weights)
        # Compute the apriori variance estimation
        Cx_pre = np.matmul(np.transpose(A), p)
        Cx = np.linalg.inv(np.matmul(Cx_pre, A))
        # Compute the estimation for the parameters of the model
        x_pre = np.matmul(Cx_pre, measurements)
        x = np.matmul(Cx, x_pre)
        # Compute measurements estimation
        measurements_estimation = (np.matmul(A, x))
        # Compute the error of the estimation
        error_estimation = measurements - measurements_estimation
        # Build the projector W = inv(Q) - inv(Q) A (A' inv(Q) A)^-1 A' inv(Q).
        w1 = np.matmul(np.linalg.inv(weights), A) # inv(p) * A
        w2 = np.matmul(np.transpose(A), np.linalg.inv(weights)) # A' * inv(p)
        w3 = np.linalg.inv(np.matmul(w2, A)) # inv(A' * inv(p) * A)
        w4 = np.matmul(w1, w3) # inv(p) * A * inv(A'inv(p)A)
        w5 = np.matmul(w4, np.transpose(A))
        w6 = np.matmul(w5, np.linalg.inv(weights))
        w = np.linalg.inv(weights) - w6
        # Normal equations of the MINQUE system: J thita = k.
        J = np.zeros((3, 3))
        k = np.zeros((3, 1))
        for i in range(0, 3):
            for j in range(0, 3):
                J[i, j] = np.trace(np.mat(w) * np.mat(v0[i]) * np.mat(w) * np.mat(v0[j]))
            k[i, 0] = np.mat(np.transpose(error_estimation)) * np.mat(np.linalg.inv(weights)) * np.mat(v0[i]) * np.mat(np.linalg.inv(weights)) * np.mat(error_estimation)
        # pinv guards against a singular J.
        thita.append(np.mat(np.linalg.pinv(J)) * np.mat(k))
        C_thita = 2 * thita[n]
        e = thita[n] - thita[n-1]
        # NOTE(review): convergence is judged on component 1 (the H group)
        # only -- confirm the other two are not meant to be checked too.
        if np.abs(np.ravel(thita[n][1]) - np.ravel(thita[n - 1][1])) < 10**(-3.0):
            break
    self.thita = thita
    self.flag = False
    df = pd.DataFrame(thita[n], columns=["Components"], index=["θh", "θH", "θN"])
    return df
def restore(self):
    """Reset the variance-component weights to the neutral [1, 1, 1] so a
    later estimation ignores any previously computed components."""
    self.thita = [[1, 1, 1]]
def save_components_to_csv(self, df):
    """Write the variance components and the current weight matrix to
    Components_Results.csv.

    Parameters:
        df: DataFrame of component values, as returned by
            variance_component().
    """
    # One file handle replaces the original open('w') + open('a') pair;
    # the produced file content is identical.
    with open('Components_Results.csv', 'w') as out:
        out.write("Weights Matrix")
        out.write("\n")
        np.savetxt(out, np.diag(self.weights), delimiter="\t")
        df.to_csv(out, header=True, sep="\t")
def cross_validation(self):
    """Leave-one-out cross validation of the fitted model.

    For each point, refits the model on the remaining points with
    estimation() and compares a predicted orthometric height against a
    reference value. Returns the list of prediction errors.
    """
    method = self.method
    cut_off = self.cut_off
    # Keep pristine copies so the instance arrays can be restored at the end.
    initial_h = np.copy(self.h)
    initial_H = np.copy(self.H)
    initial_N = np.copy(self.N)
    initial_fl = np.copy(self.fl)
    accuracy = []
    # NOTE(review): the range stops at len-1, so the last point is never
    # left out -- confirm whether that is intentional.
    for i in range(0, len(self.h)-1):
        working_h = np.copy(initial_h)
        working_H = np.copy(initial_H)
        working_N = np.copy(initial_N)
        working_fl = np.copy(initial_fl)
        # Remove row i from every observation array.
        self.h = np.delete(working_h, i, 0)
        self.H = np.delete(working_H, i, 0)
        self.N = np.delete(working_N, i, 0)
        self.fl = np.delete(working_fl, i, 0)
        # NOTE(review): these reads happen AFTER row i was deleted, so
        # self.H[i, 0] (and the self.h/self.N/self.fl reads below) refer to
        # the original point i+1, not the point that was left out -- this
        # looks like an off-by-one; confirm against the author's intent.
        true_H = self.H[i, 0]
        self.estimation(method, cut_off)
        # Invert the selected model to predict H from h, N (and fl).
        if method == 1:
            predicted = (self.h[i, 0] - self.N[i, 0] - self.x[0] - self.x[2] * self.N[i, 0]) / (1 + self.x[1])
        elif method == 2:
            predicted = (self.h[i, 0] - self.N[i, 0] - self.x[0] - self.x[1] * self.N[i, 0])
        elif method == 3:
            predicted = (self.h[i, 0] - self.N[i, 0] - self.x[0]) / (1 + self.x[1])
        elif method == 4:
            predicted = self.h[i, 0] - self.N[i, 0] - self.x[0] - self.x[1] * self.fl[i, 0] - self.x[2] * self.N[i, 0]
        accuracy.append((predicted - true_H)[0])
    # Restore the full data set on the instance.
    self.h = np.copy(initial_h)
    self.H = np.copy(initial_H)
    self.N = np.copy(initial_N)
    self.fl = np.copy(initial_fl)
    return accuracy
if __name__ == "__main__":
    # Example run: load the bundled sample data, fit model 4 and plot.
    start = Computations()
    start.read_fl("example_data/fl.csv")
    start.read_H("example_data/H_ortho.csv")
    start.read_h("example_data/h_data.csv")
    start.read_N("example_data/N_egm.csv")
    results = start.estimation(4)
    print(results)
    start.plot()
    # print(start.weights)
    # results = start.variance_component(1)
    # print(results)
    # results = start.estimation(1)
    # print(results)
    # print(np.mean(start.initial))
    # print(np.std(start.initial))
    # print(np.mean(start.measurements_estimation))
    # print(np.std(start.measurements_estimation))
    # start.save_components_to_csv(results)
    # start.plot()
    # print(results)
|
import tweepy
import json
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from nltk.twitter import Twitter
# SECURITY WARNING: hard-coded Twitter API credentials checked into source.
# These values are now public and must be revoked/rotated; load secrets from
# environment variables or a config file excluded from version control.
consumer_key = "zkQZ4djk5UP7wVXseob8jJ6Vm"
consumer_secret = "umeeVPom6lC32sCthGcu8k1lsbAdEVKxUaHp2KtDUxb5VZAcnb"
access_token = "2255553137-uGEGeqc9lqYUQaYBwmzh4fJQHcGVbLDlH0d7FPF"
access_token_secret = "4f7PNLX4Hsnxcugc5BZLpVh4qfajWhHRV9Pt2MHFMc9UO"
def tweetApiCall():
    """Authenticate against the Twitter API and search for 100 tweets
    matching "Run".

    NOTE(review): the search result is only bound to locals (tweets2 /
    tweets) and the function returns None, so nothing outside can use the
    data. makeTxt/makeJson below reference those names, which only works
    if they are nested here (indentation was reconstructed on that
    assumption) -- and even then they are defined but never invoked.
    Confirm the intended data flow.
    """
    auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
    auth.set_access_token(access_token,access_token_secret)
    # JSONParser makes api.search return plain dicts instead of models.
    api = tweepy.API(auth,parser=tweepy.parsers.JSONParser())
    tweets2 = api.search(["Run"],count=100)
    # NOTE(review): api2 is created but never used.
    api2 = tweepy.API(auth)
    tweets = tweets2
    def makeTxt(filename):
        # Append the text of every tweet to *filename*.
        # NOTE(review): with JSONParser active, tweets are dicts, so
        # tweet.text would fail -- confirm which parser is intended.
        with open(filename,"w") as file:
            for tweet in tweets:
                file.write(tweet.text)
    def makeJson(filename):
        # Serialize the raw search response to *filename* as JSON.
        with open(filename,"w") as json_file:
            print(type(tweets2))
            json.dump(tweets2,json_file)
def loadJson(filename):
    """Read *filename* and return its deserialized JSON content."""
    with open(filename) as fh:
        return json.load(fh)
def getAllUniqueHashtags(dicto):
    """Count hashtag occurrences in a Twitter search response.

    Parameters:
        dicto: parsed search response with a "statuses" list, each status
            carrying entities.hashtags[*].text.
    Returns:
        dict mapping hashtag text -> occurrence count.
    """
    uniques = {}
    # Iterate the structures directly instead of repeated deep indexing;
    # dict.get collapses the original membership-test/update branches.
    for status in dicto["statuses"]:
        for hashtag in status["entities"]["hashtags"]:
            text = hashtag["text"]
            uniques[text] = uniques.get(text, 0) + 1
    return uniques
def getFromTxt():
    """Load data.txt, tokenize it, drop stop words and print the five most
    common remaining words."""
    raw_text = getData("data.txt")
    cleaned = removeStopWords(tokenize(raw_text))
    print(getNMostCommon(cleaned, 5))
def getData(filename):
    """Return the full contents of *filename* as one string."""
    with open(filename) as fh:
        return fh.read()
def tokenize(stringen):
    """Tokenize *stringen* with NLTK, keeping only alphabetic tokens,
    lower-cased."""
    tokens = []
    for token in nltk.word_tokenize(stringen):
        if token.isalpha():
            tokens.append(token.lower())
    return tokens
def removeStopWords(word_tokens):
    """Return *word_tokens* without English stop words and a few common
    punctuation/URL artefacts."""
    extra = [":",",",":","https",".","@","!","#","´",";"]
    stop_words = set(stopwords.words("english")).union(extra)
    return [w for w in word_tokens if w not in stop_words]
def getNMostCommon(filtered_sentence,n):
    """Return the *n* most frequent tokens as (token, count) pairs.

    Uses collections.Counter instead of nltk.FreqDist: FreqDist is a
    Counter subclass, so most_common() behaves identically, and this drops
    the heavyweight NLTK dependency for a pure counting task.
    """
    from collections import Counter
    return Counter(filtered_sentence).most_common(n)
def getNMostCommonFiltered(word_tokens,n):
    """Drop stop words from *word_tokens*, then return the n most common."""
    filtered = removeStopWords(word_tokens)
    return getNMostCommon(filtered, n)
def getNMostCommonHashtags(n,dicto):
    """Pop the *n* highest-count hashtags out of *dicto* and return them.

    Mutates *dicto* (the selected keys are deleted) and prints each
    selected hashtag. Raises if n exceeds the number of entries.
    """
    top = {}
    while len(top) < n:
        leader = max(dicto, key=dicto.get)
        print(leader)
        top[leader] = dicto.pop(leader)
    return top
def main():
    """Placeholder entry point; performs no observable work."""
    _placeholder = 5


main()
|
from django.conf.urls.defaults import *
# NOTE(review): django.conf.urls.defaults and patterns() were deprecated in
# Django 1.4 and removed in 1.6/1.8, so this module targets a legacy Django
# release; porting requires a plain list of url()/path() entries.
urlpatterns = patterns('notificaciones.views',
    # url(r'^name/$', 'name', name='name'),
)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.views.generic import ListView
# Create your views here.
from players.models import Player
class PlayerList(ListView):
    """Generic list view over all Player objects (ListView defaults for
    template name and context)."""
    model = Player
def index(request):
    """Render index.html with the total number of players in context."""
    # .count() issues SELECT COUNT(*) directly; .all().count() is redundant.
    num_players = Player.objects.count()
    return render(
        request,
        'index.html',
        context={'num_players': num_players}
    )
|
# -*- coding: utf-8 -*-
import MySQLdb
from eod_aps.model.server_constans import ServerConstant
from eod_aps.model.instrument import Instrument
from decimal import Decimal
from eod_aps.tools.getConfig import getConfig
def test():
    """One-off Python 2 maintenance script: replay 'replace into' SQL
    statements found in a screen-log file into the MySQL 'common' database.

    NOTE(review): several oddities to confirm --
    * line.index('replace into') raises ValueError when the marker is
      absent; the broad except at the bottom silently swallows it;
    * 'if replace_index == 0: continue' skips lines that START with the
      marker, which looks inverted;
    * 'break' stops after the first matching line is processed.
    """
    # host_server_model = ServerConstant().get_server_model('host')
    # session = host_server_model.get_db_session('om')
    # file_path = 'E:/report/screenlog_MainFrame_20170822-084557_8kC000ee.log'
    #
    # with open(file_path) as fr:
    #     for line in fr.readlines():
    #         replace_index = line.index('replace into')
    #         if replace_index == 0:
    #             continue
    #         sql_str = line[replace_index:]
    #         print 'sql:', sql_str
    #         session.execute(sql_str)
    #         break
    # session.commit()
    # host_server_model.close()
    cfg_dict = getConfig()
    try:
        conn = MySQLdb.connect( \
            host=cfg_dict['host'], user=cfg_dict['db_user'], passwd=cfg_dict['db_password'], \
            db='common', charset='utf8')
        print 'db ip:', cfg_dict['host']
        cursor = conn.cursor()
        file_path = 'E:/report/screenlog_MainFrame_20170822-084557_8kC000ee.log'
        with open(file_path) as fr:
            for line in fr.readlines():
                replace_index = line.index('replace into')
                if replace_index == 0:
                    continue
                sql_str = line[replace_index:]
                # Rebuild the VALUES list, dropping rows that contain the
                # sentinel 'not-a-date-time'.
                sql_title, sql_value_info = sql_str.split('values')
                sql_items = sql_value_info.split('),(')
                index = 0
                vaLue_list = []
                for sql_item in sql_items:
                    if 'not-a-date-time' in sql_item:
                        continue
                    sql_item = sql_item.replace('(', '').replace(')', '')
                    vaLue_list.append('(%s)' % sql_item)
                print len(vaLue_list)
                sql_str = '%s values %s' % (sql_title, ','.join(vaLue_list))
                cursor.execute(sql_str)
                break
        # sql_str = """
        # replace INTO om.`order`
        # (`ID`, `SYS_ID`,`ACCOUNT`,`SYMBOL`,`DIRECTION`,`TYPE`,`TRADE_TYPE`,`STATUS`,`OP_STATUS`,`PROPERTY`,`CREATE_TIME`,`TRANSACTION_TIME`,`USER_ID`,`STRATEGY_ID`,`PARENT_ORD_ID`,`QTY`,`PRICE`,`EX_QTY`,`EX_PRICE`,`HEDGEFLAG`,ALGO_TYPE) values
        # ('8kE01163','','198800888077-TS-xhhm02-','601111 CG',1, 1, 0, 2, 2, 0, '2017-08-22 13:01:01.707253', '2017-08-22 13:07:31.183752', 'xhhm02', 'Event_Real.Earning_01', '8kC0003a', 100, 9.11, 100, 9.11, 0, 0)
        # """
        # cursor.execute(sql_str)
        cursor.close()
        conn.commit()
        conn.close()
    # NOTE(review): broad catch-and-print hides all failures, including the
    # ValueError from line.index above.
    except Exception, e:
        print e
if __name__ == '__main__':
    # Run the one-off import when executed directly.
    test()
|
from Start_up import *
from Bullet import Bullet
from Upgrades import*
class HealthBar:
    """Green health bar with a grey backing surface, sized at 20 px per
    point of the tracked player's health."""

    def __init__(self, player):
        self.x = 100
        self.y = 10
        self.player = player
        self.bar = pygame.Surface((20 * self.player.health, 10))
        self.bar.fill((0, 255, 0))
        # Backing is slightly wider/taller so it shows as a border.
        self.bar_back = pygame.Surface((21 * self.player.health, 20))
        self.bar_back.fill((50, 50, 50))

    def reset(self, player):
        """Point the bar at a (new) player and rebuild the green bar."""
        self.player = player
        self.bar = pygame.Surface((20 * self.player.health, 10))
        self.bar.fill((0, 255, 0))

    def update_health(self):
        """Resize the green bar to match the player's current health."""
        print("update")
        self.bar = pygame.Surface((20 * self.player.health, 10))
        self.bar.fill((0, 255, 0))

    def display(self):
        """Blit the backing first, then the bar, onto the main surface."""
        main_s.blit(self.bar_back, (self.x - 5, self.y - 5))
        main_s.blit(self.bar, (self.x, self.y))
class Player(pygame.sprite.Sprite):
    """The player sprite: a 20x20 square that moves vertically, shoots
    bullets, collects packages/upgrades and tracks money and health."""

    def __init__(self):
        # BUG FIX: the Sprite base initializer was never called, which
        # breaks pygame.sprite.Group membership for this sprite.
        pygame.sprite.Sprite.__init__(self)
        self.surface = pygame.Surface((20, 20))
        self.surface.fill(main_theme)
        self.rect = self.surface.get_rect()
        self.rect.x = 10
        self.rect.y = height/2
        self.dy = 0
        # Gun cooldown state.
        self.cool = True
        self.cool_counter = 0
        self.bullets_used = 0
        self.money = 0
        self.health = 10
        self.alive = True
        self.hit = False
        self.health_update = False
        # Upgrade levels: indices into the tables imported from Upgrades.
        self.s_upgrade = 0
        self.m_upgrade = 0
        self.g_s_upgrade = 0
        self.g_p_upgrade = 0
        self.speed = upgrades_speed[self.s_upgrade]
        self.money_collection = upgrades_money_collection[self.m_upgrade]
        self.cool_time = upgrades_gun_speed[self.g_s_upgrade]
        self.bullet_power = upgrades_gun_power[self.g_p_upgrade]

    def reset(self):
        """Return the player to its new-game state."""
        self.surface = pygame.Surface((20, 20))
        self.surface.fill(main_theme)
        self.rect = self.surface.get_rect()
        self.rect.x = 10
        self.rect.y = height/2
        self.dy = 0
        self.cool = True
        self.cool_counter = 0
        self.bullets_used = 0
        self.money = 0
        self.health = 10
        self.alive = True
        # Consistency fix: also clear the transient flags that __init__
        # initializes; previously they kept their stale values on reset.
        self.hit = False
        self.health_update = False
        self.s_upgrade = 0
        self.m_upgrade = 0
        self.g_s_upgrade = 0
        self.g_p_upgrade = 0
        self.reload_upgrades()

    def reload_upgrades(self):
        # set all player variables to the relevant upgrades
        self.speed = upgrades_speed[self.s_upgrade]
        self.money_collection = upgrades_money_collection[self.m_upgrade]
        self.cool_time = upgrades_gun_speed[self.g_s_upgrade]
        self.bullet_power = upgrades_gun_power[self.g_p_upgrade]

    def get_coins(self, coins):
        # multiply all coins by the money collection upgrade
        self.money += coins * self.money_collection

    def check_collide(self, bullet_list):
        """Take damage from enemy bullets that hit the player; hit bullets
        are removed from bullet_list."""
        self.hit = False
        # Walk the list back-to-front so deletions do not shift the
        # indices still to be visited.
        for x in range(0, len(bullet_list)):
            # only bullets shot by an enemy can damage the player
            if bullet_list[len(bullet_list) - x - 1].shot_from == "Enemy":
                if pygame.sprite.collide_rect(self, bullet_list[len(bullet_list) - x - 1]):
                    # each enemy bullet deals one point of damage
                    self.health -= 1
                    self.hit = True
                    # BUG FIX: <= 0 instead of == 0, so a multi-point hit
                    # can never skip past zero and leave the player alive.
                    if self.health <= 0:
                        self.alive = False
                    # remove the bullet
                    del bullet_list[len(bullet_list) - x - 1]

    def shoot(self, bullet_list):
        """Fire a bullet if the gun is cool; returns the bullet list."""
        # check the gun can shoot
        if self.cool:
            # create a new bullet and add it to the games bullet list
            new_bullet = Bullet(self.rect.center, self.bullet_power, "Player")
            bullet_list.append(new_bullet)
            # set the gun to not able to shoot
            self.cool = False
            self.bullets_used += 1
        return bullet_list

    def move(self, direction):
        # move the player by a direction (1 or -1) and speed
        self.dy = direction * self.speed

    def check_cool_down(self):
        # allow the gun to run or add to the cool counter
        if not self.cool:
            self.cool_counter += 1
            if self.cool_counter > self.cool_time:
                self.cool = True
                self.cool_counter = 0

    def collect_package(self, item, note_controller, health_bar):
        # determine the package picked up:
        # SU = Speed Upgrade
        # GSU = Gun Speed Upgrade
        # GPU = Gun Power Upgrade
        # MCU = Money Collection Upgrade
        # M = Money
        # H = Health
        # then check that the player does not have the max amount of upgrades
        # if no then add the upgrade and reload the player stats
        # create relevant notes to display
        if item == "SU":
            if self.s_upgrade < len(upgrades_speed) - 1:
                self.s_upgrade += 1
                note_controller.add_note("Speed + 1", main_theme)
            else:
                note_controller.add_note("Speed is maxed", main_theme)
        elif item == "GSU":
            if self.g_s_upgrade < len(upgrades_gun_speed) - 1:
                self.g_s_upgrade += 1
                note_controller.add_note("Gun speed + 1", main_theme)
            else:
                note_controller.add_note("Gun speed is maxed", main_theme)
        elif item == "GPU":
            # BUG FIX: bounds-check against upgrades_gun_power (the table
            # actually indexed) rather than price_gun_power, matching the
            # pattern of every other upgrade branch.
            if self.g_p_upgrade < len(upgrades_gun_power) - 1:
                self.g_p_upgrade += 1
                note_controller.add_note("Gun power + 1", main_theme)
            else:
                note_controller.add_note("Gun power is maxed", main_theme)
        elif item == "MCU":
            if self.m_upgrade < len(upgrades_money_collection) - 1:
                note_controller.add_note("Money collection + 1", main_theme)
                self.m_upgrade += 1
            else:
                note_controller.add_note("Money collection is maxed", main_theme)
        elif item == "M":
            random_amount = random.randint(10, 50)
            note_controller.add_note("+ " + str(random_amount * self.money_collection) + " coins", main_theme)
            self.money += random_amount
        elif item == "H":
            if self.health < 10:
                self.health += 1
                note_controller.add_note("1 health point restored", main_theme)
                health_bar.update_health()
            else:
                note_controller.add_note("Health is at max", main_theme)
        self.reload_upgrades()

    def check_package_collide(self, package_list, note_controller, health_bar):
        # check if a package collides with the player and remove it from the package_list
        for i in range(0, len(package_list)):
            if pygame.sprite.collide_rect(self, package_list[len(package_list) - i - 1]):
                self.collect_package(package_list[len(package_list) - i - 1].holds, note_controller, health_bar)
                del package_list[len(package_list) - i - 1]

    def update(self, package_list, note_controller, bullet_list, health_bar):
        # update the player object
        self.check_package_collide(package_list, note_controller, health_bar)
        self.check_cool_down()
        self.check_collide(bullet_list)
        if self.money < 0:
            self.money = 0
        self.rect.y += self.dy
        self.dy = 0

    def display(self):
        main_s.blit(self.surface, (self.rect.x, self.rect.y))
|
from django.db import models
from datetime import datetime
# Create your models here.
class Storage(models.Model):
    """A bookmarked resource, categorized as website / software / other."""
    # Category code: wz = website, rj = software, qt = other.
    degree = models.CharField(verbose_name="分类", choices=(("wz", "网站"), ("rj", "软件"), ("qt", "其他")), max_length=4)
    # BUG FIX: pass the callable, not its result. datetime.now() was
    # evaluated once at import time, stamping every new row with the server
    # start time instead of the insertion time.
    add_time = models.DateTimeField(verbose_name="存储时间", default=datetime.now)
    url_path = models.CharField(verbose_name="存储地点", max_length=150)
    explain = models.TextField(verbose_name="简介")

    class Meta:
        verbose_name = "收藏"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.degree
class CommonTools(models.Model):
    """A frequently-used tool bookmark: display name plus source URL."""
    name = models.CharField(verbose_name="工具名称", max_length=15)
    url_path = models.CharField(verbose_name="工具来源", max_length=150)

    class Meta:
        verbose_name = "常用工具"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
|
# MicroPython script: capture one frame from a FLIR Lepton thermal camera
# and stream it pixel-by-pixel over UART1 at 9600 baud.
from pyb import UART
from lepton import UART  # noqa: shown as-is below
|
import sqlite3
def conexion():
    """Open (creating it if needed) and return the application's SQLite
    database connection."""
    return sqlite3.connect('Base_de_Datos.db')
def data_base_Asistencias(conn):
    """Create the attendance table.

    Adds IF NOT EXISTS so repeated calls are harmless, matching the other
    table-creation helpers in this module (db_alumno / db_alumno2), which
    previously differed: this one raised OperationalError on a second call.
    """
    cursor = conn.cursor()
    cursor.executescript("""
    CREATE TABLE IF NOT EXISTS "Asistencias" (
    "ID_Alumno" INTEGER NOT NULL,
    "Nombre" TEXT,
    "Apellido" TEXT,
    "Fechas" TEXT,
    "Materias" TEXT,
    "Clases" INTEGER,
    PRIMARY KEY("ID_Alumno" AUTOINCREMENT)
    )
    """)
def data_base_Matriculas(conn):
    """Create the tuition-payments table.

    Adds IF NOT EXISTS so repeated calls are harmless, consistent with
    db_alumno / db_alumno2 (the original raised on a second call).
    """
    cursor = conn.cursor()
    cursor.executescript("""
    CREATE TABLE IF NOT EXISTS "Matriculas" (
    "PagoAnual" INTEGER,
    "PagoSemanal" INTEGER,
    "MontodelPago" REAL,
    "RegistrodelaPaga" TEXT
    )
    """)
def db_alumno2(conn):
    """Create the legacy [registro] student-registry table when missing."""
    ddl = """
    CREATE TABLE IF NOT EXISTS [registro] (
    [id] INTEGER NOT NULL PRIMARY KEY,
    [nombre] VARCHAR(15) NULL,
    [apellido] VARCHAR(15) NULL,
    [dni] VARCHAR(15) NULL,
    [fechadenacimiento] VARCHAR(15) NULL,
    [ciudad] VARCHAR(15) NULL,
    [fechadesalida] VARCHAR(15) NULL,
    [direccion] VARCHAR(15) NULL,
    [correo] VARCHAR(15) NULL,
    [telefono] VARCHAR(15) NULL
    )"""
    conn.cursor().executescript(ddl)
def db_alumno(conn):
    """Create the [alumnos] student table when it does not already exist."""
    ddl = """
    CREATE TABLE IF NOT EXISTS [alumnos] (
    [idAlumno] INTEGER NOT NULL PRIMARY KEY,
    [nombre] VARCHAR(15) NULL,
    [apellido] VARCHAR(15) NULL,
    [dni] VARCHAR(15) NULL,
    [correo] VARCHAR(15) NULL,
    [numTel] VARCHAR(15) NULL,
    [fechaNacimiento] VARCHAR(15) NULL,
    [ciudad] VARCHAR(15) NULL,
    [direccion] VARCHAR(15) NULL,
    [codigoPostal] VARCHAR(15) NULL,
    [fechaIngreso] VARCHAR(15) NULL,
    [estado] VARCHAR(10) NULL
    )"""
    conn.cursor().executescript(ddl)
from collections import deque
# Combine string fragments into colour names: repeatedly join the first and
# last fragments (in both orders); a failed fragment loses its last character
# and is reinserted in the middle of the deque.
text = deque(input().split())
main_colors = ["red", "yellow", "blue"]
secondary_colors = ["orange", "purple", "green"]
secondary_colors_conditions = {
    "orange": ["red", "yellow"],
    "purple": ["red", "blue"],
    "green": ["yellow", "blue"]
}
collected_colors = []
while text:
    first = text.popleft()
    if text:
        second = text.pop()
    else:
        second = ""
    # Try first+second, then second+first, exactly as the original did.
    for candidate in (first + second, second + first):
        if candidate in main_colors or candidate in secondary_colors:
            collected_colors.append(candidate)
            break
    else:
        # No match: trim one character off each fragment and reinsert the
        # non-empty remainders at the middle (first's remainder first).
        for leftover in (first[:-1], second[:-1] if second else ""):
            if leftover:
                text.insert(len(text) // 2, leftover)
# A secondary colour is only kept when both of its primary components were
# collected too.
# BUG FIX: iterate over a copy -- removing from the list while iterating it
# skipped the element immediately after each removal, so some invalid
# secondary colours survived the filter.
for colour in collected_colors[:]:
    if colour in secondary_colors:
        required = secondary_colors_conditions[colour]
        if not (required[0] in collected_colors and required[1] in collected_colors):
            collected_colors.remove(colour)
print(collected_colors)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
from math import log10
class Mapper:
    """Hadoop-streaming mapper (Python 2): reads "<id>\t<follower_count>"
    lines from stdin and emits one "<bucket>\t1" line per record, where the
    bucket is the decimal-digit count of (followers - 1); exactly 1
    follower maps to bucket 0."""
    def run(self):
        data = self.readInput()
        for cur_id, follow1 in data:
            follow = int(follow1)
            if follow == 1:
                group = 0
            else:
                group = int(log10(follow-1))
            print('%d\t1' % group)
    def readInput(self):
        # BUG FIX: the original called line.encode("utf8") on a Python 2
        # byte string, which implicitly ascii-decodes first and raises
        # UnicodeDecodeError on non-ASCII input. Decode with unicode(...)
        # the same way Reducer.readInput and Combiner.readInput do.
        for line in sys.stdin:
            yield unicode(line, 'utf8').strip().split('\t', 1)
class Reducer:
    """Hadoop-streaming reducer (Python 2): sums the per-bucket '1' counts
    emitted by the mapper and prints one '<range label>\t<total>' line per
    bucket. Relies on the streaming framework delivering input sorted by
    key."""
    def run(self):
        sys.stderr.write('reporter:status:Reducer started\n')
        data = self.readInput()
        # Single pass over sorted input; -1 marks "no group seen yet".
        last_group = -1
        last_count = 0
        for group1, one in data:
            group = int(group1)
            if last_group == group:
                last_count += int(one)
            else:
                if last_group != -1:
                    print(self.makestring(last_group)+'\t'+str(last_count))
                last_group = group
                last_count = int(one)
        # Flush the final group. NOTE(review): on completely empty input
        # this still prints one bogus line for group -1.
        print(self.makestring(last_group)+'\t'+str(last_count))
    def readInput(self):
        # unicode() is Python 2 only; this script targets Python 2.
        for line in sys.stdin:
            yield unicode(line, 'utf8').strip().split('\t', 1)
    def makestring(self, k):
        # Render bucket k as a range label: k=0 -> "[1, 10]",
        # k=1 -> "(10, 100]", etc.
        if k == 0:
            s = "["
        else:
            s = "("
        s = s + str(10**k)+", " + str(10**(k+1))+"]"
        return s
class Combiner:
    """Local combiner (Python 2): same aggregation as Reducer, but emits
    raw numeric bucket keys instead of formatted range labels so the real
    reducer can keep summing them."""
    def run(self):
        # BUG FIX: the status message said 'Reducer started' (copy-paste
        # from Reducer.run); report the correct stage name.
        sys.stderr.write('reporter:status:Combiner started\n')
        data = self.readInput()
        last_group = -1
        last_count = 0
        for group1, one in data:
            group = int(group1)
            if last_group == group:
                last_count += int(one)
            else:
                if last_group != -1:
                    print(str(last_group)+'\t'+str(last_count))
                last_group = group
                last_count = int(one)
        print(str(last_group)+'\t'+str(last_count))
    def readInput(self):
        for line in sys.stdin:
            yield unicode(line, 'utf8').strip().split('\t', 1)
if __name__ == "__main__":
    # Dispatch on the streaming role given as the first CLI argument;
    # unknown roles do nothing, exactly like the original if/elif chain.
    role = sys.argv[1]
    workers = {"map": Mapper, "reduce": Reducer, "combine": Combiner}
    if role in workers:
        workers[role]().run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.