text stringlengths 38 1.54M |
|---|
#coding: utf-8
# Timing driver: print the start time, run the Fibonacci routine, print the
# end time. Python 2 (print statements).
# NOTE(review): Fibonacci and getTime are presumably provided by the
# utils.utils star-import — confirm against that module.
from utils.utils import *
import numpy as np
print "start::", getTime()
# Fibonacci.fib()
Fibonacci.fib(7,1,1)
print "end::", getTime()
|
# -*- coding:gb2312 -*-
import time
import logging
import random
import os
import threading
import sys
import getpass
import re
import string
import datetime
import traceback
import codecs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#递归删除路径下文件和文件夹
def delete_file_folder(src):
    """Recursively remove *src*.

    A plain file is unlinked; a directory has each entry removed
    recursively and is then deleted itself. Paths that are neither
    are silently ignored.
    """
    if os.path.isfile(src):
        os.remove(src)
    elif os.path.isdir(src):
        for entry in os.listdir(src):
            delete_file_folder(os.path.join(src, entry))
        os.rmdir(src)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#############################################################################################
#def main():
if __name__ == "__main__":
    # Generates C# "Text" property wrappers, one per control name listed in
    # config.ini, and writes them to new.txt.
    # Step 0: directory of this script, e.g. D:\FHBD\xx\ (note the
    # hard-coded Windows path separator).
    rootDir = os.path.dirname(os.path.realpath(sys.argv[0] )) +'\\'
    if not os.path.exists(rootDir):
        print('程序退出,根路径不存在:' + rootDir)  # "exiting: root path does not exist"
        sys.exit(1)
    filePath = 'config.ini'  # input: one control name per line
    fileName = 'new.txt'     # output: generated C# property blocks
    i = 0;
    # Read the control configuration file.
    with codecs.open(filePath, 'r', 'utf-8') as r:
        lines = r.readlines()
    # Emit one C# Text property per control name (CRLF endings for C# files).
    with codecs.open(fileName,'w', 'utf-8') as writeFile:
        for sLine in lines:
            i = i + 1
            sLine = sLine.strip()  # drop surrounding whitespace / newline
            writeFile.write(' public string Text' + str(i) +'\r\n')
            writeFile.write(' {\r\n')
            writeFile.write(' get { return ' + sLine + '.Text; }\r\n')
            writeFile.write(' set { ' + sLine + '.Text = value; }\r\n')
            writeFile.write(' }\r\n')
            writeFile.write('\r\n')  # blank line between properties
    #time.sleep(1)
    a=input("输入任意内容退出!")  # "type anything to exit" — keeps console open
    sys.exit(0)
|
#! /usr/bin/env python3
# Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.
#
# Parts of this code were adapted from https://www.fullstackpython.com/blog/first-steps-gitpython.html
# available under MIT License: https://github.com/mattmakai/fullstackpython.com/blob/master/LICENSE
# Copyright (c) 2017 Matthew Makai
"""git_helpers.py : Contains classes and helper funcs for build automation """
__author__ = "AMD Research"
__copyright__ = "Copyright 2019"
import os
from git import Repo
def print_repo(repo):
    """Print a short debug summary of *repo*: description, remotes, HEAD commit.

    Adapted from https://www.fullstackpython.com/blog/first-steps-gitpython.html
    """
    summary = [f'Repo description: {repo.description}']
    # print(f'Repo active branch is {repo.active_branch}')
    summary.extend(f'Remote named "{remote}" with URL "{remote.url}"'
                   for remote in repo.remotes)
    summary.append(f'Last commit for repo is {repo.head.commit.hexsha}.')
    for line in summary:
        print(line)
def print_commit(commit):
    """Print a one-commit debug record: hash, message, author, date, stats.

    Adapted from https://www.fullstackpython.com/blog/first-steps-gitpython.html
    """
    record = (
        '----',
        str(commit.hexsha),
        f"\"{commit.summary}\" by {commit.author.name} ({commit.author.email})",
        commit.authored_datetime,
        f"count: {commit.count()} and size: {commit.size}",
    )
    for item in record:
        print(item)
def checkout_commit(commit, repo_dir):
    """Open the repo at *repo_dir*, check out *commit* and return the Repo."""
    repo = Repo(repo_dir)
    if repo.bare:
        # Bare repos have no working tree to check out into.
        print(f'Could not load repository at {repo_dir}')
        return repo
    print(f'Repo at {repo_dir} successfully loaded.')
    print_repo(repo)
    repo.git.checkout(commit, '--recurse-submodules')
    return repo
def list_commits_for_branch(branch, repo_dir):
    """Return the commits on *branch* of the repo at *repo_dir*, newest first.

    Returns None when the repository is bare or the branch does not exist;
    each commit found is also printed for debugging.
    """
    repo = Repo(repo_dir)
    if not repo.bare:
        print(f'Repo at {repo_dir} successfully loaded.')
        print_repo(repo)
        # BUG FIX: the original test was inverted — it reported "Unable to
        # find" and returned None precisely when the branch WAS present.
        if branch not in [str(i) for i in repo.branches]:
            print(f'Unable to find {branch} in repo')
            return None
        # Return list of commits from branch, most recent first.
        commits = list(repo.iter_commits(branch))
        for commit in commits:
            print_commit(commit)
        return commits
    else:
        print(f'Could not load repository at {repo_dir}')
        return None
def reset_to_commit(commit, repo_dir):
    """Hard-reset the repo at *repo_dir* (index + working tree) to *commit*."""
    repo = Repo(repo_dir)
    if repo.bare:
        print(f'Could not load repository at {repo_dir}')
        return repo
    print(f'Repo at {repo_dir} successfully loaded.')
    print_repo(repo)
    repo.head.reset(commit=commit.hexsha, index=True, working_tree=True)
    return repo
def clone_repo(repo_addr, clone_in_dir, branch_or_commit):
    """Clone *repo_addr* into *clone_in_dir*, then check out *branch_or_commit*.

    Submodules are fetched after checkout; a bare clone is returned untouched.
    """
    repo = Repo.clone_from(repo_addr, clone_in_dir)
    print_repo(repo)
    if repo.bare:
        print(f'Bare repo {repo_addr}, unable to checkout')
        return repo
    print(f'Checking out Repo {repo_addr} to {branch_or_commit}')
    repo.git.checkout(branch_or_commit, "--recurse-submodules")
    get_submodules(repo)
    print_repo(repo)
    return repo
def get_commit_id(repo_dir):
    """Return the HEAD commit SHA of the repo at *repo_dir*, or None if bare."""
    repo = Repo(repo_dir)
    if repo.bare:
        print(f'Could not load repository at {repo_dir}')
        return None
    print(f'Repo at {repo_dir} successfully loaded.')
    print_repo(repo)
    return repo.head.commit.hexsha
def get_submodules(repo):
    """Initialise and update every submodule of *repo* (no-op for bare repos)."""
    if repo.bare:
        return
    print("Fetching Git submodules")
    for sub in repo.submodules:
        sub.update(init=True)
|
from django.conf.urls import patterns, include, url
# URL routes for the "evento" app (place/slot assignment and listings).
# NOTE(review): patterns() with dotted-string view paths was deprecated in
# Django 1.8 and removed in 1.10 — confirm the pinned Django version before
# touching this module.
urlpatterns = patterns('',
    url(r'^social/$', 'CLEI.apps.evento.views.evento_social'),
    url(r'^simultaneo/$', 'CLEI.apps.evento.views.evento_simultaneo'),
    url(r'^lugar/$', 'CLEI.apps.evento.views.nuevo_lugar'),
    url(r'^lugar/asignar/$','CLEI.apps.evento.views.asignar_lugar'),
    url(r'^lugar/asignar_simultaneo/$','CLEI.apps.evento.views.asignar_lugar_simultaneo'),
    url(r'^lista/$', 'CLEI.apps.evento.views.lista_evento'),
    url(r'^lista_asignados/$','CLEI.apps.evento.views.lista_asignados'),
    url(r'^calendario/$', 'CLEI.apps.evento.views.crear_fecha')
)
|
# #################################################### The Repository
import atexit
import sqlite3
from DAO import _Vaccines, _Suppliers, _Clinics, _Logistics
class _Repository:
def __init__(self):
self._conn = sqlite3.connect('database.db')
self.vaccines = _Vaccines(self._conn)
self.suppliers = _Suppliers(self._conn)
self.clinics = _Clinics(self._conn)
self.logistics = _Logistics(self._conn)
def _close(self):
self._conn.commit()
self._conn.close()
def totals(self):
c = self._conn.cursor()
totals = [0, 0, 0, 0]
c.execute("SELECT quantity FROM vaccines")
list_from_db = c.fetchall()
for x in list_from_db:
totals[0] = totals[0] + x[0]
c.execute("SELECT demand FROM clinics")
list_from_db = c.fetchall()
for x in list_from_db:
totals[1] = totals[1] + x[0]
c.execute("SELECT count_received, count_sent FROM logistics")
list_from_db = c.fetchall()
for x in list_from_db:
totals[2] = totals[2] + x[0]
totals[3] = totals[3] + x[1]
totals = map(str, totals) # converts each cell from int to str
return totals
def create_tables(self):
self._conn.executescript("""
CREATE TABLE vaccines (
id INTEGER PRIMARY KEY,
date DATE NOT NULL,
supplier INTEGER NOT NULL,
quantity INTEGER NOT NULL,
FOREIGN KEY (supplier) REFERENCES Suppliers(id)
);
CREATE TABLE suppliers (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
logistic INTEGER ,
FOREIGN KEY (logistic) REFERENCES Logistic(id)
);
CREATE TABLE clinics (
id INTEGER PRIMARY KEY ,
location TEXT NOT NULL,
demand INTEGER NOT NULL,
logistic INTEGER NOT NULL
);
CREATE TABLE logistics (
id INTEGER PRIMARY KEY ,
name TEXT NOT NULL,
count_sent INTEGER NOT NULL,
count_received INTEGER NOT NULL
);
""")
self._conn.commit()
# Module-level singleton: open the repository once at import time and make
# sure the connection is committed and closed when the interpreter exits.
repo = _Repository()
atexit.register(repo._close)
|
def split_and_join(line):
    """Replace the single-space separators in *line* with hyphens.

    Fixed: the original recomputed the split/join once per character of the
    input (a pointless quadratic loop) and raised UnboundLocalError for "".
    """
    return "-".join(line.split(" "))
#another way to do it
def split_and_join(line):
# write your code here
line = line.split(" ")
line = "-".join(line)
l = ""
for lett in line:
l += lett
return (l)
if __name__ == '__main__':
    # Read one line from stdin, hyphenate its spaces, and echo the result.
    line = input()
    result = split_and_join(line)
    print(result)
# -*- coding: utf-8 -*-
import csv
import glob
import os
import sys
#
# Illumina Native format of input filenames: s_(\d+)_(d+)_(\d+)_qseq.txt, where the numbers are lane, index, and tile
# index of 1 contains the sequence reads, index 2 contains the tags
#
def parse_tag_file(tag_filename):
    """Parse the sample-sheet CSV at *tag_filename* into a list of dicts.

    Important keys are 'Index' (the tag sequence), 'SampleID', and 'Lane'.

    Fixed: the original ignored *tag_filename* entirely (hard-coding
    "SampleSheet.csv"), never closed the file handle, and used the
    Python-2-only ``reader.next()``; ``next(reader)`` works on 2.6+ and 3.x.
    """
    with open(tag_filename) as handle:
        reader = csv.reader(handle)
        header = next(reader)
        return [dict(zip(header, row)) for row in reader]
def parse_qseq(file_handle):
    """Generator over sequencer reads: yields each line split on tabs.

    Field order is: MachineID, run#, lane#, tile#, x-coord, y-coord, index,
    read#, sequence, q-scores, p/f flag. The last field keeps its newline.

    Fixed: the field-name list was an unused local; it now lives in the
    docstring instead of being rebuilt on every call.
    """
    for line in file_handle:
        yield line.split('\t')
def dist(s, t):
    """Hamming distance over the first len(s) positions of *s* and *t*.

    Returns infinity when *t* is shorter than *s* (the inputs are assumed
    to be the same length in normal use).
    """
    if len(t) < len(s):
        # Mirrors the original's IndexError fallback.
        return float('inf')
    return sum(1 for a, b in zip(s, t) if a != b)
def process_tile(seq_data, tag_data, tags, max_count=100000):
    """Generator for buffered demultiplexing of large qseq files.

    Walks *seq_data* and *tag_data* in lockstep, assigns each read to the
    first tag within Hamming distance 1 (else the 'other' pool), and yields
    the accumulated pools every *max_count* reads to bound memory usage.

    Fixed: the Python-2-only ``tag_data.next()`` was replaced by the
    2.6+/3.x-compatible ``next(tag_data)``; the unused local ``seq`` was
    dropped.
    """
    def init_pools_dict():
        # One bucket per known tag, plus a catch-all for unmatched reads.
        pools = {}
        for tag in tags:
            pools[tag] = []
        pools['other'] = []
        return pools
    pools = init_pools_dict()
    count = 0
    for x in seq_data:
        y = next(tag_data)
        tag = y[8][:6]  # It's length 7 for some reason that I do not know, clip to 6.
        found = False
        for t in tags:
            # Accept at most one mismatch against a known tag.
            if dist(tag, t) < 2:
                pools[t].append(x)
                found = True
                break
        if not found:
            pools['other'].append(x)
        count += 1
        if count >= max_count:
            count = 0
            yield pools
            pools = init_pools_dict()
    # Flush whatever remains after the last full buffer.
    yield pools
def discover_filenames(data_directory):
    """Return sorted unique (lane, tile) pairs among s_*_qseq.txt files.

    Fixed: the original extracted the basename by splitting the path on '/',
    which breaks on Windows; os.path.basename is portable.
    """
    pairs = set()
    for filename in glob.glob(os.path.join(data_directory, "s_*_qseq.txt")):
        name = os.path.basename(filename)
        # Pattern: s_<lane>_<index>_<tile>_qseq.txt
        _, lane, index, tile, _ = name.split('_')
        pairs.add((lane, tile))
    return sorted(pairs)
def write_output(output_dir, lane, tile, pools):
    """Append each pool's reads to s_<lane>_<tag>_qseq.txt in *output_dir*.

    Helper for buffered output, limiting memory use on large qseq files.

    Fixed: the '%' formatting was applied to the full joined path, so a '%'
    in *output_dir* would corrupt (or crash) the filename — the name is now
    formatted before joining. The handle is also managed with ``with``.
    """
    for tag, reads in pools.items():
        path = os.path.join(output_dir, "s_%s_%s_qseq.txt" % (lane, tag))
        with open(path, 'a') as f:
            f.writelines("\t".join(d) for d in reads)
def main(argv):
    """Demultiplex qseq reads: argv = [prog, data_directory, tag1, tag2, ...].

    Python 2 (print statements below). For each (lane, tile) pair found in
    the data directory, reads the index-1 (sequence) and index-2 (tag) qseq
    files in lockstep, buckets reads by tag, appends them to per-tag output
    files, and prints the per-tag counts.
    """
    ### Tagfile "SampleSheet.csv" not available on second run so tags must be given manually
    #tag_filename, data_directory = argv[1], argv[2]
    ## Load Tags from tag info file.
    #tag_info = parse_tag_file(tag_filename)
    ## The tags are in triplicate in the file, so compress to a set.
    #tags = list(set([x['Index'] for x in tag_info]))
    data_directory = argv[1]
    #e.g. tags = ATCACG CGATGT TTAGGC TGACCA ACAGTG GCCAAT CAGATC ACTTGA GATCAG TAGCTT GGCTAC CTTGTA
    tags = argv[2:]
    # Output goes next to the input directory, suffixed "-demultiplexed".
    output_dir = data_directory + '-demultiplexed'
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # Discover data files: lanes and tiles. Not actually filenames, rather lane and tile pairs.
    filenames = discover_filenames(data_directory)
    # Field names of a qseq record, for reference (unused below).
    keys = ["MachineID", "run#", "lane#", "tile#", "x-coord", "y-coord", "index", "read#", "sequence", "q-scores", "p/f flag"]
    # Prep data for processing for each file pair
    for (lane, tile) in filenames:
        # Index '1' holds the sequence reads, index '2' the tag reads.
        seq_handle = open(os.path.join(data_directory, "s_%s_%s_%s_qseq.txt" % (lane, '1', tile)))
        tag_handle = open(os.path.join(data_directory, "s_%s_%s_%s_qseq.txt" % (lane, '2', tile)))
        seq_data = parse_qseq(seq_handle)
        tag_data = parse_qseq(tag_handle)
        # Identify tags and sort. Append to tag files. Pass optional max_counts argument to change number of reads processed per output, default = 100,000 reads per output.
        pools_gen = process_tile(seq_data, tag_data, tags)
        pool_counts = dict()
        for pools in pools_gen:
            # Accumulate per-tag totals across buffer flushes.
            for tag, value in pools.items():
                try:
                    pool_counts[tag] += len(value)
                except KeyError:
                    pool_counts[tag] = len(value)
            write_output(output_dir, lane, tile, pools)
        # Report tagging distribution.
        print lane, tile, ': '
        for tag, value in pool_counts.items():
            print tag, value
|
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
L = 1.2
h = 0.8
N = 3
I = np.array([0 for i in range(2*N)])
VDF_inv = np.array([[0 for j in range(2*N)] for i in range(2*N)])
D = 1.5
a = 0.0035
pi = np.pi
Dz = L/N
s0 = 1/160
def z_bar(i):
    """Midpoint depth of segment *i*; segments N+1..2N mirror segments 1..N."""
    if i > N:
        return z_bar(i - N)
    return -h + (L / 2) - i * Dz + (Dz / 2)
def VDF(i, j):
    """Coupling coefficient between segments *i* and *j* (1-based indices).

    Segments 1..N and N+1..2N are the two halves of the system (see z_bar).
    NOTE(review): the second term in each branch appears to be an image-charge
    contribution (z_i + z_j) — confirm against the assignment's derivation.
    """
    if ((0 < i <= N and N < j <= 2*N) or (N < i <= 2*N and 0 < j <= N)):
        # Cross-half pair: 3-D distances through the horizontal offset D.
        fraction1 = 1 / (np.sqrt((z_bar(i) - z_bar(j))**2 + D**2))
        fraction2 = 1 / (np.sqrt((z_bar(i) + z_bar(j))**2 + D**2))
        return (Dz / (4*pi*s0)) * (fraction1 + fraction2)
    else:
        # Same-half pair: 1-D |Δz| form. fraction1 (the z_i + z_j term) is
        # shared by both the diagonal and off-diagonal cases below.
        fraction1 = 1 / (abs(z_bar(i) + z_bar(j)))
        if (i == j):
            # Self term: closed-form integral of the singular kernel over a
            # segment of radius a and length Dz.
            root = np.sqrt(a**2 + (Dz/2)**2)
            log = np.log(((Dz/2) + root) / ((Dz/-2) + root))
            return (1/(4*pi*s0)) * (log + (Dz*fraction1))
        fraction2 = 1 / (abs(z_bar(i) - z_bar(j)))
        return (Dz / (4*pi*s0)) * (fraction1 + fraction2)
def F(x, y, z):
    """Potential Φ at point (x, y, z) produced by the N current segments.

    Reads the module-level current vector I; r1/r2 are distances from the
    point to segment i on the two vertical axes at x = ±D/2.
    """
    def r1(i):
        return np.sqrt((x+(D/2))**2 + y**2 + (z - z_bar(i))**2)
    def r2(i):
        return np.sqrt((x-(D/2))**2 + y**2 + (z - z_bar(i))**2)
    return 2*(Dz / (4*pi*s0)) * np.sum([(1 / r1(i) + 1 / r2(i)) * I[i - 1] for i in range(1, N + 1)])
# Vectorize so F accepts numpy arrays elementwise (needed for plotting).
F = np.vectorize(F)
def plot_a_lot1():
    """Plot the potential Φ(x, 0, 0) for x in [-3, 3] and save it as a PNG."""
    xs = np.linspace(-3, 3, 100)
    fig, ax = plt.subplots()
    ax.plot(xs, F(xs, 0, 0))
    ax.set(xlabel='x(m)', ylabel='Φ(Volt)', title='Δυναμικό Φ(x), N=' + str(N))
    ax.grid()
    fig.savefig("ask8_Fi_N=" + str(N) + ".png")
    plt.clf()
def plot_a_lot2():
    """Plot the current distribution I(z) over the first N segments, save as PNG."""
    depths = np.array([z_bar(seg) for seg in range(1, N + 1)])
    fig, ax = plt.subplots()
    ax.plot(depths, I[:N])
    ax.set(xlabel='z(m)', ylabel='I(A/m)',
           title='Γραμμική Κατανομή Ρεύματος Ι(z), N=' + str(N))
    ax.grid()
    fig.savefig("ask8_I_N=" + str(N) + ".png")
    plt.clf()
def row_sum_vdf(i):
    """Sum of row *i* of the inverted VDF matrix.

    Fixed: the original shadowed the builtin ``sum`` and looped by index;
    the row has exactly 2*N entries, so summing the whole row is equivalent.
    """
    return np.sum(VDF_inv[i])
def updateN(n):
    """Set the global segment count N (and the segment length Dz) to match *n*."""
    global N, Dz
    N = n
    Dz = L / n
# Results table ("pinakas"): a header row, then one row per tested N.
pinakas = [[]]
pinakas[0].append('N')
pinakas[0].append('Phi')
pinakas[0].append('Rg')
pinakas[0].append('F(0,0,0)')
for i in range(16):
    # N = 3 for the first run, then N = 5, 10, ..., 75.
    if (i == 0):
        updateN(3)
    else:
        updateN(5 * i)
    pinakas.append([])
    row = pinakas[i + 1]
    # Rebinds the module-level VDF_inv and I so z_bar/F/row_sum_vdf see them.
    # (The comprehension's i shadows the loop i only inside the comprehension.)
    VDF_inv = inv(
        np.array([[VDF(i, j) for j in range(1, 2*N + 1)] for i in range(1, 2*N + 1)]))
    Phi = 250 / (Dz*np.sum(VDF_inv))
    I = Phi * np.array([row_sum_vdf(i) for i in range(2*N)])
    Rg = Phi / (250)  # grounding resistance for the injected 250 A
    row.append(N)
    row.append(Phi)
    row.append(Rg)
    row.append(F(0, 0, 0))
    # if (i == 15):
    plot_a_lot1()
    plot_a_lot2()
# NOTE(review): rows have mixed lengths/types, so this builds an object
# array under recent numpy — confirm that is intended before upgrading.
pinakas = np.array(pinakas)
print(pinakas)
|
from django.db import models
from ckeditor.fields import RichTextField
class Categoria(models.Model):
    """Blog post category (activatable, with creation/publication dates)."""
    id = models.AutoField(primary_key=True)
    nombre = models.CharField('Nombre de la Categoría', max_length=100, null=False, blank=False)
    estado = models.BooleanField('Activo/No Activo', default=True)
    # auto_now_add: set once at creation and never updated afterwards.
    fecha_creacion = models.DateField('Fecha de Creación', auto_now=False, auto_now_add=True)
    fecha_publicado = models.DateField('Fecha de Publicado', blank=True, null=True)
    class Meta:
        verbose_name = 'Categoría'
        verbose_name_plural = 'Categorías'
    def __str__(self):
        return self.nombre
class Autor(models.Model):
    """Post author with optional social-media links."""
    id = models.AutoField(primary_key=True)
    nombre = models.CharField('Nombre de Autor', max_length=100, null=False, blank=False)
    apellido = models.CharField('Apellido de Autor', max_length=100, null=False, blank=False)
    # Social links are all optional.
    facebook = models.URLField('Facebook', null=True, blank=True)
    twitter = models.URLField('Twitter', null=True, blank=True)
    instagram = models.URLField('Instagram', null=True, blank=True)
    website = models.URLField('Website', null=True, blank=True)
    correo = models.EmailField('Correo Electrónico', null=False, blank=False)
    estado = models.BooleanField('Activo/No Activo', default=True)
    fecha_creacion = models.DateField('Fecha de Creación', auto_now=False, auto_now_add=True)
    fecha_publicado = models.DateField('Fecha de Publicado', blank=True, null=True)
    class Meta:
        verbose_name = 'Autor'
        verbose_name_plural = 'Autores'
    def __str__(self):
        # "First Last"
        return "{0} {1}".format(self.nombre, self.apellido)
class Post(models.Model):
    """Blog post; deleting its Autor or Categoria cascades to the post."""
    id = models.AutoField(primary_key=True)
    titulo = models.CharField('Título', max_length=90, null=False, blank=False)
    slug = models.CharField('Slug', max_length=100, null=False, blank=False)
    descripcion = models.CharField('Descripción', max_length=110, null=False, blank=False)
    # Rich HTML body edited through CKEditor.
    contenido = RichTextField()
    # Image is stored as an external URL, not an uploaded file.
    imagen = models.URLField('Imagen', max_length=255, null=False, blank=False)
    autor = models.ForeignKey(Autor, on_delete=models.CASCADE)
    categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
    estado = models.BooleanField('Publicado/No Publicado', default=True)
    fecha_creacion = models.DateField('Fecha de Creación', auto_now=False, auto_now_add=True)
    fecha_publicado = models.DateField('Fecha de Publicado', blank=True, null=True)
    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
    def __str__(self):
        return self.titulo
|
"""""
Description: This is the Main function for decision tree classification algorithm.
Uses monkdata.py, dtree.py, drawtree_qt5.py as well.
Function: Use the Machine-Learning algorithm in dtree.py to classify the unknown data set in monkdata.py
The file drawtree_qt5.py is used to draw the decision tree.
If you have any problem, DON'T HESITATE TO CONTACT ME.
Author: Yichen(Eason) Yang
Contact: yyichen@kth.se
Date: 2019-09-20
Address: Kungliga Tekniska Högskolan, Stockholm, Sweden.
"""""
import monkdata as m
import dtree as d
import statistics as st
import matplotlib.pyplot as plt
import random
import drawtree_qt5 as draw
class SplitDataSet:
    """Container for a train/test partition of a dataset.

    Fixed: removed the large block of commented-out getter/setter dead code;
    plain attribute access is the Pythonic interface here.
    """

    def __init__(self):
        # Filled in later via partition(); both splits start empty.
        self.Train = list()
        self.Test = list()
# Class definition
def partition(data, fraction):
    """Shuffle a copy of *data* and split it at len*fraction into (train, test)."""
    shuffled = list(data)
    random.shuffle(shuffled)
    cut = int(len(shuffled) * fraction)
    return shuffled[:cut], shuffled[cut:]
#Dictionary: find the index of the maximum value
#This code is copied from: https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
def key_with_maxval(di):
    """Return the dictionary key holding the largest value (first one on ties).

    Based on https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
    """
    return max(di, key=di.get)
def seek_ratio(training_set):
    """Return the train fraction (30..80, in %) with the best mean accuracy.

    For each candidate percentage, 30 random partitions are built, a tree is
    trained on each Train split and scored on the matching Test split; the
    percentage with the highest mean score wins.
    """
    mean_by_ratio = dict()
    split = SplitDataSet()
    for pct in range(30, 81, 10):
        scores = list()
        for _ in range(0, 30, 1):
            split.Train, split.Test = partition(training_set, pct / 100.0)
            tree = d.buildTree(split.Train, m.attributes)
            scores.append(d.check(tree, split.Test))
        mean_by_ratio[pct] = st.mean(scores)
    return key_with_maxval(mean_by_ratio)
def plot_graph(training_set, accuracy, xaxis):
    """Plot *accuracy* against the fraction parameter, titled by dataset."""
    plt.plot(xaxis, accuracy)
    plt.plot(xaxis, accuracy, 'ro')  # overlay red markers on the line
    plt.xlabel("Fraction Parameters: %")
    plt.ylabel("Accuracy: ")
    #plt.axis([25, 85, 0.5, 1])
    titles = ((m.monk1, "Data set: monk1.training"),
              (m.monk2, "Data set: monk2.training"),
              (m.monk3, "Data set: monk3.training"))
    for candidate, title in titles:
        if training_set == candidate:
            plt.title(title)
            break
    plt.show()
#print(list(s_dict.keys()), "AND", list(s_dict.values()))
def check_pruning(data_set):
    """Build a tree on data_set.Train and return its best-scoring pruned variant."""
    base_tree = d.buildTree(data_set.Train, m.attributes)
    scores = {pruned: d.check(pruned, data_set.Test)
              for pruned in d.allPruned(base_tree)}
    return key_with_maxval(scores)
def test_pruning_algo(train_data, test_data, ratio):
    """Partition *train_data* at *ratio*, prune on the held-out part, and
    return the pruned tree's accuracy on *test_data*."""
    split = SplitDataSet()
    split.Train, split.Test = partition(train_data, ratio)
    best_tree = check_pruning(split)
    return d.check(best_tree, test_data)
# --- Assignment driver: runs at import time. ---
print(d.entropy(m.monk1))
print(d.entropy(m.monk2))
print(d.entropy(m.monk3))
#Printout the entropy of all datasets.
a = list()
b = list()
c = list()
# Average information gain of each of the six attributes, per dataset.
for i in range(0, 6, 1):
    a.append(d.averageGain(m.monk1, m.attributes[i]))
for i in range(0, 6, 1):
    b.append(d.averageGain(m.monk2, m.attributes[i]))
for i in range(0, 6, 1):
    c.append(d.averageGain(m.monk3, m.attributes[i]))
print(a)
print(b)
print(c)
#
#Calculate and printout the information get for all properties and datasets.
#
#r = d.select(m.monk1, m.attributes[1], 2)
#for x in r:
#    print(x.attribute, "Positive:", x.positive)
# next: calculate the info gain
#To get the majority of one dataset
#print(d.mostCommon(m.monk1test))
# Full (unpruned) trees for each monk dataset, scored on train and test.
t = list()
t.append(d.buildTree(m.monk1, m.attributes))
print("Accuracy for Monk1.test", d.check(t[0], m.monk1test))
print("Accuracy for Monk1", d.check(t[0], m.monk1))
#draw.drawTree(t[0])
#print("Standard decision tree for monk1: ", t)
t.append(d.buildTree(m.monk2, m.attributes))
print("Accuracy for Monk2.test", d.check(t[1], m.monk2test))
print("Accuracy for Monk2", d.check(t[1], m.monk2))
t.append(d.buildTree(m.monk3, m.attributes))
print("Accuracy for Monk3.test", d.check(t[2], m.monk3test))
print("Accuracy for Monk3", d.check(t[2], m.monk3))
#
# Calculate the accuracy
#
#PrunSet = d.allPruned(t)
#draw.drawTree(PrunSet[13])
#Optimal_ratio = list()
#Optimal_ratio.append(seek_ratio(m.monk1)/100.0)
#Optimal_ratio.append(seek_ratio(m.monk2)/100.0)
#Optimal_ratio.append(seek_ratio(m.monk3)/100.0)
#print("Optimal ratio for monk1,2,3:", Optimal_ratio)
# Pruning evaluation: for each fraction 30..80 %, run 10 pruning trials per
# dataset and record the VARIANCE of the accuracies (mean lines left
# commented out).
Accuracy_monk1 = list()
Accuracy_monk2 = list()
Accuracy_monk3 = list()
temp_1 = list()
temp_2 = list()
temp_3 = list()
for i in range(30, 81, 10):
    for j in range(0, 10, 1):
        temp_1.append(test_pruning_algo(m.monk1, m.monk1test, i/100.0))
        temp_2.append(test_pruning_algo(m.monk2, m.monk2test, i / 100.0))
        temp_3.append(test_pruning_algo(m.monk3, m.monk3test, i/100.0))
    # Accuracy_monk1.append(st.mean(temp_1))
    # Accuracy_monk2.append(st.mean(temp_2))
    # Accuracy_monk3.append(st.mean(temp_3))
    Accuracy_monk1.append(st.variance(temp_1))
    Accuracy_monk2.append(st.variance(temp_2))
    Accuracy_monk3.append(st.variance(temp_3))
    temp_1.clear()
    temp_2.clear()
    temp_3.clear()
plot_graph(m.monk1, Accuracy_monk1, range(30, 81, 10))
plot_graph(m.monk2, Accuracy_monk2, range(30, 81, 10))
plot_graph(m.monk3, Accuracy_monk3, range(30, 81, 10))
#print("Accuracy in average for Monk1:", st.mean(Accuracy_monk1))
#print("Accuracy in average for Monk1:", st.mean(Accuracy_monk2))
#print("Accuracy in average for Monk1:", st.mean(Accuracy_monk3))
#print("Pruning decision tree for monk1: ", final_tree)
#PrunSet = d.allPruned(t)
#draw.drawTree(PrunSet[1])
|
# def find(v, x):
# if v[x] == x:
# return x
# else:
# v[x] = find(v, v[x])
# return v[x]
#
# def union(v, y, x):
# y_root = find(v, y)
# x_root = find(v, x)
# v[x_root] = v[y_root]
#
# def solution(n, computers):
# v = [_ for _ in range(n)]
# for start, row in enumerate(computers):
# for end, is_connect in enumerate(row):
# if start < end and is_connect:
# union(v, start, end)
#
# return len(set(find(v, i) for i in range(n)))
#
from collections import deque
def bfs(computers, visit, com, n):
    """Mark (in *visit*) every computer reachable from *com*.

    *computers* is an n×n adjacency matrix; *visit* is mutated in place.

    Fixed: the original popped from the RIGHT of the deque — LIFO order,
    i.e. a DFS despite the name — and only marked nodes when popped, so a
    node could sit in the queue many times. Nodes are now marked when
    enqueued and popped from the left, giving a true BFS with each node
    queued at most once. The set of marked nodes is unchanged.
    """
    queue = deque([com])
    visit[com] = 1
    while queue:
        cur = queue.popleft()
        for nxt in range(n):
            if computers[cur][nxt] and not visit[nxt]:
                visit[nxt] = 1
                queue.append(nxt)
def solution(n, computers):
    """Count connected networks among *n* computers (adjacency matrix input)."""
    visit = [0] * n
    networks = 0
    for com in range(n):
        if visit[com]:
            continue
        # Unvisited node starts a new component; flood-fill it.
        bfs(computers, visit, com, n)
        networks += 1
    return networks
# def solution(n, computers):
# v = [_ for _ in range(n)]
# for start, row in enumerate(computers):
# for end, is_connect in enumerate(row):
# if is_connect:
# v[end] = v[start]
# print(v)
# return len(set(v))
# Sample case: computers 0 and 1 are linked, 2 is isolated -> 2 networks.
n, computers = 3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]
# n, computers = 3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
# n, computers = 4, [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]
# n, computers = 4, [[1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 1], [0, 0, 1, 1]]
# n, computers = 7, [[1, 0, 0, 0, 0, 0, 1], [0, 1, 1, 0, 1, 0, 0], [0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 1, 1, 1], [1, 0, 0, 0, 0, 1, 1]]
print(solution(n, computers))
|
"""
.. module:: PeaksClustering
PeaksClustering
*************
:Description: PeaksClustering
Clusters the Peaks from an experiment all the files together
Hace un clustering de los picos de cada sensor usando el numero de clusters indicado en la
definicion del experimento y el conjunto de colores para el histograma de la secuencia del experimento
:Authors: bejar
:Version:
:Created on: 26/03/2015 8:10
"""
from collections import Counter
from operator import itemgetter
import matplotlib.pyplot as plt
from pylab import *
import seaborn as sn
from sklearn.cluster import KMeans
from kemlglearn.cluster import KernelKMeans
from Config.experiments import experiments
from util.plots import plotSignals
import warnings
from util.distances import hellinger_distance
from util.misc import compute_centroids
import argparse
warnings.filterwarnings("ignore")
__author__ = 'bejar'
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch', help="Ejecucion no interactiva", action='store_true', default=False)
    parser.add_argument('--exp', nargs='+', default=[], help="Nombre de los experimentos")
    parser.add_argument('--hellinger', help="Show Hellinger distance", action='store_true', default=False)
    args = parser.parse_args()
    lexperiments = args.exp
    if not args.batch:
        # Interactive run: ignore CLI options and use a fixed experiment.
        # 'e150514''e120503''e110616''e150707''e151126''e120511''e150514'
        args.hellinger = False
        lexperiments = ['e110906o']
    for expname in lexperiments:
        datainfo = experiments[expname]
        colors = datainfo.colors
        f = datainfo.open_experiment_data(mode='r')
        # One clustering per sensor, with the per-sensor cluster count.
        for sensor, nclusters in zip(datainfo.sensors, datainfo.clusters):
            print(sensor)
            # We only use the first file to compute the cluster
            data = datainfo.get_peaks_resample_PCA(f, datainfo.datafiles[0], sensor)
            km = KMeans(n_clusters=nclusters, n_jobs=-1)
            km.fit_predict(data)
            centroids = km.cluster_centers_
            #centroids = compute_centroids(data, km.labels_)
            lsignals = []
            cnt = Counter(list(km.labels_))  # cluster sizes
            # Order clusters by the peak value of their centroid so the
            # histograms of different files are comparable.
            lmax = []
            for i in range(km.n_clusters):
                lmax.append((i, np.max(centroids[i])))
            lmax = sorted(lmax, key=itemgetter(1))
            print('LMAX ', lmax)
            print('SHAPE ', data.shape)
            # Per-file histogram of cluster assignments (normalized).
            lhisto = []
            for ndata in datainfo.datafiles:
                dataf = datainfo.get_peaks_resample_PCA(f, ndata, sensor)
                if dataf is not None:
                    histo = np.zeros(nclusters)
                    # NOTE(review): km.predict on a single 1-D row is rejected
                    # by modern scikit-learn (needs reshape(1, -1)) — confirm
                    # the pinned sklearn version.
                    for i in range(dataf.shape[0]):
                        histo[km.predict(dataf[i])] += 1.0
                    histo /= dataf.shape[0]
                    # print(datainfo.name, ndata)
                    # print('HISTO ', histo)
                    # Reorder bins by the lmax centroid ordering.
                    histosorted = np.zeros(nclusters)
                    for i in range(histosorted.shape[0]):
                        histosorted[i] = histo[lmax[i][0]]
                else:
                    histosorted = np.zeros(nclusters)
                lhisto.append(histosorted)
            if args.hellinger:
                # RMS and Hellinger distance of each file vs the first file.
                for h in lhisto[1:]:
                    rms = np.dot(lhisto[0] - h, lhisto[0] - h)
                    rms /= h.shape[0]
                    print(np.sqrt(rms), hellinger_distance(h, lhisto[0]))
            matplotlib.rcParams.update({'font.size': 30})
            fig = plt.figure()
            ax = fig.add_subplot(2, 1, 1)
            fig.set_figwidth(60)
            fig.set_figheight(40)
            ind = np.arange(nclusters)  # the x locations for the groups
            width = 1.0/(len(lhisto)+1)  # the width of the bars
            ax.set_xticks(ind+width)
            ax.set_xticklabels(ind)
            # Grouped bars: one bar per file within each cluster slot.
            for i, h in enumerate(lhisto):
                rects = ax.bar(ind+(i*width), h, width, color=colors[i])
            fig.suptitle(datainfo.name + '-' + sensor, fontsize=48)
            minaxis = np.min(centroids)
            maxaxis = np.max(centroids)
            # Bottom row: the centroid signal of each (sorted) cluster,
            # annotated with its size.
            for nc in range(nclusters):
                ax2 = fig.add_subplot(2, nclusters, nc+nclusters+1)
                signal = centroids[lmax[nc][0]]
                plt.title(' ( '+str(cnt[lmax[nc][0]])+' )')
                t = arange(0.0, len(signal), 1)
                ax2.axis([0, len(signal), minaxis, maxaxis])
                ax2.plot(t,signal)
                plt.axhline(linewidth=1, color='r', y=0)
            fig.savefig(datainfo.dpath + '/' + datainfo.name + '/Results/' + datainfo.name + '-' + sensor + '-' + str(nclusters)
                        + '-histo-sort.pdf', orientation='landscape', format='pdf')
            # plt.show()
            print('*******************')
            for nc in range(nclusters):
                lsignals.append((centroids[lmax[nc][0]], str(nc)+' ( '+str(cnt[lmax[nc][0]])+' )'))
            # NOTE(review): under Python 3 "nclusters / 2" is a float — confirm
            # plotSignals accepts a non-integer row count (this looks written
            # for Python 2 integer division).
            if nclusters % 2 == 0:
                part = nclusters /2
            else:
                part = (nclusters /2) + 1
            plotSignals(lsignals, part, 2, maxaxis, minaxis, datainfo.name + '-' + sensor,
                        datainfo.name + '-' + sensor, datainfo.dpath + '/' + datainfo.name + '/Results/')
        datainfo.close_experiment_data(f)
# Rock-paper-scissors (pt-BR "jokenpô") against a random computer move.
from random import randint
from time import sleep
# Move names; the index matches the number typed by the player.
itens = ('Pedra', 'papel', 'Tesoura')
print('Escolha:')
print('[ 0 ] Pedra')
print('[ 1 ] Papel')
print('[ 2 ] Tesoura')
# NOTE(review): n1 is not validated — any value outside 0-2 raises
# IndexError on the itens lookup below.
n1 = int(input('Digite aqui :'))
n2 = randint(0, 2)
# Countdown chant with dramatic pauses.
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO')
sleep(1)
print('-_-' * 11)
print('Você escolheu {}'.format(itens[n1]))
print('O computador escolheu {}'.format(itens[n2]))
print('-_-' * 11)
# Outcome table: paper beats rock, scissors beat paper, rock beats scissors.
if n1 == n2:
    print('EMPATE')
elif n1 == 0 and n2 == 1:
    print('Computador venceu')
elif n1 == 0 and n2 == 2:
    print('Você venceu')
elif n1 == 1 and n2 == 2:
    print('Computador venceu')
elif n1 == 1 and n2 == 0:
    print('Você Venceu')
elif n1 == 2 and n2 == 0:
    print('Computador venceu')
elif n1 == 2 and n2 == 1:
    print('Você Venceu')
print('-_-' * 11)
|
# -*- coding: utf-8 -*-
# @Time : 2017/9/10 23:51
# @Author : Forec
# @File : focus/views.py
# @Project : WildPointer
# @license : Copyright(C), Forec
# @Contact : forec@bupt.edu.cn
from flask import request, current_app, render_template, abort
from flask_login import login_required, current_user
from . import focus
from ..models import User, Follow
@focus.route('/followers/<username>', methods=['GET'])
def followers(username):
    """Render the paginated list of accounts following *username* (404 if unknown)."""
    user = User.query.filter_by(username=username).first()
    if not user:
        abort(404)
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.order_by(Follow.timestamp.desc()).paginate(
        page, per_page=current_app.config['WP_FOLLOWERS_PER_PAGE'], error_out=False)
    follower_users = [record.follower for record in pagination.items]
    return render_template('focus/followers.html', pagination=pagination,
                           followers=follower_users, user=user)
@focus.route('/followed-by/<username>', methods=['GET'])
def followed_by(username):
    """Render the paginated list of accounts *username* follows (404 if unknown)."""
    user = User.query.filter_by(username=username).first()
    if not user:
        abort(404)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed.order_by(Follow.timestamp.desc()).paginate(
        page, per_page=current_app.config['WP_FOLLOWERS_PER_PAGE'], error_out=False)
    followed_users = [record.followed for record in pagination.items]
    return render_template('focus/followeds.html', pagination=pagination,
                           followeds=followed_users, user=user)
@focus.route('/my-followers', methods=['GET'])
@login_required
def my_followers():
    """Render the paginated list of accounts following the signed-in user."""
    page = request.args.get('page', 1, type=int)
    pagination = current_user.followers.order_by(Follow.timestamp.desc()).paginate(
        page, per_page=current_app.config['WP_FOLLOWERS_PER_PAGE'], error_out=False)
    follower_users = [record.follower for record in pagination.items]
    return render_template('focus/my_followers.html', pagination=pagination,
                           followers=follower_users)
@focus.route('/my-following', methods=['GET'])
@login_required
def my_following():
    """Render the paginated list of accounts the signed-in user follows."""
    page = request.args.get('page', 1, type=int)
    pagination = current_user.followed.order_by(Follow.timestamp.desc()).paginate(
        page, per_page=current_app.config['WP_FOLLOWERS_PER_PAGE'], error_out=False)
    followed_users = [record.followed for record in pagination.items]
    return render_template('focus/my_followeds.html', pagination=pagination,
                           followeds=followed_users)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 15:48:17 2019
@author: id127392
"""
import os
from keras.models import load_model
import threading
import datetime
import time
import util
from dsce import dataset, train, datamanip, reduce, analyse, datastream
import tensorflow as tf
import os.path
from numpy import genfromtxt
import pandas as pd
import csv
import numpy as np
class App:
    def __init__(self):
        """Read setup parameters, create a timestamped output directory,
        configure logging, and initialise all activation caches to None."""
        # Setup Parameters (util.params = None forces a fresh parameter read).
        util.params = None
        self.dnnModelPath = util.getFullFileName(util.getParameter('DnnModelPath'))
        self.numTrainingInstances = util.getParameter('NumActivationTrainingInstances')
        # Output directory name: "<setup description>--<yyMMdd_HHmmss>".
        self.timestamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        self.outputName = util.getSetupFileDescription() + '--' + self.timestamp
        self.outputDir = 'output/%s'%(self.outputName)
        util.makeDirectory(self.outputDir)
        # Logging goes to <outputDir>/<outputName>.log.
        util.isLoggingEnabled = util.getParameter('LoggingEnabled')
        util.logPath = self.outputDir + '/%s.log'%(self.outputName)
        util.logLevel = util.getParameter('LogLevel')
        util.thisLogger = util.Logger()
        util.storeSetupParamsInLog()
        # Setup memory environment
        self.processorType = processorType = util.getParameter('ProcessorType')
        self.startTime = datetime.datetime.now()
        # Caches populated by the later pipeline stages; None until computed.
        self.streamList = None
        self.clustererList = None
        self.classMaxValues1 = None # max value of raw activation data
        self.classMaxValues2 = None # max value of reduced activation data
        self.flatActivations = None
        self.activationBatches = None
        self.batchFlatActivations = None
        self.reducedFlatActivations = None
def loadDnnModel(self, resetParams=False):
if resetParams == True:
util.params = None
# Load the pre-trained DNN model
util.thisLogger.logInfo("Loading DNN model %s..."%(self.dnnModelPath))
self.dnnModel = load_model(self.dnnModelPath)
util.thisLogger.logInfo(self.dnnModel.summary())
# Load the input dataset
util.thisLogger.logInfo("Loading input data...")
self.x_train, self.y_train, self.x_test, self.y_test = dataset.getFilteredData()
# limit the number of y data to the num training instances
if self.numTrainingInstances != -1:
self.x_train = self.x_train[:self.numTrainingInstances]
self.y_train = self.y_train[:self.numTrainingInstances]
def getActivations(self, resetParams=False):
if resetParams == True:
util.params = None
self.numLayers, self.batchData, self.activations, self.activationBatches = train.getActivations(self.x_train, self.numTrainingInstances, self.dnnModel, self.dnnModel, self.y_train)
# flatten the data
self.flatActivations, self.batchFlatActivations = datamanip.flattenActivationBatches(self.activationBatches)
self.flatActivations = tf.constant(self.flatActivations)
# Normalize the activations
self.flatActivations, self.classMaxValues1 = datamanip.normalizeFlatValues(self.flatActivations, True)
# tidy up memory - remove variables no longer needed
del self.batchData
del self.activations
del self.activationBatches
def reduceActivations(self, resetParams=False):
if resetParams == True:
util.params = None
# flat activations provided as one list and as batches - let the reduction training technique decide which to use
reduce.train(self.flatActivations, self.batchFlatActivations)
del self.batchFlatActivations
self.batchFlatActivations = None
# Reduce the training data activations
self.reducedFlatActivations = reduce.reduce(self.flatActivations, None)
# Normalize the reduced activations
self.reducedFlatActivations, self.classMaxValues2 = datamanip.normalizeFlatValues(self.reducedFlatActivations, True)
# Store the reduced training data activations
reduce.saveReducedData('%s/%s_trainingactivations'%(self.outputDir,self.outputName), self.reducedFlatActivations, self.y_train)
def setupActivationAnalysis(self, resetParams=False):
if resetParams == True:
util.params = None
self.streamList = {}
self.clustererList = {}
# make sure a new MOA gateway is setup for each run
util.killMoaGateway()
time.sleep(3)
util.startMoaGateway()
time.sleep(3)
# get unseen data and their predictions so we can efficiently set up the empty MCOD clusterers
datastream.unseenDataList = datastream.getInstancesWithResultsBatchObj()
datastream.setPredictions(self.dnnModel)
analyse.setup(self.reducedFlatActivations, self.y_train, self.streamList, self.clustererList, self.outputDir, self.outputName, datastream.unseenDataList)
def processDataStream(self):
# start the thread to process the streams so that new instances get clustered
thread1 = threading.Thread(target=analyse.processStreamInstances, args=(self.streamList, self.clustererList, self.numTrainingInstances, self.outputDir, self.outputName, False, True), daemon=True)
thread1.start()
unseenInstancesObjList = datastream.startDataInputStream(self.streamList, self.clustererList, reduce.reductionModel, self.dnnModel, self.x_test, self.classMaxValues1, self.classMaxValues2, self.outputDir, self.outputName)
# reshape into original array types
unseenInstances = [x.instance for x in unseenInstancesObjList[0]]
unseenResults = [x.correctResult for x in unseenInstancesObjList[0]]
dataDiscrepancyClass = self.layerExtraction = util.getParameter("DataDiscrepancyClass");
# append unseen instances to the training instances
unseenInstances = np.append(unseenInstances, unseenResults, axis=1)
classes = np.unique(self.y_train)
for dataClass in classes:
# Filter unseen instances to only include CE and data discrepancy class
filteredInstances = list(filter(lambda x: (x[len(unseenInstances[0])-1] == dataClass or x[len(unseenInstances[0])-1] == dataDiscrepancyClass), unseenInstances))
trainingActivations = util.readFromCsv('%s/%s_trainingactivations_%s.csv'%(self.outputDir,self.outputName,dataClass))
labels = np.arange(len(trainingActivations[0]))
labels = np.append(labels,len(labels))
classValues = np.full((trainingActivations.shape[0],1), 'Train_' + str(dataClass))
trainingActivations = np.append(trainingActivations, classValues, axis=1) # axis=1 means add columns
trainingActivations = np.concatenate((trainingActivations, filteredInstances), axis=0) # axis=0 means add rows
trainingActivations = np.concatenate(([labels], trainingActivations), axis=0) # axis=0 means add rows
analyse.stopProcessing()
thread1.join()
# capture any unprocessed instances
analyse.processStreamInstances(self.streamList, self.clustererList, self.numTrainingInstances, self.outputDir, self.outputName, True, True)
util.thisLogger.logInfo('End of instance processing')
# get outlier results and store in csv
if analyse.results != None:
util.createResults(unseenInstancesObjList, analyse.results, self.outputDir, self.outputName)
util.killMoaGateway()
endTime = datetime.datetime.now()
util.thisLogger.logInfo('Total run time: ' + str(endTime - self.startTime))
util.thisLogger.closeLog()
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, KFold
import preprocessing as prep
from sklearn.model_selection import GridSearchCV
from neural_network import prepare_data_for_model
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
# load preprocessed data
df_train, df_test = prep.run_both()
# NOTE(review): prepare_data_for_model() immediately rebinds df_test, so the
# df_test returned by prep.run_both() above is discarded.
df_test, X_train, y_train, X_test = prepare_data_for_model()
# remove passenger ID variable but save IDs for test set
df_train = df_train.drop(columns=['PassengerId'])
pass_id_test = df_test['PassengerId']
df_test = df_test.drop(columns=['PassengerId'])
def prediction(X_train, y_train, pass_id_test, X_test):
    """
    Fit a random forest on the training data, print its training-set score
    and write the test-set survival predictions to a submission CSV.

    :param X_train: training feature matrix
    :param y_train: training labels (0/1 survival)
    :param pass_id_test: PassengerId column for the test set
    :param X_test: test feature matrix
    """
    random_forest = RandomForestClassifier(n_estimators=50, min_samples_split=6, min_samples_leaf=2, max_depth=10)
    # Fixed: fit()'s return value was stored in an unused variable.
    random_forest.fit(X_train, y_train)
    # NOTE: this is accuracy on the training set, not a validation score.
    score = random_forest.score(X_train, y_train)
    print("Random forest score: ", score)
    prediction_test_set = random_forest.predict(X_test).round(0).astype(int)
    predictions = pd.DataFrame({'PassengerId': pass_id_test, 'Survived': prediction_test_set})
    predictions.to_csv('solutions/prediction_random_forrest_prep_min.csv', index=False)
def param_tuning(X_train, y_train):
    """
    Grid-search random forest hyperparameters with 5-fold cross validation
    and print the best parameter set and its accuracy.
    """
    # Fixed: removed a tqdm loop that only slept for 30 seconds and did no work.
    parameters = {
        'n_estimators': [10, 30, 50, 70, 100, 500],
        'max_depth': [1, 5, 10, 15],
        # Fixed: 'max_terminal_nodes' is not a RandomForestClassifier
        # parameter (GridSearchCV raises on unknown params); the real
        # parameter name is 'max_leaf_nodes'.
        'max_leaf_nodes': [25, 50],
        'min_samples_split': [5, 8, 10],
        'min_samples_leaf': [.1, .2, .5, 2, 5],
    }
    rf = RandomForestClassifier(max_samples=.2)
    # Using a grid search with a 5-fold cross validation to find the best model
    rf_clf = GridSearchCV(rf, parameters, scoring='accuracy', cv=5)
    rf_clf.fit(X_train, y_train)
    print('Random Forrest')
    print(rf_clf.best_params_)
    print(f'Accuracy: {round(rf_clf.best_score_*100, 2)}%')
def round_survival():
    """
    Round the survival results to integers for valid submission.
    """
    out_path = "solutions/prediction_random_forrest_prep.csv"
    submission = pd.read_csv(out_path)
    submission["Survived"] = submission["Survived"].round(0).astype(int)
    submission.to_csv(out_path, index=False)
def model_testing(X_train, y_train):
    """
    Compare training-set accuracy of random forests with 50/100/150
    estimators over repeated fits and show a box plot per setting.
    """
    runs = 100
    acc1 = []
    acc2 = []
    acc3 = []
    for run in range(runs):
        model1 = RandomForestClassifier(n_estimators=50, min_samples_split=5, min_samples_leaf=2, max_depth=10)
        model2 = RandomForestClassifier(n_estimators=100, min_samples_split=5, min_samples_leaf=2, max_depth=10)
        model3 = RandomForestClassifier(n_estimators=150, min_samples_split=5, min_samples_leaf=2, max_depth=10)
        # Fixed: fit() results were stored in unused variables.
        model1.fit(X_train, y_train)
        model2.fit(X_train, y_train)
        model3.fit(X_train, y_train)
        # NOTE: scores are computed on the training data itself, so they
        # over-estimate generalisation accuracy.
        acc1.append(model1.score(X_train, y_train))
        acc2.append(model2.score(X_train, y_train))
        acc3.append(model3.score(X_train, y_train))
    plt.boxplot([acc1, acc2, acc3])
    # plotting
    plt.title("Accuracy of random forest", fontsize=22)
    ax = plt.gca()
    ax.set_xticklabels(['50', '100', '150'], fontsize=18)
    plt.xlabel("# estimators")
    plt.ylabel("Accuracy score", fontsize=20)
    plt.subplots_adjust(bottom=.15, left=.15)
    plt.show()
# Entry point: exactly one of the pipeline steps below is enabled at a time.
# param_tuning(X_train, y_train)
# prediction(X_train, y_train, pass_id_test, X_test)
model_testing(X_train, y_train)
# round_survival()
# # DIT HEEFT 63% GOED VOORSPELD, MET SIBSP ERBIJ 64 --> ALLEBEI ERG MATIG
# y = train_data["Survived"]
#
# features = ["Pclass", "Sex", "Parch", "SibSp"]
# X = pd.get_dummies(train_data[features])
# X_test = pd.get_dummies(test_data[features])
#
# print(X.columns)
#
# neigh = KNeighborsClassifier(n_neighbors=5)
# neigh.fit(X, y)
#
# correct = 0
# incorrect = 0
# for index, row in X.iterrows():
# pclass = row['Pclass']
# sex_f = row['Sex_1']
# sex_m = row['Sex_0']
# parch = row['Parch']
# sibsp = row['SibSp']
# prediction = neigh.predict([[pclass, sex_f, sex_m, parch, sibsp]])
#
# survived = train_data['Survived'][index]
#
# if prediction == survived:
# correct +=1
# else:
# incorrect += 1
#
# print(correct*100/(incorrect+correct))
# # TODO Dit is leuk bij exploratie erbij misschien!!!!
# alone = df_train.loc[df_train.alone == 0]["Survived"]
# not_alone = df_train.loc[df_train.alone == 1]["Survived"]
# rate_alone = sum(alone)/len(alone) # halve survived
# rate_not_alone = sum(not_alone)/len(not_alone) # 30% survived
# # print(rate_alone, rate_not_alone)
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db.models.expressions import F
# Create your models here.
class MyUserManager(BaseUserManager):
    """Manager for MyUser that creates accounts keyed on email + username."""

    def create_user(self, email, username, password=None):
        """Create and persist a regular user.

        Raises:
            ValueError: if email or username is missing.
        """
        # Fixed: the original *returned* the ValueError instances instead of
        # raising them, so invalid input silently yielded a non-user object.
        if not email:
            raise ValueError("An email address is required")
        if not username:
            raise ValueError("An username is required")
        user = self.model(
            email = self.normalize_email(email),
            username = username,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, username, password):
        """Create and persist a superuser (admin/staff/superuser flags set)."""
        user = self.create_user(
            email = self.normalize_email(email),
            username = username,
            password=password,
        )
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class MyUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates by email instead of username."""
    email = models.EmailField(verbose_name="email", max_length=60, unique=True)
    username = models.CharField(max_length=15, unique=True)
    date_joined = models.DateTimeField(verbose_name="date joined", auto_now_add=True)
    # NOTE(review): auto_now=True updates this on *every* save, not only on
    # login -- confirm that is intended.
    last_login = models.DateTimeField(verbose_name="last login", auto_now=True)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    # Email is the login identifier; username is still required at creation.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username',]
    objects = MyUserManager()
    def __str__(self):
        return self.email + ", " + self.username
    def has_perm(self, perm, obj=None):
        # Object-level permissions are granted wholesale to admins only.
        return self.is_admin
    def has_module_perms(self, app_label):
        # All users may see every app module.
        return True
|
from multiprocessing.dummy import Pool as ThreadPool, current_process
def test(i):
    """Worker task: print its task number and the executing thread's name."""
    # multiprocessing.dummy wraps threading, so current_process() is
    # effectively threading.current_thread().
    print(f"[编号{i}]{current_process().name}")
def main():
    """Fan five tasks out to a thread pool and wait for them to finish."""
    pool = ThreadPool()
    for task_no in range(5):
        pool.apply_async(test, args=(task_no, ))
    pool.close()
    pool.join()
    # Prints the main process/thread name after all workers are done.
    print(f"{current_process().name}")
if __name__ == '__main__':
    main()
# order matters!
import models
from models.coach import *
from models.team import *
from models.player import *
from models.ofl_pickem import *
from models.leader import *
from models.match import *
from models.tournament import *
from models.race_stats import *
#Script to initiate all static classes for a fresh DB (used for newly created local servers).
# Initialise the static reference classes so a fresh datastore has them.
# (Python 2 / Google App Engine datastore code.)
CoachLeader.init()
TeamLeader.init()
PlayerLeader.init()
Skill.init()
Race.init()
Position.init()
Injury.init()
# Rebuild per-race aggregate stats from each race's teams.
for race in Race.all():
    print race.key().name() + "<br>"
    race_stats = RaceStats.all().filter("race =", race).get()
    if not race_stats: #Race doesn't have stats yet.
        print race.key().name() + " not found, creating....<br>"
        race_stats = RaceStats(race = race)
        # race_stats.race = race
    race_stats.reset()
    for team in Team.all().filter("race = ", race):
        print " " + team.key().name() + i2s(team.tpts) + "<br>"
        RaceStats.addTeam(race_stats, team)
    print i2s(race_stats.tpts) + "<br>"
    # Adjusted win percentage only makes sense with more than one match.
    if race_stats.matches > 1:
        RaceStats.compute_awp(race_stats)
    race_stats.put()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 21:33:38 2017
@author: zhouying
"""
import tensorflow as tf
#import numpy as np
import myutil
class mysdae(object):
    """
    Two-layer stacked autoencoder skeleton on the TensorFlow 1.x graph API
    (placeholders + explicit Session).

    NOTE(review): as written the class cannot run -- the methods use
    self.all_weights and self.optimizer, which are never assigned; __init__
    reads the undefined self.n_input; and no variable initializer is ever
    run.  The notes below flag each issue rather than guessing at the fix.
    """
    def __init__(self,input,hidden1,hidden2,transfer_function = tf.nn.softplus,
                 optimizer = tf.train.AdamOptimizer(),
                 keep_rate = 1, scale = 0.1):
        # `input` shadows the builtin; kept for interface compatibility.
        self.input_units = input.shape[0]
        self.output_units = self.input_units
        self.hidden1 = hidden1
        self.hidden2 = hidden2
        self.transfer = transfer_function
        # NOTE(review): the `optimizer` argument is never stored, yet every
        # train method references self.optimizer -> AttributeError.
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        self.keep_rate = tf.placeholder(tf.float32)
        self.keep_prob = keep_rate
        # NOTE(review): self.n_input is undefined (AttributeError); the value
        # intended here is presumably self.input_units.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # NOTE(review): _initialize_weights() is never called, so
        # self.all_weights (used by every method below) does not exist.
        self.sess = tf.Session()
    def _initialize_weights(self):
        """Create encoder/decoder weight and bias variables for both layers."""
        all_weights = dict()
        all_weights['ew1'] = tf.Variable(myutil.xavier_init([self.input_units, self.hidden1]))
        all_weights['eb1'] = tf.Variable(tf.zeros([self.hidden1], dtype = tf.float32))
        all_weights['dw1'] = tf.Variable(tf.zeros([self.hidden1, self.input_units], dtype = tf.float32))
        all_weights['db1'] = tf.Variable(tf.zeros([self.input_units], dtype = tf.float32))
        all_weights['ew2'] = tf.Variable(myutil.xavier_init([self.hidden1, self.hidden2]))
        all_weights['eb2'] = tf.Variable(tf.zeros([self.hidden2], dtype = tf.float32))
        all_weights['dw2'] = tf.Variable(tf.zeros([self.hidden2, self.hidden1], dtype = tf.float32))
        all_weights['db2'] = tf.Variable(tf.zeros([self.hidden1], dtype = tf.float32))
        return all_weights
    def train1(self,train):
        """Pre-train the first encoder/decoder pair on `train`."""
        z1 = self.transfer(tf.matmul(self.x,self.all_weights['ew1'])+self.all_weights['eb1'])
        o1 = self.transfer(tf.matmul(z1,self.all_weights['dw1'])+self.all_weights['db1'])
        # NOTE(review): reduce_mean((o1-self.x), 2) treats 2 as an axis, but a
        # 2-D tensor has no axis 2; the intent is presumably mean *squared*
        # error, i.e. tf.reduce_mean(tf.square(o1 - self.x)).
        self.loss1 = tf.reduce_mean((o1-self.x),2)
        # NOTE(review): self.optimizer would be an Optimizer object, not a
        # train op -- sess.run needs optimizer.minimize(loss).
        self.sess.run((self.loss1, self.optimizer), feed_dict = {self.x: train,
                                                                 self.scale: self.training_scale,
                                                                 self.keep_rate: self.keep_prob
                                                                 })
    def train2(self,train):
        """Pre-train the second encoder/decoder pair.

        NOTE(review): feeds self.x (input_units wide) into ew2, which expects
        hidden1-wide input -- the caller would have to feed layer-1 codes.
        """
        z2 = self.transfer(tf.matmul(self.x,self.all_weights['ew2'])+self.all_weights['eb2'])
        o2 = self.transfer(tf.matmul(z2,self.all_weights['dw2'])+self.all_weights['db2'])
        # NOTE(review): same axis/squared-error issue as in train1.
        self.loss2 = tf.reduce_mean((o2-self.x),2)
        self.sess.run((self.loss2, self.optimizer), feed_dict = {self.x: train,
                                                                 self.scale: self.training_scale,
                                                                 self.keep_rate: self.keep_prob
                                                                 })
    def fine_tune(self,train):
        """Fine-tune the full encode-decode stack end to end."""
        z1 = self.transfer(tf.matmul(self.x,self.all_weights['ew1'])+self.all_weights['eb1'])
        z2 = self.transfer(tf.matmul(z1,self.all_weights['ew2'])+self.all_weights['eb2'])
        o2 = self.transfer(tf.matmul(z2,self.all_weights['dw2'])+self.all_weights['db2'])
        o1 = self.transfer(tf.matmul(o2,self.all_weights['dw1'])+self.all_weights['db1'])
        # NOTE(review): same axis/squared-error issue as in train1.
        self.loss3 = tf.reduce_mean((o1-self.x),2)
        self.sess.run((self.loss3, self.optimizer), feed_dict = {self.x: train,
                                                                 self.scale: self.training_scale,
                                                                 self.keep_rate: self.keep_prob
                                                                 })
    def re_error(self,data):
        """Return the reconstruction residual of `data`.

        NOTE(review): returns an unevaluated graph tensor, not numeric
        values -- nothing here calls sess.run on it.
        """
        z1 = self.transfer(tf.matmul(data,self.all_weights['ew1'])+self.all_weights['eb1'])
        z2 = self.transfer(tf.matmul(z1,self.all_weights['ew2'])+self.all_weights['eb2'])
        o2 = self.transfer(tf.matmul(z2,self.all_weights['dw2'])+self.all_weights['db2'])
        o1 = self.transfer(tf.matmul(o2,self.all_weights['dw1'])+self.all_weights['db1'])
        return data-o1
|
#!/usr/bin/python3
# Project Euler #1: sum of all multiples of 3 or 5 below max_num.
max_num = 1000
sum_all = sum(n for n in range(1, max_num) if n % 3 == 0 or n % 5 == 0)
print(sum_all)
|
from django.template import Context, Template
from django.template.loader import get_template
from django.shortcuts import render_to_response, redirect
from django.core.context_processors import csrf
from fire_fighter_site.views.helper import create_navlinks
from candidate.forms import PreScreenForm
def display(request):
    """Render the certifying view with nav links and a pre-screen form.

    Anonymous users are redirected to /login.
    """
    if not request.user.is_authenticated():
        return redirect('/login')
    context_dict = {}
    context_dict['nav_links'] = create_navlinks(request.user)
    context_dict['ps_form'] = PreScreenForm(Officer=request.user).as_ul()
    # NOTE(review): "form_handel" (sic) is the key the template expects; the
    # typo must stay until the template is updated in lockstep.
    context_dict['form_handel'] = "/certify/pre_screen"
    context_dict.update(csrf(request))
    return render_to_response('certifying_view_template.djt', context_dict)
import datetime
from django.db import models
from django.utils import timezone
class Students(models.Model):
    """Student record (field names keep the original CapitalCase style)."""
    Name = models.CharField(max_length=20)
    Sex = models.CharField(max_length=2)
    School = models.CharField(max_length=100)
    Company = models.CharField(max_length=100)
    Tel = models.CharField(max_length=20)
    # Room number; defaults to room 624.
    room = models.CharField(max_length=50, default='624')
    def __str__(self):
        return self.Name
|
"""
Solution for origins_and_order
Thoughts:
- Year could be any value. Month is 1-12. Day depends on month.
- Ambiguity could be between month/day (two values <= 12) or day/year (two values <= # of days in month)
- Corner case: If two values are equal, then they are not ambiguous (since they have the same string representation). E.g., "02/28/28"
- I could use Calendar but since it *does* take leap years into account then the answers will actually be wrong for some times.
"""
#limit contains amount of days in the corresponding numerical month
#(leap years are deliberately ignored, per the module docstring)
limit = {
    1: 31,
    # Fixed: was 27. A non-leap February has 28 days, and the module
    # docstring's own example "02/28/28" requires day 28 to be valid.
    2: 28,
    3: 31,
    4: 30,
    5: 31,
    6: 30,
    7: 31,
    8: 31,
    9: 30,
    10: 31,
    11: 30,
    12: 31
}
def answer(x, y, z):
    """Disambiguate three date components into an MM/DD/YY string, or
    return "Ambiguous" when more than one valid reading exists.

    Relies on the module-level `limit` table: a component > 12 cannot be a
    month, and a component above the month's day limit must be the year.
    """
    #sort inputs in ascending order
    nums = sorted([x, y, z])
    first = int(nums[0])
    second = int(nums[1])
    third = int(nums[2])
    valid = False
    # Case 1: the smallest value could be a month (1-12).
    if first < 13 and not first == 0:
        if second == first:
            # Equal values render identically, so month vs day is moot.
            month = first
            day = first
            if third > limit[first]:
                year = third
                valid = True
            elif third == first:
                year = first
                valid = True
            else:
                return "Ambiguous"
        elif second in range(limit[first] + 1):
            month = first
            day = second
            if second == third and second > 12:
                year = second
                valid = True
            elif second > 12 and third not in range(limit[first] + 1):
                valid = True
                year = third
            elif third in range(limit[first] + 1):
                # `third` could also be the day -> two readings.
                return "Ambiguous"
            else:
                return "Ambiguous"
    elif first == 0:
        # Case 2: a zero component can only be the (two-digit) year.
        year = first
        if second < 13 and third < 13:
            # Both remaining values could be months; need exactly one
            # consistent (day, month) assignment.
            if second in range(limit[third] + 1) and third not in range(limit[second] + 1):
                day = second
                month = third
                valid = True
            elif second not in range(limit[third] + 1) and third in range(limit[second] + 1):
                day = third
                month = second
                valid = True
            elif second == third and second in range(limit[second] + 1):
                valid = True
                month = second
                day = second
            else:
                return "Ambiguous"
        elif second < 13 and third > 12:
            valid = True
            month = second
            day = third
        else:
            return "Ambiguous"
    else:
        # Smallest value exceeds 12: it cannot be a month, so no reading exists.
        return 'Ambiguous'
    if valid:
        # Zero-pad each component to two digits.
        # NOTE(review): if `valid` were ever False here the function would
        # implicitly return None; all paths appear to set it or return early.
        if year < 10:
            year = '0' + str(year)
        if month < 10:
            month = '0' + str(month)
        if day < 10:
            day = '0' + str(day)
        return str(month) + '/' + str(day) + '/' + str(year)
|
#! /usr/bin/python3
import sys

# Average all numbers given on the command line.
lab10 = sys.argv[0]          # script name
arguments = sys.argv[1:]     # the numbers to average, as strings
count = len(arguments)
if count == 0:
    # Fixed robustness: no arguments used to crash with IndexError (and
    # would have divided by zero); print usage instead.
    print('Usage:', lab10, 'NUM [NUM ...]')
    sys.exit(1)
# Fixed: the original only added sys.argv[1], so the "average" was wrong
# whenever more than one number was supplied.
total = sum(int(arg) for arg in arguments)
avg = total / count
print ('The average of the', count, 'numbers is: ' , avg )
|
import csv
from math import radians, cos, sin, asin, sqrt
from datetime import datetime
from collections import Counter
import json
import time
from pytz import timezone
def haversine(lon1, lat1, lon2, lat2):
    """
    Great-circle distance in miles between two points given as decimal
    (longitude, latitude) degree pairs.
    """
    rlon1, rlat1, rlon2, rlat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    # Haversine of the central angle between the two points.
    half_dlat = (rlat2 - rlat1) / 2
    half_dlon = (rlon2 - rlon1) / 2
    chord = sin(half_dlat) ** 2 + cos(rlat1) * cos(rlat2) * sin(half_dlon) ** 2
    angle = 2 * asin(sqrt(chord))
    earth_radius_mi = 3956  # use 6371 for kilometres
    return angle * earth_radius_mi
# Match fire-event tweets to map "centerpoints": an event matches when it is
# within `distance` miles and `timedist` minutes of the centerpoint.
distance = 1 #mi
timedist = 60 #min
events_by_date = {}
# CSV columns include "date","acct","tweet","url","street_addr","boro","state",
# geocoder metadata, and "Latitude","Longitude","MatchType".
with open("NYCFireWire_tw.csv") as events_csvfile:
    for event in csv.DictReader(events_csvfile):
        # Dates look like "May 11, 2019 at 3:04PM"; bucket events by day.
        date = datetime.strptime(event["date"].split("at")[0].strip(), '%B %d, %Y').strftime('%Y-%m-%d')
        # NOTE(review): this rebinds the imported `time` module name; it is
        # unused afterwards, so harmless but confusing.
        time = event["date"].split("at")[1]
        if event["Latitude"] == '0':
            continue
        if date not in events_by_date:
            events_by_date[date] = []
        events_by_date[date].append(event)
# Histograms of how many events matched each centerpoint.
result = Counter()
same_hour_result = Counter()
with open("centerpoints.csv") as centerpoints_csvfile:
    centerpoints_csv = csv.reader(centerpoints_csvfile)
    centerpoints = list(centerpoints_csv)
    for centerpoint_row in centerpoints:
        if centerpoint_row[-1] == "nonhover":
            continue
        centerpoint_date = datetime.strptime(centerpoint_row[1][:10], '%Y-%m-%d').replace(tzinfo=timezone('UTC'))
        try:
            candidate_events = events_by_date[centerpoint_date.strftime('%Y-%m-%d')]
        except KeyError:
            # NOTE(review): this assignment is dead -- `continue` skips the row.
            candidate_events = []
            continue
        # if start_time > midnight:
        #     candidate_events += events_by_date[date + 1]
        print(len(candidate_events))
        # Narrow candidates to events within `timedist` minutes...
        centerpoint_datetime = datetime.strptime(centerpoint_row[1], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone('UTC'))
        candidate_events = [event for event in candidate_events if abs((datetime.strptime(event["date"].strip(), '%B %d, %Y at %I:%M%p').replace(tzinfo=timezone('US/Eastern')) - centerpoint_datetime).total_seconds()) < timedist * 60]
        same_hour_result[len(candidate_events)] += 1
        if len(candidate_events) == 0:
            print("")
            continue
        print(len(candidate_events))
        # ...then to events within `distance` miles of the centerpoint.
        centerpoint = json.loads(centerpoint_row[6].replace("'", '"'))
        candidate_event_distances = [haversine(centerpoint['lon'], centerpoint['lat'], float(event['Longitude']), float(event['Latitude'])) for event in candidate_events]
        print(candidate_event_distances)
        candidate_events = [event for event in candidate_events if haversine(centerpoint['lon'], centerpoint['lat'], float(event['Longitude']), float(event['Latitude'])) < distance]
        print(len(candidate_events))
        print()
        # A unique match: show the tweet alongside the centerpoint address.
        if len(candidate_events) == 1:
            print("")
            print(centerpoint_row[4])
            print(candidate_events[0]["date"])
            print(candidate_events[0]["tweet"])
            print("")
        result[len(candidate_events)] += 1
print(result)
print(same_hour_result)
import argparse
import networkx as nx
import random
import sys
import tqdm
from examples.page_rank import PageRankSimulation, LOG_LEVEL_PAGE_RANK_INFO
N_ITER = 15
timestep = .1
RUN_TIME = N_ITER * timestep
PARAMETERS = {
# Experimental results of good values
# |V|=400 |E|=600 : ts=1. tsf=40
# |V|=800 |E|=1200 : ts=1. tsf=45
# |V|=1600 |E|=2400 : ts=1. tsf=150
# |V|=3200 |E|=4800 : ts=? tsf=?
'time_scale_factor': 20000,
}
def _mk_label(n):
return '#%d' % n
def _mk_rd_node(node_count):
return random.randint(0, node_count - 1)
def _mk_graph(node_count, edge_count):
    """Generate `edge_count` random directed edges over `node_count` nodes,
    with no duplicate edges and no node of out-degree zero, returned as
    label pairs like ('#1', '#2')."""
    # Under these constraints we can comply with the requirements below
    assert node_count <= edge_count <= node_count**2, \
        "Need node_count=%d < edge_count=%d < %d " % (node_count, edge_count, node_count**2)
    # Ensures no double edges
    edges = set([])
    # Ensures no dangling nodes: one outgoing edge per node first.
    for i in range(node_count):
        edges.add((i, _mk_rd_node(node_count)))
    for _ in tqdm.tqdm(range(node_count, edge_count), desc="Generating edges",
                       initial=node_count, total=edge_count):
        while True:
            l = len(edges)
            edges.add((_mk_rd_node(node_count), _mk_rd_node(node_count)))
            if len(edges) > l:
                break  # Only move to next iteration if we've added a new edge
    edges = [(_mk_label(src), _mk_label(tgt))
             for src, tgt in tqdm.tqdm(edges, desc="Formatting edges")]
    return edges
def _mk_sim_run(node_count=None, edge_count=None, verify=False, pause=False, show_out=False):
    """Build one random graph and run a single Page Rank simulation on it.

    Returns True when the simulation output matches the verification run.
    """
    ###############################################################################
    # Create random Page Rank graphs
    # NOTE(review): under Python 3 map() is a lazy iterator -- confirm
    # PageRankSimulation accepts one (or wrap it in list()).
    labels = map(_mk_label, list(range(node_count)))
    edges = _mk_graph(node_count, edge_count)
    ###############################################################################
    # Run simulation / report
    with PageRankSimulation(RUN_TIME, edges, labels, PARAMETERS, log_level=0, pause=pause) as sim:
        is_correct = sim.run(verify=verify, diff_only=True)
        sim.draw_output_graph(show_graph=show_out)
    return is_correct
def run(runs=None, **kwargs):
    """Repeat the random-graph simulation *runs* times and report the error count."""
    errors = 0
    for _ in tqdm.tqdm(range(runs), total=runs):
        # Keep regenerating graphs until one converges under power iteration.
        while True:
            try:
                errors += 0 if _mk_sim_run(**kwargs) else 1
                break
            except nx.PowerIterationFailedConvergence:
                print('Skipping nx.PowerIterationFailedConvergence graph...')
    print('Finished robustness test with %d/%d error(s).' % (errors, runs))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create random Page Rank graphs')
    parser.add_argument('-r', '--runs', type=int, default=1, help='# runs. Default is 1.')
    parser.add_argument('node_count', metavar='NODE_COUNT', type=int, help='# nodes per graph')
    parser.add_argument('edge_count', metavar='EDGE_COUNT', type=int, help='# edges per graph')
    parser.add_argument('-v', '--verify', action='store_true', help='Verify sim w/ Python PR impl')
    parser.add_argument('-p', '--pause', action='store_true', help='Pause after each runs')
    parser.add_argument('-o', '--show-out', action='store_true', help='Display ranks curves output')
    # Fixed seed so generated graphs are reproducible across invocations.
    random.seed(42)
    # run() returns None, so the process always exits with status 0.
    sys.exit(run(**vars(parser.parse_args())))
|
# A wee calculator to work out CPMs, Budgets, Impressions etc.
def workOutCPM(impressions,budget):
    """CPM: the cost per thousand impressions."""
    cost_per_impression = budget / impressions
    return cost_per_impression * 1000
def workOutBudget(cpm,impressions):
    """Budget needed to buy `impressions` impressions at the given CPM."""
    gross = impressions * cpm
    return gross / 1000
def workOutImpressions(cpm,budget):
    """Impressions purchasable with `budget` at the given CPM."""
    thousands = budget / cpm
    return thousands * 1000
# Python 2 interactive CLI: ask which metric to derive, then prompt for the
# other two values.
prompt = ">>>"
# NOTE(review): the banner suggests typing "CPM" but the comparisons below
# only match lowercase; input is not normalised with .lower().
print "what metric do you want to work out?"
print "Enter CPM, budget, impressions."
metric = raw_input(prompt)
if metric == "cpm":
    print "Okay, you want to work out a CPM"
    print "Please let me know the budget"
    budget = float(raw_input(prompt))
    print "Now let me know the impressions"
    impressions = float(raw_input(prompt))
    print "the CPM is: ", "%.2f" % workOutCPM(impressions,budget)
elif metric == "impressions":
    print "Okay, you want to work out impressions"
    print "Please let me know the CPM"
    cpm = float(raw_input(prompt))
    print "Now let me know the budget"
    budget = float(raw_input(prompt))
    # NOTE(review): the juxtaposed string literals here (no comma) format the
    # number into the label -- probably a missing comma, flagged not changed.
    print "The impressions are:" "%.0f" % workOutImpressions(cpm,budget)
elif metric == "budget":
    print "Okay, you want to work out the budget"
    print "Please let me know the CPM"
    cpm = float(raw_input(prompt))
    print "Now let me know the impressions"
    impressions = float(raw_input(prompt))
    print "The budget is: " "%.2f" % workOutBudget(cpm,impressions)
else:
    print "You haven't entered a valid function to run"
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from PIL import Image
# Create your models here.
# class DeliveryAddress(models.Model):
# pass
class Profile(models.Model):  # user profile record
    # One-to-one link to the auth User; the profile is deleted with its user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    FEMALE = "FEMA"
    MALE = "MALE"
    SEX_CHOICE = [
        (FEMALE, "Female"),
        (MALE, "Male")
    ]
    sex = models.CharField(max_length=4, blank=False,
                           null=True, choices=SEX_CHOICE)
    phone_number = models.CharField(max_length=11, null=True, blank=True)
    address = models.CharField(max_length=255, null=True, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    def __str__(self):  # display as the owning user's username
        return self.user.username
@receiver(post_save, sender=User)  # listen for User saves
def create_user_profile(sender, instance, created, **kwargs):
    """Create an empty Profile whenever a new User is created."""
    if created:  # only on initial creation, not on subsequent saves
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time its User is saved."""
    instance.profile.save()
|
from collections import defaultdict
def _items_from_list(l):
for i in range(len(l)):
yield i, l[i]
def _items_from_dict(l):
return l.items()
def choose_from_distribution(distribution, random_number):
s = 0
last_key = None
items = (
_items_from_dict if hasattr(distribution, 'items')
else _items_from_list
)
for i, weight in items(distribution):
s += weight
if s > random_number:
return i
else:
last_key = i
return last_key
def choose_legal_action_randomly(state, random_number):
    """
    Pick an action uniformly from state.legal_actions() using a pre-drawn
    random number in [0, 1); if the number is >= 1 the last action wins.
    """
    weight = 1.0 / state.num_legal_actions()
    cumulative = 0.0
    for action in state.legal_actions():
        cumulative += weight
        if cumulative > random_number:
            break
    return action
def probability_distribution_over_legal_actions(
    state,
    distribution,
    sum_over_distribution,
    new_distribution=lambda: defaultdict(lambda: 0.0)
):
    """
    Normalise *distribution* over the state's legal actions; when the given
    sum is not positive, fall back to a uniform distribution.
    """
    p = new_distribution()
    if sum_over_distribution > 0.0:
        p.update(
            (action, distribution[action] / sum_over_distribution)
            for action in state.legal_actions()
        )
    else:
        uniform_weight = 1.0 / state.num_legal_actions()
        p.update((action, uniform_weight) for action in state.legal_actions())
    return p
|
#!/usr/bin/env python3
#Answer to exercise number 2
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# Keep only the even values.
b = [number for number in a if number % 2 == 0]
print(b)
|
from django.contrib import admin
from .models import *
# Register your models here.
# NOTE(review): this comment block previously contained hard-coded admin
# credentials; removed -- never commit passwords to source control. Rotate
# the exposed account's password.
admin.site.register(Pessoa)
admin.site.register(Professor)
admin.site.register(Aluno)
admin.site.register(Notas)
admin.site.register(Disciplinas)
admin.site.register(Perguntasx)
admin.site.register(Frequencia)
admin.site.register(Instituicao)
|
import os
from collections import Counter

# Find lines that occur more than once in each log file under `path` and
# write them to a per-file resultN.txt.
path = 'C:\\Users\\julia\\Logs\\'
files = os.scandir(path)
data = []
results = []
for item in files:
    if item.is_file():
        # Fixed: a results slot used to be appended for *every* directory
        # entry, so `results` and `data` went out of step whenever the
        # folder contained sub-directories.
        results.append([])
        with open(path+item.name) as fp:
            lines = fp.readlines()
            data.append(lines)
# For each file, record every line that also occurs earlier in the file.
# A line occurring k times is recorded k-1 times, in reverse file order --
# identical to the original pop()-and-membership scan, but O(n) instead of
# O(n^2), and without destroying `data`.
for index, content in enumerate(data):
    remaining = Counter(content)
    for line in reversed(content):
        remaining[line] -= 1
        if remaining[line] > 0:
            results[index].append(line)
for index, res in enumerate(results):
    with open(path + "result" + str(index) + ".txt", "w") as f:
        f.writelines(res)
print(results)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@Time : 2018-11-09 21:39
@Author : jianjun.wang
@Email : alanwang6584@gmail.com
"""
import numpy as np
import cv2 as cv
# Draw two crossing diagonal lines on a black canvas and display the result.
img = np.zeros((320, 320, 3), np.uint8)  # blank 3-channel (BGR) image
print img.shape  # prints: (320, 320, 3)  (Python 2 print statement)
# start and end coordinates of the first (green) line
ptStart = (60, 60)
ptEnd = (260, 260)
point_color = (0, 255, 0) # BGR
thickness = 1
lineType = 4  # 4-connected line
cv.line(img, ptStart, ptEnd, point_color, thickness, lineType)
# second, crossing (red) line
ptStart = (260, 60)
ptEnd = (60, 260)
point_color = (0, 0, 255) # BGR
thickness = 1
lineType = 8  # 8-connected line
cv.line(img, ptStart, ptEnd, point_color, thickness, lineType)
cv.namedWindow("image")
cv.imshow('image', img)
cv.waitKey (10000)  # window closes after 10000 ms (10 s)
cv.destroyAllWindows()
|
# coding: utf-8
import os
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock, call
from shimbase.database import Database, DatabaseObject, DatabaseInvObjError, \
AdhocKeys
from shimbase.dbimpl import DatabaseDataError, DatabaseIntegrityError
from shimbase.sqlite3impl import SQLite3Impl
from foo import Foo
from bar import Bar
class TestDatabase(TestCase):
"""Database tests"""
db = None
    @classmethod
    def setUpClass(cls):
        """Build a fresh SQLite test DB from the SQL scripts and open it."""
        createName = './db/createdb.sql'
        testDataName = './db/*_data.sql'  # shell glob, expanded by `cat`
        dbName = './db/test.db'
        # NOTE(review): os.system return codes are ignored; a failing script
        # only surfaces later as missing tables/rows.
        os.system('cat {} | sqlite3 {}'.format(createName, dbName))
        os.system('cat {} | sqlite3 {}'.format(testDataName, dbName))
        cls.db = Database(dbName, SQLite3Impl())
@classmethod
def tearDownClass(cls):
cls.db.close()
def setUp(self):
TestDatabase.db.enableForeignKeys()
def tearDown(self):
pass
def test_select(self):
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 2)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 98} : Values {'heading': 98, 'speed': 2.3, 'signal': 'X'}")
self.assertEqual(bars[1].getTable(), 'bar')
self.assertEqual(bars[1].getId(), 99)
self.assertEqual(bars[1].getHeading(), 99)
self.assertEqual(bars[1].getSignal(), 'Z')
bars = TestDatabase.db.select(Bar(98))
self.assertEqual(len(bars), 1)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 98} : Values {'heading': 98, 'speed': 2.3, 'signal': 'X'}")
bars = TestDatabase.db.select(Bar.createAdhoc({'signal': 'Z'}))
self.assertEqual(len(bars), 1)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 99} : Values {'heading': 99, 'speed': 2.4, 'signal': 'Z'}")
def test_select_Ordered(self):
bars = TestDatabase.db.select(Bar.createAdhoc(order=('>id',)))
self.assertEqual(len(bars), 2)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 99} : Values {'heading': 99, 'speed': 2.4, 'signal': 'Z'}")
self.assertEqual(
str(bars[1]), "bar : Keys {'id': 98} : Values {'heading': 98, 'speed': 2.3, 'signal': 'X'}")
with TestDatabase.db.transaction() as t:
TestDatabase.db.upsert(Bar(101, 180, 33.5, 'A'))
bars = TestDatabase.db.select(Bar.createAdhoc(order=('signal',)))
self.assertEqual(len(bars), 3)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 101} : Values {'heading': 180, 'speed': 33.5, 'signal': 'A'}")
self.assertEqual(
str(bars[1]), "bar : Keys {'id': 98} : Values {'heading': 98, 'speed': 2.3, 'signal': 'X'}")
self.assertEqual(
str(bars[2]), "bar : Keys {'id': 99} : Values {'heading': 99, 'speed': 2.4, 'signal': 'Z'}")
t.fail()
def test_foreign_key(self):
with TestDatabase.db.transaction(), \
self.assertRaises(DatabaseIntegrityError) as cm:
# bar id 999 should not exist
TestDatabase.db.upsert(Foo('my foo', 'a new foo', 999))
self.assertEqual('FOREIGN KEY constraint failed', cm.exception.args[0])
def test_transaction(self):
# test commit
with TestDatabase.db.transaction():
TestDatabase.db.upsert(Bar(101, 180, 33.5, 'A'))
TestDatabase.db.upsert(Bar(102, 270, 50.33, 'B'))
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 4)
# test rollback on exception
with TestDatabase.db.transaction(), \
self.assertRaises(DatabaseIntegrityError) as cm:
TestDatabase.db.upsert(Foo('a new foo', ))
self.assertEqual(cm.exception.args[0], 'NOT NULL constraint failed: foo.desc')
self.assertEqual(len(bars), 4)
# test a forced rollback
with TestDatabase.db.transaction() as t:
TestDatabase.db.upsert(
Bar(104, 355, 99.99, 'D'))
t.fail()
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 4)
# restore table to pre-test state
with TestDatabase.db.transaction():
TestDatabase.db.delete(Bar(101))
TestDatabase.db.delete(Bar(102))
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 2)
def test_select_NoRows(self):
bars = TestDatabase.db.select(Bar(1000))
self.assertEqual(len(bars), 0)
def test_upsert(self):
with TestDatabase.db.transaction() as t:
TestDatabase.db.upsert(Bar(101, 180, 23.45, 'F'))
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 3)
self.assertEqual(
str(bars[2]), "bar : Keys {'id': 101} : Values {'heading': 180, 'speed': 23.45, 'signal': 'F'}")
TestDatabase.db.upsert(Bar(98, 270, signal='B'))
bars = TestDatabase.db.select(Bar(98))
self.assertEqual(len(bars), 1)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 98} : Values {'heading': 270, 'speed': None, 'signal': 'B'}")
# force a rollback
t.fail()
def test_null(self):
with TestDatabase.db.transaction() as t:
TestDatabase.db.upsert(Bar(999, 90, 120.0, 'C'))
bars = TestDatabase.db.select(Bar(999))
self.assertEqual(len(bars), 1)
self.assertEqual(
str(bars[0]), "bar : Keys {'id': 999} : Values {'heading': 90, 'speed': 120.0, 'signal': 'C'}")
# force a rollback
t.fail()
def test_DatabaseObjectError(self):
with TestDatabase.db.transaction(), \
self.assertRaises(DatabaseInvObjError) as cm:
leagues = TestDatabase.db.select(object())
self.assertEqual(
cm.exception.msg, 'Not a valid DB object : ' + str(object()))
def test_upsert_Error(self):
with TestDatabase.db.transaction(), \
self.assertRaises(DatabaseDataError) as cm:
TestDatabase.db.upsert(Bar())
self.assertEqual(
cm.exception.msg, 'No keys provided for UPDATE')
def test_delete(self):
with TestDatabase.db.transaction():
TestDatabase.db.upsert(Bar(123, 456, 78.9, 'D'))
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 3)
self.assertEqual(
str(bars[2]), "bar : Keys {'id': 123} : Values {'heading': 456, 'speed': 78.9, 'signal': 'D'}")
with TestDatabase.db.transaction():
TestDatabase.db.delete(Bar(123))
bars = TestDatabase.db.select(Bar())
self.assertEqual(len(bars), 2)
bars = TestDatabase.db.select(Bar(123))
self.assertEqual(len(bars), 0)
def test_delete_Error(self):
with TestDatabase.db.transaction(), \
self.assertRaises(DatabaseDataError) as cm:
TestDatabase.db.delete(Bar())
self.assertEqual(
cm.exception.msg, 'No keys provided for DELETE')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import unittest
    unittest.main()
|
"""Train a small dense network on MNIST with Keras.

Bug fix: the original used Python 2 ``print`` statements, which are syntax
errors under Python 3; they are now ``print()`` calls. The legacy Keras
keyword arguments (output_dim, lr, decay) are kept as-is to match the Keras
version this script was written for.
"""
import numpy as np
import os
np.random.seed(1337)  # fixed seed for reproducible runs
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop

# Load MNIST from a local copy, flatten images, scale pixels to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data(os.getcwd() + '/data/mnist.npz')
x_train = x_train.reshape(x_train.shape[0], -1) / 255.
x_test = x_test.reshape(x_test.shape[0], -1) / 255.
# One-hot encode the digit labels.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Another way to build neural net
model = Sequential([
    Dense(output_dim=32, input_dim=28*28),
    Activation('relu'),
    Dense(output_dim=10),
    Activation('softmax')
])
rmsprop_optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-6, decay=0.01)
model.compile(optimizer=rmsprop_optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print('Training ......')
model.fit(x=x_train, y=y_train, batch_size=32, epochs=10)
print('Testing ......')
loss, accuracy = model.evaluate(x=x_test, y=y_test, batch_size=32)
print('loss = %f , accuracy = %f' % (loss, accuracy))
from .CSVParser import CSV
from .URLCSVParser import URLCSV
from .ArffParser import Arff
from .URLArffParser import URLARFF
|
class Date:
    """A calendar date (day / month / year) with equality and ordering.

    Attribute names are French (jour=day, mois=month, annee=year), matching
    the rest of the codebase.
    """
    annee: int
    mois: int
    jour: int
    def __init__(self, jour=1, mois=1, annee=1960):
        """
        Build a Date.
        :param jour: day of the month, must be in 1..31
        :type jour: int
        :param mois: month, must be in 1..12
        :type mois: int
        :param annee: year
        :type annee: int
        :raises ValueError: if jour or mois is outside its valid range
        """
        # Bug fix: the original checks used `< 0`, silently accepting 0 for
        # the day or month even though the error messages promise a 1-based
        # range.
        if jour < 1 or jour > 31:
            raise ValueError("le jour doit être compris entre 1 et 31")
        if mois < 1 or mois > 12:
            raise ValueError("le mois doit être compris entre 1 et 12")
        self.jour = jour
        self.mois = mois
        self.annee = annee
    @staticmethod
    def from_iso_format(iso_format="01/01/1960"):
        """Parse a "jj/mm/aaaa" string and return the corresponding Date.

        NOTE(review): despite the name, the expected input is day-first,
        slash-separated — not ISO 8601. Kept for interface compatibility.
        """
        sep = iso_format.split("/")
        return Date(int(sep[0]), int(sep[1]), int(sep[2]))
    def __eq__(self, other: 'Date') -> bool:
        """Operator ==. Bug fix: returns False (the original fell off the
        end and returned None) when *other* is not a Date."""
        if not isinstance(other, Date):
            return False
        return (self.annee, self.mois, self.jour) == \
            (other.annee, other.mois, other.jour)
    def __lt__(self, other: 'Date') -> bool:
        """Operator <, comparing year, then month, then day."""
        if not isinstance(other, Date):
            return False
        return (self.annee, self.mois, self.jour) < \
            (other.annee, other.mois, other.jour)
    def __str__(self):
        # Same "annee-mois-jour" rendering as the original (no zero padding).
        return f"{self.annee}-{self.mois}-{self.jour}"
# Small demonstration of the comparison operators when run directly.
if __name__ == "__main__":
    date1 = Date(31, 5, 2020)
    date2 = Date(6, 9, 2020)
    print(date1 == date2)
    print(date1 < date2)
    print(date2 < date1)
|
from django.urls import path, re_path
from django.views.generic import TemplateView
from .views import *
from rest_framework.schemas import get_schema_view
# Optional trailing-slash fragment appended to the regex-based routes below.
slash = '/?'
# URL path prefix for each family of REST endpoints.
rest_urls = {
    'cohort': 'rest/cohort/',
    'trait': 'rest/trait/',
    'trait_category': 'rest/trait_category/',
    'performance': 'rest/performance/',
    'publication': 'rest/publication/',
    'release': 'rest/release/',
    'sample_set': 'rest/sample_set/',
    'score': 'rest/score/',
    'gwas': 'rest/gwas/get_score_ids/'
}
# Route table. Each path()-based detail endpoint is registered twice — with
# and without a trailing slash — because plain path() cannot express an
# optional slash; the re_path() routes use the `slash` regex fragment instead.
urlpatterns = [
    # REST Documentation
    path('rest/', TemplateView.as_view(template_name="rest_api/rest_doc.html")),
    # Cohorts
    path(rest_urls['cohort']+'<str:cohort_symbol>', RestCohorts.as_view(), name="getCohorts"),
    path(rest_urls['cohort']+'<str:cohort_symbol>/', RestCohorts.as_view(), name="getCohorts"),
    # EFO Traits
    re_path(r'^'+rest_urls['trait']+'all'+slash, RestListEFOTraits.as_view(), name="getAllTraits"),
    re_path(r'^'+rest_urls['trait']+'search'+slash, RestEFOTraitSearch.as_view(), name="searchTraits"),
    path(rest_urls['trait']+'<str:trait_id>', RestEFOTrait.as_view(), name="getTrait"),
    path(rest_urls['trait']+'<str:trait_id>/', RestEFOTrait.as_view(), name="getTrait"),
    # Performance metrics
    re_path(r'^'+rest_urls['performance']+'search'+slash, RestPerformanceSearch.as_view(), name="searchPerformanceMetrics"),
    path(rest_urls['performance']+'<str:ppm_id>', RestPerformance.as_view(), name="getPerformanceMetric"),
    path(rest_urls['performance']+'<str:ppm_id>/', RestPerformance.as_view(), name="getPerformanceMetric"),
    # Publications
    re_path(r'^'+rest_urls['publication']+'all'+slash, RestListPublications.as_view(), name="getAllPublications"),
    re_path(r'^'+rest_urls['publication']+'search'+slash, RestPublicationSearch.as_view(), name="searchPublications"),
    path(rest_urls['publication']+'<str:pgp_id>', RestPublication.as_view(), name="getPublication"),
    path(rest_urls['publication']+'<str:pgp_id>/', RestPublication.as_view(), name="getPublication"),
    # Releases
    re_path(r'^'+rest_urls['release']+'all'+slash, RestListReleases.as_view(), name="getAllReleases"),
    re_path(r'^'+rest_urls['release']+'current'+slash, RestCurrentRelease.as_view(), name="getCurrentRelease"),
    path(rest_urls['release']+'<str:release_date>', RestRelease.as_view(), name="getRelease"),
    path(rest_urls['release']+'<str:release_date>/', RestRelease.as_view(), name="getRelease"),
    # Sample Set
    re_path(r'^'+rest_urls['sample_set']+'search'+slash, RestSampleSetSearch.as_view(), name="searchSampleSet"),
    path(rest_urls['sample_set']+'<str:pss_id>', RestSampleSet.as_view(), name="getSampleSet"),
    path(rest_urls['sample_set']+'<str:pss_id>/', RestSampleSet.as_view(), name="getSampleSet"),
    # Scores
    re_path(r'^'+rest_urls['score']+'all'+slash, RestListScores.as_view(), name="getAllScores"),
    re_path(r'^'+rest_urls['score']+'search'+slash, RestScoreSearch.as_view(), name="searchScores"),
    path(rest_urls['score']+'<str:pgs_id>', RestScore.as_view(), name="getScore"),
    path(rest_urls['score']+'<str:pgs_id>/', RestScore.as_view(), name="getScore"),
    # Extra endpoints
    path(rest_urls['gwas']+'<str:gcst_id>', RestGCST.as_view(), name="pgs_score_ids_from_gwas_gcst_id"),
    path(rest_urls['gwas']+'<str:gcst_id>/', RestGCST.as_view(), name="pgs_score_ids_from_gwas_gcst_id"),
    # Trait Category
    re_path(r'^'+rest_urls['trait_category']+'all'+slash, RestListTraitCategories.as_view(), name="getAllTraitCategories")
]
|
from drawLine import ViewPort,drawLine
from drawPolygon import drawPoly
#from scanLine import scanLineUtil
import math,sys
from graphics import color_rgb,Line,Point
'''
l=[
[40,0,0],[80,0,40],[40,0,80],[0,0,40],
[40,40,0],[80,40,40],[40,40,80],[0,40,40]
]
'''
def drawAxis(win,new_view):
    """Draw the three coordinate axes of the viewport in blue, each with an
    arrowhead at its far end, and return the window."""
    axes = (
        Line(Point(0,0),Point(new_view.xVmax,0)),
        Line(Point(0,0),Point(0,new_view.yVmax)),
        Line(Point(0,0) , Point(new_view.xVmin,new_view.yVmin)),
    )
    for axis in axes:
        axis.setFill('blue')
        axis.setArrow("last")
    for axis in axes:
        axis.draw(win)
    return win
class _3D_helper:
def __init__(self,vport,win):
port = vport
win = win
def add_padding(self,l):
for i in l:
i.append(1)
return l
def remove_padding(self,l):
for i in range(len(l)):
for j in range(len(l[0])):
l[i][j] = l[i][j]//l[i][3]
for i in range(len(l)):
l[i] = l[i][:-1]
return l
def matrix_multiply(self,m1,m2):
return [[int(sum(a*b for a,b in zip(A_row,B_col))) for B_col in zip(*m2) ] for A_row in m1]
def draw_2d_polygon(self,l2d,color,undraw=False):
n = len(l2d)
drawPoly(l2d[:n//2], win ,color)
drawPoly(l2d[n//2:], win ,color)
#scanLineUtil(l2d[:n//2], win ,"green")
#scanLineUtil(l2d[n//2:], win, "purple")
for i in range(n//2):
x1,y1,x2,y2 = l2d[i][0],l2d[i][1],l2d[(n//2)+i][0],l2d[(n//2)+i][1]
drawLine(win,color,x1,y1,x2,y2)
if undraw: drawAxis(win,port)
def parallel(self,l,x0,y0,z0,n1,n2,n3,a,b,c):
d0 = n1*x0 + n2*y0 + n3*z0
d1 = n1*a + n2*b + n3*c
l = self.add_padding(l)
trans_matrix = [
[d1-a*n1,-b*n1,-c*n1,0],
[-a*n2,d1-b*n2,c*n2,0],
[-a*n3,-b*n3,d1-c*n3,0],
[a*d0,b*d0,c*d0,d1]
]
result = self.matrix_multiply(l,trans_matrix)
result = self.remove_padding(result)
return convert_to_2d(result)
def perspective(self,l,x0,y0,z0,n1,n2,n3,a,b,c):
d0 = x0*n1 + y0*n2 + z0*n3
d1 = a*n1 + b*n2 + c*n3
d = d0-d1
l = self.add_padding(l)
trans_matrix = [
[n1*a+d,b*n1,c*n1,n1],
[a*n2,b*n2+d,c*n2,n2],
[a*n3,b*n3,c*n3+d,n3],
[-a*d0,-b*d0,-c*d0,-d1]
]
result = self.matrix_multiply(l,trans_matrix)
result = self.remove_padding(result)
return convert_to_2d(result)
def orthographic_projection(self,l):
x0,y0,z0 = 0,0,0
n1,n2,n3 = 0,0,1
a,b,c = 0,0,1
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def isometric_projection(self,l):
x0,y0,z0 = 50,50,50
n1,n2,n3 = 1,1,1
a,b,c = 1,1,1
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def diametric_projection(self,l):
x0,y0,z0 = 50,0,0
n1,n2,n3 = 1,1,2
a,b,c = 1,1,2
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def trimetric_projection(self,l):
x0,y0,z0 = 50,0,0
n1,n2,n3 = 6,4,3
a,b,c = 6,4,3
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def cavalier_projection(self,l):
x0,y0,z0 = 0,0,0
n1,n2,n3 = 0,0,1
a,b,c = 3,4,5
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def cabinet_projection(self,l):
x0,y0,z0 = 0,0,0
n1,n2,n3 = 0,0,1
a,b,c = 3,4,10
return self.parallel(l,x0,y0,z0,n1,n2,n3,a,b,c)
def one_point_perspective(self,l):
x0,y0,z0 = 0,0,0
n1,n2,n3 = 0,0,1
a,b,c = 50,50,150
return self.perspective(l,x0,y0,z0,n1,n2,n3,a,b,c)
def two_point_perspective(self,l):
x0,y0,z0 = 200,0,0
n1,n2,n3 = 1,1,0
a,b,c = -100,-75,-50
return self.perspective(l,x0,y0,z0,n1,n2,n3,a,b,c)
def three_point_perspective(self,l):
x0,y0,z0 = 0,0,0
n1,n2,n3 = 1,1,1
a,b,c = 150,150,150
return self.perspective(l,x0,y0,z0,n1,n2,n3,a,b,c)
def convert_to_2d(l):
    """Project 3-D points to 2-D drawing coordinates: the z component is
    folded into both x and y along a 45-degree oblique direction."""
    angle = math.pi/4
    dx, dy = math.cos(angle), math.sin(angle)
    return [(p[0] - int(p[2]*dx), p[1] - int(p[2]*dy)) for p in l]
if __name__=="__main__":
    # Set up an 800x800 viewport centred on the origin and draw the axes.
    port = ViewPort(-400,-400,400,400)
    win = port.init_view()
    win = drawAxis(win,port)
    __3d = _3D_helper(port,win)
    cube_size = 80
    # Eight vertices of the solid: the first four form one face, the last
    # four the opposite face (draw_2d_polygon relies on this ordering).
    l=[
        [40,0,0],[80,0,40],[40,0,80],[0,0,40],
        [40,40,0],[80,40,40],[40,40,80],[0,40,40]
    ]
    '''l = [[0,0,0],
        [cube_size,0,0],
        [cube_size,cube_size,0],
        [0,cube_size,0],
        [0,0,cube_size],
        [cube_size,0,cube_size],
        [cube_size,cube_size,cube_size],
        [0,cube_size,cube_size]]'''
    l2d = convert_to_2d(l)
    print(l2d)
    __3d.draw_2d_polygon(l2d,"red")
    print('''Enter any operation (on the viewport):
        o -> orthographic
        i -> isometric
        d -> diametric
        t -> trimetric
        c -> cavalier
        a -> cabinet
        l -> original polygon
        1 -> one point perspective
        2 -> two point perspective
        3 -> three point perspective
        q -> quit
    Press e after each operation to undraw the projected polygon''')
    i=0
    # Event loop: each keypress selects a projection, drawn in yellow;
    # pressing e afterwards erases it by overdrawing in the background
    # colour (44,44,44).
    while(True):
        key = win.getKey()
        if(i==0):
            # First keypress: erase the original red polygon once.
            __3d.draw_2d_polygon(l2d,color_rgb(44,44,44),undraw=True)
            i=1
        if(key == 'o'):
            mat = __3d.orthographic_projection(l)
        elif(key == 'i'):
            mat = __3d.isometric_projection(l)
        elif(key == 'd'):
            mat = __3d.diametric_projection(l)
        elif(key == 't'):
            mat = __3d.trimetric_projection(l)
        elif(key == 'c'):
            mat = __3d.cavalier_projection(l)
        elif(key == 'a'):
            mat = __3d.cabinet_projection(l)
        elif(key == 'l'):
            mat = l2d
        elif(key == '1'):
            mat = __3d.one_point_perspective(l)
        elif(key == '2'):
            mat = __3d.two_point_perspective(l)
        elif(key == '3'):
            mat = __3d.three_point_perspective(l)
        elif(key == 'q'):
            break
        else:
            print("Invalid key pressed...")
            continue
        __3d.draw_2d_polygon(mat,"yellow")
        print(f'Operation {key} is completed...Press e to erase')
        if(win.getKey() == 'e'):
            __3d.draw_2d_polygon(mat,color_rgb(44,44,44),undraw=True)
    # Wait for a final mouse click before closing the window.
    win.getMouse()
    win.close()
import asyncio
import contextlib
import collections
import hashlib
import json
import os
import pathlib
import re
import textwrap
from .async_helpers import safe_communicate
from .compat import makedirs
from .error import PrintableError
from .keyval import KeyVal
# git output modes: sentinels selecting how GitSession.git() returns stdout
# (TEXT_MODE -> decoded and rstripped str, BINARY_MODE -> raw bytes).
TEXT_MODE = object()
BINARY_MODE = object()
# for tests
DEBUG_GIT_COMMAND_COUNT = 0  # incremented on every git invocation
def compute_key(data):
    """Return a stable SHA1 hex digest identifying *data*.

    The fields are serialized to canonical JSON first: "sort_keys" keeps the
    hash stable despite unspecified dictionary key order, the explicit
    separators make the JSON compact and protect us against changes in the
    default, and "ensure_ascii" (true by default) is pinned for the same
    reason.
    """
    canonical = json.dumps(
        data, sort_keys=True, ensure_ascii=True, separators=(',', ':'))
    return hashlib.sha1(canonical.encode("utf8")).hexdigest()
class GitSession:
    '''All of our git operations will share the same repo, but we don't want
    them to share the same index file. That's for two reasons:

    1) We want to be able to run multiple operations in parallel that write
       to the index file.
    2) We want to be able to save the index file corresponding to the last
       imports, and guarantee that nothing will touch it.

    A git session owns the index file it does operations on. We also use this
    class to abstract away the low level details of git command flags. (And in
    the future, this could be where we plug in libgit2.)'''
    def __init__(self, git_dir, index_file, working_copy):
        # git_dir: the bare repo; index_file: this session's private index;
        # working_copy: directory used as git's work tree (may be falsy).
        self.git_dir = git_dir
        self.index_file = index_file
        self.working_copy = working_copy
    async def git(self, *args, input=None, output_mode=TEXT_MODE, cwd=None):
        '''Run a git subcommand against this session's repo/index and return
        its stdout: decoded and rstripped in TEXT_MODE, raw bytes in
        BINARY_MODE. *input* (str or bytes) is fed to stdin. Raises GitError
        on a nonzero exit status.'''
        global DEBUG_GIT_COMMAND_COUNT
        DEBUG_GIT_COMMAND_COUNT += 1
        command = ['git']
        command.append('--git-dir=' + self.git_dir)
        if self.working_copy:
            command.append("--work-tree=" + self.working_copy)
        command.extend(args)
        if isinstance(input, str):
            input = input.encode()
        process = await asyncio.subprocess.create_subprocess_exec(
            *command,
            cwd=cwd,
            env=self.git_env(),
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        stdout, stderr = await safe_communicate(process, input)
        stderr = stderr.decode()
        if output_mode == TEXT_MODE:
            stdout = stdout.decode()
            stdout = stdout.rstrip()
        if process.returncode != 0:
            raise GitError(command, process.returncode, stdout, stderr)
        return stdout
    def git_env(self):
        'Set the index file and prevent git from reading global configs.'
        env = dict(os.environ)
        for var in ["HOME", "XDG_CONFIG_HOME"]:
            env.pop(var, None)
        env["GIT_CONFIG_NOSYSTEM"] = "true"
        # Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As
        # a workaround, we absoluteify the path.
        env["GIT_INDEX_FILE"] = os.path.abspath(self.index_file)
        return env
    async def init_git_dir(self):
        'Create the bare repository on disk.'
        await self.git('init', '--bare')
    async def read_tree_into_index(self, tree):
        'Load the given tree object into this session\'s index.'
        await self.git('read-tree', tree)
    async def read_tree_and_stats_into_index(self, tree):
        '''Load *tree* into the index and refresh the cached stat()
        information so later diff-files calls are accurate.'''
        await self.read_tree_into_index(tree)
        # Refresh all the stat() information in the index.
        try:
            # This throws an error on modified files. Suppress it.
            await self.git('update-index', '--refresh')
        except GitError as e:
            if 'needs update' not in e.stdout:
                # Reraise any errors we don't recognize.
                raise
    async def make_tree_from_index(self):
        'Write the current index as a tree object and return its hash.'
        tree = await self.git('write-tree')
        return tree
    async def read_working_copy_into_index(self, picks):
        '''Stage the working copy. With *picks*, only the given paths are
        added; otherwise everything is.'''
        # Use --force to avoid .gitignore rules. We shouldn't respect them.
        if picks:
            # As in list_tree_entries, prepend ./ to avoid interpreting leading
            # colons in pathspecs.
            picks = ["./" + pick for pick in picks]
            await self.git('add', '--force', '--', *picks)
        else:
            await self.git('add', '--all', '--force')
    async def drop_paths_from_index(self, paths):
        'Remove the given paths (and anything under them) from the index.'
        if not paths:
            return
        # As in list_tree_entries, prepend ./ to avoid interpreting leading
        # colons in pathspecs.
        paths = ["./" + path for path in paths]
        ls_output = await self.git(
            'ls-files', '--full-name', '-z', *paths, output_mode=BINARY_MODE)
        await self.git(
            'update-index', '--force-remove', '-z', '--stdin', input=ls_output)
    async def merge_tree_into_index(self, tree, prefix):
        '''Merge *tree* into the index under *prefix*, without touching the
        working copy.'''
        # The --prefix argument to read-tree chokes on paths that contain dot
        # or dot-dot. Instead of './', it wants the empty string. Oblige it.
        # NOTE: This parameter must be forward-slash-separated, even on
        # Windows. os.path.normpath() is not correct here!
        prefix_path = pathlib.PurePosixPath(prefix)
        assert '..' not in prefix_path.parts
        prefix_arg = prefix_path.as_posix()
        prefix_arg = '' if prefix_arg == '.' else prefix_arg
        # Normally read-tree with --prefix wants to make sure changes don't
        # stomp on the working copy. The -i flag ignores the working copy.
        await self.git('read-tree', '-i', '--prefix', prefix_arg, tree)
    async def working_copy_matches_index(self):
        'Return True if no tracked file differs from the index.'
        diff_output = await self.git('diff-files', output_mode=BINARY_MODE)
        return len(diff_output) == 0
    async def get_modified_files_skipping_deletes(self):
        '''Return the names of tracked files modified in the working copy,
        ignoring deletions.'''
        # We want to ignore deleted files, so we exclude only deletes using
        # 'd' instead of including all of the capital letter forms.
        # https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---diff-filterACDMRTUXB82308203
        diff_output = await self.git('diff-files', '-z', '--name-only',
                                     '--diff-filter=d')
        return [name for name in diff_output.split('\x00') if name]
    async def get_new_files_in_tree(self, previous_tree, new_tree):
        'Return the paths added between *previous_tree* and *new_tree*.'
        added_files_output = await self.git('diff-tree', '--diff-filter=A',
                                            '--name-only', '-r', '-z',
                                            previous_tree, new_tree)
        return added_files_output.split('\x00')
    async def read_tree_updating_working_copy(self, tree, force):
        '''This method relies on the current working copy being clean with
        respect to the current index. The benefit of this over
        checkout_missing_files_from_index(), is that is clean up files that get
        deleted between the current tree and the new one. Without force, this
        raises an error rather than overwriting modified files.'''
        if force:
            await self.git('read-tree', '--reset', '-u', tree)
        else:
            await self.git('read-tree', '-m', '-u', tree)
    async def checkout_files_from_index(self):
        # This recreates any deleted files. As far as I can tell,
        # checkout-index has no equivalent of the --full-tree flag we use with
        # ls-tree below. Instead, the --all flag seems to respect the directory
        # from which it's invoked, and only check out files below that
        # directory. This, this is currently the only command we invoke with an
        # explicit cwd. Original bug report:
        # https://github.com/buildinspace/peru/issues/210
        await self.git('checkout-index', '--all', cwd=self.working_copy)
    async def get_info_for_path(self, tree, path):
        '''Return the (mode, type, sha1, name) tuple for the single entry at
        *path* inside *tree*; raises FileNotFoundError if absent.'''
        # --full-tree makes ls-tree ignore the cwd. As in list_tree_entries,
        # prepend ./ to avoid interpreting leading colons in pathspecs.
        ls_output = await self.git('ls-tree', '--full-tree', '-z', tree,
                                   "./" + path)
        ls_lines = ls_output.strip('\x00').split('\x00')
        # Remove empty lines.
        ls_lines = list(filter(None, ls_lines))
        if len(ls_lines) == 0:
            raise FileNotFoundError('Path "{}" not found in tree {}.'.format(
                path, tree))
        assert len(ls_lines) == 1
        mode, type, sha1, name = ls_lines[0].split()
        return mode, type, sha1, name
    async def read_bytes_from_file_hash(self, sha1):
        'Return the raw contents of the blob object with the given hash.'
        return (await self.git(
            'cat-file', '-p', sha1, output_mode=BINARY_MODE))
    async def list_tree_entries(self, tree, path, recursive):
        '''Return a {name: TreeEntry} dict for *tree*, optionally restricted
        to *path* and optionally recursing into subtrees.'''
        # Lines in ls-tree are of the following form (note that the wide space
        # is a tab):
        # 100644 blob a2b67564ae3a7cb3237ee0ef1b7d26d70f2c213f    README.md
        entry_regex = r'(\w+) (\w+) (\w+)\t(.*)'
        command = ['ls-tree', '-z', tree]
        if path is not None:
            # If we do something like `git ls-tree -r -t HEAD foo/bar`, git
            # will include foo in the output, because it was traversed. We
            # filter those entries out below, by excluding results that are
            # shorter than the original path. However, git will canonicalize
            # paths in its output, and we need to match that behavior for the
            # comparison to work.
            canonical_path = str(pathlib.PurePosixPath(path))
            # However, another complication: ls-tree arguments are what git
            # calls "pathspecs". That means that leading colons have a special
            # meaning. In order to support leading colons, we always prefix the
            # path with dot-slash in git's arguments. As noted above, the
            # dot-slash will be stripped again in the final output.
            command += ["./" + canonical_path]
        if recursive:
            # -t means tree entries are included in the listing.
            command += ['-r', '-t']
        output = await self.git(*command)
        if not output:
            return {}
        entries = {}
        for line in output.strip('\x00').split('\x00'):
            mode, type, hash, name = re.match(entry_regex, line).groups()
            if (recursive and path is not None
                    and len(name) < len(canonical_path) and type == TREE_TYPE):
                # In recursive mode, leave out the parents of the target dir.
                continue
            entries[name] = TreeEntry(mode, type, hash)
        return entries
    async def make_tree_from_entries(self, entries):
        '''Build a tree object from a {name: (mode, type, hash)} mapping via
        `git mktree` and return its hash.'''
        entry_format = '{} {} {}\t{}'
        input = '\x00'.join(
            entry_format.format(mode, type, hash, name)
            for name, (mode, type, hash) in entries.items())
        tree = await self.git('mktree', '-z', input=input)
        return tree
async def Cache(root):
    '''This is the async constructor for the _Cache class.

    It exists because __init__ cannot await: initializing the backing git
    trees (_init_trees) is an async operation.'''
    cache = _Cache(root)
    await cache._init_trees()
    return cache
class _Cache:
def __init__(self, root):
"Don't instantiate this class directly. Use the Cache() constructor."
self.root = root
self.plugins_root = os.path.join(root, "plugins")
makedirs(self.plugins_root)
self.tmp_path = os.path.join(root, "tmp")
makedirs(self.tmp_path)
self.keyval = KeyVal(os.path.join(root, 'keyval'), self.tmp_path)
self.trees_path = os.path.join(root, "trees")
self._empty_tree = None
async def _init_trees(self):
if not os.path.exists(os.path.join(self.trees_path, 'HEAD')):
makedirs(self.trees_path)
with self.clean_git_session() as session:
await session.init_git_dir()
# Override any .gitattributes files that might be in the sync dir,
# by writing 'info/attributes' in the bare repo. There are many
# attributes that we might want to disable, but disabling 'text'
# seems to take care of both 'text' and 'eol', which are the two
# that I know can cause problems. We might need to add more
# attributes here in the future. Note that other config files are
# disabled in _git_env below.
attributes_path = os.path.join(self.trees_path, 'info',
'attributes')
with open(attributes_path, 'w') as attributes:
# Disable the 'text' attribute for all files.
attributes.write('* -text')
@contextlib.contextmanager
def clean_git_session(self, working_copy=None):
with self.keyval.tmp_dir_context() as tmp_dir:
# Git will initialize a nonexistent index file. Empty files cause
# an error though.
index_file = os.path.join(tmp_dir, "index")
yield GitSession(self.trees_path, index_file, working_copy)
def no_index_git_session(self):
return GitSession(self.trees_path, os.devnull, os.devnull)
async def get_empty_tree(self):
if not self._empty_tree:
with self.clean_git_session() as session:
self._empty_tree = await session.make_tree_from_index()
return self._empty_tree
async def import_tree(self, src, *, picks=None, excludes=None):
if not os.path.exists(src):
raise RuntimeError('import tree called on nonexistent path ' + src)
with self.clean_git_session(src) as session:
await session.read_working_copy_into_index(picks)
# We want to avoid ever importing a .peru directory. This is a
# security/correctness issue similar to git's issue with .git dirs,
# and just like git we need to watch out for case-insensitive
# filesystems. See also:
# https://github.com/blog/1938-vulnerability-announced-update-your-git-clients.
full_excludes = dotperu_exclude_case_insensitive_git_globs()
if excludes:
full_excludes += excludes
await session.drop_paths_from_index(full_excludes)
tree = await session.make_tree_from_index()
return tree
async def merge_trees(self, base_tree, merge_tree, merge_path='.'):
with self.clean_git_session() as session:
if base_tree:
await session.read_tree_into_index(base_tree)
try:
await session.merge_tree_into_index(merge_tree, merge_path)
except GitError as e:
raise MergeConflictError(e.stdout) from e
unified_tree = await session.make_tree_from_index()
return unified_tree
async def export_tree(self,
tree,
dest,
previous_tree=None,
*,
force=False,
previous_index_file=None):
'''This method is the core of `peru sync`. If the contents of "dest"
match "previous_tree", then export_tree() updates them to match "tree".
If not, it raises an error and doesn't touch any files.
Because it's important for the no-op `peru sync` to be fast, we make an
extra optimization for this case. The caller passes in the path to the
index file used during the last sync, which should already reflect
"previous_tree". That allows us to skip the read-tree and update-index
calls, so all we have to do is a single diff-files operation to check
for cleanliness.
It's difficult to predict all the different states the index file might
end up in under different error conditions, not only now but also in
past and future git versions. For safety and simplicity, if any
operation returns an error code, we delete the supplied index file.
Right now this includes expected errors, like "sync would overwrite
existing files," and unexpected errors, like "index is on fire."'''
tree = tree or (await self.get_empty_tree())
previous_tree = previous_tree or (await self.get_empty_tree())
makedirs(dest)
with contextlib.ExitStack() as stack:
# If the caller gave us an index file, create a git session around
# it. Otherwise, create a clean one. Note that because we delete
# the index file whenever there are errors, we also allow the
# caller to pass in a path to a nonexistent file. In that case we
# have to pay the cost to recreate it.
did_refresh = False
if previous_index_file:
session = GitSession(self.trees_path, previous_index_file,
dest)
stack.enter_context(delete_if_error(previous_index_file))
if not os.path.exists(previous_index_file):
did_refresh = True
await session.read_tree_and_stats_into_index(previous_tree)
else:
session = stack.enter_context(self.clean_git_session(dest))
did_refresh = True
await session.read_tree_and_stats_into_index(previous_tree)
# The fast path. If the previous tree is the same as the current
# one, and no files have changed at all, short-circuit.
if previous_tree == tree:
if (await session.working_copy_matches_index()):
return
# Everything below is the slow path. Some files have changed, or
# the tree has changed, or both. If we didn't refresh the index
# file above, we must do so now.
if not did_refresh:
await session.read_tree_and_stats_into_index(previous_tree)
modified = await session.get_modified_files_skipping_deletes()
if modified and not force:
raise DirtyWorkingCopyError(
'Imported files have been modified ' +
'(use --force to overwrite):\n\n' +
_format_file_lines(modified))
# Do all the file updates and deletions needed to produce `tree`.
try:
await session.read_tree_updating_working_copy(tree, force)
except GitError:
# Give a more informative error if we failed because files that
# are new in `tree` already existed in the working copy.
new_files = await session.get_new_files_in_tree(
previous_tree, tree)
existing_new_files = [
f for f in new_files
if f and os.path.exists(os.path.join(dest, f))
]
existing_new_files.sort()
if existing_new_files:
raise DirtyWorkingCopyError(
'Imports would overwrite preexisting files '
'(use --force to write anyway):\n\n' +
_format_file_lines(existing_new_files))
else:
# We must've failed for some other reason. Let the error
# keep going.
raise
# Recreate any missing files.
await session.checkout_files_from_index()
async def read_file(self, tree, path):
# TODO: Make this handle symlinks in the tree.
with self.clean_git_session() as session:
mode, type, sha1, name = await session.get_info_for_path(
tree, path)
if type == 'tree':
raise IsADirectoryError(
'Path "{}" in tree {} is a directory.'.format(path, tree))
assert type == 'blob'
return (await session.read_bytes_from_file_hash(sha1))
async def ls_tree(self, tree, path=None, *, recursive=False):
session = self.no_index_git_session()
return (await session.list_tree_entries(tree, path, recursive))
    async def modify_tree(self, tree, modifications):
        '''Apply `modifications`, a map of the form {path: TreeEntry}, to
        `tree`, and return the hash of the resulting tree. The base tree can
        be None to indicate an empty starting tree. The entries can be
        either blobs or trees, or None to indicate a deletion. The return
        value is the hash of the resulting tree; when every entry has been
        deleted this is the hash of the *empty* tree, not None.
        Modifications in parent directories are done before modifications in
        subdirectories below them, so for example you can insert a tree at a
        given path and also insert more new stuff beneath that path, without
        fear of overwriting the new stuff.'''
        # Read the original contents of the base tree.
        if tree is None:
            entries = {}
        else:
            entries = await self.ls_tree(tree, '.')
        # Separate the modifications into two groups, those that refer to
        # entries at the base of this tree (e.g. 'foo'), and those that refer
        # to entries in subtrees (e.g. 'foo/bar').
        modifications_at_base = dict()
        modifications_in_subtrees = collections.defaultdict(dict)
        for path_str, entry in modifications.items():
            # Canonicalize paths to get rid of duplicate/trailing slashes.
            path = pathlib.PurePosixPath(path_str)
            # Check for nonsense paths.
            # TODO: Maybe stop recursive calls from repeating these checks.
            if len(path.parts) == 0:
                raise ModifyTreeError('Cannot modify an empty path.')
            elif path.parts[0] == '/':
                raise ModifyTreeError('Cannot modify an absolute path.')
            elif '..' in path.parts:
                raise ModifyTreeError('.. is not allowed in tree paths.')
            if len(path.parts) == 1:
                modifications_at_base[str(path)] = entry
            else:
                first_dir = path.parts[0]
                rest = str(pathlib.PurePosixPath(*path.parts[1:]))
                modifications_in_subtrees[first_dir][rest] = entry
        # Insert or delete entries in the base tree. Note that this happens
        # before any subtree operations.
        for name, entry in modifications_at_base.items():
            if entry is None:
                entries.pop(name, None)
            else:
                entries[name] = entry
        # Recurse to compute modified subtrees. Note how we handle deletions:
        # If 'a' is a file, inserting a new file at 'a/b' will implicitly
        # delete 'a', but trying to delete 'a/b' will be a no-op and will not
        # delete 'a'.
        empty_tree = (await self.get_empty_tree())
        for name, sub_modifications in modifications_in_subtrees.items():
            subtree_base = None
            if name in entries and entries[name].type == TREE_TYPE:
                subtree_base = entries[name].hash
            new_subtree = await self.modify_tree(subtree_base,
                                                 sub_modifications)
            if new_subtree != empty_tree:
                entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree)
            # Delete an empty tree if it was actually a tree to begin with.
            elif name in entries and entries[name].type == TREE_TYPE:
                del entries[name]
        # Return the resulting tree (the empty-tree hash if nothing remains).
        if entries:
            session = self.no_index_git_session()
            tree = await session.make_tree_from_entries(entries)
            return tree
        else:
            return empty_tree
@contextlib.contextmanager
def delete_if_error(path):
    '''If the managed block raises any exception, remove the file at the
    given path (when it exists) before letting the exception propagate.'''
    try:
        yield
    except Exception:
        with contextlib.suppress(FileNotFoundError):
            os.remove(path)
        raise
def _format_file_lines(files):
'''Given a list of filenames that we're about to print, limit it to a
reasonable number of lines.'''
LINES_TO_SHOW = 10
if len(files) <= LINES_TO_SHOW:
lines = '\n'.join(files)
else:
lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format(
len(files)))
return lines
class GitError(Exception):
    '''Raised when an underlying git command exits with a nonzero status.

    Exposes the joined command line plus the exit code and the captured
    output streams, for callers that want to log or inspect them.
    '''

    def __init__(self, command, errorcode, stdout, stderr):
        self.command = " ".join(command)
        self.errorcode = errorcode
        self.stdout = stdout
        self.stderr = stderr
        summary = textwrap.dedent('''\
            git command "{}" returned error code {}.
            stdout: {}
            stderr: {}''').format(command, errorcode, stdout, stderr)
        super().__init__(summary)
class ModifyTreeError(PrintableError):
    '''Raised by modify_tree() for invalid paths (empty, absolute, '..').'''
    pass
class DirtyWorkingCopyError(PrintableError):
    '''Raised when imports would clobber local changes (see --force).'''
    pass
class MergeConflictError(PrintableError):
    '''Raised when trees cannot be merged cleanly (raise sites are outside
    this chunk).'''
    pass
# A single git tree entry: file mode, object type, and object hash.
TreeEntry = collections.namedtuple('TreeEntry', ['mode', 'type', 'hash'])
# Git object types as they appear in ls-tree output.
BLOB_TYPE = 'blob'
TREE_TYPE = 'tree'
# Git file modes (octal strings, as printed by git ls-tree).
NONEXECUTABLE_FILE_MODE = '100644'
EXECUTABLE_FILE_MODE = '100755'
TREE_MODE = '040000'
# All possible ways to capitalize ".peru", to exclude from imported trees.
# (Every upper/lower combination of the four letters: 2**4 = 16 entries,
# grouped by how many letters are uppercase.)
DOTPERU_CAPITALIZATIONS = [
    '.peru',
    '.Peru',
    '.pEru',
    '.peRu',
    '.perU',
    '.PEru',
    '.PeRu',
    '.PerU',
    '.pERu',
    '.pErU',
    '.peRU',
    '.PERu',
    '.PErU',
    '.PeRU',
    '.pERU',
    '.PERU',
]
def dotperu_exclude_case_insensitive_git_globs():
    """Return `git ls-files` globs excluding every ".peru" capitalization.

    These use the glob syntax accepted by `git ls-files` (NOT our own
    glob.py). Note that ** must match at least one path component, so we
    have to use separate globs for matches at the root and matches below.
    """
    # Two globs per capitalization, in the same order as the original
    # append loop: one anchored at the repo root, one for deeper levels.
    return [
        glob
        for capitalization in DOTPERU_CAPITALIZATIONS
        for glob in (capitalization + '/**', '**/' + capitalization + '/**')
    ]
|
import keras
class FitzHughNagumo(keras.layers.Layer):
    # NOTE(review): this layer looks unfinished. It references names that
    # are not defined anywhere in this file (`tf`, `input_dim`, `units` --
    # only `keras` is imported above), and `call` does not use the weights
    # created in `__init__`. TODO: confirm intended behavior before use.
    def __init__(self, initializer="he_normal", **kwargs):
        # NOTE(review): `initializer` and `**kwargs` are accepted but never
        # used; kwargs are not forwarded to the base Layer constructor.
        super(FitzHughNagumo, self).__init__()
        # NOTE(review): `input_dim` and `units` are undefined here -- they
        # are presumably meant to be constructor arguments. TODO confirm.
        v_init = tf.random_normal_initializer()
        self.v = tf.Variable(
            initial_value=v_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        # p1..p4: trainable per-unit parameters, zero-initialized
        # (presumably the FitzHugh-Nagumo model coefficients -- verify).
        p1_init = tf.zeros_initializer()
        self.p1 = tf.Variable(
            initial_value=p1_init(shape=(units,), dtype="float32"), trainable=True
        )
        p2_init = tf.zeros_initializer()
        self.p2 = tf.Variable(
            initial_value=p2_init(shape=(units,), dtype="float32"), trainable=True
        )
        p3_init = tf.zeros_initializer()
        self.p3 = tf.Variable(
            initial_value=p3_init(shape=(units,), dtype="float32"), trainable=True
        )
        p4_init = tf.zeros_initializer()
        self.p4 = tf.Variable(
            initial_value=p4_init(shape=(units,), dtype="float32"), trainable=True
        )
    def call(self, inputs):
        # NOTE(review): `self.w` and `self.b` are never created -- __init__
        # defines `v` and `p1`..`p4` instead, so this fails at runtime.
        return tf.matmul(inputs, self.w) + self.b
        # NOTE(review): the line below is unreachable (after `return`) and
        # appears to sketch the FitzHugh-Nagumo update equations; also note
        # `^` is XOR in Python, not exponentiation.
        v - v^3/3 - p1 * w * v + inputs
w = p2 * (v - p3 * w) |
# coding: utf-8
# # 20 Newsgroups text classification with pre-trained word embeddings
#
# In this notebook, we'll use pre-trained [GloVe word
# embeddings](http://nlp.stanford.edu/projects/glove/) for text
# classification using TensorFlow 2.0 / Keras. This notebook is
# largely based on the blog post [Using pre-trained word embeddings in
# a Keras model]
# (https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html)
# by François Chollet.
#
# **Note that using a GPU with this notebook is highly recommended.**
#
# First, the needed imports.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import sequence, text
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import TensorBoard
from zipfile import ZipFile
import os, datetime
import sys
import numpy as np
# Report the framework versions up front so runs are reproducible.
print('Using Tensorflow version:', tf.__version__,
      'Keras version:', keras.__version__,
      'backend:', keras.backend.backend(), flush=True)

# Data directory: taken from the environment, with a cluster default.
DATADIR = os.environ.get('DATADIR', "/scratch/project_2005299/data/")
print('Using DATADIR', DATADIR)
# ## GloVe word embeddings
#
# Let's begin by loading a datafile containing pre-trained word
# embeddings. The datafile contains 100-dimensional embeddings for
# 400,000 English words.
print('Indexing word vectors.')
glove_filename = os.path.join(DATADIR, "glove.6B", "glove.6B.100d.txt")
assert os.path.exists(glove_filename), "File not found: "+glove_filename
embeddings_index = {}
with open(glove_filename, encoding='utf-8') as f:
    n_skipped = 0
    for line in f:
        try:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
        except (ValueError, UnicodeEncodeError):
            # Count and skip malformed lines. np.asarray raises ValueError
            # when a token can't be parsed as a float; the original caught
            # only UnicodeEncodeError, which this loop cannot actually
            # raise, so a bad line crashed the script instead of being
            # counted here.
            n_skipped += 1
print('Found {} word vectors, skipped {}.'.format(len(embeddings_index), n_skipped))
# ## 20 Newsgroups data set
#
# Next we'll load the [20 Newsgroups]
# (http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html)
# data set.
#
# The dataset contains 20000 messages collected from 20 different
# Usenet newsgroups (1000 messages from each group):
#
# alt.atheism | soc.religion.christian | comp.windows.x | sci.crypt
# talk.politics.guns | comp.sys.ibm.pc.hardware | rec.autos | sci.electronics
# talk.politics.mideast | comp.graphics | rec.motorcycles | sci.space
# talk.politics.misc | comp.os.ms-windows.misc | rec.sport.baseball | sci.med
# talk.religion.misc | comp.sys.mac.hardware | rec.sport.hockey | misc.forsale
TEXT_DATA_ZIP = os.path.join(DATADIR, "20_newsgroup.zip")
assert os.path.exists(TEXT_DATA_ZIP), "File not found: "+TEXT_DATA_ZIP
zf = ZipFile(TEXT_DATA_ZIP, 'r')
print('Processing text dataset from', TEXT_DATA_ZIP, flush=True)
texts = []  # list of text samples
labels_index = {}  # dictionary mapping label name to numeric id
labels = []  # list of label ids
for fullname in sorted(zf.namelist()):
    # Paths look like "<top>/<newsgroup>/<message-number>": parts[1] is the
    # group name, parts[2] (when present) the message file name.
    parts = fullname.split('/')
    dirname = parts[1]
    fname = parts[2] if len(parts) > 2 else None
    zinfo = zf.getinfo(fullname)
    if zinfo.is_dir() and len(dirname) > 0:
        # New newsgroup directory: assign it the next numeric label id.
        label_id = len(labels_index)
        labels_index[dirname] = label_id
        print(' ', dirname, label_id)
    elif fname is not None and fname.isdigit():
        with zf.open(fullname) as f:
            # Usenet messages are not valid UTF-8; latin-1 never fails.
            t = f.read().decode('latin-1')
            i = t.find('\n\n')  # skip header
            if 0 < i:
                t = t[i:]
            texts.append(t)
            # NOTE(review): label_id is whatever directory entry was seen
            # last; this relies on sorted() listing each group's directory
            # before its files. An archive without explicit directory
            # entries would leave label_id stale or undefined -- confirm.
            labels.append(label_id)
print('Found %s texts.' % len(texts))
# ### Vectorization
#
# Vectorize the text samples into a 2D integer tensor.
MAX_NUM_WORDS = 10000
MAX_SEQUENCE_LENGTH = 1000
tokenizer = text.Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# Note: word_index holds *all* tokens seen, not just the top MAX_NUM_WORDS.
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
# Pad/truncate every message to exactly MAX_SEQUENCE_LENGTH token ids.
data = sequence.pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
# One-hot encode the class labels (20 newsgroups -> 20 columns).
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# ### TF Datasets
#
# Let's now define our TF Datasets for training, validation, and test
# data.
VALIDATION_SET, TEST_SET = 1000, 4000
BATCH_SIZE = 128
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
# Shuffle the full dataset ONCE (reshuffle_each_iteration=False). With the
# default reshuffle_each_iteration=True, the skip()/take() splits below
# would be re-drawn from a fresh shuffle on every epoch, so the training,
# validation and test sets would overlap across epochs (test-set leakage).
dataset = dataset.shuffle(20000, reshuffle_each_iteration=False)
train_dataset = dataset.skip(VALIDATION_SET+TEST_SET)
# Re-shuffle only the training samples between epochs.
train_dataset = train_dataset.shuffle(20000)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True)
validation_dataset = dataset.skip(TEST_SET).take(VALIDATION_SET)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)
test_dataset = dataset.take(TEST_SET)
test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=False)
# ### Pretrained embedding matrix
#
# As the last step in data preparation, we construct the GloVe
# embedding matrix:
print('Preparing embedding matrix.')
# +1 because the Keras Tokenizer indexes words starting at 1; row 0 is
# reserved (stays all-zero).
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_dim = 100
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS:
        # word_index covers the whole vocabulary; keep only the top words.
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
print('Shape of embedding matrix:', embedding_matrix.shape)
# ## 1-D CNN
#
# ### Initialization
print('Build model...')
# Integer token ids in; the Embedding layer maps them to the frozen
# (trainable=False) pre-trained GloVe vectors prepared above.
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(num_words, embedding_dim,
                     weights=[embedding_matrix],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)(inputs)
# Three conv/pool stages; GlobalMaxPooling1D then collapses the remaining
# sequence dimension before the dense classifier head.
x = layers.Conv1D(128, 5, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation='relu')(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation='relu')(x)
# One softmax output per newsgroup class.
outputs = layers.Dense(20, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs,
                    name="20ng-cnn")
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
print(model.summary())
# ### Learning
# TensorBoard logs go to a per-run, timestamped directory under ./logs.
logdir = os.path.join(os.getcwd(), "logs",
                      "20ng-cnn-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print('TensorBoard log directory:', logdir)
os.makedirs(logdir)
callbacks = [TensorBoard(log_dir=logdir)]
epochs = 20
history = model.fit(train_dataset, epochs=epochs,
                    validation_data=validation_dataset,
                    verbose=2, callbacks=callbacks)
# ### Inference
#
# We evaluate the model using the test set. If accuracy on the test
# set is notably worse than with the training set, the model has
# likely overfitted to the training samples.
test_scores = model.evaluate(test_dataset, verbose=2)
print("Test set %s: %.2f%%" % (model.metrics_names[1], test_scores[1]*100))
|
import matplotlib.pyplot as plt
import numpy as np
from .visuals import plot_diagrams
# Names re-exported via `from <module> import *`.
__all__ = ["bottleneck_matching", "wasserstein_matching"]
def bottleneck_matching(I1, I2, matchidx, D, labels=["dgm1", "dgm2"], ax=None):
    """ Visualize bottleneck matching between two diagrams

    Draws both diagrams, then highlights (in green) the single longest
    matched edge -- the one realizing the bottleneck distance. An index at
    or beyond a diagram's length denotes a match to the diagonal.

    Parameters
    ===========

    I1: array
        A diagram
    I2: array
        A diagram
    matchidx: tuples of matched indices
        if input `matching=True`, then return matching
    D: array
        cross-similarity matrix
    labels: list of strings
        names of diagrams for legend. Default = ["dgm1", "dgm2"],
    ax: matplotlib Axis object
        For plotting on a particular axis.
    """
    plot_diagrams([I1, I2], labels=labels, ax=ax)
    # Honor the `ax` argument for the matching edge too; previously the
    # edge always went to the current axes via plt.plot, ignoring `ax`.
    if ax is None:
        ax = plt.gca()
    # Nothing to highlight when there are no matched pairs (np.argmax on an
    # empty list would raise).
    if len(matchidx) == 0:
        return
    # Rotating by 45 degrees maps the diagonal onto the x-axis: the point
    # (a, a) becomes (a*sqrt(2), 0), which makes the closest diagonal point
    # of an unmatched feature easy to compute.
    cp = np.cos(np.pi / 4)
    sp = np.sin(np.pi / 4)
    R = np.array([[cp, -sp], [sp, cp]])
    if I1.size == 0:
        I1 = np.array([[0, 0]])
    if I2.size == 0:
        I2 = np.array([[0, 0]])
    I1Rot = I1.dot(R)
    I2Rot = I2.dot(R)
    dists = [D[i, j] for (i, j) in matchidx]
    # Only the longest edge is drawn: it realizes the bottleneck distance.
    (i, j) = matchidx[np.argmax(dists)]
    if i >= I1.shape[0] and j >= I2.shape[0]:
        # Diagonal matched to diagonal: nothing to draw.
        return
    if i >= I1.shape[0]:
        # I2[j] is matched to the diagonal: segment to its projection.
        diagElem = np.array([I2Rot[j, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I2[j, 0], diagElem[0]], [I2[j, 1], diagElem[1]], "g")
    elif j >= I2.shape[0]:
        # I1[i] is matched to the diagonal: segment to its projection.
        diagElem = np.array([I1Rot[i, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I1[i, 0], diagElem[0]], [I1[i, 1], diagElem[1]], "g")
    else:
        ax.plot([I1[i, 0], I2[j, 0]], [I1[i, 1], I2[j, 1]], "g")
def wasserstein_matching(I1, I2, matchidx, labels=["dgm1", "dgm2"], ax=None):
    """ Visualize Wasserstein matching between two diagrams

    Draws both diagrams and a green segment for every matched pair of
    points; a point matched to the diagonal (signalled by an index at or
    beyond the diagram's length) gets a segment to its diagonal projection.

    Parameters
    ===========

    I1: array
        A diagram
    I2: array
        A diagram
    matchidx: tuples of matched indices
        pairs (i, j) of matched point indices
    labels: list of strings
        names of diagrams for legend. Default = ["dgm1", "dgm2"],
    ax: matplotlib Axis object
        For plotting on a particular axis. New, backward-compatible
        parameter, mirroring bottleneck_matching; defaults to the
        current axes.
    """
    plot_diagrams([I1, I2], labels=labels, ax=ax)
    if ax is None:
        ax = plt.gca()
    # 45-degree rotation maps the diagonal onto the x-axis, giving the
    # closest diagonal point of each unmatched feature.
    cp = np.cos(np.pi / 4)
    sp = np.sin(np.pi / 4)
    R = np.array([[cp, -sp], [sp, cp]])
    if I1.size == 0:
        I1 = np.array([[0, 0]])
    if I2.size == 0:
        I2 = np.array([[0, 0]])
    I1Rot = I1.dot(R)
    I2Rot = I2.dot(R)
    for index in matchidx:
        (i, j) = index
        if i >= I1.shape[0] and j >= I2.shape[0]:
            # Diagonal matched to diagonal: nothing to draw.
            continue
        if i >= I1.shape[0]:
            diagElem = np.array([I2Rot[j, 0], 0])
            diagElem = diagElem.dot(R.T)
            ax.plot([I2[j, 0], diagElem[0]], [I2[j, 1], diagElem[1]], "g")
        elif j >= I2.shape[0]:
            diagElem = np.array([I1Rot[i, 0], 0])
            diagElem = diagElem.dot(R.T)
            ax.plot([I1[i, 0], diagElem[0]], [I1[i, 1], diagElem[1]], "g")
        else:
            ax.plot([I1[i, 0], I2[j, 0]], [I1[i, 1], I2[j, 1]], "g")
|
from IPython.kernel import client
from subprocess import *
import os
import sys
import commands
import string
import atexit
import time
import socket
import types
import inspect
import casadef
import numpy as np
from math import *
from get_user import get_user
# jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
import traceback
# jagonzal (CAS-4372): Introduce CASA logging system into cluster infrastructure
from casac import *
casalog = casac.logsink()
# Route messages from this module to the global CASA log.
casalog.setglobal(True)
# Walk the interpreter stack to find the casapy/ipython console frame and
# grab its globals, where the 'casa' state dictionary lives (if defined).
a=inspect.stack()
stacklevel=0
for k in range(len(a)):
    if a[k][1] == "<string>" or (string.find(a[k][1], 'ipython console') > 0 or string.find(a[k][1],"casapy.py") > 0):
        stacklevel=k
myf=sys._getframe(stacklevel).f_globals
if myf.has_key('casa') :
    casa = myf['casa']
else:
    casa = { }
class cluster(object):
    "control cluster engines for parallel tasks"
    # Singleton instance, created lazily in __new__.
    _instance = None
    # MultiEngineClient connected to the running controller.
    __client=None
    # pid of the ipcontroller process, or None when no controller runs.
    __controller=None
    # Timestamp tagging the current controller's furl/log file names.
    __timestamp=None
    # Engine records pulled from the client (per stop_engine: [0]=id,
    # [1]=host, [2]=pid, ...).
    __engines=[]
    # IPython state directory; $IPYTHONDIR overrides the ./ipython default.
    __ipythondir=os.environ['PWD']+'/ipython'
    if(os.environ.has_key('IPYTHONDIR')):
        __ipythondir=os.environ['IPYTHONDIR']
    __homepath=os.environ['HOME']
    # Names of the generated helper shell scripts.
    __start_controller_file='start_controller.sh'
    __start_engine_file='start_engine.sh'
    __stop_node_file='stop_node.sh'
    __stop_engine_file='stop_engine.sh'
    __stop_controller_file='stop_controller.sh'
    __cluster_rc_file='clusterrc.sh'
    __user = get_user()
    # Per-user prefix for script copies placed on the (remote) hosts.
    __prefix = '/tmp/' + __user + '-'
    __init_now=True
    __new_engs=[]
    def __new__(cls, *args, **kwargs):
        # Classic singleton: every instantiation returns the same object.
        if not cls._instance:
            cls._instance = super(cluster, cls).__new__(
                cls, *args, **kwargs)
        return cls._instance
    def __call__(self):
        # Calling the instance simply hands back the singleton itself.
        return self
def __init__(self):
"""Initialize a Cluster.
A Cluster enables parallel and distributed execution of CASA tasks and tools on a set of networked computers. A culster consists of one controller and one or more engines. Each engine is an independent Python instance that takes Python commands over a network connection. The controller provides an interface for working with a set of engines. A user uses casapy console to command the controller. A password-less ssh access to the computers that hosts engines is required for the communication between controller and engines.
"""
self.__client=None
self.__controller=None
self.__timestamp=None
self.__engines=[]
self.__ipythondir=os.environ['PWD']+'/ipython'
if(os.environ.has_key('IPYTHONDIR')):
self.__ipythondir=os.environ['IPYTHONDIR']
else:
os.environ['IPYTHONDIR']=self.__ipythondir
self.__homepath=os.environ['HOME']
if (self.__ipythondir==None or self.__ipythondir==''):
os.environ["IPYTHONDIR"]=os.environ['HOME']+'.casa/ipython'
self.__ipythondir=os.environ['IPYTHONDIR']
self.__init_now=True
self.__new_engs=[]
atexit.register(cluster.stop_cluster,self)
def _ip(self, host):
"""Returns a unique IP address of the given hostname,
i.e. not 127.0.0.1 for localhost but localhost's global IP"""
ip = socket.gethostbyname(host)
if ip == "127.0.0.1":
ip = socket.gethostbyname(socket.getfqdn())
return ip
def _cp(self, source, host, destination):
"""Creates the command to copy the source file to the destination file and destination host,
using either scp or cp for the localhost. This is to avoid the requirement of password-less ssh
in a single host environment."""
if self._ip(host) == self._ip("localhost"):
cmd = ['cp', source, destination]
else:
cmd = ['scp', source, host + ":" + destination]
return cmd
def _do(self, host, cmd):
"""Creates the command line to execute the give command on the given host.
If and only if the host is not localhost, ssh is used."""
if self._ip(host) == self._ip("localhost"):
return cmd.split(" ")
else:
return ['ssh', '-f', '-q', '-x', host, cmd]
    def start_engine(self, node_name, num_engine, work_dir=None, omp_num_nthreads=1):
        """Start engines on the given node.
        @param node_name The name of the computer to host the engines.
        @param num_engine The number of the engines to initialize for this run.
        @param work_dir The working directory where outputs and logs from the engines will be stored. If work_dir is not supplied or does not exist, the user's home directory will be used.
        @param omp_num_nthreads Forwarded to the engine initialization (presumably the OpenMP thread count; the consumer is defined outside this chunk -- confirm).
        Running this command multiple times on the same node is ok. The total number of the engines on the node increases for each run.
        Every engine has a unique integer id. The id is the key to send the instructions to the engine. The available engine ids can be obtained by calling get_ids() or get_engines().
        """
        casalog.origin("parallel_go")
        # Start controller (no-op when one is already running)
        if not self.__start_controller():
            casalog.post("The controller is not started","WARN","start_engine")
            return False
        # Start the engine; all subprocess output is discarded
        out=open('/dev/null', 'w')
        err=open('/dev/null', 'w')
        # Copy the cluster rc file to the target node (scp, or cp if local)
        cmd = self._cp(self.__ipythondir+'/'+self.__cluster_rc_file,
                       node_name,
                       self.__prefix+self.__cluster_rc_file)
        p=Popen(cmd, stdout=out, stderr=err)
        sts = os.waitpid(p.pid, 0)
        if sts[1] != 0:
            casalog.post("Command failed: %s" % (" ".join(cmd)),"WARN","start_engine")
        # Copy the engine-startup script to the target node
        cmd = self._cp(self.__ipythondir+'/'+self.__start_engine_file,
                       node_name,
                       self.__prefix+self.__start_engine_file)
        p=Popen(cmd, stdout=out, stderr=err)
        sts = os.waitpid(p.pid, 0)
        if sts[1] != 0:
            casalog.post("Command failed: %s" % (" ".join(cmd)),"WARN","start_engine")
        # Launch num_engine ipengine processes, one bash invocation each
        for i in range(1, num_engine+1):
            args='bash '+self.__prefix+self.__start_engine_file
            cmd = self._do(node_name, args)
            q=Popen(cmd)
            sts = os.waitpid(q.pid, 0)
            if sts[1] != 0:
                casalog.post("Command failed: %s" % (" ".join(cmd)),"WARN","start_engine")
            casalog.post("start engine %s on %s" % (i, node_name),"INFO","start_engine")
        # Wait for the new engines to register, then refresh the engine list
        self.__engines=self.__update_cluster_info(num_engine, work_dir,omp_num_nthreads)
        out.close()
        err.close()
    # jagonzal (CAS-4292): This method crashes when initializing the nodes via __init_nodes,
    # so it is deprecated. Instead it is necessary to use directly the start_engine method
    # which does not only start the engine, but also initializes it using scripts
    def start_cluster(self, cl_file):
        """Start engines that listed in a file
        @param cl_file The name of the file that defines the engines.
        The cl_file is a text file. Each line contains 3 columns with node name, number of engines and work directory separated by space. A line started with # will be ignored. Example:
        #-----------------------------------------
        #node_name num_of_engines work_dir
        casa-dev-01 4 /home/casa-dev-01/hye/cluster
        #casa-dev-02 3 /home/casa-dev-02/hye/cluster
        subzero 1 /home/subzero/hye/test
        #olddog 2 /home/olddog/hye
        #-----------------------------------------
        start_cluster and start_engine can be used multiple times.
        """
        casalog.origin("parallel_go")
        # Start controller (no-op when one is already running)
        if not self.__start_controller():
            casalog.post("The controller is not started","WARN","start_cluster")
            return False
        # Process the file
        try:
            clf=open(cl_file, 'r')
            lines = clf.readlines()
            for line in lines:
                if line.startswith('#'):
                    continue
                words = string.split(line)
                if len(words) < 3:
                    casalog.post("The node definition is invalid: %s" % line,"WARN","start_cluster")
                    continue
                try:
                    int(words[1])
                except:
                    # Skip lines whose engine count is not an integer.
                    # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
                    # traceback.print_tb(sys.exc_info()[2])
                    continue
                # Start all nodes; per-engine initialization is deferred
                # until every node is up (__init_now is restored below)
                self.__init_now=False
                casalog.post("start_engine(%s,%s,%s)" % (str(words[0]),str(words[1]),str(words[2])),"INFO","start_cluster")
                self.start_engine(words[0], int(words[1]), words[2])
            clf.close()
        except IOError:
            casalog.post("Cluster file '%s' doesn't exist" % cl_file,"SEVERE","start_cluster")
        # Initialize all newly started engines in one batch
        if len(self.__new_engs)>0:
            self.__init_nodes(self.__new_engs)
        self.__engines=self.__client.pull(['id', 'host', 'pid', 'inited'])
        self.__new_engs=[]
        self.__init_now=True
    def __start_controller(self):
        """(Internal) Start the controller.
        A user does not need to call this function directly. When a user runs either start_cluster or start_engine, it will check the existence of a valid controller. If the controller does not exist, this function will be called automatically. All engines will connect to the valid controller.
        """
        casalog.origin("parallel_go")
        # If there is already a controller, use it
        if (self.__controller!=None):
            return True
        # First of all write bashrc file which is needed by other cluster files
        self.__write_bashrc()
        # Generate time stamp and write start controller file
        from time import strftime
        timestamp=strftime("%Y%m%d%H%M%S")
        self.__write_start_controller(timestamp)
        # Start controller in a detached terminal
        cmd = 'bash ' + self.__ipythondir + '/' + self.__start_controller_file
        self.__controller=Popen(cmd,shell=True).pid
        if (self.__controller==None):
            return False
        self.__timestamp=timestamp
        casalog.post("Controller %s started" % self.__controller ,"INFO","start_controller")
        # Now write the rest of the cluster files
        self.__write_start_engine()
        self.__write_stop_controller()
        self.__write_stop_node()
        # Wait for controller files to exist: poll up to ~14 seconds each
        # for the controller log and the multi-engine furl file that
        # ipcontroller writes at startup
        info=self.__ipythondir+'/log/casacontroller-'+str(self.__timestamp)+'-'+str(self.__controller)+'.log'
        meng=self.__ipythondir+'/security/casacontroller-mec-'+self.__timestamp+'.furl'
        for i in range(1, 15):
            if os.path.exists(info):
                break
            time.sleep(1)
        for i in range(1, 15):
            if os.path.exists(meng):
                break
            time.sleep(1)
        # Start-up client connected through the multi-engine furl
        self.__client=client.MultiEngineClient(meng)
        return True
    def __write_start_engine(self):
        """(Internal) Create script for starting engines.
        The created script will be stored in the user's $IPYTHONDIR. The start_cluster and start_engine will upload the script to the node and execute it in the proper shell.
        """
        ef=open(self.__ipythondir+'/'+self.__start_engine_file, 'w')
        bash=commands.getoutput("which bash")
        ef.write('#!%s\n' % bash)
        # Source the cluster rc file previously copied to /tmp on the host
        ef.write('. %s%s\n' % (self.__prefix, self.__cluster_rc_file))
        cmd=commands.getoutput("which ipengine")
        # Record the controller pid and timestamp in the engine environment
        ef.write('export contrid=%s\n' % self.__controller)
        ef.write('export stamp=%s\n' % self.__timestamp)
        # Launch ipengine against this controller's engine furl; the noisy
        # NullSelection messages are filtered out of the log
        ef.write(cmd+' --furl-file='+self.__ipythondir+'/security/casacontroller-engine-'+self.__timestamp+'.furl --logfile='+self.__ipythondir+'/log/casaengine-'+self.__timestamp+'-'+str(self.__controller)+'- 2>&1 | grep -v NullSelection &\n')
        ef.close()
    def __write_start_controller(self,timestamp):
        """(Internal) Create the script that launches ipcontroller.
        The log file and the three furl files (engine, multi-engine client,
        task client) embed the given timestamp so each controller run is
        uniquely identified.
        """
        ef=open(self.__ipythondir+'/'+self.__start_controller_file, 'w')
        bash=commands.getoutput("which bash")
        ef.write('#!%s\n' % bash)
        ef.write('. %s/%s\n' % (self.__ipythondir, self.__cluster_rc_file))
        lfile=self.__ipythondir+'/log/casacontroller-'+timestamp+'-'
        ffile=self.__ipythondir+'/security/casacontroller-engine-'+timestamp+'.furl'
        efile=self.__ipythondir+'/security/casacontroller-mec-'+timestamp+'.furl'
        tfile=self.__ipythondir+'/security/casacontroller-tc-'+timestamp+'.furl'
        cmd = commands.getoutput("which ipcontroller")
        cmd += ' -xy '
        cmd += ' --engine-furl-file=' + ffile
        cmd += ' --multiengine-furl-file=' + efile
        cmd += ' --task-furl-file=' + tfile
        cmd += ' --logfile=' + lfile
        # Run detached in the background
        cmd += ' &\n'
        ef.write(cmd)
        ef.close()
    def __write_stop_node(self):
        """(Internal) Create script for stopping a node.
        The created script will be stored in the user's $IPYTHONDIR. The stop_cluster and stop_engine will upload the script to the node and execute it in the proper shell.
        """
        ef=open(self.__ipythondir+'/'+self.__stop_node_file, 'w')
        bash=commands.getoutput("which bash")
        ef.write('#!%s\n' % bash)
        # Stop all engines started by the current controller (TERM every
        # ipengine owned by this user whose command line carries our stamp)
        ef.write("ps -fu `whoami` | grep ipengine | grep -v grep | grep "+self.__timestamp+" | awk '{print $2}' | xargs kill -TERM>/dev/null")
        ef.close()
    def __write_stop_controller(self):
        """(Internal) Create script for stopping the controller.
        The created script will be stored in the user's $IPYTHONDIR. The stop_cluster will execute it in the proper shell.
        """
        ef=open(self.__ipythondir+'/'+self.__stop_controller_file, 'w')
        bash=commands.getoutput("which bash")
        ef.write('#!%s\n' % bash)
        # TERM every ipcontroller process owned by the current user
        ef.write("ps -ef | grep `whoami` | grep ipcontroller | grep -v grep | awk '{print $2}' | xargs kill -TERM >/dev/null")
        ef.close()
    def __write_bashrc(self):
        """(Internal) Create file containing bash startup instructions for the engine host.
        When the controller startup, the necessary environment information for running cluster is extracted from the user's current shell (that runs this casapy session) and written to a rc file. The created script will be stored in the user's $IPYTHONDIR. The start_cluster and start_engine will upload the rc file to the nodes and establish the engine environment.
        """
        bashrc=open(self.__ipythondir+'/'+self.__cluster_rc_file, 'w')
        bash=commands.getoutput("which bash")
        bashrc.write("#!%s\n" % bash)
        # Environment variables propagated from this casapy session to the
        # engine hosts
        envList=['PATH', 'LD_LIBRARY_PATH', 'IPYTHONDIR',
                 'CASAPATH', 'CASAARCH',
                 'PYTHONHOME', '__CASAPY_PYTHONDIR',
                 'PGPLOT_DEV', 'PGPLOT_DIR', 'PGPLOT_FONT']
        for param in envList:
            try:
                bashrc.write('export %s="%s"\n' % (param,os.environ[param]))
            except:
                # Variables absent from this session are simply skipped.
                # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
                # traceback.print_tb(sys.exc_info()[2])
                pass
        bashrc.write("export HOSTNAME=`uname -n`")
        bashrc.close()
    def stop_engine(self, engine_id):
        """Stop an engine.
        @param engine_id The id of the engine to be stopped.
        If an engine with the given id is in the current cluster, running this function will stop the engine and remove it from the engine list.
        """
        casalog.origin("parallel_go")
        if type(engine_id).__name__ != 'int':
            casalog.post("engine id must be an integer","WARN","stop_engine")
            return None
        # Look up the host and process id of the requested engine
        node_name=''
        procid=None
        for i in self.__engines:
            if (i[0]==engine_id):
                node_name=i[1]
                procid=i[2]
        if (node_name=='' or procid is None):
            casalog.post("Could not find engine %d" % engine_id,"WARN","stop_engine")
            return
        # Write a one-off script that kills exactly that engine process
        ef=open(self.__ipythondir+'/'+self.__stop_engine_file, 'w')
        bash=commands.getoutput("which bash")
        ef.write('#!%s\n' % bash)
        ef.write("kill -9 %d" % procid)
        ef.close()
        out=open('/dev/null', 'w')
        err=open('/dev/null', 'w')
        # Ship the script to the engine's host and run it there
        cmd = self._cp(self.__ipythondir+'/'+self.__stop_engine_file,
                       node_name,
                       self.__prefix+self.__stop_engine_file)
        p=Popen(cmd, stdout=out, stderr=err)
        out.close()
        err.close()
        sts = os.waitpid(p.pid, 0)
        args='bash '+self.__prefix+self.__stop_engine_file
        Popen(self._do(node_name, args))
        casalog.post("stop engine %d on %s" % (engine_id, node_name),"INFO","stop_engine")
        # Refresh the engine list (one engine fewer)
        self.__engines=self.__update_cluster_info(-1)
    def stop_node(self, node_name):
        """Stop a node (a engine-host computer)
        @param node_name The node to be stopped.
        If a computer with the given name is in the current cluster, running this function will stop all the engines currently running on that node and remove the node and engines from the engine list. This function will not shutdown the computer.
        """
        casalog.origin("parallel_go")
        if type(node_name).__name__ != 'str':
            casalog.post("node_name must be a string","WARN","stop_node")
            return None
        if self.get_nodes().count(node_name) == 0:
            casalog.post("There is no host with name %s" % node_name,"WARN","stop_node")
            return None
        out=open('/dev/null', 'w')
        err=open('/dev/null', 'w')
        # Ship the stop-node script to the host and run it there
        cmd = self._cp(self.__ipythondir+'/'+self.__stop_node_file,
                       node_name,
                       self.__prefix+self.__stop_node_file)
        p=Popen(cmd, stdout=out, stderr=err)
        out.close()
        err.close()
        sts = os.waitpid(p.pid, 0)
        args='bash '+self.__prefix+self.__stop_node_file
        Popen(self._do(node_name, args))
        casalog.post("stop engines on %s" % node_name,"INFO","stop_node")
        # Count how many engines lived on that node (as a negative delta
        # for the cluster-info update)
        num_engine=0
        for i in self.__engines:
            if i[1]==node_name:
                num_engine=num_engine-1
        self.__engines=self.__update_cluster_info(num_engine)
    def __stop_controller(self):
        """(Internal) Stop the controller.
        This is the last step when quitting the cluster gracefully.
        """
        casalog.origin("parallel_go")
        # If it is already down
        if (self.__controller==None):
            return True
        import commands
        # The controller always runs on this machine
        node_name=commands.getoutput("uname -n")
        out=open('/dev/null', 'w')
        err=open('/dev/null', 'w')
        cmd = self._cp(self.__ipythondir+'/'+self.__stop_controller_file,
                       node_name,
                       self.__prefix+self.__stop_controller_file)
        p=Popen(cmd, stdout=out, stderr=err)
        out.close()
        err.close()
        sts = os.waitpid(p.pid, 0)
        args='bash '+self.__prefix+self.__stop_controller_file
        Popen(self._do(node_name, args))
        # Best-effort cleanup of the generated helper scripts
        try:
            os.remove(self.__ipythondir+'/'+self.__cluster_rc_file)
            os.remove(self.__ipythondir+'/'+self.__start_engine_file)
            os.remove(self.__ipythondir+'/'+self.__stop_node_file)
            os.remove(self.__ipythondir+'/'+self.__stop_controller_file)
            os.remove(self.__ipythondir+'/'+self.__stop_engine_file)
        except:
            # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
            # traceback.print_tb(sys.exc_info()[2])
            pass
        try:
            self.__controller=None
        except:
            # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
            # traceback.print_tb(sys.exc_info()[2])
            pass
        casalog.post("Controller stopped","INFO","stop_controller")
        return True
def stop_cluster(self):
    """Stop the cluster.

    Kills all running engines and the controller through the
    kernel.multiengineclient interface, resets the local bookkeeping,
    removes the generated scripts and washes the log/security files.

    NOTE: everything after the early 'return' below is the old
    brute-force shutdown path; it is intentionally unreachable and kept
    only for reference.
    """
    # jagonzal (CAS-4292): We have to check the controller instance directly because the method
    # start_cluster does not work properly (crashes when initializing the nodes via __init_nodes).
    # Actually start_cluster is deprecated, and it is necessary to use directly the start_engine
    # method which does not only start the engine, but also initializes it using scripts
    if ((self.__controller==None) or (self.__client==None)):
        return
    # jagonzal (CAS-CHANGE): Do not use brute-force kill to schut down cluster
    else:
        # Kill the engines and controller using kernel.multiengineclient interface
        try:
            self.__client.kill(True,self.__engines,False)
            del self.__client
        except:
            traceback.print_exception((sys.exc_info()[0]), (sys.exc_info()[1]), (sys.exc_info()[2]))
        # Reset state before doing anything else, otherwise we may try to use one method from the client object
        self.__client=None
        self.__controller=None
        # Update cluster info
        self.__engines=[]
        # Remove initialization/shut-down scripts
        try:
            os.remove(self.__ipythondir+'/'+self.__start_controller_file)
            os.remove(self.__ipythondir+'/'+self.__cluster_rc_file)
            os.remove(self.__ipythondir+'/'+self.__start_engine_file)
            os.remove(self.__ipythondir+'/'+self.__stop_node_file)
            os.remove(self.__ipythondir+'/'+self.__stop_controller_file)
            os.remove(self.__prefix+self.__cluster_rc_file)
            os.remove(self.__prefix+self.__start_engine_file)
        except:
            traceback.print_exception((sys.exc_info()[0]), (sys.exc_info()[1]), (sys.exc_info()[2]))
        # jagonzal (CAS-4370): Remove all the ipcontroller/ipengine files because
        # otherwise it might confuse future cluster/MultiEngineClient instances
        self.wash_logs()
        return
    ### jagonzal (CAS-4292): Code below is deprecated and unreachable ###
    # shutdown all engines, one stop_node call per distinct host
    elist=[]
    for i in self.__engines:
        elist.append(i[1])
    fruit=set(elist)
    for i in fruit:
        try:
            self.stop_node(i)
        except:
            # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
            traceback.print_exception((sys.exc_info()[0]), (sys.exc_info()[1]), (sys.exc_info()[2]))
            continue
    # shut down the controller
    try:
        self.__stop_controller()
    except:
        # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
        traceback.print_exception((sys.exc_info()[0]), (sys.exc_info()[1]), (sys.exc_info()[2]))
        pass
    try:
        # jagonzal (CAS-4106): We have to shut down the client, not activate it
        # besides, the activate method only enables parallel magic commands
        self.__client=None
    except:
        # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
        traceback.print_exception((sys.exc_info()[0]), (sys.exc_info()[1]), (sys.exc_info()[2]))
        pass
def wash_logs(self):
    """Remove accumulated cluster log and security files.

    Every cluster run leaves controller/engine files in the user's
    $IPYTHONDIR. They are wiped here, but only while no controller is
    active (i.e. before starting any engine or after a full shutdown).
    """
    # Refuse to clean while a controller is alive
    if self.__controller is not None:
        return True
    # jagonzal (CAS-4370): stale ipcontroller/ipengine files confuse
    # future cluster/MultiEngineClient instances, so wipe them all
    for subdir in ('log', 'security'):
        os.system("rm -rf %s/%s/*" % (self.__ipythondir, subdir))
def __init_nodes(self, i):
    """(Internal) Initialize engines so they can run CASA tasks/tools.

    @param i list of engine ids to initialize

    An engine is a bare Python interpreter. This pushes the environment
    (working dir, script dir) to each engine and executes casa_in_py.py
    there, effectively turning it into a non-interactive CASA instance.
    """
    casalog.origin("parallel_go")
    casalog.post("Initialize engines %s" %str(i),"INFO","init_nodes")
    self.__client.push({'casa': casa })
    self.__client.execute('import os', i)
    # Move each engine into work_dir when it exists, else fall back to $HOME
    self.__client.execute('if os.path.isdir(work_dir):os.chdir(work_dir)\nelse:work_dir=os.environ["HOME"]', i)
    # Locate the CASA installation: $PYTHONHOME first, then $CASAPATH
    phome=''
    try:
        phome=os.environ["PYTHONHOME"]
    except:
        # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
        # traceback.print_tb(sys.exc_info()[2])
        pass
    if phome=='':
        try:
            v=str.split(os.environ["CASAPATH"], ' ')
            phome=v[0]+'/'+v[1]
        except:
            # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
            # traceback.print_tb(sys.exc_info()[2])
            pass
    # $CASAARCH overrides the location found above when they differ
    dhome=''
    try:
        dhome=os.environ["CASAARCH"]
    except:
        # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
        # traceback.print_tb(sys.exc_info()[2])
        pass
    if phome=='':
        casalog.post("could not locate casa_in_py.py","SEVERE","init_nodes")
        return None
    if (dhome!=phome):
        phome=dhome
    sdir = casadef.python_library_directory + '/'
    self.__client.push(dict(phome=phome), i)
    self.__client.execute('import sys', i)
    self.__client.push(dict(sdir=sdir), i)
    self.__client.execute('scriptdir=sdir', i)
    self.__client.execute('sys.path.insert(2, scriptdir)', i)
    try:
        # Run the CASA bootstrap script on the engines, then mark them inited
        self.__client.execute("execfile(scriptdir+'casa_in_py.py')", i)
        self.__client.execute('inited=True', i)
    except client.CompositeError, exception:
        casalog.post("Error initializing engine %s: %s" % (str(i), str(exception)),"SEVERE","init_nodes")
        exception.print_tracebacks()
    except:
        casalog.post("Error initializing engine %s" % str(i),"SEVERE","init_nodes")
        traceback.print_tb(sys.exc_info()[2])
def reset_cluster(self):
    """Re-initialize all available engines.

    Pulls the engine ids from the client, re-runs the CASA environment
    setup on each of them, then refreshes the engine bookkeeping list.
    """
    casalog.origin("parallel_go")
    if self.__client is None:
        casalog.post("Multiengineclient is not initialized","WARN","reset_cluster")
        return None
    try:
        pending=self.__client.pull('id')
    except:
        # The ids could not be fetched; nothing to reset
        return None
    if len(pending)>0:
        self.__init_nodes(pending)
    # Refresh the [id, host, pid, inited] bookkeeping
    self.__engines=self.__client.pull(['id', 'host', 'pid', 'inited'])
def __update_cluster_info(self, num_engine, work_dir=None,omp_num_nthreads=1):
    """(Internal) Construct the list of engines.

    @param num_engine number of engines just added (may be negative on stop)
    @param work_dir initial working directory pushed to new engines
    @param omp_num_nthreads value for OMP_NUM_THREADS on each new engine
    @return list of [id, host, pid, inited] per engine, or [] on failure

    Appends num_engine engines to the engine list and sets up the basic
    Python environment on them. Before the later casa_in_py init (see
    __init_nodes) an engine can only run plain Python, not CASA tasks.
    """
    casalog.origin("parallel_go")
    if self.__client is None :
        casalog.post("Controller is not initialized","WARN","update_cluster_info")
        return []
    engs=len(self.__engines)+num_engine
    if engs<0:
        engs=0
    # Poll (up to ~10 s) until the client reports the expected engine count
    i=0
    idlist=self.__client.get_ids()
    while (len(idlist)!=engs and i<10):
        idlist=self.__client.get_ids()
        time.sleep(1)
        i=i+1
    # Here we only take care of the quick-init-able items;
    # the init of the real casa_in_py will be done in parallel later.
    # An engine where pulling 'inited' fails has never been touched yet.
    tobeinit=[]
    for i in idlist:
        inited=False
        try:
            inited=self.__client.pull('inited', i)
        except:
            # Fresh engine: record it and push its identity + env setup
            tobeinit.append(i)
            self.__client.execute('id=%d'%i, i)
            self.__client.execute('import os', i)
            self.__client.execute('import socket', i)
            self.__client.execute('host=socket.gethostname()', i)
            self.__client.execute('pid=os.getpid()', i)
            self.__client.execute('job=None', i)
            # Engines ignore Ctrl-C; interrupts are handled by the controller
            self.__client.execute('import signal', i)
            self.__client.execute('original_sigint_handler = signal.signal(signal.SIGINT,signal.SIG_IGN)', i)
            # jagonzal (CAS-4276): New cluster specification file allows to automatically set the number of open MP threads
            self.__client.execute("os.environ['OMP_NUM_THREADS']='"+str(omp_num_nthreads)+"'", i)
            if work_dir!=None and os.path.isdir(work_dir):
                self.__client.push(dict(work_dir=work_dir), i)
            else:
                self.__client.execute('work_dir=os.environ["HOME"]', i)
            # These are environment variables set for each node at startup.
            # It may be better to set them as globals in this module and pass
            # them to each engine when updating the cluster info.
            self.__client.execute('contrid=os.environ["contrid"]', i)
            self.__client.execute('stamp=os.environ["stamp"]', i)
            self.__client.execute('inited=False', i)
    self.__new_engs.extend(tobeinit)
    if self.__init_now:
        # Run the heavyweight CASA init on all engines collected so far
        if len(self.__new_engs)>0:
            self.__init_nodes(self.__new_engs)
        self.__init_now=True
        self.__new_engs=[]
    if len(idlist)>0:
        return self.__client.pull(['id', 'host', 'pid', 'inited'])
    else:
        return []
def get_casalogs(self):
    """Return the full paths of the casa log files of all engines.

    Each working engine is a CASA instance writing its own log; the
    per-engine log paths are returned, or None if they cannot be fetched.
    """
    try:
        # Build the path on each engine, then collect the results
        self.__client.execute('tmp=work_dir+"/"+thelogfile')
        paths = self.__client.pull('tmp')
        return paths
    except:
        # Client missing or engines unreachable
        return None
def read_casalogs(self):
    """Open every engine's casa log in an emacs viewer.

    Prototype implementation; a proper multi-log viewer is pending.
    """
    casalog.origin("parallel_go")
    import os
    import string
    logs = self.get_casalogs()
    if logs is None:
        casalog.post("Cannot read casalogs","WARN","read_casalogs")
    else:
        # Launch one emacs with all logs, in the background
        os.system("emacs " + string.join(logs, ' ') + "&")
def pad_task_id(self, b='', task_id=[]):
    """Generate a dictionary of id-padded variable names.

    @param b base name (str), list of base names (mutated in place:
           non-str elements are converted to str), or dict of names
    @param task_id list of non-negative integer ids; when empty, all
           current engines are used (ignored when b is a dict)
    @return dict {id: 'base-id'} (empty on invalid ids)

    One way of distributing variables to a set of engines is through a
    Python dictionary. This is a convenience function for quickly
    generating a dictionary of padded names. Examples:
    x=c.pad_task_id('basename', [3, 5, 8])
    x
    {3: 'basename-3', 5: 'basename-5', 8: 'basename-8'}
    x=c.pad_task_id([1,3],[0,1,2,3])
    x
    {0: '1-0', 1: '3-1', 2: '3-2', 3: '3-3'}
    x=c.pad_task_id(['a', 'b','c','d','e'],[0,1,2,3])
    x
    {0: 'a-0', 1: 'b-1', 2: 'c-2', 3: 'd-3'}
    y=c.pad_task_id(x)
    y
    {0: 'a-0-0', 1: 'b-1-1', 2: 'c-2-2', 3: 'd-3-3'}
    """
    casalog.origin("parallel_go")
    base={}
    # Validate ids first; return an empty dict on the first bad one
    int_id=True
    for j in task_id:
        if type(j)!=types.IntType or j<0:
            casalog.post("Task id %s must be a positive integer" % str(j),"WARN","pad_task_id")
            int_id=False
            break
    if not int_id:
        return base
    # Coerce list elements to strings so the '+' concatenation below works
    if type(b)==list:
        for j in range(len(b)):
            if type(b[j])!=str:
                b[j]=str(b[j])
    if len(task_id)==0:
        task_id=list(xrange(0, len(self.__engines)))
    if type(b)==str:
        for j in task_id:
            base[j]=b+'-'+str(j)
    if type(b)==list:
        # When there are more ids than names, the last name is reused
        k=len(b)
        m=len(task_id)
        if m<=k:
            for j in range(m):
                base[task_id[j]]=b[j]+'-'+str(task_id[j])
        else:
            for j in range(k):
                base[task_id[j]]=b[j]+'-'+str(task_id[j])
            for j in range(k,m):
                base[task_id[j]]=b[k-1]+'-'+str(task_id[j])
    if type(b)==dict:
        # Dict input: pad each value with its own key; task_id is ignored
        for i in b.keys():
            base[i]=b[i]+'-'+str(i)
    return base
def one_to_n(self, arg, task_id=[]):
    """Map one value onto n engine ids.

    @param arg the value to replicate
    @param task_id list of non-negative integer engine ids; when empty,
           all current engines are used
    @return dict {id: arg} (empty on invalid ids)

    Example:
    x=c.one_to_n('basename', [1, 2, 7])
    x
    {1: 'basename', 2: 'basename', 7: 'basename'}
    """
    casalog.origin("parallel_go")
    result={}
    # Reject the whole request as soon as one id is invalid
    for tid in task_id:
        if type(tid)!=types.IntType or tid<0:
            casalog.post("Task id %s must be a positive integer" % str(tid),"WARN","one_to_n")
            return result
    targets = task_id if len(task_id)>0 else list(xrange(0, len(self.__engines)))
    for tid in targets:
        result[tid]=arg
    return result
def n_to_n(self, args=[], task_id=[]):
    """Map n values onto n engine ids, pairwise.

    @param args list of values
    @param task_id list of non-negative integer engine ids; when empty,
           all current engines are used
    @return dict {id: value}; ids beyond len(args) are dropped

    Example:
    x=c.n_to_n(['a', 'b', 'c'], [3, 6, 7])
    x
    {3: 'a', 6: 'b', 7: 'c'}
    """
    casalog.origin("parallel_go")
    result={}
    if len(args)==0:
        return result
    # Reject the whole request as soon as one id is invalid
    for tid in task_id:
        if type(tid)!=types.IntType or tid<0:
            casalog.post("Task id %s must be a positive integer" % str(tid),"WARN","n_to_n")
            return result
    targets = task_id if len(task_id)>0 else list(xrange(0, len(self.__engines)))
    for pos, tid in enumerate(targets):
        if pos==len(args):
            break
        result[tid]=args[pos]
    return result
def split_int(self, start, end, task_id=[]):
    """Distribute an integer range over engines as per-engine start points.

    @param start first integer of the range (must be >= 0)
    @param end last integer of the range (must be > start)
    @param task_id list of engine ids; when empty, all engines are used
    @return dict {id: segment start point}

    Example:
    x=c.split_int(9, 127, [2,3,4])
    x
    {2: 9, 3: 49, 4: 89 }
    """
    casalog.origin("parallel_go")
    result={}
    targets = task_id if len(task_id)>0 else list(xrange(0, len(self.__engines)))
    if len(targets)==0:
        casalog.post("There are no engines available","WARN","split_int")
        return result
    if type(start)!=int or type(end)!=int:
        casalog.post("start and end point must be integer","WARN","split_int")
        return result
    if start<0:
        casalog.post("start point must be greater than 0","WARN","split_int")
        return result
    if start>=end:
        casalog.post("end point must be greate than start point","WARN","split_int")
        return result
    # Segment length: ceiling of range width over number of engines
    step=1
    try:
        step=int(ceil(abs(float(end - start))/len(targets)))
    except:
        pass
    for pos, tid in enumerate(targets):
        result[tid]=start + pos*step
    return result
def split_channel(self, spw, nchan, task_id=[]):
    """Distribute the channels of a spectral window over engines.

    @param spw the spectral window index
    @param nchan the number of channels to split
    @param task_id list of engine ids; when empty, all engines are used
    @return dict {id: 'spw:start~end'} channel-selection strings

    Bug fix: the last segment is now clamped to nchan-1, so the
    generated selection never references channels beyond the window
    (previously e.g. 127 channels over 3 engines produced '~128').

    Example:
    x=c.split_channel(1, 127, [2,3,4])
    x
    {2: '1:0~42', 3: '1:43~85', 4: '1:86~126'}
    """
    casalog.origin("parallel_go")
    base={}
    if len(task_id)==0:
        task_id=list(xrange(0, len(self.__engines)))
    if len(task_id)==0:
        casalog.post("There are no engines available","WARN","split_channel")
        return base
    if nchan<len(task_id):
        casalog.post("There are no enough channels to split","WARN","split_channel")
        return base
    # Channels per engine: ceiling so all channels are covered
    nx=1
    try:
        nx=int(ceil(abs(float(nchan))/len(task_id)))
    except:
        pass
    i=-1
    for j in task_id:
        i=i+1
        if i==len(task_id):
            break
        st=i*nx
        # Clamp the segment end to the last valid channel (bug fix)
        se=min(st+nx-1, nchan-1)
        base[j]=str(spw)+":"+str(st)+"~"+str(se)
    return base
def pgc(self,*args,**kwargs):
    """Parallel execution of commands and/or dictionaries of commands.

    @param *args any number of command strings (sent to every engine)
           and/or dicts mapping engine id -> command string
    @param **kwargs available options are
           job=<str> or jobname=<str>
           block=<True/False> (default True)
    @return the client execute result

    Example:
    c.pgc({0:'ya=3',1:'ya="b"'})
    c.pull('ya')
    {0: 3, 1: 'b'}
    c.pgc('xa=-1')
    c.pull('xa')
    {0: -1, 1: -1, 2: -1, 3: -1}
    c.pull('job')
    Out[23]: {0:'xa=-1', 1:'xa=-1', 2:'xa=-1', 3:'xa=-1'}
    """
    casalog.origin("parallel_go")
    # Collect per-engine command lists, keyed by engine id
    tasks={}
    for j in self.__client.get_ids():
        tasks[j]=[]
    for i in args:
        if type(i)==types.DictType:
            # Dict argument: route each command to its engine id
            for j in i.keys():
                if type(j)!=types.IntType or j<0:
                    casalog.post("task id %s must be a positive integer" % str(j),"WARN","pgc")
                    pass
                else:
                    st=''
                    if type(i[j])==types.StringType:
                        st=i[j]
                    else:
                        pass
                    if st!='':
                        tasks[j].append(st)
        elif type(i)==types.StringType:
            # A plain string goes to all engines.
            # NOTE(review): this assumes engine ids are 0..n-1; a gap in
            # the id sequence would raise a KeyError here -- confirm
            for j in xrange(0, len(self.__engines)):
                tasks[j].append(i)
        else:
            casalog.post("command %s must be a string or a dictionary" % str(i),"WARN","pgc")
    # May be better to use non-block mode and catch the result.
    # How to give a name, say 'cmd_name', to a set of commands such
    # that cluster.pull('cmd_name') returns the executed script?
    keys = kwargs.keys()
    job='NoName'
    block=True
    for kw in keys:
        if kw.lower()=='job' or kw.lower()=='jobname':
            job=kwargs[kw]
        if kw.lower()=='block':
            block=kwargs[kw]
    # Push each engine's commands as one newline-joined script into the
    # engine-side variable 'job', then exec it there
    for i in tasks.keys():
        cmd=string.join(tasks[i], '\n')
        self.__client.push(dict(job=cmd), i)
    return self.__client.execute('exec(job)',
                 block=block,targets=tasks.keys())
def parallel_go_commands(self,*args,**kwargs):
    """Parallel execution of commands and/or dictionaries of commands.

    Thin alias for pgc; see pgc for the accepted arguments.
    @return the pgc result (previously it was discarded, so the caller
            could not inspect the execution outcome)
    """
    return self.pgc(*args,**kwargs)
def pgk(self, **kwargs):
    """Parallel push of keyword values to the engines.

    @param **kwargs name=value pairs; a dict value maps engine id ->
           per-engine value, any other value is replicated to every engine
    @return dict {engine id: {name: value}} of what was pushed

    Example:
    x=np.zeros((3,3))
    c.pgk(c={1:x},d=6,t='b',s={0:'y'})
    c.pull('c')
    {1: array([[ 0., 0., 0.],
    [ 0., 0., 0.],
    [ 0., 0., 0.]])}
    c.pull('d')
    {0: 6, 1: 6}
    c.pull('s')
    {0: 'y'}
    c.pull('t')
    {0: 'b', 1: 'b'}
    """
    casalog.origin("parallel_go")
    # Build one {name: value} dict per engine id
    tasks={}
    for j in self.__client.get_ids():
        tasks[j]=dict()
    keys = kwargs.keys()
    for kw in keys:
        vals=kwargs[kw]
        if type(vals)==types.DictType:
            # Dict value: route each entry to its engine id
            for j in vals.keys():
                if type(j)!=types.IntType or j<0:
                    casalog.post("task id %s must be a positive integer" % str(j),"WARN","pgk")
                    pass
                else:
                    tasks[j][kw]=vals[j]
        else:
            # Scalar value: replicate to every engine
            for j in tasks.keys():
                tasks[j][kw]=vals
    for i in tasks.keys():
        self.__client.push(tasks[i], i, True)
    return tasks
def make_command(self, func, **kwargs):
    """Build per-engine command strings from keyword values.

    @param func function name to wrap the arguments in, or None/'' to
           emit plain semicolon-separated assignments instead
    @param **kwargs same semantics as pgk (dict values are per-engine)
    @return dict {engine id: command string}, or None on bad input

    NOTE: this calls pgk, so the values are also pushed to the engines
    as a side effect.

    Example:
    x=np.ones((3,3))
    c.make_command(func=None,c={1:x},d=6,t='b',s={0:'y'})
    {0: 's="y"; t="b"; d=6',
    1: 'c=array([[ 1., 1., 1.],\n[ 1., 1., 1.],\n[ 1., 1., 1.]]); t="b"; d=6'}
    c.make_command(func='g',c={1:x},d=6,t='b',s={0:'y'})
    {0: 'g(s="y", t="b", d=6)',
    1: 'g(c=array([[1., 1., 1.],\n[1., 1., 1.],\n[1., 1., 1.]]), t="b", d=6)'}
    """
    casalog.origin("parallel_go")
    tasks=self.pgk(**kwargs)
    if func!=None and type(func)!=str:
        casalog.post("func must be a str","WARN","make_command")
        return None
    if len(tasks)==0:
        casalog.post("Parameters not specified","WARN","make_command")
        return None
    if func==None or len(str.strip(func))==0:
        func=''
    func=str.strip(func)
    cmds=dict()
    for i in tasks.keys():
        cmd=''
        for (k, v) in tasks[i].iteritems():
            cmd+=k+'='
            # Quote strings, use repr for arrays, plain str otherwise
            if type(v)==str:
                cmd+='"'+v+'"'
            elif type(v)==np.ndarray:
                cmd+=repr(v)
            else:
                cmd+=str(v)
            # Separator: '; ' between assignments, ', ' between call args
            if func=='':
                cmd+='; '
            else:
                cmd+=', '
        # Drop the trailing two-character separator
        cmd=cmd[0:-2]
        if func!='':
            cmd=func+'('+cmd+')'
        cmds[i]=cmd
    return cmds
def parallel_go_keywords(self, **kwargs):
    """Parallel push of keyword values to the engines.

    Thin alias for pgk; see pgk for the accepted arguments.
    @return the {engine id: {name: value}} dict built by pgk
            (previously it was discarded)
    """
    return self.pgk(**kwargs)
def hello(self):
    """Log a greeting from the controller and from every engine.

    @return the execute result when engines exist, otherwise None
    """
    casalog.origin("parallel_go")
    casalog.post("Hello CASA Controller","INFO","hello")
    # Nothing to greet when no engine is up
    if self.get_engines() == []:
        return None
    return self.__client.execute('casalog.origin("parallel_go");casalog.post("Hello CASA Controller","INFO","hello")')
def __set_cwds(self, clusterdir):
    """(Internal) Change the working directory on every engine.

    @param clusterdir directory to chdir into on each engine

    Of limited use because directories generally differ across nodes.
    """
    self.__client.execute('import os')
    self.__client.push(dict(clusterdir=clusterdir))
    self.__client.execute('os.chdir(clusterdir)')
    # NOTE(review): the string below is executed on the engines, where
    # 'self' is not defined -- this line looks broken; confirm intent
    self.__client.execute("user=self.__user")
    self.__client.execute('print user')
    self.__client.execute('import socket')
    self.__client.execute('host=socket.gethostname()')
    self.__client.execute('print host')
    self.__client.execute('print os.getcwd()')
def get_ids(self):
    """Return the ids of all available engines ([] when unavailable)."""
    try:
        ids = self.__client.get_ids()
    except:
        # Client missing or unreachable: report no engines
        return []
    return ids
def get_nodes(self):
    """Return the distinct hostnames of all available engines.

    @return list of unique host names (order not guaranteed)
    """
    # Consistency fix: use the builtin set (as stop_cluster already
    # does) instead of the long-deprecated sets.Set module
    hosts=[]
    for eng in self.__engines:
        hosts.append(eng[1])
    return list(set(hosts))
def get_engines(self):
    """Return the engine bookkeeping list ([id, host, pid, inited] rows)."""
    engines = self.__engines
    return engines
def get_stdout(self,cmd):
    """Run a shell command locally and return (status, output).

    @param cmd shell command string
    @return (exit status, combined output) as produced by the
            Python-2-only 'commands' module
    """
    return commands.getstatusoutput(cmd)
def pdo(self,job):
    """Execute a job on every engine (client-default blocking)."""
    result = self.__client.execute(job)
    return result
def odo(self,job,nodes):
    """Execute a job on a subset of engines without blocking.

    @return a pending-result object; use check_job/get_result to collect
    """
    pending = self.__client.execute(job, block=False, targets=nodes)
    return pending
def execute(self,job,nodes):
    """Run a job on a subset of engines and wait for completion."""
    done = self.__client.execute(job, block=True, targets=nodes)
    return done
def queue_status(self):
    """Ask the controller for the current state of the job queue."""
    status = self.__client.queue_status()
    return status
def clear_queue(self):
    """Drop every job that is still waiting in the controller queue."""
    result = self.__client.clear_queue()
    return result
def get_timer(self, timer=''):
    """Report the elapsed time of a timer on every engine.

    @param timer name of a timer previously created with set_timer
    @return dict {engine id: 'x.xx sec'}; engines without the timer
            (or without a 'timertype' property) are skipped

    Bug fix: the collected dictionary is now returned; previously it
    was logged and then discarded (bare 'return').
    """
    casalog.origin("parallel_go")
    base={}
    prop=self.__client.get_properties()
    for i in self.get_ids():
        try:
            # 'proc' timers compare against CPU time, others wall-clock
            ky=prop[i]['timertype']
            if ky=='proc':
                end=time.clock()
            else:
                end=time.time()
            base[i]='%.2f sec' % (end-prop[i][timer])
        except:
            # Engine has no such timer; skip it silently
            pass
    casalog.post("Timer: %s" % str(base),"INFO","get_timer")
    return base
def set_timer(self,timer='timer',type='proc',
              targets=None,block=None):
    """Start a named timer on the target engines.

    @param timer property name under which the start time is stored
    @param type 'proc' for CPU time (time.clock), anything else for
           wall-clock time (time.time); shadows the builtin 'type' but
           is kept for interface compatibility
    @param targets engines to set the property on (None = client default)
    @param block blocking behaviour (None = client default)
    """
    if self.__client==None:
        return
    properties={}
    if type=='proc':
        properties[timer]=time.clock()
    else:
        properties[timer]=time.time()
    # 'timertype' tells get_timer which clock to compare against
    properties['timertype']=type
    self.__client.set_properties(properties,
        targets, block)
def del_timer(self, timer=['']):
    """Delete the given timer property from every engine.

    @param timer list of timer property names to remove
    """
    casalog.origin("parallel_go")
    for eng in self.get_ids():
        self.__client.del_properties(timer, eng)
        casalog.post("Delete timer %s %s" % (str(timer),str(eng)),"INFO","del_timer")
    return
def get_properties(self):
    """Fetch the property dictionaries of all engines."""
    props = self.__client.get_properties()
    return props
def set_properties(self, properties,
                   targets=None, block=None):
    """Set the given properties on the target engines.

    @param properties dict of property name -> value
    @param targets engines to update (None = client default)
    @param block blocking behaviour (None = client default)
    """
    self.__client.set_properties(properties, targets, block)
def keys(self):
    """Return all variable keys known across the engines."""
    all_keys = self.__client.keys()
    return all_keys
def push(self, **kwargs):
    """Push keyword values to the engines.

    @param **kwargs name=value pairs to distribute. The special keyword
           'targets' selects the engines: an int, a list of ints, 'all'
           or None (= every engine). NOTE: 'targets' itself is pushed
           along with the other keywords.
    @return True when target engines were found (per-engine failures
            are swallowed, so True does not guarantee delivery);
            False when there was nothing to push or no valid target

    You can also use pgk to set values on the engines. Example:
    c.push(a=[1,3,7.1])
    c.pull('a')
    {0: [1, 3, 7.0999999999999996], 1: [1, 3, 7.0999999999999996]}
    c.push(b=[1.2,3.7], targets=1)
    c.pull('b',[1])
    {1: [1.2, 3.7000000000000002]}
    c.pull('b')
    {1: [1.2, 3.7000000000000002]}
    """
    casalog.origin("parallel_go")
    keys = kwargs.keys()
    #keys.sort()
    if len(keys)==0:
        return False
    # Extract the target selection from the keyword set
    tgt=[]
    targets=None
    for kw in keys:
        if kw.lower()=='targets':
            targets=kwargs[kw]
            break
    if targets=='all' or targets==None or \
       type(targets)==list and len(targets)==0:
        tgt=list(xrange(0, len(self.__engines)))
    elif type(targets)==list:
        for j in targets:
            if type(j)==types.IntType and j>=0:
                tgt.append(j)
    elif type(targets)==int and targets>=0:
        tgt.append(targets)
    if len(tgt)==0:
        casalog.post("There are no target engines","WARN","push")
        return False
    ok=True
    for i in tgt:
        try:
            self.__client.push(dict(kwargs),i)
        except:
            # Best effort: per-engine push failures are ignored
            # (NOTE: 'ok' is never set to False here)
            pass
    return ok
def pull(self, key, targets='all'):
    """Get the value of an engine-side variable.

    @param key name of the variable of interest
    @param targets 'all' (default), a single id, or a list of ids
    @return dict {engine id: value}; engines where the pull failed
            are omitted

    Example:
    c.pgc({0:'ya=3',1:'ya="b"'})
    c.pull('ya')
    {0: 3, 1: 'b'}
    c.pull('ya',[1])
    {1: 'b'}
    c.pull('ya',1)
    {1: 'b'}
    """
    casalog.origin("parallel_go")
    base={}
    tgt=[]
    if targets=='all' or \
       type(targets)==list and len(targets)==0:
        tgt=list(xrange(0, len(self.__engines)))
    elif type(targets)==list:
        for j in targets:
            if type(j)==types.IntType and j>=0:
                tgt.append(j)
    elif type(targets)==int and targets>=0:
        tgt.append(targets)
    if len(tgt)==0:
        # Bug fix: this warning used to be mis-attributed to "push"
        casalog.post("There are no target engines","WARN","pull")
        return base
    for i in tgt:
        rslt=None
        try:
            rslt=self.__client.pull(key,i)
        except:
            # jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
            pass
        if rslt!=None:
            base[i]=rslt[0]
    return base
def get_result(self, i):
"""get the result of previous execution
"""
casalog.origin("parallel_go")
# jagonzal (CAS-4375): We have to capture the engine's exceptions at this level
try:
res = self.__client.get_result()[i]
except client.CompositeError, exception:
casalog.post("Error retrieving result from engine %s: %s" % (str(i),str(exception)),"SEVERE","get_result")
exception.print_tracebacks()
res = None
except:
# jagonzal (CAS-4106): Properly report all the exceptions and errors in the cluster framework
casalog.post("Error retrieving result from engine %s" % (str(i)),"SEVERE","get_result")
traceback.print_tb(sys.exc_info()[2])
return res
def activate(self):
    """Switch the client into parallel (magic-command) execution mode."""
    result = self.__client.activate()
    return result
def parallel_go_task(self,taskname=None,outfile='',
                     target=[],ipython_globals=None):
    """Make parallel task command strings from the current input values.

    Thin alias for pgt; see pgt for parameter details.
    @return the {engine id: command string} dict built by pgt
            (previously it was discarded)
    """
    return self.pgt(taskname,outfile,target,ipython_globals)
def pgt(self, taskname=None,outfile='',
        target=[],ipython_globals=None):
    """Make per-engine task command strings from the current inputs.

    taskname -- Name of task
        default: None = current active task;
        example: taskname='bandpass'
        <Options: type tasklist() for the complete list>
    outfile -- Output file for the task inputs
        default: '' = taskname.parallel;
        example: outfile=taskname.orion
    target -- List of integer parallel engine ids
        default: [] = all current active engines;
        example: target=[0,2,4]
    @return dict {engine id: 'taskname(param=value, ...)'}; dict-valued
            task parameters are resolved per engine id
    """
    casalog.origin("parallel_go")
    base={}
    for j in target:
        if type(j)!=types.IntType or j<0:
            casalog.post("engine id %s must be a positive integer" % str(j),"WARN","pgt")
            return base
    if len(target)==0:
        target=list(xrange(0, len(self.__engines)))
    if len(target)==0:
        casalog.post("There are no target engines","WARN","pgt")
        return base
    try:
        # Find the casapy global namespace (holds the task objects)
        if ipython_globals == None:
            t=len(inspect.stack())-1
            myf=sys._getframe(t).f_globals
        else:
            myf=ipython_globals
        if taskname==None or taskname=='' or \
           type(taskname)!=str:
            taskname=myf['taskname']
        if outfile=='' or outfile==None or \
           type(outfile)!=str:
            outfile=taskname+'.parallel'
        tname=myf[taskname]
        if not myf.has_key(taskname) and \
           str(type(tname))!="<type 'instance'>" and \
           not hasattr(tname,"defaults"):
            raise TypeError("task %s is not defined " %
                            taskname)
        else:
            myf['taskname']=taskname
            # Refresh the task's parameter values without printing them
            myf['update_params'](func=myf['taskname'],
                                 printtext=False, ipython_globals=myf)
        for j in target:
            # Assemble 'taskname(k1=v1, k2=v2, ...)' for engine j
            script=taskname+'('
            for k in myf[taskname].parameters:
                par=myf[taskname].parameters[k]
                if type(par)==dict:
                    # Dict parameter: pick the value keyed by this engine
                    # id (int key, or a str key parseable as this id)
                    val=par
                    for v in par.keys():
                        if type(v)==types.IntType and j==v:
                            val=par[v]
                            break
                        elif type(v)==str:
                            a=-1
                            try:
                                a=int(v)
                            except:
                                # Non-numeric str key: not an engine id
                                pass
                            if a!=-1 and a==j:
                                val=par[v]
                                break
                    if type(val)==str:
                        script=script+k+"='"+val+"',"
                    else:
                        script=script+k+"="+str(val)+","
                elif type(par)==str:
                    script=script+k+"='"+par+"',"
                else:
                    script=script+k+"="+str(par)+","
            # Drop the trailing comma and close the call
            script=script.rstrip(',')
            script=script+')'
            base[j]=script
        return base
    except TypeError, e:
        casalog.post("TypeError: %s" % str(e),"SEVERE","pgt")
def check_job(self, job, verbose=True):
    """Check whether an asynchronous job has finished.

    @param job pending-result object returned by odo (or None)
    @param verbose when True, log the job status
    @return True when done (or job is None), False when still pending
    @raise re-raises any error the job produced on the engine
    """
    casalog.origin("parallel_go")
    if type(job)==type(None):
        print "job None has no status"
        return True
    try:
        # Non-blocking probe: None means the result is not ready yet
        x=job.get_result(block=False)
        if x==None:
            if verbose:
                casalog.post("job '%s' has not finished yet, result is pending" % job,"INFO","check_job")
            return False
        else:
            if verbose:
                casalog.post("job '%s' done" % job,"INFO","check_job")
            return True
    except client.CompositeError, exception:
        casalog.post("Error retrieving result of job from engine: %s, backtrace:" % (str(exception)),"SEVERE","check_job")
        exception.print_tracebacks()
        raise
    except:
        casalog.post("Error retrieving result of job from engine, backtrace:","SEVERE","check_job")
        traceback.print_tb(sys.exc_info()[2])
        raise
def howto(self):
    """Print a copy/paste walk-through of typical cluster usage."""
    print """A simple example for use the cluster
from parallel_go import *
c=cluster()
c.start_engine('casa-dev-08',2,'/home/casa-dev-08/hye/cluster')
#can use tb.clearlocks() to remove leftover
c.pgc('default("split")')
c.pgc('inp("split")')
c.pgk(mspath='/home/casa-dev-01/hye/test/')
c.pgk(msfile='ngc5921.ms')
c.pgc('vis=mspath+msfile')
c.pull('vis')
tb.clearlocks('/home/casa-dev-01/hye/test/5921.ms')
c.pgc('outputvis=work_dir+"/"+msfile+"-"+str(id)')
#alternatively
#for i in c.get_ids():
#    p=c.pull('work_dir')[i]
#    f=c.pull('msfile')[i]
#    o=p+"/"+f+"--"+str(i)
#    c.pgk(outputvis={i: o})
c.pull('outputvis')
c.pgc('field="2"')
spw=c.split_channel(0, 64)
spw[0]='0:6~15'
spw[3]='0:48~55'
c.pgk(spw=spw)
c.pgc('inp("split")')
c.pgc('go("split")')
c.read_casalogs()
c.pgc('inp("clean")')
c.pgc('vis=outputvis')
c.pgk(imagetag='.clean')
c.pgc('imagename=vis+imagetag')
c.pgc('inp("clean")')
c.pgc('go("clean")')
c.pgc('import commands')
c.pgc('a=commands.getstatusoutput("ls ")')
"""
def use_often(self):
    """Print a cheat sheet of frequently used cluster commands."""
    print """Frequently used commands
from parallel_go import *
c=cluster()
c.hello()
c.get_ids()
c.get_nodes()
c.activate()
px print "cluster activated"
c.pdo 'print "parallel"'
c.odo('print "node 0 only"', 0)
c.odo('print "node 0 and 1"', [0,1])
c.odo 'print "node 1 only"', 1
c.queue_status()
c.clear_queue()
c.get_properties()
c.keys()
c.pull('mpi')
c.get_result(1)
#pg.activate()
#px 'from casa_in_py import *'
"""
def example(self):
    """Print a worked example: running clean on 4 engines."""
    print """example: run clean on 4 engines
from parallel_go import *
c=cluster()
c.start_engine('casa-dev-08', 4,
               '/home/casa-dev-08/hye/cluster')
default clean
mspath='/home/casa-dev-09/hye/test/ngc6503/ngc6503_output/'
msname='ngc6503.ms.contsub'
vis=mspath+msname
wpath=[]
for i in c.pull('work_dir'):
    wpath.append(i+'/'+msname+'.clean')
imagename=c.pad_task_id(wpath)
mode='channel'
start=c.split_int(9, 127)
nchan=40
width=1
calready=False
gain=0.1
msize=[370,250]
psfmode='clark'
cell=[4.,4.]
niter=10000
threshold=0.0003
taskghting='briggs'
#rmode = 'norm'
robust=0.5
mask = ''
s=c.pgt()
job=[]
for i in c.get_ids():
    job.append(c.odo(s[i], i))
c.check_job(job[0])
c.get_result(0)
"""
# Module-level singleton. NOTE: this rebinds the name 'cluster' from the
# class to an instance of it, so importing modules get a ready-made object.
cluster=cluster()
# The bare strings below are inert developer notes kept as no-op statements.
"""
for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16;
do ssh casa-dev-$i "ps -ef | grep hye" ; done
"""
"""
c.pgc('import time', {0: 'time.sleep(10); x=5; y="y is y"', 1: 'time.sleep(12);a=7;b="b is not y"'},block=False,job="wakeup")
c.pull('x',0)
c.pull('y',0)
c.pull('a',1)
c.pull('b',1)
c.odo('print x', 0)
c.odo('print x', 0).r
"""
|
from flask_wtf import FlaskForm
from wtforms import TextField, SubmitField, StringField, Form, IntegerField, DateField
class CargarTweetForm(FlaskForm):
    """Form for loading (cargar) tweets: search term, max count, cut-off date."""
    search = StringField("search")    # search query text
    maximo = IntegerField('maximo')   # maximum number of tweets to load
    fecha = DateField('fecha_hasta')  # load tweets up to this date
    submit = SubmitField('Cargar')
class ObtenerTweetForm(FlaskForm):
    """Form for retrieving stored tweets from a named collection."""
    coleccion = StringField("coleccion")  # collection name to query
    submit = SubmitField('Buscar')
|
# -*-coding:utf-8-*-
import numpy as np
import freetype
import copy
import random
import cv2
# Default TTF font used for rendering; absolute machine-specific path.
font_path = '/media/cyoung/000E88CC0009670E/projectCV/chinese_image_text/' \
            'img/Img2TextSequence/data_gen/fonts/Deng.ttf'
class put_chinese_text(object):
    """Render (Chinese) text onto an ndarray image via a FreeType TTF face."""
    def __init__(self, ttf_path):
        # Load the TrueType face used by all subsequent drawing calls
        self._face = freetype.Face(ttf_path)
    def draw_text(self, image, pos, text, text_size, text_color):
        """
        draw chinese text with ttf
        :param image: ndarray
        :param pos: (x, y) position where the text is drawn
        :param text: the content; for chinese must be unicode type
        :param text_size: text size (converted to 26.6 units below)
        :param text_color: text color
        :return: image (a copy; the input array is not modified)
        """
        # FreeType sizes/metrics are 26.6 fixed point: multiply/divide by 64
        self._face.set_char_size(text_size * 64)
        metrics = self._face.size
        ascender = metrics.ascender / 64.0
        # descender = metrics.descender / 64.0
        # height = metrics.height / 64.0
        # linegap = height - ascender + descender
        # Shift the baseline down by the ascender so pos is the top edge
        y_pos = int(ascender)
        if not isinstance(text, str):
            text = text.decode('utf-8')
        img = self.draw_string(image, pos[0], pos[1] + y_pos, text, text_color)
        return img
    def draw_string(self, img, x_pos, y_pos, text, color):
        """
        draw string
        :param img: ndarray
        :param x_pos: x coordinate
        :param y_pos: y coordinate
        :param text: text(str)
        :param color: text color
        :return: image (deep copy of img with the string drawn on it)
        """
        prev_char = 0
        pen = freetype.Vector()
        # Pen position is kept in 26.6 fixed point (1/64 pixel units)
        pen.x = x_pos << 6
        pen.y = y_pos << 6
        hscale = 1.0
        # 2x2 glyph transform in 16.16 fixed point (slight shear/stretch)
        matrix = freetype.Matrix(int(hscale) * 0x10000, int(0.2 * 0x10000),
                                 int(0.0 * 0x10000), int(1.1 * 0x10000))
        cur_pen = freetype.Vector()
        pen_translate = freetype.Vector()
        image = copy.deepcopy(img)
        for cur_char in text:
            self._face.set_transform(matrix, pen_translate)
            self._face.load_char(cur_char)
            # kerning = self._face.get_kerning(prev_char, cur_char)
            # NOTE(review): fixed extra advance of 170 (26.6 units, ~2.7 px)
            # per glyph is used instead of kerning -- confirm this spacing
            pen.x = pen.x + 170
            slot = self._face.glyph
            bitmap = slot.bitmap
            cur_pen.x = pen.x
            # Glyph top offset converted back into 26.6 units
            cur_pen.y = pen.y - slot.bitmap_top * 64
            self.draw_ft_bitmap(image, bitmap, cur_pen, color)
            # cv2.imshow('image', image)
            # cv2.waitKey(0)
            pen.x += slot.advance.x
            prev_char = cur_char
        return image
    def draw_ft_bitmap(self, img, bitmap, pen, color):
        """
        draw each char
        :param img: ndarray (modified in place)
        :param bitmap: freetype glyph bitmap
        :param pen: pen position in 26.6 fixed point
        :param color: pen color e.g.(0,0,255)-->red
        """
        # Convert 26.6 fixed point back to integer pixel coordinates
        x_pos = pen.x >> 6
        y_pos = pen.y >> 6
        cols = bitmap.width
        rows = bitmap.rows
        glyph_pixels = bitmap.buffer
        # Paint every non-zero coverage pixel as solid color.
        # NOTE(review): no bounds check -- glyphs near the image edge
        # would raise IndexError; confirm callers keep text inside.
        for row in range(rows):
            for col in range(cols):
                if glyph_pixels[row * cols + col] != 0:
                    img[y_pos + row][x_pos + col][0] = color[0]
                    img[y_pos + row][x_pos + col][1] = color[1]
                    img[y_pos + row][x_pos + col][2] = color[2]
        # return img
class gen_id_card(object):
    """Generator of random digit-string images (ID-card style, up to 18 digits)."""

    def __init__(self, ttf_path):
        # Digits only; char_set aliases number so one-hot indices map to digits.
        self.number = [str(d) for d in range(10)]
        self.char_set = self.number
        self.len = len(self.char_set)
        self.max_size = 18
        self.ft = put_chinese_text(ttf_path)

    def gen_text(self, is_random=False):
        """Return (text, one-hot label vector of length max_size * len)."""
        size = random.randint(8, self.max_size) if is_random else self.max_size
        chars = [random.choice(self.char_set) for _ in range(size)]
        vecs = np.zeros((self.max_size * self.len))
        for pos, ch in enumerate(chars):
            vecs[pos * self.len:(pos + 1) * self.len] = self.char2vec(ch)
        return ''.join(chars), vecs

    def char2vec(self, c):
        """One-hot encode a single character against char_set."""
        vec = np.zeros(self.len)
        for idx, known in enumerate(self.char_set):
            if known == c:
                vec[idx] = 1
        return vec

    def vec2text(self, vecs):
        """Decode a (concatenated) one-hot vector back into its digit string."""
        chars = []
        for pos in range(len(vecs)):
            if vecs[pos] == 1:
                chars.append(self.number[pos % self.len])
        return ''.join(chars)

    def gen_one_image(self, is_random=False):
        """Render one random text onto a 32x256 black canvas; return (gray, text, vec)."""
        text, vec = self.gen_text(is_random)
        canvas = np.zeros((32, 256, 3))
        image = self.ft.draw_text(canvas, (0, 0), text, 21, (255, 255, 255))
        # Any single channel works as grayscale since the text is white.
        return image[:, :, 2], text, vec

    def get_next_batch(self, batch_size=128, output_size=(32, 256)):
        """Build a CTC-style batch: (inputs, sparse targets, sequence lengths)."""
        inputs = np.zeros((batch_size, output_size[1], output_size[0]))
        codes = []
        for k in range(batch_size):
            image, text, vec = self.gen_one_image(True)
            # Transpose to (width, height) so time runs along axis 1 of inputs.
            inputs[k, :, :] = np.transpose(image)
            codes.append(list(text))
        targets = [np.asarray(c) for c in codes]
        sparse_targets = sparse_tuple_from(targets)
        # Every sample uses the full width as its sequence length.
        seq_len = np.ones(inputs.shape[0]) * output_size[1]
        return inputs, sparse_targets, seq_len

    def gen_anymore_image(self):
        """Endless generator of (image, text, vec) samples."""
        while True:
            yield self.gen_one_image()
# Helper for building TF sparse tensors.
def sparse_tuple_from(sequences, dtype=np.int32):
    """Convert a list of sequences into a sparse-tensor triple.

    :param sequences: list of per-sample label sequences
    :param dtype: dtype of the values array
    :return: (indices, values, shape) where indices is (n, 2) int64,
             values is (n,) of `dtype`, and shape is (batch, max_len).
    """
    indices = []
    values = []
    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))
        values.extend(seq)
    # reshape keeps rank 2 even when there are no entries at all.
    indices = np.asarray(indices, dtype=np.int64).reshape(-1, 2)
    values = np.asarray(values, dtype=dtype)
    # max(..., default=0) handles empty / all-empty input, which crashed the
    # previous implementation (max of an empty indices array).
    max_len = max((len(seq) for seq in sequences), default=0)
    shape = np.asarray([len(sequences), max_len], dtype=np.int64)
    return indices, values, shape
def decode_sparse_tensor(sparse_tensor, digits):
    """Convert a sparse (indices, values, shape) triple into per-sample
    character lists.

    :param sparse_tensor: tuple(indices, values, shape)
    :param digits: lookup table mapping a value to its character
    :return: list of character lists, one per batch item
    """
    groups = []
    active = []
    prev_batch = 0
    for offset, pair in enumerate(sparse_tensor[0]):
        batch = pair[0]
        if batch != prev_batch:
            # Batch id changed: close the current group and start a new one.
            groups.append(active)
            prev_batch = batch
            active = []
        active.append(offset)
    groups.append(active)
    return [decode_a_seq(g, sparse_tensor, digits) for g in groups]


def decode_a_seq(indexes, spars_tensor, digits):
    """Map the sparse-tensor value at each offset through the digits table."""
    return [digits[spars_tensor[1][pos]] for pos in indexes]
if __name__ == '__main__':
    # Endless preview loop: renders one generated digit image per keypress.
    gen_obj = gen_id_card(font_path)
    for image, label, vec in gen_obj.gen_anymore_image():
        cv2.imshow('image', image)
        cv2.waitKey()
|
from cryptopals import block
if __name__ == '__main__':
    # Quick demo: PKCS#7-pad a 16-byte block out to a 20-byte boundary.
    test_string = 'YELLOW SUBMARINE'
    print(block.pkcs7_pad(test_string.encode(), 20))
|
"""
NeuroImaging volumes visualization
==================================
Simple example to show Nifti data visualization.
"""
##############################################################################
# Fetch data
# ----------
from nilearn import datasets
# By default 2nd subject will be fetched
haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('First anatomical nifti image (3D) located is at: %s' %
haxby_dataset.anat[0])
print('First functional nifti image (4D) is located at: %s' %
haxby_dataset.func[0])
##############################################################################
# Visualization
# -------------
from nilearn.image.image import mean_img
# Compute the mean EPI: we do the mean along the axis 3, which is time
func_filename = haxby_dataset.func[0]
mean_haxby = mean_img(func_filename)
from nilearn.plotting import plot_epi, show
plot_epi(mean_haxby, colorbar=True, cbar_tick_format="%i")
##############################################################################
# Extracting a brain mask
# -----------------------
# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(func_filename)
# Visualize it as an ROI
from nilearn.plotting import plot_roi
plot_roi(mask_img, mean_haxby)
##############################################################################
# Applying the mask to extract the corresponding time series
# ----------------------------------------------------------
from nilearn.masking import apply_mask
masked_data = apply_mask(func_filename, mask_img)
# masked_data shape is (timepoints, voxels). We can plot the first 150
# timepoints from two voxels
# And now plot a few of these
import matplotlib.pyplot as plt
plt.figure(figsize=(7, 5))
plt.plot(masked_data[:150, :2])
plt.xlabel('Time [TRs]', fontsize=16)
plt.ylabel('Intensity', fontsize=16)
plt.xlim(0, 150)
plt.subplots_adjust(bottom=.12, top=.95, right=.95, left=.12)
show()
|
import pandas as pd
import numpy as np
def _load_language(path, chunk_size=140, train_count=2000, test_count=200):
    """Read one corpus file and split it into fixed-size character chunks.

    Newlines are removed, the stream is cut into `chunk_size`-character
    chunks, and a trailing partial chunk (shorter than chunk_size) is
    dropped.  Each chunk is converted to a list of character code points.

    :return: (train, test) lists of per-chunk code-point lists
    """
    with open(path, 'r') as fp:
        text = fp.read().replace('\n', '')
    chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
    # Drop a trailing short chunk, if any ("just in case", per the original note).
    if chunks and len(chunks[-1]) < chunk_size:
        chunks.pop()
    train = chunks[:train_count]
    test = chunks[train_count:train_count + test_count]
    return ([[ord(ch) for ch in c] for c in train],
            [[ord(ch) for ch in c] for c in test])


def processData():
    """Load the German and English corpora and split each into train/test sets.

    Fixes from the original: np.delete()'s return value was discarded, so a
    trailing sub-140-character chunk was silently kept; file handles are now
    managed with context managers; the unused german_labels array is gone.

    :return: (germanTrainingSet, englishTrainingSet, germanTestSet,
              englishTestSet) — each a list of per-chunk code-point lists.
    """
    germanTrainingSet, germanTestSet = _load_language('train_german.txt')
    englishTrainingSet, englishTestSet = _load_language('train_english.txt')
    return germanTrainingSet, englishTrainingSet, germanTestSet, englishTestSet
processData() |
import pytest
import jax
import jax.numpy as jnp
import jax.flatten_util
import numpy as np
from functools import partial
import itertools
import tarfile
import glob
from io import BytesIO
from flax import serialization
import netket as nk
from .. import common
pytestmark = common.skipif_mpi
SEED = 111
@pytest.fixture()
def vstate(request):
    """MCState fixture: an RBM ansatz sampled with MetropolisLocal on an
    8-site spin-1/2 Hilbert space."""
    N = 8
    hi = nk.hilbert.Spin(1 / 2, N)
    # NOTE(review): the chain graph is built but never passed on — verify it
    # is actually needed here.
    g = nk.graph.Chain(N)
    ma = nk.models.RBM(
        alpha=1,
        dtype=float,
        hidden_bias_init=nk.nn.initializers.normal(),
        visible_bias_init=nk.nn.initializers.normal(),
    )
    return nk.variational.MCState(
        nk.sampler.MetropolisLocal(hi),
        ma,
    )
def test_variables_from_file(vstate, tmp_path):
    """variables_from_file must restore saved parameters whether or not the
    '.mpack' extension is included in the path."""
    fname = str(tmp_path) + "/file.mpack"
    with open(fname, "wb") as f:
        f.write(serialization.to_bytes(vstate.variables))
    # Try the path both with and without its extension.
    for name in [fname, fname[:-6]]:
        vstate2 = nk.variational.MCState(
            vstate.sampler, vstate.model, n_samples=10, seed=SEED + 100
        )
        vstate2.variables = nk.variational.experimental.variables_from_file(
            name, vstate2.variables
        )
        # check loaded parameters match the saved ones
        # NOTE(review): jax.tree_multimap is deprecated in newer jax (jax.tree_map).
        jax.tree_multimap(
            np.testing.assert_allclose, vstate.parameters, vstate2.parameters
        )
def test_variables_from_tar(vstate, tmp_path):
    """variables_from_tar must restore the j-th archived variables entry
    (with or without the '.tar' extension) and raise KeyError for an
    out-of-range index."""
    fname = str(tmp_path) + "/file.tar"
    with tarfile.TarFile(fname, "w") as f:
        # Archive ten identical snapshots named 0.mpack .. 9.mpack.
        for i in range(10):
            save_binary_to_tar(
                f, serialization.to_bytes(vstate.variables), f"{i}.mpack"
            )
    for name in [fname, fname[:-4]]:
        vstate2 = nk.variational.MCState(
            vstate.sampler, vstate.model, n_samples=10, seed=SEED + 100
        )
        for j in [0, 3, 8]:
            vstate2.variables = nk.variational.experimental.variables_from_tar(
                name, vstate2.variables, j
            )
            # check
            # NOTE(review): jax.tree_multimap is deprecated in newer jax.
            jax.tree_multimap(
                np.testing.assert_allclose, vstate.parameters, vstate2.parameters
            )
        # Index 15 does not exist in the archive.
        with pytest.raises(KeyError):
            nk.variational.experimental.variables_from_tar(name, vstate2.variables, 15)
def save_binary_to_tar(tar_file, byte_data, name):
    """Append an in-memory bytes payload to an open TarFile under `name`."""
    payload = BytesIO(byte_data)
    # A tar entry needs an explicit size before the data can be added.
    entry = tarfile.TarInfo(name=name)
    entry.size = len(payload.getbuffer())
    # Write the header and stream the payload into the archive.
    tar_file.addfile(tarinfo=entry, fileobj=payload)
|
import string
import nltk
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.corpus import stopwords
from nltk.corpus import pros_cons
from nltk.stem.lancaster import LancasterStemmer
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
def bag_of_words(words):
    """Map every token to True — the classic NLTK feature-dict format."""
    return {word: True for word in words}
def bigram_words(words, score_fn=BigramAssocMeasures.chi_sq, n=10):
    """Feature dict of the tokens plus their n best-scoring bigram collocations."""
    finder = BigramCollocationFinder.from_words(words)
    top_bigrams = finder.nbest(score_fn, n)
    # Single words and the most informative bigrams together form the features.
    return bag_of_words(words + top_bigrams)
# Strip punctuation/stop words from a paragraph and return its token list.
def preprocess(paragraph):
    """Lower-case, tokenize, and drop English stop words and punctuation tokens.

    Performance fix: the original re-evaluated
    `stopwords.words("english") + english_punctuations` (building a fresh
    list and doing an O(n) membership scan) for every single token; the
    filter is now a set built once.
    """
    # punctuation tokens to discard
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    drop = set(stopwords.words("english")) | set(english_punctuations)
    # tokenize
    tokens = nltk.word_tokenize(paragraph.lower())
    return [word for word in tokens if word not in drop]
# Reduce words to their stems — generally used before classification.
def stem(word_list):
    """Return the Lancaster stem of each word in word_list."""
    stemmer = LancasterStemmer()
    return [stemmer.stem(word) for word in word_list]
sentence = "As the Chinese currency is not freely convertible under the capital account," \
+ " the central bank has to purchase foreign currency generated by China's trade surplus and foreign investment in the country, adding funds to the money market." \
+ "The narrowing decline indicated easing pressure from capital flight as the Chinese economy firms up and the yuan stabilizes against the U.S. dollar." \
+ "Official data showed China forex reserves climbing to 3.0295 trillion U.S. dollars at the end of April from 3.0091 trillion dollars a month earlier." \
+ "This was the first time since June 2014 the reserves expanded for three consecutive months"
# print(preprocess(sentence))
# print(stem(preprocess(sentence)))
# print(remove_punctuation(word))
nltk.download()
# print(pros_cons.readme())
# print(bigram_words(preprocess(sentence)))
# print(stopwords.words("english"))
|
import random
def noun_pr():
    """Pick a random prepositional-case noun from noun_pr.txt."""
    with open("noun_pr.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def narech():
    """Pick a random adverb from narech.txt."""
    with open("narech.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def noun_nom():
    """Pick a random nominative-case noun from noun_nom.txt."""
    with open("noun_nom.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def predlog():
    """Pick a random preposition from predlog.txt."""
    with open("predlog.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def verb():
    """Pick a random verb from verb.txt."""
    with open("verb.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def pril2():
    """Pick a random adjective (form 2) from pril2.txt."""
    with open("pril2.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def pril3():
    """Pick a random adjective (form 3) from pril3.txt."""
    with open("pril3.txt", encoding="utf-8") as src:
        candidates = src.read().split()
    return random.choice(candidates)
def verse1():
    """First verse line: preposition + adjective + prepositional noun."""
    return ' '.join([predlog(), pril2(), noun_pr()])
def verse2():
    """Second verse line: noun + verb + adverb, ending with a period."""
    return ' '.join([noun_nom(), verb(), narech()]) + '.'
def verse3():
    """Third verse line: adjective + noun, ending with a period."""
    return ' '.join([pril3(), noun_nom()]) + '.'
# Assemble and print a three-line random verse.
print(verse1())
print(verse2())
print(verse3())
|
# Generated by Django 2.2.6 on 2019-10-24 10:52
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make SiteSettings.company_address optional (nullable FK, SET_NULL)."""
    dependencies = [("site", "0024_sitesettings_customer_set_password_url")]
    operations = [
        migrations.AlterField(
            model_name="sitesettings",
            name="company_address",
            field=models.ForeignKey(
                blank=True,
                null=True,
                # Keep the settings row if the referenced Address is deleted.
                on_delete=django.db.models.deletion.SET_NULL,
                to="account.Address",
            ),
        )
    ]
|
from rest_framework.parsers import JSONParser
from rest_framework import viewsets
from scapi.models.shoppingcart import shoppingcart
from scapi.serializers.shoppingcartSerializer import shoppingcartSerializer
class CartViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints (list/retrieve/create/update/destroy) for
    shopping-cart records."""
    queryset = shoppingcart.objects.all()
    serializer_class = shoppingcartSerializer
#!/usr/bin/env python
from setuptools import setup
setup(
    # Distribution metadata
    name='intercom',
    version='0.0.1',
    url='https://github.com/alexhanson/intercom',
    license='ISC License',
    python_requires='>=3.8',
    # Pinned runtime dependencies
    install_requires=[
        'click==7.1.2',
        'CherryPy==18.6.0',
        'Jinja2==2.11.3',
        'pytz==2021.1',
    ],
    packages=[
        'intercom',
    ],
    # Ship the Jinja templates inside the package
    package_data={
        'intercom': ['templates/*'],
    },
    # Installs an `intercom` console command -> intercom.__main__:main
    entry_points={
        'console_scripts': ['intercom = intercom.__main__:main'],
    },
)
|
"""
列表推导和生成器表达式
"""
def main():
    """Worked examples of comprehensions, generator expressions, tuples,
    slicing, sorting, bisect, deque, array and memoryview."""
    symbols = '%^&$*('
    symbol = 'time'
    codes = [ord(symbol) for symbol in symbols]
    print(codes)
    # In Python 3 a comprehension no longer leaks its loop variable.
    print(symbol)  # 'time'
    # Comprehension compared with filter/map:
    print([ord(symbol) for symbol in symbols if ord(symbol) > 40])
    print(list(filter(lambda c: c > 40, map(ord, symbols))))
    # Cartesian product
    colors = ['black', 'white']
    sizes = ['S', 'M', 'L']
    # Note the resulting order: the first `for` clause varies slowest.
    t_shirts = [(color, size) for color in colors for size in sizes]
    print(t_shirts)
    # Generator expressions
    # A generator avoids holding all items in memory: values are produced
    # lazily, one at a time (more on the underlying machinery with coroutines).
    generator_obj = ('%r,%r' % (color, size) for color in colors for size in sizes)
    print(type(generator_obj))
    print(generator_obj)
    print(next(generator_obj))  # next(generator_obj)
    print(list(generator_obj))
    # Tuples are not just immutable lists:
    # they also act as records — bundling several fields into one value,
    # a bit like a DDD value object.
    lax_coordinates = (33.9425, -118.408056)
    latitude, longitude = lax_coordinates
    print(latitude, longitude)
    # The * operator unpacks an iterable into call arguments.
    print(divmod(1, 3))
    a_tuple = (1, 3)
    print(divmod(*a_tuple))
    # * is also a powerful placeholder when unpacking.
    a, b, *c, d = [1, 2, 3, 4, 5, 6]
    print(a, b, c, d, )  # 1 2 [3, 4, 5] 6
    # Named tuples
    # A namedtuple costs the same memory as a plain tuple — less than a class
    # instance, since there is no per-instance __dict__ for attributes.
    import collections
    Card = collections.namedtuple('Card', 'rank, suit')
    # Card = collections.namedtuple('Card', ['rank', 'suit'])
    card = Card(2, 'hearts')
    print(card)
    # Named-tuple attributes cannot be reassigned after creation:
    # card.rank = 3  ->  AttributeError: can't set attribute
    print(card._fields)  # the field names of the named tuple
    # Build a named tuple from a plain tuple:
    card_tuple = (3, 'hearts')
    print(Card._make(card_tuple))
    print(card._asdict())  # fields as an OrderedDict (an ordered mapping)
    # Slicing
    l = [10, 20, 30, 40, 50, 60]
    print(l[:2])
    print(l[2:])
    s = 'bicycle'
    print(s[::3])
    print(s[::-1])
    print(s[::-2])
    print(l)
    l[2:5] = [100]
    print(l)
    a = [1, 2, 3]
    print(a * 5)
    print(5 * 'abcd')
    board = [['_'] * 3 for i in range(3)]
    print(board)
    board[1][2] = 'x'
    print(board)
    l = [1, 2, 3]
    print(l)
    print(id(l))
    l *= 2  # in-place multiply
    print(l)
    print(id(l))
    t = (1, 2, [30, 40])
    try:
        t[2] += [50, 60]  # TypeError: 'tuple' object does not support item assignment
    except BaseException as e:
        print(e)
    print(t)
    # Surprising: one might expect (1, 2, [30, 40, 50, 60]) with no error,
    # since [30, 40] is mutable (the tuple only stores a reference to it) —
    # instead an exception is raised AND t still ends up modified.
    # The reason: augmented assignment is not an atomic operation.
    # Moral: avoid putting mutable objects inside tuples.
    # list.sort versus sorted
    a = [3, 5, 1, 5, 7, 4]
    print(sorted(a))  # sorted returns a new sorted list (shallow copy)
    print(a)
    print(a.sort())  # .sort() returns None and sorts the list in place
    print(a)
    # Both share two options: key (sort rule) and reverse (direction).
    a = ['time', 'nihao', 'me', 'python']
    print(sorted(a, reverse=True, key=lambda x: x[-1]))
    # Sorting is expensive — once a sequence is sorted, keep it that way.
    # Manage already-sorted sequences with bisect (binary search).
    # bisect only compares with < — no key= parameter — but it does accept
    # lo/hi bounds to restrict the search range.
    import bisect
    a = [1, 4, 6, 7, 9, 10]
    a.sort()
    print(a)
    print(bisect.bisect(a, 8))  # index where 8 should be inserted
    # More sorting tricks in the cookbook and the official Sorting HOWTO:
    # https://docs.python.org/3/howto/sorting.html
    # list is a fine general tool, but sometimes a specialised one is better.
    # Queues
    # .append and .pop let a list act as a queue (stack), but removing or
    # inserting the first element is O(n).
    # collections.deque is a thread-safe double-ended queue with fast
    # appends/pops at both ends.
    from collections import deque
    dq = deque(range(15), maxlen=10)  # the first 5 items were pushed out
    print(dq)
    dq.rotate(3)  # rotation
    print(dq)
    dq.appendleft(-1)
    print(dq)
    dq.extend([1, 2, 3])
    print(dq)
    # deque implements almost the whole list API, but avoid touching the
    # middle: it is implemented as a doubly linked structure.
    print(dq[3])
    # Other queues:
    # queue
    # multiprocessing
    # asyncio
    # heapq
    # array: far more efficient for large homogeneous numeric data, and it
    # ships with (de)serialisation helpers.
    from array import array
    from random import random
    fs = array('d', (random() for i in range(10 ** 6)))
    # print(fs)  # better not…
    print(fs[-1])
    # memoryview versus NumPy / SciPy
    # memv = memoryview([1, 3, 4, 5, 6, 7, -1]) # TypeError: memoryview: a bytes-like object is required, not 'list'
    memv = memoryview(array('d', [1, 4, 5, 67, 9, 7, 5, 45, 3]))
    print(memv)
    print(len(memv))
print(len(memv))
if __name__ == "__main__":
main()
|
from openpyxl import Workbook
from openpyxl.drawing.image import Image
#please do install Pillow module
#pip install Pillow to use images
# Create a workbook, write a caption in A1 and anchor the image at B2.
book = Workbook()
sheet = book.active
img = Image("xlf/apple.jpg")  # requires Pillow and the image file to exist
sheet['A1'] = 'This is apple'
sheet.add_image(img, 'B2')
book.save("xlf/9.xlsx")
import math
def add(a, b):
    """Print and return the sum of a and b (original discarded the result)."""
    print("sum of a and b", a+b)
    return a + b
def diff(a, b):
    """Print and return the difference a - b (original discarded the result)."""
    print("diff of 2 num", a-b)
    return a - b
def mul(a, b):
    """Print and return the product of a and b (original discarded the result)."""
    print("Mul of a and b", a*b)
    return a * b
def div(a, b):
    """Print a/b and return it; warn and return None when b is zero."""
    if b == 0:
        # Guard clause: division by zero is reported, not raised.
        print("enter a nonzero num")
        return None
    print("div a/b is ", a/b)
    return a / b
def sqroot(a):
    """Print and return the square root of a (original discarded the result)."""
    root = math.sqrt(a)
    print("square root of number", a, "is ", root)
    return root
def floor_div(a, b):
    """Print and return the floor division a // b (original discarded the result)."""
    print("floor division of a and b is ", a//b)
    return a // b
def fib(nterms):
    """Print the first nterms Fibonacci numbers and return them as a list.

    Console output matches the original; the sequence is additionally
    returned so callers can use it. A negative nterms prints a warning and
    returns an empty list. (Also renames the local that shadowed the
    builtin `next`.)
    """
    a = 0
    b = 1
    seq = []
    if nterms < 0:
        print("please enter positive num")
    elif nterms == 1:
        print("fibo... ", a)
        seq = [a]
    elif nterms == 2:
        print(a)
        print(b)
        seq = [a, b]
    else:
        print(a)
        print(b)
        seq = [a, b]
        remaining = nterms
        while remaining > 2:
            nxt = a + b
            print(nxt)
            seq.append(nxt)
            a = b
            b = nxt
            remaining = remaining - 1
    return seq
def isprime(num):
    """Print whether num is prime and return the verdict as a bool.

    Improvements: trial division now stops at isqrt(num) instead of num
    (same printed messages, far fewer iterations), and num <= 1 — on which
    the original silently printed nothing — now returns False explicitly.
    """
    if num <= 1:
        return False
    for i in range(2, math.isqrt(num) + 1):
        if num % i == 0:
            print(num, "not a prime num")
            return False
    print(num, " is a prime")
    return True
# coding=UTF-8
'''
Created on 2017
@author: XYJ
'''
import jieba
import os
import random
import math
def TextProcessing(floder_path, train_size=0.8):
    """Walk a folder-per-class corpus, tokenize every file with jieba and
    split each class into train/test word lists.

    :param floder_path: root directory; each subfolder is one class
    :param train_size: fraction of each class's documents used for training
    :return: (train_data_list, test_data_list, train_class_list,
              test_class_list) where train_data_list[i] is one flat word
              list per class and test_data_list[i] is a list of per-document
              word lists.

    Fixes vs. the original: a stray bare `3` expression (accidental no-op)
    was removed; the four while-remove loops are replaced by a single
    filtering pass; file handles are closed via the context manager.
    """
    floder_list = os.listdir(floder_path)
    train_data_list = []
    train_class_list = []
    test_data_list = []
    test_class_list = []
    # Tokens to discard: ideographic space, CRLF, NUL, LF.
    junk_tokens = ('\u3000', '\r\n', '\x00', '\n')
    for floder in floder_list:
        new_floder_path = os.path.join(floder_path, floder)
        word_list = []
        for file in os.listdir(new_floder_path):
            with open(os.path.join(new_floder_path, file), 'rb') as f:
                # NOTE(review): 'ANSI' is a Windows-only codec alias — confirm
                # the corpus encoding before running elsewhere.
                raw = f.read().decode('ANSI', 'ignore')
            txt_list = list(jieba.cut(raw, cut_all=False))
            txt_list = [t for t in txt_list if t not in junk_tokens]
            word_list.append(txt_list)
        # Shuffle documents so the train/test cut is random.
        random.shuffle(word_list)
        size = int(len(word_list) * train_size)
        print(floder)
        print(size)
        tem_train_list = word_list[:size]
        tem_test_list = word_list[size:]
        # Flatten the training documents into one word list for this class.
        tem_train_word = [w for doc in tem_train_list for w in doc]
        # Build the training and test data sets.
        train_data_list.append(tem_train_word)
        train_class_list.append(floder)
        test_data_list.append(tem_test_list)
        test_class_list.append(floder)
    return train_data_list, test_data_list, train_class_list, test_class_list
def makeStopwordsSet(stopwords_file):
    """Load a UTF-8 stop-word file (one word per line) into a set.

    :param stopwords_file: path to the stop-word list
    :return: set of stop words

    Robust to both LF and CRLF line endings: the original sliced `line[:-2]`
    (assuming CRLF), which chopped a real character off every word in an
    LF-terminated file.
    """
    words_set = set()
    with open(stopwords_file, 'rb') as f:
        for line in f:
            word = line.decode('UTF-8').rstrip('\r\n')
            if len(word) > 0:
                words_set.add(word)
    return words_set
def listToDict(data_list, stopwords_set=set()):
    """Count word frequencies, skipping stop words and pure-digit tokens."""
    data_dict = {}
    for token in data_list:
        # Guard clause: stop words and numbers carry no class signal here.
        if token in stopwords_set or token.isdigit():
            continue
        data_dict[token] = data_dict.get(token, 0) + 1
    return data_dict
def clearlist(test_list, stopwords_set=set()):
    """Return test_list without stop words and pure-digit tokens."""
    return [t for t in test_list if t not in stopwords_set and not t.isdigit()]
def predicted(test_list, train_data_list_dict, train_class_list, train_data_count):
    """Return the class label maximizing the summed log-probability of test_list.

    :param test_list: cleaned word list of one document
    :param train_data_list_dict: per-class word-frequency dicts
    :param train_class_list: per-class labels (parallel to the dicts)
    :param train_data_count: per-class total word counts
    :return: the predicted class label

    Idiom fix: the original looked up the winner with the unbound-method
    call `list.index(predicte, ma)`; use the instance method instead.
    """
    scores = []
    for dic, count in zip(train_data_list_dict, train_data_count):
        # Naive-Bayes score: sum of per-word Laplace-smoothed log-probs.
        scores.append(sum(P(word, dic, count) for word in test_list))
    return train_class_list[scores.index(max(scores))]
def P(word, dic, count):
    """Laplace-smoothed log10 probability of `word` under one class.

    :param word: the token to score
    :param dic: the class's word-frequency dict
    :param count: the class's total word count
    :return: log10((freq + 1) / (count + vocabulary size))

    Idiom fix: math.log10(x) instead of math.log(x)/math.log(10).
    """
    hits = dic.get(word, 0)
    return math.log10((hits + 1) / (count + len(dic)))
def main():
    """Train and evaluate the naive-Bayes text classifier on the Reduced corpus.

    Fixes vs. the original: the test-set cleaning loop assigned clearlist()'s
    result to the loop variable, so the cleaned documents were thrown away —
    they are now written back; path concatenation with a hard-coded '\\\\' is
    replaced by os.path.join.
    """
    abspath = os.path.abspath(os.path.dirname(os.getcwd()))
    ########## stop-word set ##########
    stopwords_file = os.path.join(abspath, 'stopwords_cn.txt')
    stopwords_set = makeStopwordsSet(stopwords_file)
    ########### data sets ############
    folder_path = os.path.join(abspath, 'Reduced')
    train_data_list, test_data_list, train_class_list, test_class_list = \
        TextProcessing(folder_path, train_size=0.8)
    ## build per-class word-frequency dicts from the training data ##
    train_data_list_dict = []
    for word_list in train_data_list:
        train_data_list_dict.append(listToDict(word_list, stopwords_set))
    print('训练数据集处理完成')
    ## clean the test documents (write the cleaned lists back!) ##
    for idx, test_list in enumerate(test_data_list):
        test_data_list[idx] = [clearlist(doc, stopwords_set) for doc in test_list]
    print('测试数据集处理完成')
    ## show each class's keywords in descending frequency order ##
    for a in train_data_list_dict:
        internet_list = sorted(a.items(), key=lambda kv: kv[1], reverse=True)
        print(internet_list[:200])
    ## total word count per class — the denominators used by P() ##
    train_data_count = []
    for dic in train_data_list_dict:
        train_data_count.append(sum(dic.values()))
    ### evaluate on the held-out documents ###
    for li, classtpye in zip(test_data_list, test_class_list):
        corr = 0
        count = 0
        for lis in li:
            name = predicted(lis, train_data_list_dict, train_class_list, train_data_count)
            count += 1
            if name == classtpye:
                corr += 1
        print(classtpye + '类预测成功率为 %.3f %%' % (corr * 100 / count))
if __name__ == '__main__':
    # Train and evaluate when run as a script.
    main()
|
# -*- coding: utf-8 -*-
import unittest
import os
import swc2vtk
class TestVtkGenerator(unittest.TestCase):
    """Smoke tests for swc2vtk.VtkGenerator: SWC loading, draw modes and
    VTK output options.

    Bug fix: the original had a stray trailing backslash after the
    write_volume_vtk(...) call, which line-continued into the next `def`
    and made the whole module a SyntaxError.
    """

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        self.swc_file_path = os.path.join('tests', 'simple.swc')
        self.data_file_path = os.path.join('tests', 'simple.dat')
        self.output_file_path = os.path.join('output.vtk')
        self.vtk_generator = swc2vtk.VtkGenerator()

    def tearDown(self):
        # Every test also exercises the writer on whatever was built.
        self.vtk_generator.write_vtk(self.output_file_path)

    def test_add_cylinder(self):
        self.vtk_generator.add_cylinder()

    def test_add_swc(self):
        simple_cmp_size = 11  # compartments in tests/simple.swc
        self.vtk_generator.add_swc(self.swc_file_path)
        self.assertEqual(simple_cmp_size, len(self.vtk_generator.swc_list[0].data))

    def test_add_swc_flip(self):
        simple_cmp_size = 11
        # Flipping and shifting must not change the compartment count.
        self.vtk_generator.add_swc(self.swc_file_path, inv_x=True, inv_y=True, inv_z=True,
                                   shift_x=100, shift_y=100, shift_z=100)
        self.assertEqual(simple_cmp_size, len(self.vtk_generator.swc_list[0].data))

    def test_add_datafile(self):
        simple_cmp_size = 11
        self.vtk_generator.add_swc(self.swc_file_path)
        self.vtk_generator.add_datafile(self.data_file_path)
        self.assertEqual(simple_cmp_size, len(self.vtk_generator.swc_list[0].data))

    def test_draw_mode1(self):
        self.vtk_generator.set_draw_mode(1)
        self.vtk_generator.add_swc(self.swc_file_path)

    def test_draw_mode2(self):
        self.vtk_generator.set_draw_mode(2)
        self.vtk_generator.add_swc(self.swc_file_path)

    def test_draw_mode3(self):
        self.vtk_generator.set_draw_mode(3)
        self.vtk_generator.add_swc(self.swc_file_path)

    def test_draw_mode4(self):
        self.vtk_generator.set_draw_mode(4)
        self.vtk_generator.add_swc(self.swc_file_path)

    def test_write_vtk_options(self):
        self.vtk_generator.add_swc(self.swc_file_path)
        self.vtk_generator.write_vtk(self.output_file_path, fixedval=True, movingval=True, coloring=True,
                                     diam_ratio=0.2, normalize_diam=True, radius_data=True, type_data=True)

    def test_write_volume_vtk(self):
        self.vtk_generator.add_swc(self.swc_file_path)
        self.vtk_generator.write_volume_vtk('simple.vtk', origin=(-10.0, -10.0, -10.0), ratio=(1, 1, 1), div=(20, 20, 20))

    def test_add_mark(self):
        self.vtk_generator.add_swc(self.swc_file_path)
        self.vtk_generator.add_mark()

    def test_add_swc_connection(self):
        self.vtk_generator.add_swc(self.swc_file_path)
        self.vtk_generator.add_swc_connection(0, 0, 100, 200)
def suite():
    """Build this module's test suite.

    Uses TestLoader.loadTestsFromTestCase: unittest.makeSuite was deprecated
    and removed in Python 3.13.
    """
    loader = unittest.TestLoader()
    return unittest.TestSuite(loader.loadTestsFromTestCase(TestVtkGenerator))
if __name__ == '__main__':
    # Discover and run the tests in this module.
    unittest.main()
|
"""Utility functions for POST /tasks/{id}:cancel endpoint."""
import logging
from requests import HTTPError
from typing import Dict
from celery import current_app
from connexion.exceptions import Forbidden
import tes
from pro_tes.config.config_parser import get_conf
from pro_tes.errors.errors import TaskNotFound
from pro_tes.ga4gh.tes.states import States
from pro_tes.tasks.utils import set_task_state
# Get logger instance
logger = logging.getLogger(__name__)
# Utility function for endpoint POST /runs/<run_id>/delete
def cancel_task(
    config: Dict,
    id: str,
    *args,
    **kwargs
) -> Dict:
    """Cancel a running task, both the local Celery worker and the remote
    TES task it wraps.

    :param config: app config; used to locate the tasks collection and the
        service-call timeout
    :param id: pro-TES task identifier
    :param kwargs: carries 'user_id' when authorization is enabled
    :return: empty dict (the endpoint has no response body)
    :raises TaskNotFound: no task with this id exists
    :raises Forbidden: the task belongs to a different user
    """
    collection = get_conf(config, 'database', 'collections', 'tasks')
    # Fetch only the fields needed to authorize and cancel.
    document = collection.find_one(
        filter={'task_id': id},
        projection={
            'task_id_tes': True,
            'tes_uri': True,
            'task.state': True,
            'user_id': True,
            'worker_id': True,
            '_id': False,
        }
    )
    # Raise error if task was not found
    if not document:
        logger.error("Task '{id}' not found.".format(id=id))
        raise TaskNotFound
    # Raise error trying to access workflow run that is not owned by user
    # Only if authorization enabled
    if 'user_id' in kwargs and document['user_id'] != kwargs['user_id']:
        logger.error(
            (
                "User '{user_id}' is not allowed to access task '{id}'."
            ).format(
                user_id=kwargs['user_id'],
                id=id,
            )
        )
        raise Forbidden
    # If task is in cancelable state... (non-cancelable tasks fall through
    # and return the empty dict without touching anything)
    if document['task']['state'] in States.CANCELABLE or \
            document['task']['state'] in States.UNDEFINED:
        # Get timeout duration
        timeout = get_conf(
            config,
            'api',
            'endpoint_params',
            'timeout_service_calls',
        )
        # Cancel local task
        current_app.control.revoke(
            document['worker_id'],
            terminate=True,
            signal='SIGKILL'
        )
        # Cancel remote task; best-effort — a failed remote cancel must not
        # fail the endpoint.
        if document['tes_uri'] is not None and document['task_id_tes'] is not None:
            cli = tes.HTTPClient(document['tes_uri'], timeout=timeout)
            try:
                cli.cancel_task(document['task_id_tes'])
            except HTTPError:
                # TODO: handle more robustly: only 400/Bad Request is okay;
                # TODO: other errors (e.g. 500) should be dealt with
                pass
        # Write log entry
        logger.info(
            (
                "Task '{id}' (worker ID '{worker_id}') was canceled."
            ).format(
                id=id,
                worker_id=document['worker_id'],
            )
        )
        # Update task state
        set_task_state(
            collection=collection,
            task_id=id,
            worker_id=document['worker_id'],
            state='CANCELED',
        )
    return {}
|
import numpy as np
import matplotlib.pyplot as plt
from dna_diags import *
def get_time_from_tempfile(ikx,iky):
    """Read the time axis for one (kx, ky) mode from its binary
    temp_kx##ky##.dat file and return it as a 1-D numpy array.
    (Python 2 code — uses print statements.)"""
    # Zero-pad the mode numbers to two digits for the file name.
    cikx=str(int(ikx))
    if len(cikx)==1:
        cikx='0'+cikx
    elif len(cikx) > 2:
        print cikx
        print "Error in get_time_from_temp"
        # `stop` is presumably undefined here, so referencing it aborts with
        # a NameError — a crude halt. TODO confirm dna_diags doesn't define it.
        stop
    ciky=str(int(iky))
    if len(ciky)==1:
        ciky='0'+ciky
    elif len(ciky) > 2:
        print "Error in get_time_from_gk"
        stop
    # Strip the surrounding quotes from the diagdir parameter.
    diagdir=par['diagdir'][1:-1]
    print diagdir
    file_name='temp_kx'+cikx+'ky'+ciky+'.dat'
    print "Reading file",diagdir+'/'+file_name
    file_exists=os.path.isfile(diagdir+'/'+file_name)
    if file_exists:
        pass
    else:
        print "File does not exist:",diagdir+'/'+file_name
        stop
    f=open(diagdir+'/'+file_name,'r')
    # Record layout: 3*nkz0 float64 values per time step (mem_tot bytes),
    # each record preceded by an 8-byte float64 timestamp.
    ntot=3*par['nkz0']
    mem_tot=ntot*8
    time=np.empty(0)
    continue_read=1
    i=0
    while (continue_read):
        # Seek to the i-th record's timestamp and try to read it.
        f.seek(i*(mem_tot+8))
        i=i+1
        input=np.fromfile(f,dtype='float64',count=1)
        # `input==0 or input` is truthy for any successfully-read value
        # (including 0.0); an empty read at EOF ends the loop.
        if input==0 or input:
            time = np.append(time,input)
        else:
            continue_read=0
    f.close()
    return time
def test_gknl(ikx,iky,start_time=-1.0,end_time=-1.0,calc_from_gout=False):
time=get_time_from_gkfile(ikx,iky,read_nl=True)
time0=get_time_from_gout()
if start_time==-1.0:
start_time=time[0]
if end_time==-1.0:
end_time=time[len(time)-1]
if start_time > end_time:
stop
istart=np.argmin(abs(time-start_time))
iend=np.argmin(abs(time-end_time))
ntime=iend-istart+1
istart0=np.argmin(abs(time0-start_time))
iend0=np.argmin(abs(time0-end_time))
ntime0=iend0-istart0+1
if ntime != ntime0:
print "Error in test_gknl!"
stop
kxgrid,kygrid,kzgrid,herm_grid=get_grids()
#Get from gk file
etot=np.zeros(ntime,dtype='float')
#Get from time derivative of above
detot=np.zeros(ntime,dtype='float')
#Get from gkfile
erhslin=np.zeros(ntime,dtype='float')
#Get from calculated nonlinearity from g_out
erhsnl0=np.zeros(ntime,dtype='float')
#Get from nl file
erhsnl=np.zeros(ntime,dtype='float')
#Get from temp file
terhsnl=np.zeros(ntime,dtype='float')
#Get from temp file
terhs=np.zeros(ntime,dtype='float')
#Get from temp file
tetot=np.zeros(ntime,dtype='float')
#Get from calculated nonlinearity from g_out
erhsnl0=np.zeros(ntime0,dtype='float')
#Get from gk file
etemp=np.zeros(par['nkz0'],dtype='float')
#Get from gk file
rhstemp=np.zeros(par['nkz0'],dtype='float')
#Get from nl file
rhsnltemp=np.zeros(par['nkz0'],dtype='float')
#Get from gout file
rhsnltemp0=np.zeros(par['nkz0'],dtype='float')
#Get from temp file
tetemp=np.zeros(par['nkz0'],dtype='float')
trhstemp=np.zeros(par['nkz0'],dtype='float')
trhsnltemp=np.zeros(par['nkz0'],dtype='float')
################
################
#etotk=np.zeros((par['nkz0'],ntime0),dtype='float')
#erhslink=np.zeros((par['nkz0'],ntime0),dtype='float')
#erhsnlk=np.zeros((par['nkz0'],ntime0),dtype='float')
################
################
for i in range(istart,iend+1):
it=i-istart
nl=read_time_step_gkfile(ikx,iky,i,read_nl=True)
nl=np.reshape(nl,(par['nkz0'],par['nv0']),order='F')
print it+1, " of ", ntime0
gt0=read_time_step_gkfile(ikx,iky,i)
temp=read_time_step_tempfile(ikx,iky,i)
e_from_temp=temp[:,0]
gt0=np.reshape(gt0,(par['nkz0'],par['nv0']),order='F')
#print np.info(gt0)
g0=read_time_step_g(i)
g0=np.reshape(g0,(par['nkx0'],par['nky0'],par['nkz0'],par['nv0']),order='F')
for k in range(par['nkz0']):
#for k in range(1,2):
#print k, " of ", par['nkz0']
if calc_from_gout:
rhsnltemp0[k]=get_energy_single_k(g0,g0,kxgrid[ikx],kygrid[iky],kzgrid[k],9)
rhstemp[k]=get_energy_single_k(gt0[k,:],gt0[k,:],kxgrid[ikx],kygrid[iky],kzgrid[k],0)
etemp[k]=get_energy_single_k(gt0[k,:],gt0[k,:],kxgrid[ikx],kygrid[iky],kzgrid[k],-1)
eop=energy_operator_single_k(gt0[k,:],kxgrid[ikx],kygrid[iky])
rhsnltemp[k]=np.real(np.sum(eop*nl[k,:]))
#temp file stuff
tetemp[k]=temp[k,0]
trhstemp[k]=temp[k,1]
trhsnltemp[k]=temp[k,2]
################
################
# erhslink[k,it]=rhstemp[k]
# erhsnlk[k,it]=rhsnltemp[k]
# etotk[k,it]=etemp[k]
################
################
erhslin[it]=np.sum(rhstemp)
erhsnl0[it]=np.sum(rhsnltemp0)
erhsnl[it]=np.sum(rhsnltemp)
etot[it]=np.sum(etemp)
#temp file stuff
terhs[it]=np.sum(trhstemp)
terhsnl[it]=np.sum(trhsnltemp)
tetot[it]=np.sum(tetemp)
for i in range(ntime-1):
detot[i]=(etot[i+1]-etot[i])/(time[i+1]-time[i])
plt.plot(time[istart:iend+1],etot,label='FE (gk)')
plt.plot(time[istart:iend+1],tetot,'x-',label='Etot temp')
plt.legend()
plt.show()
plt.plot(time[istart:iend+1],erhslin,label='RHS lin (gk)')
plt.plot(time[istart:iend+1],terhs-terhsnl,'x-',label='RHS lin temp')
plt.legend()
plt.show()
plt.plot(time[istart:iend+1],erhslin-(terhs-terhsnl),label='RHS lin - RHS lin (gk/temp)')
plt.legend()
plt.show()
plt.plot(time[istart:iend+1],erhsnl,label='RHS nl (nl)')
if calc_from_gout:
plt.plot(time[istart:iend+1],erhsnl0,'+-',label='RHS nl0 (gout)')
plt.plot(time[istart:iend+1],terhsnl,'x-',label='RHS nl temp')
plt.legend()
plt.show()
plt.plot(time[istart:iend+1],terhs,'x-',label='RHS temp')
plt.plot(time[istart:iend+1],terhsnl,'x-',label='RHSnl temp')
plt.legend()
plt.show()
plt.plot(time[istart:iend+1],erhslin+erhsnl,label='RHS tot (gk+nl)')
#plt.plot(time[istart:iend+1],erhslin+erhsnl0,label='RHS tot0 (gk_gout)')
plt.plot(time[istart:iend+1],terhs,'x',label='RHS temp')
plt.plot(time[istart:iend+1],detot,label='detot')
plt.legend()
plt.show()
################
################
#for k in range(par['nkz0']):
# for i in range(istart,iend):
# detot[i]=2.0*(etotk[k,i+1]-etotk[k,i])/(time[i+1]-time[i])
# plt.plot(time[istart:iend+1],erhslink[k,:]+erhsnlk[k,:],label='RHS tot')
# plt.plot(time[istart:iend+1],detot,label='detot')
# plt.legend()
# plt.show()
################
################
#for i in range(istart0,iend0+1):
# it=i-istart0
# print it+1, " of ", ntime0
# gt0=read_time_step_g(i)
# gt0=np.reshape(gt0,(par['nkx0'],par['nky0'],par['nkz0'],par['nv0']),order='F')
# for k in range(par['nkz0']):
# print k, " of ", par['nkz0']
# rhs[:,k]=get_rhs_nl_single_k(gt0,kxgrid[ikx],kygrid[iky],kzgrid[k])
# nl_tot0[k,it]=np.real(np.sum(rhs[:,k]))
# print nl_tot[k,it],nl_tot0[k,it]
def read_time_step_tempfile(ikx,iky,which_itime):
cikx=str(int(ikx))
if len(cikx)==1:
cikx='0'+cikx
elif len(cikx) > 2:
print "Error in get_time_from_gk"
stop
ciky=str(int(iky))
if len(ciky)==1:
ciky='0'+ciky
elif len(ciky) > 2:
print "Error in get_time_from_gk"
stop
#cikz=str(int(ikz))
#if len(cikz)==1:
# cikz='0'+cikz
#elif len(cikz) > 2:
# print "Error in get_time_from_gk"
# stop
diagdir=par['diagdir'][1:-1]
#print diagdir
file_name='temp_kx'+cikx+'ky'+ciky+'.dat'
#print "Reading file",diagdir+'/'+file_name
file_exists=os.path.isfile(diagdir+'/'+file_name)
if file_exists:
pass
else:
print "File does not exist:",diagdir+'/'+file_name
stop
f = open(diagdir+'/'+file_name,'rb')
ntot=3*par['nkz0']
mem_tot=ntot*8
gt0=np.empty((par['nkz0'],par['nv0']))
f.seek(8+which_itime*(8+mem_tot))
gt0=np.fromfile(f,dtype='float64',count=ntot)
gt0=np.reshape(gt0,(par['nkz0'],3),order='F')
#print sum(gt0)
f.close()
return gt0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 13:56:15 2020
@author: damla
"""
import socket
import threading
import time
# Canned client questions; "Kapan" asks the server to end the conversation.
clientWords = ["Selam", "Naber", "Hava", "Haber", "Kapan"]
# Server replies aligned index-for-index with clientWords.
serverAnswers = ["Selam", "Iyiyim, sagol", "Yagmurlu", "Korona", "Gule gule"]
print_lock=threading.Lock()  # never acquired in this file
class ConnectionThread(threading.Thread):
    """Worker thread that services one accepted client connection."""

    def __init__(self, threadID, c, caddr):
        super().__init__()
        self.threadID = threadID  # numeric id assigned by the accept loop
        self.c = c                # connected socket
        self.caddr = caddr        # client address tuple

    def run(self):
        # Delegate the per-connection protocol loop to the module-level handler.
        threaded(self.c)
def cevapla(index):
    """Return the canned server answer at *index*, echoing it to stdout."""
    reply = serverAnswers[index]
    print("Sunucunun cevabi:", reply)
    return reply
def sor(datastr):
    """Map a client question to the server's reply (Turkish chat protocol).

    "Kapan" produces the farewell; unknown words produce "Anlamadim";
    anything else is answered via cevapla().
    """
    print("Istemcinin sordugu soru:", datastr)
    if datastr == "Kapan":
        sonuc = "Gule gule"
        print(sonuc)
    elif datastr in clientWords:
        sonuc = cevapla(clientWords.index(datastr))
    else:
        sonuc = "Anlamadim"
        print(sonuc)
    return sonuc
def threaded(c):
    """Per-connection loop: answer questions until the farewell is sent.

    Bug fix: the original closed the socket after *every* reply, so the
    second recv() on the same connection raised OSError.  The socket is
    now closed only after the "Gule gule" farewell.
    """
    while True:
        data = c.recv(1024)
        datastr = data.decode().strip()
        soru = sor(datastr)
        c.send(soru.encode())
        if soru == "Gule gule":
            c.close()
            break
def main():
    """Accept clients forever, spawning one ConnectionThread per connection."""
    server = socket.socket()
    saddr = ("127.0.0.1", 8000)
    server.bind(saddr)
    server.listen(5)
    threads = []
    counter = 0
    # Timestamp taken once at startup, reused in every connect message
    # (matches the original behavior).
    suan = time.strftime("%H:%M:%S", time.localtime())
    while True:
        conn, addr = server.accept()
        print(addr, "adresine baglanildi. Saat su an", suan)
        worker = ConnectionThread(counter, conn, addr)
        threads.append(worker)
        worker.start()
        counter += 1
    server.close()  # unreachable, kept for symmetry with the original
if __name__ == "__main__":
    # Fix: stray ' |' concatenation artifact removed from the call line.
    main()
from django.shortcuts import render, redirect, get_object_or_404
from .models import Item, SellerAccount
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout, authenticate
from .forms import ItemForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# Create your views here.
def seller_sign_up(request):
    """Register a new seller account and log the user in.

    Bug fix: the already-authenticated branch called ``request('dashboard')``
    (a TypeError at runtime); it now redirects like the other views.
    """
    if request.user.is_authenticated:
        return redirect('dashboard')
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # Mark the account as staff so the seller dashboard is reachable.
            user = form.save(commit=False)
            user.is_staff = True
            user.save()
            SellerAccount.objects.create(user=user)
            # Authenticate with the raw password and start a session.
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('dashboard')
    return render(request, 'signup.html', {'form': UserCreationForm()})
def seller_login(request):
    """Authenticate a seller from POSTed credentials; show the form otherwise."""
    if request.user.is_authenticated:
        return redirect('dashboard')
    if request.method == 'POST':
        # Validate credentials straight from the raw POST data.
        user = authenticate(
            username=request.POST['username'],
            password=request.POST['password'],
        )
        if user is not None:
            login(request, user)
            return redirect('dashboard')
    return render(request, 'login.html', {'form': AuthenticationForm()})
def seller_dashboard(request):
    """Show the logged-in seller's items; bounce non-staff users to login."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return redirect('login')
    items = Item.objects.filter(seller__user=request.user)
    return render(request, 'sellerdashboard.html', {'items': items})
def seller_logout(request):
    """Log the current seller out and return to the login page.

    Cleanup: the original nested a second ``is_authenticated`` check (always
    true inside the outer branch) and an unreachable render; both removed.
    Observable behavior is unchanged: every path ends at redirect('login').
    """
    if request.user.is_authenticated:
        logout(request)
    return redirect('login')
def seller_add_product(request):
    """Create a new Item owned by the logged-in seller.

    Bug fix: ``messages.error`` was called without the request argument
    (TypeError on invalid input); the message text also had a typo.
    """
    if not (request.user.is_authenticated and request.user.is_staff):
        return redirect('login')
    form = ItemForm()
    if request.method == 'POST':
        form = ItemForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the seller before persisting the item.
            item = form.save(commit=False)
            item.seller = SellerAccount.objects.get(user=request.user)
            item.save()
            return redirect('dashboard')
        messages.error(request, 'Please enter valid information')
        return redirect('dashboard')
    return render(request, 'addproduct.html', {'form': form})
def seller_edit_product(request, pk):
    """Edit an existing Item owned by the logged-in seller.

    Bug fixes: the anonymous-user branch redirected to the ``login``
    *function object* instead of the 'login' URL name used by every other
    view; an invalid POST fell off the function returning None (a Django
    error) -- it now re-renders the form with errors.
    """
    if not (request.user.is_authenticated and request.user.is_staff):
        return redirect('login')
    item = get_object_or_404(Item, pk=pk, seller__user=request.user)
    form = ItemForm(instance=item)
    if request.method == 'POST':
        form = ItemForm(request.POST, request.FILES, instance=item)
        if form.is_valid():
            edit_item = form.save(commit=False)
            edit_item.seller = SellerAccount.objects.get(user=request.user)
            edit_item.save()
            return redirect('dashboard')
    return render(request, 'editproduct.html', {'form': form})
def seller_delete_product(request, pk):
    """Delete one of the seller's own items, then return to the dashboard."""
    if request.user.is_authenticated and request.user.is_staff:
        get_object_or_404(Item, pk=pk, seller__user=request.user).delete()
        return redirect('dashboard')
    return redirect('login')
def seller_view_product(request, pk):
    """Render the detail page for one of the seller's own items."""
    if not (request.user.is_authenticated and request.user.is_staff):
        return redirect('login')
    item = get_object_or_404(Item, pk=pk, seller__user=request.user)
    return render(request, 'viewproduct.html', {'item': item})
|
import pytest
from selenium import webdriver
@pytest.fixture
def browser():
    """Selenium Chrome fixture: yields a configured driver, quits on teardown.

    Bug fix: the original never yielded the driver (every test received
    ``None``) and called ``driver.quit()`` during setup, so the browser was
    gone before any test ran.
    """
    driver = webdriver.Chrome('/anaconda3/lib/python3.7/selenium/webdriver/chrome/chromedriver')
    driver.set_page_load_timeout(20)
    driver.implicitly_wait(20)
    driver.maximize_window()
    driver.get("http://www.google.com")
    yield driver
    driver.quit()
|
"""empty message
Revision ID: 2b54b23dec03
Revises: de8d1488ff93
Create Date: 2019-09-04 00:11:21.771179
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2b54b23dec03'       # this migration
down_revision = 'de8d1488ff93'  # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Drop the unique indexes on contacts.address and contacts.agenda_slug."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('address', table_name='contacts')
    op.drop_index('agenda_slug', table_name='contacts')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the unique indexes removed by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index('agenda_slug', 'contacts', ['agenda_slug'], unique=True)
    op.create_index('address', 'contacts', ['address'], unique=True)
    # ### end Alembic commands ###
|
import logging
import requests
import simplejson
import json
from flask import Flask, escape, request
from flask import render_template
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def hello(name=None):
    """Fetch a StackExchange user lookup and render the raw JSON into index.html.

    Bug fix: the API URL contained a stray space after '?', which corrupted
    the query string sent to the API.
    """
    uri = ("https://api.stackexchange.com/2.0/users"
           "?order=desc&sort=reputation&inname=fuchida&site=stackoverflow")
    try:
        uResponse = requests.get(uri)
    except requests.ConnectionError:
        return "Connection Error"
    Jresponse = uResponse.text
    # Parse to validate the payload; the extracted fields are illustrative
    # and intentionally unused (the template receives the raw JSON text).
    data = json.loads(Jresponse)
    displayName = data['items'][0]['display_name']  # <-- The display name
    reputation = data['items'][0]['reputation']  # <-- The reputation
    print(Jresponse)  # debug: raw API payload
    return render_template('index.html', name=Jresponse)
@app.errorhandler(500)
def server_error(e):
    """Log the stack trace and return a minimal HTML 500 page."""
    logging.exception('An error occurred during a request.')
    body = """
    An internal error occurred: <pre>{}</pre>
    See logs for full stacktrace.
    """.format(e)
    return body, 500
if __name__ == '__main__':
    # This is used when running locally. Gunicorn is used to run the
    # application on Google App Engine. See entrypoint in app.yaml.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
# Generated by Django 3.1.2 on 2020-10-28 14:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make several Cruise text fields optional (blank/null),
    add verbose names, and repoint Cruise.vessel at shared_models.vessel
    with DO_NOTHING delete behavior."""

    dependencies = [
        ('shared_models', '0018_auto_20201028_1105'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cruise',
            name='funding_agency_name',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='funding agency name'),
        ),
        migrations.AlterField(
            model_name='cruise',
            name='funding_project_id',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='funding project ID'),
        ),
        migrations.AlterField(
            model_name='cruise',
            name='funding_project_title',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='funding project title'),
        ),
        migrations.AlterField(
            model_name='cruise',
            name='references',
            field=models.TextField(blank=True, null=True, verbose_name='references'),
        ),
        migrations.AlterField(
            model_name='cruise',
            name='research_projects_programs',
            field=models.TextField(blank=True, null=True, verbose_name='research projects programs'),
        ),
        migrations.AlterField(
            model_name='cruise',
            name='vessel',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cruises', to='shared_models.vessel'),
        ),
    ]
|
import numpy as np
import pylab
feature_lengths = [3,4,5,6,7,8,10,12,14]
# Per-category scores of the n-gram kernel classifier, indexed by feature length.
ngram_crude_f1 = [0.94554,0.95722,0.95337,0.96534,0.95346,0.93217,0.93405,0.90452,0.88380]
ngram_crude_p = [0.94303,0.94453,0.93904,0.95330,0.94699,0.91840,0.92733,0.88707,0.89579]
ngram_crude_re = [0.94908,0.97126,0.96997,0.97836,0.96160,0.94933,0.94188,0.92614,0.87454]
ngram_corn_f1 = [0.90720,0.92952,0.92189,0.94116,0.92174,0.88954,0.89357,0.84504,0.81129]
ngram_corn_p = [0.91451,0.95182,0.94997,0.96359,0.93759,0.92040,0.90609,0.88293,0.80124]
ngram_corn_re = [0.90311,0.91073,0.90027,0.92141,0.91053,0.86724,0.88381,0.81833,0.82809]
ngram_earn_f1 = [0.90637,0.90647,0.89605,0.91625,0.91242,0.91221,0.88302,0.83103,0.77092]
ngram_earn_p = [0.95793,0.97972,0.96777,0.97339,0.97566,0.95035,0.92052,0.86535,0.77528]
ngram_earn_re = [0.86352,0.84504,0.83732,0.86944,0.85794,0.88181,0.85345,0.80728,0.77085]
ngram_ac_f1 = [0.91408,0.91943,0.91064,0.92599,0.92102,0.91844,0.88925,0.83732,0.77553]
ngram_ac_p = [0.87498,0.86527,0.85894,0.88414,0.87117,0.89093,0.86429,0.81733,0.77698]
ngram_ac_re = [0.95997,0.98215,0.97151,0.97535,0.97785,0.95206,0.92124,0.86680,0.77814]

# SSK results not yet collected; placeholders kept for later runs.
ssk_crude_f1 = []
ssk_crude_p = []
ssk_crude_re = []
ssk_corn_f1 = []
ssk_corn_p = []
ssk_corn_re = []
ssk_earn_f1 = []
ssk_earn_p = []
ssk_earn_re = []
ssk_ac_f1 = []
ssk_ac_p = []
ssk_ac_re = []


def _plot_metric(series, ylabel, title):
    """Draw one '-o' curve per (label, color, values) triple vs feature length."""
    pylab.rc('grid', linestyle='dashed', color='gray')
    for label, color, values in series:
        if color is None:
            pylab.plot(feature_lengths, values, '-o', label=label)
        else:
            pylab.plot(feature_lengths, values, '-o', color=color, label=label)
    pylab.legend(loc='lower left')
    pylab.xlabel('Length of features')
    pylab.ylabel(ylabel)
    pylab.title(title)
    pylab.xticks(feature_lengths)
    pylab.grid()
    pylab.tight_layout()
    pylab.show()


# Bug fixes: the original plotted undefined names (crude_f1, corn_f1, ...) --
# the data variables are prefixed ngram_ -- so the script crashed with a
# NameError.  'earn' and 'acquisition' also shared the color 'g'
# (acquisition is now 'm'), and the F1 figure's y-label said 'Precision'.
_plot_metric(
    [('crude', None, ngram_crude_f1), ('corn', 'r', ngram_corn_f1),
     ('earn', 'g', ngram_earn_f1), ('acquisition', 'm', ngram_ac_f1)],
    'F1 score', 'F1 score of different categories')
_plot_metric(
    [('crude', None, ngram_crude_p), ('corn', 'r', ngram_corn_p),
     ('earn', 'g', ngram_earn_p), ('acquisition', 'm', ngram_ac_p)],
    'Precision', 'Precision of different categories')
_plot_metric(
    [('crude', None, ngram_crude_re), ('corn', 'r', ngram_corn_re),
     ('earn', 'g', ngram_earn_re), ('acquisition', 'm', ngram_ac_re)],
    'Recall', 'Recall of different categories')
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns=[
    # Class-based views: list at the blog root, detail keyed by integer id.
    path('',views.Blog_list.as_view(),name="blog_list"),
    path('<int:id>',views.Blog_detail.as_view(),name="blog_detail"),
]
|
from flask import Flask, render_template, request
import json
from math import ceil
import imp
import os
from correlator import correlator
from fault_detector import fault_detector
# Create Flask app
app = Flask(__name__)

# Later we will want to pull this from a file, or autogenerate,
# but this prevents arbitrary file access by whitelisting channels
valid_channels = None  # cache; populated lazily by get_valid_channels()
batch_size = 720  # minimum number of values a /data/<channel> fetch returns
pre_frame_size = 180 # This is the amount before the 'time' to look for correlation
post_frame_size = 180 # This is the time after, thus a size of pre + post + 1
time_step = 17 # Setting this explicitly for now... (fixed sample spacing)
@app.route('/')
def display_main():
    """Serve the single-page UI."""
    return render_template('index.html')
@app.route('/get_faults')
def get_faults():
    """Detect faults in a window of every channel around the given 'time'.

    Query params:
        time: required; the window is [time - pre_frame_size,
              time + post_frame_size) in sample indices.

    Loads each whitelisted channel's JSON data file, slices the window, and
    hands the slices plus the alarm config to fault_detector.
    NOTE(review): fault_detector lives outside this file; the structure of
    the returned fault records is not verifiable here.
    """
    time = request.args.get('time', None)
    if time is None:
        return json_error('time must be specified.')
    time = int(float(time))
    channel_values = {}
    for channel_name in get_valid_channels():
        with open(relative_path('data/' + channel_name + '.json')) as channel_file:
            info = json.load(channel_file)
            time_index = calculate_index(info, time)
            # Calculate indices for slicing
            start_index = max(0, time_index - pre_frame_size)
            slice_index = time_index + post_frame_size  # Assumes all will have either enough data, or only the same amount
            channel_values[channel_name] = info['values'][start_index:slice_index]
    alarm_data = fault_detector.get_alarm_data(relative_path('config/alarms.json'))
    faults = fault_detector.get_faults(channel_values, time, time_step, alarm_data)
    return json.dumps(
        {
            'status': 'SUCCESS',
            'faults': faults
        }
    )
# This route will return json, containing all the values
# known for that channel within the time_range
@app.route('/data/<channel>', methods=['GET'])
def fetch_data(channel):
    """Return a slice of one channel's values as JSON.

    Query params:
        include_time: required; the returned slice must contain this time.
        start_time: optional; earliest time to include (defaults so that
            num_vals values precede include_time, else to include_time).
        exclude_start: optional; drop the value exactly at start_time.
        num_vals: optional; target number of values before include_time.

    The response is the channel's info dict with 'values' sliced,
    'time_start' shifted to match, and 'status' set to SUCCESS.
    """
    if not is_valid_channel(channel):
        return json_error('Invalid channel name.')
    start_time = request.args.get('start_time', None)
    include_time = request.args.get('include_time', None)
    exclude_start = bool(request.args.get('exclude_start', False))
    num_vals = request.args.get('num_vals', None)
    # Load channel specific info
    with open(relative_path('data/' + channel + '.json')) as channel_file:
        info = json.load(channel_file)
    if include_time is None:
        return json_error('include_time is required')
    else:
        include_time = int(float(include_time))
    if start_time is None:
        if num_vals is not None:
            # NOTE(review): dividing num_vals by time_span looks inverted
            # (values-to-time should multiply by the span) -- confirm.
            start_time = max(0, include_time - int(float(num_vals) / info['time_span']))
        else:
            start_time = include_time
    else:
        start_time = int(float(start_time))
    if include_time < start_time:
        return json_error('include_time cannot be earlier than start_time')
    # Determine start and include indices
    start_index = calculate_index(info, start_time)
    include_index = calculate_index(info, include_time)
    if exclude_start:
        start_index += 1
    # Now we want to make sure we're sending back at
    # least batch_size, if not more (when asked for)
    include_index += max(0, batch_size - (include_index - start_index + 1))
    # Now slice including start index and slice index, and return that array, along with info
    info['values'] = info['values'][start_index:include_index + 1]
    # Adjust the time_start, so the client can know the offset if it wants
    info['time_start'] += start_index * info['time_span']
    # Add the display name as the channel name if not set
    if 'display_name' not in info:
        info['display_name'] = channel
    info['status'] = 'SUCCESS'
    return json.dumps(info)
# For now, it is important to note that this relies on the assumption
# of regularity of data for the files (time_start, time_span, and
# the size of the values array being the same)
# Later this can/will be changed to take those into account, but for now
# this is the assumption, in order to get it working
@app.route('/correlation_vector/<channel>')
def correlation_vector(channel):
    """Correlate *channel* against every other channel over a window at 'time'.

    Query params:
        time: required; center of the correlation window.
        limit: optional; truncate the ranked result list (-1 = no limit).

    Returns JSON whose 'correlation_vector' lists {name, display_name,
    correlation} dicts sorted by |correlation| descending.
    """
    time = request.args.get('time', None)
    limit = int(float(request.args.get('limit', -1)))
    if time is None:
        return json_error('time must be specified')
    time = int(float(time))
    if not is_valid_channel(channel):
        return json_error('Invalid channel name.')
    # First read in info for the main channel
    with open(relative_path('data/' + channel + '.json')) as channel_file:
        info = json.load(channel_file)
    # Determine starting and ending indices of the slices
    time_index = calculate_index(info, time)
    if time_index >= len(info['values']):
        return json_error('Invalid time')
    # Clip the window to the available data on both sides.
    # NOTE(review): this slice excludes end_index, unlike fetch_data's
    # end-inclusive slice -- confirm the off-by-one is intended.
    end_index = min(time_index + post_frame_size, len(info['values']) - 1)
    start_index = max(time_index - pre_frame_size, 0)
    # Now read in the appropriate channel slices
    channel_values = {}
    channel_names = {}
    for channel_name in get_valid_channels():
        with open(relative_path('data/' + channel_name + '.json')) as channel_file:
            info = json.load(channel_file)  # rebinds 'info'; indices were computed above
            channel_values[channel_name] = info['values'][start_index:end_index]
            channel_names[channel_name] = info.get('display_name', channel_name)
    main_channel = channel_values.pop(channel)
    # Calculate the correlation vector
    corr_vector = correlator.get_correlation_vector(main_channel, channel_values.values())
    # Pair each remaining channel with its score; done server-side so only
    # the requested subset crosses the wire.
    # NOTE(review): an earlier comment claimed scores are normalized from
    # [-1, 1] to [0, 1]; no normalization happens in this function.
    corr_map = [
        {
            'name': name,  # Internal name
            'display_name': channel_names[name],  # Set display name
            'correlation': corr_vector[i]
        }
        for (i, name)
        in enumerate(channel_values.keys())
    ]  # Order should be preserved
    # Sort in place based on absolute value of correlation
    corr_map.sort(key=lambda k: abs(k['correlation']), reverse=True)
    if limit > -1:
        corr_map = corr_map[0:limit]
    response_info = {
        'status': 'SUCCESS',
        'correlation_vector': corr_map
    }
    return json.dumps(response_info)
# As noted above, this functionality relies on the assumption of data regularity
# This should be addressed later, probably, to enforce appropriate constraints
@app.route('/correlation_matrix')
def correlation_matrix():
    """Return the pairwise correlation matrix of all channels around 'time'.

    Query params:
        time: required; center of the correlation window.
    """
    time = request.args.get('time', None)
    if time is None:
        return json_error('time must be specified')
    time = int(float(time))
    # Window indices are computed from an arbitrary channel's metadata --
    # valid only under the regularity assumption noted above.
    with open(relative_path('data/' + next(iter(get_valid_channels())) + '.json')) as channel_file:
        general_info = json.load(channel_file)
    time_index = calculate_index(general_info, time)
    # NOTE(review): unlike correlation_vector, 'time' is not range-checked
    # here -- an out-of-range time silently clips to the data bounds.
    end_index = min(time_index + post_frame_size, len(general_info['values']) - 1)
    start_index = max(time_index - pre_frame_size, 0)
    # We continue by reading in all the data files
    channel_values = {}
    channel_names = {}
    for channel_name in get_valid_channels():
        with open(relative_path('data/' + channel_name + '.json')) as channel_file:
            info = json.load(channel_file)
            channel_values[channel_name] = info['values'][start_index:end_index]
            channel_names[channel_name] = info.get('display_name', channel_name)
    # Now we can get the correlation matrix
    corr_matrix = correlator.get_correlation_matrix(channel_values.values())
    name_list = [channel_names[name] for name in channel_values.keys()]  # Order should be preserved
    response_info = {
        'status': 'SUCCESS',
        'channel_names': name_list,
        'correlation_matrix': corr_matrix.tolist()
    }
    return json.dumps(response_info)
# Below are helper methods
def is_valid_channel(channel):
    """True iff *channel* names a whitelisted data file."""
    valid = get_valid_channels()
    return channel in valid
def calculate_index(info, time):
    """Convert an absolute *time* into a ceiling index into info['values']."""
    offset = time - info['time_start']
    return int(ceil(offset * 1.0 / info['time_span']))
def json_error(msg):
    """Serialize a minimal ERROR payload by hand (no json import needed)."""
    return '{"status":"ERROR","message":"%s"}' % (msg,)
def relative_path(path):
    """Resolve *path* relative to this module's directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, path)
def get_valid_channels():
    """Lazily build the channel whitelist from data/*.json filenames."""
    global valid_channels
    if valid_channels is None:
        split_names = (os.path.splitext(fn) for fn in os.listdir(relative_path('data/')))
        valid_channels = {name for name, ext in split_names if ext == '.json'}
    return valid_channels
if __name__ == '__main__':
    # Development entry point; production would use a WSGI server.
    app.run(debug=True)
|
import requests
import logging
import sys
import opentracing
from flask import Flask, jsonify, request
from jaeger_client import Config
from flask_opentracing import FlaskTracer
# Log to stdout so container/platform log collectors capture everything.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] - %(name)s %(threadName)s : %(message)s'
)
def initialize_tracer():
    """Build and return a Jaeger tracer for the 'app-frontend' service.

    Resets root-logger handlers first so jaeger_client's own logging
    configuration takes over.
    """
    logging.getLogger('').handlers = []
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    config = Config(
        config={
            'sampler': {'type': 'const', 'param': 1},  # sample every request
            'local_agent': {'reporting_host': 'jaeger'},  # agent hostname on the network
            'logging': True,
        },
        service_name='app-frontend'
    )
    return config.initialize_tracer()
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False  # preserve insertion order in jsonify output
flask_tracer = FlaskTracer(initialize_tracer, True, app)  # trace_all_requests=True
@app.before_request
def log_request_info():
    """Log headers and raw body of every inbound request (debug aid)."""
    app.logger.info('Headers: %s', request.headers)
    app.logger.info('Body: %s', request.get_data())
@app.route('/')
def call_backend():
    """Fetch the inventory list from app-backend, tracing the call and the parse."""
    parent_span = flask_tracer.get_span()  # span created by FlaskTracer for this request
    with opentracing.tracer.start_span('call_backend', child_of=parent_span) as span:
        span.set_tag('http.url', 'http://app-backend:5000/api/v1/list')
        span.set_tag('appname', 'app-frontend')
        span.log_kv({'event': 'test message', 'method': 'GET'})
        inventory = requests.get('http://app-backend:5000/api/v1/list')
        span.set_tag('http.status_code', inventory.status_code)
    with opentracing.tracer.start_span('parse_json', child_of=parent_span) as span:
        # NOTE(review): local name 'json' shadows the json module within this view.
        json = inventory.json()
        span.set_tag('get_inventory_from_api', len(json))
        span.log_kv({'event': 'got inventory count', 'value': len(json)})
    return jsonify(json)
if __name__ == '__main__':
    # Bind all interfaces for container use; threaded to serve concurrent requests.
    app.run(host="0.0.0.0", port=5000, debug=False, threaded=True)
|
from source.pages.urls import UrlConstants
import re
class UserDecklists(UrlConstants):
    """Scrapes a TappedOut user's deck list pages via a Selenium browser.

    NOTE(review): relies on URL fragments defined on UrlConstants
    (TAPPEDOUT_USER_PAGE, TAPPEDOUT_USER_DECKLIST, TAPPEDOUT_PAGE),
    which live outside this file.
    """

    # Full class attribute string of a deck header element.
    TAPPEDOUT_DECKNAME_CLASSES = "name deck-wide-header"
    TAPPEDOUT_PAGINATION = "pagination"

    def __init__(self, browser):
        # Selenium WebDriver used for all navigation and scraping.
        self.browser = browser

    def navigate_to_users_decklists(self, username):
        """Open the deck-list landing page for *username*."""
        self.browser.get(self.TAPPEDOUT_USER_PAGE + username + self.TAPPEDOUT_USER_DECKLIST)

    def navigate_to_users_decklist_page(self, username, page):
        """Open a specific page of *username*'s deck list."""
        self.browser.get(
            self.TAPPEDOUT_USER_PAGE +
            username +
            self.TAPPEDOUT_USER_DECKLIST +
            self.TAPPEDOUT_PAGE +
            str(page)
        )

    def get_users_decklists_names_for_user(self, username):
        """Return {deck name: deck url} collected across the user's pages.

        NOTE(review): the loop increments and navigates before re-checking
        the bound, so it appears to request one page past the last; confirm
        against TappedOut's pagination markup before changing.
        """
        self.navigate_to_users_decklist_page(username, 1)
        decknames = {}
        index = 0
        while index <= self.get_number_of_pages():
            # Locate by the last class token, then filter to the exact class string.
            elements = self.browser.find_elements_by_class_name(
                self.TAPPEDOUT_DECKNAME_CLASSES.split(" ")[-1]
            )
            for element in elements:
                if element.get_attribute("class") == self.TAPPEDOUT_DECKNAME_CLASSES:
                    decknames[self.get_decklist_name(element)] = self.get_decklist_url(element)
            index = index + 1
            self.navigate_to_users_decklist_page(username, index)
        return decknames

    def get_decklist_name(self, name_element):
        """Extract the deck's display name from a header element (None if absent)."""
        pattern = re.compile("^mtg decks - ")
        for element in name_element.find_elements_by_tag_name("a"):
            if pattern.match(element.get_attribute("title")):
                return element.text

    def get_decklist_url(self, decklist_url_element):
        """Extract the deck's href from a header element (None if absent)."""
        pattern = re.compile("^mtg decks - ")
        for element in decklist_url_element.find_elements_by_tag_name("a"):
            if pattern.match(element.get_attribute("title")):
                return element.get_attribute("href")

    def get_number_of_pages(self):
        """Count pagination <li> entries; defaults to 1 when no pagination exists."""
        try:
            pagination_element = self.browser.find_element_by_class_name(self.TAPPEDOUT_PAGINATION)
            return len(pagination_element.find_elements_by_tag_name("li"))
        except Exception:
            return 1
|
# -*- coding: utf-8 -*-
"""
March 2016
author: teezeit
This script optimizes parameters for xgboost using greeedy gridsearch + crossvalidation
"""
# Imports
# Version-marker prints left in by the authors.
print("Test version -Changes made on 12052021")
print("Test version -Changes made on 12052021-Changes_02")
import numpy as np
import xgboost as xgb
import tuning_xgboost
#. define gridsearch parameters (tuned greedily, one grid at a time)
# a) model complexity
#i) max_depth
#ii) min_child_weight
# b) randomness
#i) subsample
#ii) colsample_bytree
# c) stepsize
#i) eta = n_estimators
#ii) learning_rate
# d) weighting positive data
#i) scale_pos_weight
#Training data + label (random toy problem: 100 samples, 4 features, binary labels)
X_train = np.random.rand(100,4)
y_train = np.random.randint(2,size=100)
#Grids, searched greedily in list order
grid1 = {'n_estimators' : [50, 100, 200], 'learning_rate' : [0.1, 0.2, 0.3]}
grid2 = {'max_depth' : [2,3,4], 'min_child_weight' : [4,5]}
grid3 = {'colsample_bylevel' : [0.7, 0.8, 0.9], 'subsample' : [0.5, 0.6, 0.7]}
grid4 = {'scale_pos_weight' : [1,2,5]}
hyperlist_to_try = [grid1, grid2,grid3,grid4]
#Booster
booster = xgb.XGBClassifier()
###############################################################################
# Now run (Python 2 print statements -- this example predates Python 3)
print 'Run Simple Parameter Tuning\n________'
tuned_estimator = tuning_xgboost.grid_search_tuning(X_train,y_train,hyperlist_to_try,booster)
tuned_parameters = tuned_estimator.get_params()
for parameter in tuned_parameters:
    print parameter, '\t\t',tuned_parameters[parameter]
# Define additional parameters forwarded to GridSearchCV
print '\n\n Run Parameter Tuning with extra parameters given to GridSearchCV\n________'
gridsearch_params = {
    'cv' : 2,
    'scoring' : 'roc_auc',#'roc_auc', 'average_precision'
    'verbose' : 2
}
tuned_estimator = tuning_xgboost.grid_search_tuning(X_train,y_train,hyperlist_to_try,booster,gridsearch_params,verbose=False,plotting=True)
tuned_parameters = tuned_estimator.get_params()
for parameter in tuned_parameters:
    print parameter, '\t\t',tuned_parameters[parameter]
#end of file example_tuning_xgboost.py
|
# Read an even-length, space-separated list of numbers interpreted as
# alternating (x, y) coordinate pairs; precompute bounding-box statistics
# used by the CanvasDemo GUI below.
items = input("Enter the items separated by spaces (must be even number): ").strip().split()
i = 0
if (len(items))%2 == 1:
    input("You need to enter an even amount of numbers")
    exit(0)
elif (len(items))<4:
    input("Not enough data")
    exit(0)
else:
    j = int((len(items))/2)  # number of (x, y) points
    num1 = [0 for i in range(j)]  # x coordinates
    num2 = [0 for i in range(j)]  # y coordinates
    for i in range(j):
        # SECURITY NOTE(review): eval() on raw user input executes arbitrary
        # code; int()/float() would be the safe choice here.
        num1[i] = eval(items[2 * i])
        num2[i] = eval(items[2*i +1])
    length = sorted(num1)  # sorted x values
    width = sorted(num2)   # sorted y values
    maxX = length[j-1]
    maxY = width[j-1]
    maxLength = maxX - length[0]  # bounding-box width (x extent)
    maxWidth = maxY - width[0]    # bounding-box height (y extent)
    midLength = (maxLength/2)
    midWidth = (maxWidth/2)
    string1 = str(length[0])
    string2 = str(width[0])
from tkinter import * # Import tkinter
class CanvasDemo:
    """Tk window that draws the user-entered (x, y) points on a canvas.

    Reads the module-level globals (num1, num2, j, length, width, maxX,
    maxY, ...) computed by the input script above.
    """

    def __init__(self):
        window = Tk() # Create a window
        window.title("Canvas Demo") # Set title
        # Place self.canvas in the window
        self.canvas = Canvas(window, width = 400, height = 400,
            bg = "white")
        self.canvas.pack()
        # Place buttons in frame
        frame = Frame(window)
        frame.pack()
        btRectangle = Button(frame, text = "Rectangle",
            command = self.displayRect)
        btLine = Button(frame, text = "Line",
            command = self.displayLine)
        btString = Button(frame, text = "String",
            command = self.displayString)
        btClear = Button(frame, text = "Clear",
            command = self.clearCanvas)
        btRectangle.grid(row = 1, column = 1)
        btLine.grid(row = 1, column = 2)
        btString.grid(row = 1, column = 3)
        btClear.grid(row = 1, column = 4)
        window.mainloop() # Create an event loop

    # Display the points' bounding rectangle (offset by 1 px).
    def displayRect(self):
        self.canvas.create_rectangle(length[0]+1, width[0]+1, maxX+1, maxY+1, tags = "rect")

    # Connect consecutive points in red, closing the polygon back to the first.
    def displayLine(self):
        for i in range (j-1):
            self.canvas.create_line(num1[i]+1, num2[i]+1, num1[i+1]+1, num2[i+1]+1, fill = "red", tags = "line")
        self.canvas.create_line(num1[j-1]+1, num2[j-1]+1, num1[0]+1, num2[0]+1, fill = "red", tags = "line")

    # Print min/max/center/extent statistics near the bottom-right corner.
    def displayString(self):
        self.canvas.create_text(maxX+65, maxY+65, text = "minimum length x: " + str(length[0])
            + "\n minimum width y is: " + str(width[0])
            + "\n maximum x is: " + str(maxX)
            + "\n maximum y is: " + str(maxY)
            + "\n center point is: (" + str(midLength) + " , " + str(midWidth)+ ")"
            + "\n Length is: " + str(maxLength) + ", Width is: "+ str(maxWidth),
            font = "Times 14 bold underline", tags = "string")

    # Clear drawings
    def clearCanvas(self):
        self.canvas.delete("rect", "line", "string")

CanvasDemo() # Create GUI -- blocks in mainloop until the window is closed
|
def chooseFromList(message, choices, nullChoice=None, debug=True):
    """Prompt the user to pick an entry from *choices* by numeric index.

    Args:
        message: prompt text printed before the menu.
        choices: sequence of options, displayed with 0-based indices.
        nullChoice: if given, an empty reply returns this value directly.
        debug: when true, echo the accepted choice.

    Returns:
        The selected element of *choices*, or *nullChoice* on empty input.

    Cleanup vs. original: ``debug == True`` replaced by a truthiness test,
    the try body narrowed to just the int() conversion (input() cannot
    raise ValueError), and a stray trailing '|' artifact removed.
    """
    print(message)
    if nullChoice is not None:
        print("Enter:\t{}".format(nullChoice))
    for i, choice in enumerate(choices):
        print("{}:\t{}".format(i, choice))
    while True:
        raw = input("-> ")
        if nullChoice is not None and raw == "":
            return nullChoice
        try:
            selection = int(raw)
        except ValueError:
            print("Invalid choice. Please choose again")
            continue
        if 0 <= selection < len(choices):
            if debug:
                print("received valid choice {} corresponding to {}".format(selection, choices[selection]))
            return choices[selection]
        print("Invalid choice. Please choose again")
# Generated by Django 3.0.8 on 2020-07-08 09:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.8): introduces the Employee model and
    re-declares the fixed choice sets on Booking.service and
    Booking.time_slot (hourly slots keyed by their 24h start hour)."""
    dependencies = [
        ('booking', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                # Employee currently carries only its auto primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AlterField(
            model_name='booking',
            name='service',
            field=models.CharField(choices=[('Hair Cut Only', 'Hair Cut Only'), ('Hair Cut and Wash', 'Hair Cut and Wash'), ('Colouring', 'Colouring'), ('Perming', 'Perming'), ('Straightening', 'Straightening'), ('Treatment', 'Treatment')], max_length=50),
        ),
        migrations.AlterField(
            model_name='booking',
            name='time_slot',
            field=models.CharField(choices=[('8', '0800 to 0900'), ('9', '0900 to 1000'), ('10', '1000 to 1100'), ('11', '1100 to 1200'), ('12', '1200 to 1300'), ('13', '1300 to 1400'), ('14', '1400 to 1500'), ('15', '1500 to 1600'), ('16', '1600 to 1700'), ('17', '1700 to 1800'), ('18', '1800 to 1900'), ('19', '1900 to 2000')], max_length=2),
        ),
    ]
|
# https://www.reddit.com/r/dailyprogrammer/comments/67q3s6/20170426_challenge_312_intermediate_next_largest/
from itertools import permutations as ps
def get_next(num):
    """Return the smallest integer greater than *num* that uses exactly the
    same digits (the "next lexicographic permutation").

    The original built and sorted all n! digit permutations; this is the
    standard O(n) next-permutation algorithm with identical results.

    Raises IndexError when *num* is already the largest arrangement of its
    digits, matching the original's `perms[index + 1]` failure mode.
    """
    digits = list(str(num))
    # Rightmost position whose digit is smaller than its successor (the pivot).
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    if i < 0:
        # Digits are in non-increasing order: no larger permutation exists.
        raise IndexError('no larger permutation of {} exists'.format(num))
    # Rightmost digit greater than the pivot; swap them.
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    # Reverse the suffix so the tail is the smallest possible.
    digits[i + 1:] = reversed(digits[i + 1:])
    return int(''.join(digits))
def main():
    """Print `input => next-larger-permutation` for a few sample numbers."""
    samples = (1234, 1243, 234765, 19000)
    for value in samples:
        print(f'{value} => {get_next(value)}')
if __name__ == '__main__':
    main()
|
'''Jason A Smith'''
# Build Champernowne's digit string (123456789101112...) until it holds at
# least one million digits, then print the product of the digits at positions
# 1, 10, 100, ..., 1_000_000 (Project Euler 40).
pieces = []
total = 0
count = 1
while total < 1000000:
    piece = str(count)
    pieces.append(piece)
    total += len(piece)
    count += 1
fract = ''.join(pieces)
product = 1
for pos in (1, 10, 100, 1000, 10000, 100000, 1000000):
    product *= int(fract[pos - 1])
print(product)
|
# Generated by Django 2.2.dev20181016201044 on 2018-10-29 04:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2 dev): adds User.email, using the
    'no-email' placeholder as the default for pre-existing rows."""
    dependencies = [
        ('api', '0003_auto_20181027_1845'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='email',
            field=models.CharField(default='no-email', max_length=255),
        ),
    ]
|
if __name__ == '__main__':
    # Write the list 1..19 (as its repr string) to the file, then read it back.
    # 'w+' truncates and allows both writing and reading.
    with open(r"F:\u1.txt", "w+") as f:
        mk = str([i for i in range(1, 20)])
        f.writelines(mk)
        # BUG FIX: after writing, the file position sits at EOF, so readline()
        # always returned ''. Rewind to the start before reading back.
        f.seek(0)
        ml = f.readline()
        print(mk)
        print(ml)
from hashlib import sha256
from time import time
from urllib.parse import urlparse
import requests
from flask import Flask, jsonify, request
import random
#Declaration of the Blockchain
class Blockchain(object):
    """A minimal voting blockchain.

    Blocks carry a vote_info payload; peer nodes are tracked in
    self.neighbours and merged from via update_chain(). Several review
    notes below flag apparent defects in the proof-of-work path.
    """
    def __init__(self):
        self.chain= []
        # Pending vote payload for the next block (consumed by create_block).
        self.vote_info = {}
        self.neighbours = [] # NOTE(review): the original comment claimed a set was used, but this is a list, so the same address CAN register twice -- confirm whether a set was intended
        self.create_genesis(previous_hash = 1,proof = 1)
    def validate_chain(self, chain):
        """Return True when every block's previous_hash and proof link correctly to its predecessor."""
        last_block = chain[0]
        index = 1
        while(index < len(chain)):
            block = chain[index]
            # Hash linkage: each block must reference the hash of the block before it.
            if block['previous_hash'] != self.hash(last_block):
                return False
            # Proof linkage: consecutive proofs must pass validate_proof.
            if self.validate_proof(last_block['proof'], block['proof']) == False:
                return False
            last_block = block
            index += 1
        return True
    def update_chain(self):
        """Fetch every neighbour's chain and merge blocks we have not seen.

        Returns True when at least one neighbour contributed blocks.
        NOTE(review): merged blocks are appended without re-validating the
        resulting chain, and original_chain/temporary_chain are unused.
        """
        neighbours = self.neighbours
        new_chain = []
        current_length = len(self.chain)
        for neighbour in neighbours:
            response = requests.get('http://'+neighbour+'/display_chain') #request for chain from the neighboring blocks
            if response.status_code == 200:
                neighbour_length = response.json()['length'] #find length of neighbor chain
                neighbour_chain = response.json()['chain'] # find the neighbor chain
                if(neighbour_length != current_length):
                    current_length+=neighbour_length
                    new_chain.append(neighbour_chain) ####NEED TO CONVERT JSON TO DICT!!
        if len(new_chain)>0:
            original_chain=self.chain
            temporary_chain=[]
            for k in new_chain:
                for t in k:
                    if t not in self.chain: # find all the blocks that aren't in the present chain and add them
                        self.chain.append(t)
            return True
        return False
    def add_neighbour(self,address):
        """Register a peer node by the netloc (host:port) of *address*."""
        self.neighbours.append(urlparse(address).netloc) #Adds the network location of the input address into the neighbours list
    # checks if the complete binary tree represented by chararray[0:14] is a binary search tree.
    def check_for_bst(self, chararray):
        """NOTE(review): despite the name and the comment above, this never
        reads *chararray*. It brute-forces sha256(str(i)) until the first
        `testlen` hex digits satisfy a heap-like ordering, prints timings,
        and implicitly returns None. Because it returns None,
        validate_proof() below is always falsy and proof_of_work() appears
        unable to terminate -- confirm the intended behaviour.
        """
        import time
        import hashlib
        i = 1
        j = 0
        testlen = 10
        while(1):
            starttime = time.time()
            while(1):
                code = hashlib.sha256(str(i).encode()).hexdigest()
                flag = 0
                for j in range(0, testlen):
                    print(code)
                    # Children of node j in the implicit complete-tree layout.
                    right = 2*j + 2;
                    left = 2*j + 1;
                    if (left >= testlen or right >= testlen):
                        break
                    if(ord(code[left]) > ord(code[j]) or ord(code[right]) < ord(code[j])):
                        flag = 1
                        break
                if(flag == 0):
                    print(code)
                    break
                i += 1
            finishtime = time.time()
            print(finishtime - starttime)
            break
    # need to make a harder proof of work
    def validate_proof(self, previous_proof,proof):
        #Function that validates the proof and checks if the current proof is correct
        # NOTE(review): check_for_bst() returns None, so this is always falsy.
        predicted = str(previous_proof*proof).encode()
        predicted_hash = sha256(predicted).hexdigest()
        print(predicted_hash)
        return self.check_for_bst(predicted_hash[0:2])
    def proof_of_work(self,previous_proof):
        #Finds a proof such that when hashed with the previous proof gives a hash who's first two and last two characters are equal
        # NOTE(review): validate_proof never returns True (see above), so this
        # loop appears non-terminating -- confirm before relying on it.
        proof=0
        while(True):
            if(self.validate_proof(previous_proof,proof)==True):
                break
            proof+=1
        return proof
    def create_genesis(self,previous_hash,proof):
        #Creates a genesis block (index 0, placeholder vote) and adds it to the chain
        block={
            'index': 0,
            'time': time(),
            'vote_info': {'voter':'Genesis','UIDAI':'000000000000','vote':'Genesis'},
            'proof': proof,
            'previous_hash': previous_hash
        }
        self.chain.append(block)
    def create_block(self,previous_hash,proof):
        #Creates a new block from the pending votes and adds it to the chain
        block={
            'index':len(self.chain)+1,
            'time':time(),
            'vote_info':self.vote_info,
            'proof':proof,
            'previous_hash':previous_hash
        }
        # The pending vote payload is consumed by the new block.
        self.vote_info={}
        self.chain.append(block)
        return block
    def create_vote(self,voter_name,aadhar_number,voted_for):
        #Adds a new vote to the pending vote payload; returns the index of the block it will land in
        self.vote_info.update({
            'voter':voter_name,
            'UIDAI':aadhar_number,
            'vote': voted_for #need to add logic to check if the candidate voted for has been registered previously on the server
        })
        return self.last_block["index"]+1
    @staticmethod
    def hash(block):
        #Hashes a block using SHA-256
        sorted_block=dict(sorted(block.items())) #Sort the block to maintain consistency
        sorted_block=str(sorted_block).encode() #Encode block to be able to hash
        return sha256(sorted_block).hexdigest()
    @property
    def last_block(self):
        #Returns the last block of the current chain
        return self.chain[-1]
|
#import tensorflow as tf
#from tensorflow import keras
#from tensorflow.keras import layers
import numpy as np
import tensorflow as tf
import argparse
import os
import numpy as np
import json
import smdebug.tensorflow as smd
def model():
    """Build and train a small dense network on random data, then evaluate it.

    The smdebug hook wraps the optimizer and is attached as a Keras callback
    so debug tensors are captured during training. Returns the trained
    tf.keras.Sequential model.
    """
    hook = smd.KerasHook.create_from_json_file()
    optimizer = tf.keras.optimizers.Adam(0.01)
    opt = hook.wrap_optimizer(optimizer)
    net = tf.keras.Sequential()
    # Densely-connected layer with 32 units.
    # BUG FIX: the original line was `layers.Dense(32, input_shape=(32,)activation='relu')`
    # -- a SyntaxError (missing comma) that also referenced `layers`, which is
    # never imported. Use the in-scope tf.keras.layers namespace instead.
    net.add(tf.keras.layers.Dense(32, input_shape=(32,), activation='relu'))
    # Add another:
    net.add(tf.keras.layers.Dense(64, activation='relu'))
    # Add an output layer with 10 output units:
    net.add(tf.keras.layers.Dense(10))
    net.compile(opt,
                loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
    # Synthetic training and validation data (random, just for demonstration).
    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))
    val_data = np.random.random((100, 32))
    val_labels = np.random.random((100, 10))
    net.fit(data, labels, epochs=10, batch_size=32,
            validation_data=(val_data, val_labels), callbacks=[hook])
    # Evaluate through a tf.data.Dataset pipeline.
    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(32)
    net.evaluate(dataset)
    #model.save(os.path.join(args.sm_model_dir, '000000001'), 'my_model.h5')
    return net
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories
# model_dir is always passed in from SageMaker. By default this is a S3 path under the default bucket.
parser.add_argument('--model_dir', type=str)
parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))
parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))
return parser.parse_known_args()
if __name__ == "__main__":
    # Parse SageMaker CLI/env arguments, then run the training entry point.
    args, unknown = _parse_args()
    model()
|
#180119 MKT
#build gffutils database
import gffutils
# Build the gffutils database; both sources skip transcript/gene inference,
# and FlyBase additionally needs an explicit id_spec so mRNA/ncRNA records
# are keyed by transcript_id.
shared_kwargs = dict(disable_infer_transcripts=True, disable_infer_genes=True)
gtf_format = snakemake.params['gtf_format']
if gtf_format == 'ensembl':
    db = gffutils.create_db(snakemake.input['gtf_file'], snakemake.output['db_file'], **shared_kwargs)
elif gtf_format == 'flybase':
    id_spec = {'gene': 'gene_id', 'mRNA': 'transcript_id', 'ncRNA': 'transcript_id'}
    db = gffutils.create_db(snakemake.input['gtf_file'], snakemake.output['db_file'], id_spec=id_spec, **shared_kwargs)
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import time
import sys
from htmlParseAereoMultiData import htmlParse
from selenium.webdriver.support.ui import Select
import os
from pathlib import Path
"""
Script for a single named weather station:
given a parameter, it finds all of its sub-parameters and writes them to a CSV file.
"""
url = "http://clima.meteoam.it/RichiestaDatiGenerica.php"
# geckodriver is expected two directories above this script's location.
p = Path(os.path.realpath(__file__))
parent = p.parent.parent.parent
driver_path = os.path.join(parent,"geckodriver")
optionsFire = Options()
optionsFire.add_argument('--headless') # run Firefox without a visible window
# NOTE(review): this rebinding shadows the imported selenium `webdriver` module name.
webdriver = webdriver.Firefox(executable_path=driver_path, options=optionsFire)
def aeronatutica(parametro, city, gi, mi, ai, gf, mf, af):
    """Scrape clima.meteoam.it for *parametro* at station *city* over the
    date range gi/mi/ai - gf/mf/af (day/month/year strings), then parse the
    result page into <city>.csv via htmlParse.
    """
    with webdriver as driver:
        wait = WebDriverWait(driver, 10)
        # retrive url in headless browser
        driver.get(url)
        # open the "Parameters: select the category" menu
        categoria = driver.find_element_by_xpath('/html/body/div[2]/div/section/div/section[2]/div[2]/form/fieldset/div/button')
        categoria.click()
        # click the chosen parameter
        precip = driver.find_element_by_link_text(parametro)
        precip.click()
        # open the "Parameter details" menu
        param_xpath = '/html/body/div[2]/div/section/div/section[2]/div[2]/form/fieldset[2]/div/button/span[1]'
        wait.until(EC.element_to_be_clickable((By.XPATH, param_xpath)))
        param = driver.find_element_by_xpath(param_xpath)
        param.click()
        # collect the text of every sub-parameter option
        select = Select(driver.find_element_by_id('parametri_input_id'))
        all_option = select.options
        dettagli_text = []
        for e in all_option:
            dettagli_text.append(e.text)
        # click every sub-parameter of the chosen parameter
        for i in range(len(dettagli_text)):
            dettaglio = driver.find_element_by_link_text(dettagli_text[i])
            dettaglio.click()
        param.click() # close the "Parameter details" menu
        # open the "Station: select one or more stations" menu
        stazione_xpath = '/html/body/div[2]/div/section/div/section[2]/div[2]/form/div[1]/fieldset/div/button/span[1]'
        wait.until(EC.element_to_be_clickable((By.XPATH, stazione_xpath)))
        stazione = driver.find_element_by_xpath(stazione_xpath)
        stazione.click()
        # select one station
        choice = driver.find_element_by_link_text(city)
        choice.click()
        # close the station menu
        stazione.click()
        # scroll the page so the submit button becomes clickable
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        button_xpath = '//*[@id="visualizzaMessaggi_id"]'
        wait.until(EC.element_to_be_clickable((By.XPATH, button_xpath)))
        # fill in the date range of interest ("dd/mm/yyyy - dd/mm/yyyy")
        periodo_xpath = '/html/body/div[2]/div/section/div/section[2]/div[2]/form/div[2]/input[9]'
        periodo = driver.find_element_by_xpath(periodo_xpath)
        periodo.click()
        periodo.clear()
        periodo_string = '' + gi + '/' + mi + '/' + ai + ' - ' + gf + '/' + mf + '/' + af
        periodo.send_keys(periodo_string + Keys.ENTER)
        #submit all
        wait.until(EC.element_to_be_clickable((By.XPATH, button_xpath)))
        button = driver.find_element_by_xpath(button_xpath)
        button.click()
        #wait
        print('finish')
        wait.until(EC.element_to_be_clickable((By.XPATH, "/html/body/div[2]/div/section/div/section[2]/div[2]/form/div[2]/input")))
        #result
        html = driver.page_source
        #close the browser
        driver.quit()
        #parsing
        filename = city+'.csv'
        htmlParse(html, filename)
def getParametri():
    """Print every available category parameter (Temperature, Cloudiness, ...)."""
    with webdriver as driver:
        #wait = WebDriverWait(driver, 10)
        # retrive url in headless browser
        driver.get(url)
        # list every category parameter exposed by the site's dropdown
        select = Select(driver.find_element_by_id('categoria'))
        all_option = select.options
        for e in all_option:
            print(e.text)
        #close the browser
        driver.quit()
        return
if __name__ == "__main__":
    if (len (sys.argv) == 9):
        # Parameter, city, and start/end day-month-year supplied on the command line.
        aeronatutica(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8])
    elif(len(sys.argv) == 1):
        # No command-line arguments: print usage hints (user-facing text is Italian) and run a demo query.
        print('scrivi p per avere elenco parametri disponibili')
        print('specifica Parametro Città giorno mese anno giorno mese anno')
        aeronatutica('Precipitazioni','Pescara', '1','1','2008','1','6','2020')
    elif(len(sys.argv) == 2 and sys.argv[1] == 'p'):
        getParametri()
|
# Generated by Django 3.2 on 2021-04-19 23:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2): replaces the old plural Datapoints model
    with a singular Datapoint holding a ForeignKey to Location, inverting the
    previous Location -> datapoints relation."""
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Datapoint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One decimal place, up to 3 digits total (e.g. 99.9).
                ('temperature', models.DecimalField(decimal_places=1, max_digits=3)),
                ('humidity', models.DecimalField(decimal_places=1, max_digits=3)),
            ],
        ),
        migrations.RemoveField(
            model_name='location',
            name='datapoints',
        ),
        migrations.DeleteModel(
            name='Datapoints',
        ),
        migrations.AddField(
            model_name='datapoint',
            name='location',
            # Deleting a Location cascades to its datapoints.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.location'),
        ),
    ]
|
import json
from tqdm import tqdm
from typing import List
from collections import namedtuple
import torch
from torch.nn.utils.rnn import pad_sequence
def load_vocab(path) -> namedtuple:
    """Load a vocab saved as JSON and return it as a `vocab` namedtuple.

    JSON object keys are always strings, so the idx2token / idx2chartoken
    reverse maps are rebuilt from their forward maps to restore int keys.

    :param path: filesystem path of the JSON vocab file
    :return: a namedtuple whose fields are the (sorted) dict keys
    """
    # BUG FIX: the original used json.load(open(path)), leaking the file
    # handle; a context manager guarantees it is closed.
    with open(path) as f:
        return_dict = json.load(f)
    # for idx2token, idx2chartoken, have to change keys from strings to ints
    # https://stackoverflow.com/questions/45068797/how-to-convert-string-int-json-into-real-int-with-json-loads
    if "token2idx" in return_dict:
        return_dict.update({"idx2token": {v: k for k, v in return_dict["token2idx"].items()}})
    if "chartoken2idx" in return_dict:
        return_dict.update({"idx2chartoken": {v: k for k, v in return_dict["chartoken2idx"].items()}})
    # vocab: dict to named tuple so fields are attribute-accessible
    vocab = namedtuple('vocab', sorted(return_dict))
    return vocab(**return_dict)
def create_vocab(data: List[str],
                 keep_simple=False,
                 min_max_freq: tuple = (1, float("inf")),
                 topk=None,
                 intersect: List = None,
                 load_char_tokens: bool = False,
                 is_label: bool = False,
                 labels_data_split_at_whitespace: bool = False) -> namedtuple:
    """
    :param data: list of sentences from which tokens are obtained as whitespace separated
    :param keep_simple: retain tokens that have ascii and do not have digits (for preprocessing)
    :param keep_simple: retain tokens that are ascii-only and digit-free
    :param min_max_freq: retain tokens whose count satisfies >min_freq and <max_freq
    :param topk: retain only topk tokens (specify either topk or min_max_freq)
    :param intersect: retain tokens that are at intersection with a custom token list
    :param load_char_tokens: if true, character tokens will also be loaded
    :param is_label: when the inputs are a list of labels
    :return: a vocab namedtuple
    """
    # BUG FIX: the original guard was `topk is None and (...)`, which rejected
    # callers supplying ONLY min_max_freq while silently allowing the
    # forbidden combination of topk AND min_max_freq. The error message shows
    # the intent: forbid providing both at once.
    if topk is not None and (min_max_freq[0] > 1 or min_max_freq[1] < float("inf")):
        raise Exception("both min_max_freq and topk should not be provided at once !")
    # if is_label
    if is_label:
        def split_(txt: str):
            # Labels are either whitespace-split or treated as one token each.
            if labels_data_split_at_whitespace:
                return txt.split(" ")
            else:
                return [txt, ]
        # get all tokens and their frequencies
        token_freq, token2idx, idx2token = {}, {}, {}
        for example in tqdm(data):
            for token in split_(example):
                if token not in token_freq:
                    token_freq[token] = 0
                token_freq[token] += 1
        print(f"Total tokens found: {len(token_freq)}")
        print(f"token_freq:\n{token_freq}\n")
        # create token2idx and idx2token (insertion order, i.e. first seen)
        for token in token_freq:
            idx = len(token2idx)
            idx2token[idx] = token
            token2idx[token] = idx
        token_freq = list(sorted(token_freq.items(), key=lambda item: item[1], reverse=True))
        # Label vocabularies carry no special tokens.
        return_dict = {"token2idx": token2idx,
                       "idx2token": idx2token,
                       "token_freq": token_freq,
                       "n_tokens": len(token2idx),
                       "n_all_tokens": len(token2idx)}
    else:
        # get all tokens and their frequencies
        token_freq, token2idx, idx2token = {}, {}, {}
        for example in tqdm(data):
            for token in example.split(" "):
                if token not in token_freq:
                    token_freq[token] = 0
                token_freq[token] += 1
        print(f"Total tokens found: {len(token_freq)}")
        # retain only simple tokens (ascii-only, digit-free)
        if keep_simple:
            isascii = lambda s: len(s) == len(s.encode())
            hasdigits = lambda s: len([x for x in list(s) if x.isdigit()]) > 0
            tf = [(t, f) for t, f in [*token_freq.items()] if (isascii(t) and not hasdigits(t))]
            token_freq = {t: f for (t, f) in tf}
            print(f"After removing non-ascii and tokens with digits, total tokens retained: {len(token_freq)}")
        # retain only tokens with specified min and max range
        if min_max_freq[0] > 1 or min_max_freq[1] < float("inf"):
            sorted_ = sorted(token_freq.items(), key=lambda item: item[1], reverse=True)
            tf = [(i[0], i[1]) for i in sorted_ if (min_max_freq[0] <= i[1] <= min_max_freq[1])]
            token_freq = {t: f for (t, f) in tf}
            print(f"After min_max_freq selection, total tokens retained: {len(token_freq)}")
        # retain only topk tokens
        if topk is not None:
            sorted_ = sorted(token_freq.items(), key=lambda item: item[1], reverse=True)
            token_freq = {t: f for (t, f) in list(sorted_)[:topk]}
            print(f"After topk selection, total tokens retained: {len(token_freq)}")
        # retain only intersection with a custom token list (case-insensitive)
        if intersect is not None and len(intersect) > 0:
            tf = [(t, f) for t, f in [*token_freq.items()] if (t in intersect or t.lower() in intersect)]
            token_freq = {t: f for (t, f) in tf}
            print(f"After intersection, total tokens retained: {len(token_freq)}")
        # create token2idx and idx2token
        for token in token_freq:
            idx = len(token2idx)
            idx2token[idx] = token
            token2idx[token] = idx
        # add <<PAD>> special token
        ntokens = len(token2idx)
        pad_token = "<<PAD>>"
        token_freq.update({pad_token: -1})
        token2idx.update({pad_token: ntokens})
        idx2token.update({ntokens: pad_token})
        # add <<UNK>> special token
        ntokens = len(token2idx)
        unk_token = "<<UNK>>"
        token_freq.update({unk_token: -1})
        token2idx.update({unk_token: ntokens})
        idx2token.update({ntokens: unk_token})
        # add <<EOS>> special token
        ntokens = len(token2idx)
        eos_token = "<<EOS>>"
        token_freq.update({eos_token: -1})
        token2idx.update({eos_token: ntokens})
        idx2token.update({ntokens: eos_token})
        # add <<SOS>> special token
        ntokens = len(token2idx)
        sos_token = "<<SOS>>"
        token_freq.update({sos_token: -1})
        token2idx.update({sos_token: ntokens})
        idx2token.update({ntokens: sos_token})
        # return dict
        token_freq = list(sorted(token_freq.items(), key=lambda item: item[1], reverse=True))
        return_dict = {"token2idx": token2idx,
                       "idx2token": idx2token,
                       "token_freq": token_freq,
                       "pad_token": pad_token,
                       "pad_token_idx": token2idx[pad_token],
                       "unk_token": unk_token,
                       "unk_token_idx": token2idx[unk_token],
                       "eos_token": eos_token,
                       "eos_token_idx": token2idx[eos_token],
                       "sos_token": sos_token,
                       "sos_token_idx": token2idx[sos_token],
                       "n_tokens": len(token2idx) - 4,
                       "n_special_tokens": 4,
                       "n_all_tokens": len(token2idx)
                       }
    # load_char_tokens
    if load_char_tokens:
        print("loading character tokens as well")
        char_return_dict = create_char_vocab(use_default=True, data=data)
        return_dict.update(char_return_dict)
    # vocab: dict to named tuple
    vocab = namedtuple('vocab', sorted(return_dict))
    return vocab(**return_dict)
def create_char_vocab(use_default: bool, data=None) -> dict:
    """
    Build a character-level vocabulary dictionary.

    :param use_default: when True, seed the vocab with a fixed ASCII set;
        when False, derive characters (in order of first appearance) from *data*
    :param data: iterable of strings; required when use_default is False
    :return: dict with char<->idx maps, the four special char tokens and
        their indices, and token counts
    """
    if not use_default and data is None:
        raise Exception("data is None")

    chartoken2idx, idx2chartoken = {}, {}

    def register(ch):
        # Assign the next free index to ch, skipping duplicates.
        if ch not in chartoken2idx:
            new_idx = len(chartoken2idx)
            chartoken2idx[ch] = new_idx
            idx2chartoken[new_idx] = ch

    # Special tokens occupy indices 0..3 in this fixed order.
    char_unk_token, char_pad_token, char_start_token, char_end_token = \
        "<<CHAR_UNK>>", "<<CHAR_PAD>>", "<<CHAR_START>>", "<<CHAR_END>>"
    for special in (char_unk_token, char_pad_token, char_start_token, char_end_token):
        register(special)

    if use_default:
        # Fixed ASCII alphabet: letters, digits, and common punctuation.
        for ch in """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""":
            register(ch)
    else:
        # Walk the corpus line by line so first-seen order is preserved.
        for line in tqdm(data):
            for ch in line:
                register(ch)

    print(f"number of unique chars found: {len(chartoken2idx)}")
    return {"chartoken2idx": chartoken2idx,
            "idx2chartoken": idx2chartoken,
            "char_unk_token": char_unk_token,
            "char_pad_token": char_pad_token,
            "char_start_token": char_start_token,
            "char_end_token": char_end_token,
            "char_unk_token_idx": chartoken2idx[char_unk_token],
            "char_pad_token_idx": chartoken2idx[char_pad_token],
            "char_start_token_idx": chartoken2idx[char_start_token],
            "char_end_token_idx": chartoken2idx[char_end_token],
            "n_tokens": len(chartoken2idx) - 4,
            "n_special_tokens": 4}
def char_tokenize(batch_sentences, vocab):
    """
    Convert each sentence into per-word character-id tensors.

    :returns (List[padded LongTensor of shape (n_words, max_word_len)],
              List[LongTensor of per-word char counts],
              LongTensor of per-sentence word counts)
    """
    char2idx = vocab.chartoken2idx
    unk_idx = char2idx[vocab.char_unk_token]
    pad_idx = char2idx[vocab.char_pad_token]
    start_idx = char2idx[vocab.char_start_token]
    end_idx = char2idx[vocab.char_end_token]

    def word_to_ids(word):
        # <<CHAR_START>> + one id per character (unk fallback) + <<CHAR_END>>
        return [start_idx] + [char2idx.get(ch, unk_idx) for ch in word] + [end_idx]

    batch_ids = [[word_to_ids(w) for w in sentence.split(" ")] for sentence in batch_sentences]
    padded = [
        pad_sequence(
            [torch.as_tensor(ids).long() for ids in sentence_ids],
            batch_first=True,
            padding_value=pad_idx,
        )
        for sentence_ids in batch_ids
    ]
    # per-word character counts (including the start/end markers), one tensor per sentence
    nchars = [torch.as_tensor([len(ids) for ids in sentence_ids]).long() for sentence_ids in batch_ids]
    # number of words in each sentence
    nwords = torch.tensor([len(t) for t in padded]).long()
    return padded, nchars, nwords
def sclstm_tokenize(batch_sentences, vocab):
    """
    Encode every word as a semantic-character (sc) vector: the concatenation
    of three segments over the char vocabulary --
    [one-hot first char | bag-of-counts for middle chars | one-hot last char].

    :returns (List[FloatTensor of shape (n_words, 3 * |chars|)],
              LongTensor of per-sentence word counts)
    """
    char2idx = vocab.chartoken2idx
    unk_idx = vocab.char_unk_token_idx
    size = len(char2idx)

    def encode(word):
        first = [0] * size
        first[char2idx.get(word[0], unk_idx)] = 1
        middle = [0] * size
        for ch in word[1:-1]:
            # unknown middle characters are deliberately dropped (original behaviour)
            if ch in char2idx:
                middle[char2idx[ch]] += 1
        last = [0] * size
        last[char2idx.get(word[-1], unk_idx)] = 1
        return first + middle + last

    # One tensor per sentence; unlike the cnn-lstm case, no padding is needed.
    vectors = [torch.tensor([encode(w) for w in sentence.split(" ")]).float()
               for sentence in batch_sentences]
    word_counts = torch.tensor([len(v) for v in vectors]).long()
    return vectors, word_counts
|
# "Make a program in Python, that solve quadratic equations
# A.x2 + B.x1 + C = 0
# Where A, B, C is real numbers (could be negative), find X. "
import math
# Collect the three coefficients A, B, C from the user, in order.
list_param = []
i = 1
while i <= 3:
    print('Type your number', i, ':')
    value = input()
    # The first coefficient (A) must be non-zero or the equation is not quadratic.
    # NOTE(review): float(value) raises ValueError on non-numeric input, and the
    # retype message says "greater than 0" although any non-zero A is accepted.
    if i == 1 and float(value) == 0:
        print('Please retype your number', i, ', it must be greater than 0:')
    else:
        number = float(value)
        list_param.append(number)
        i += 1
print(list_param)
# Discriminant b^2 - 4ac decides how many real roots exist.
delta = (list_param[1] ** 2) - (4 * list_param[0] * list_param[2])
if delta < 0:
    result = 'The equation has no solution...'
elif delta == 0:
    # One double root: -b / 2a.
    result = 'x =' + str(-list_param[1] / (2 * list_param[0]))
else:
    # Two real roots: (-b +/- sqrt(delta)) / 2a.
    result = 'x1 =' + str((-list_param[1] + math.sqrt(delta)) / (2 * list_param[0])) + '\nx2 =' + str((-list_param[1] - math.sqrt(delta)) / (2 * list_param[0]))
print(delta)
print(result)
|
"""myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('Blog/', include('Blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from Blog import views
from django.conf.urls import url
urlpatterns = [
    # Blog index page.
    path('', views.blog_list, name='blog_list'),
    # Single post by primary key, e.g. /42
    path('<int:blog_pk>', views.blog_detail, name="blog_detail"),
    # Posts filtered by blog-type primary key.
    path('type/<int:blog_type_pk>', views.blogs_with_type, name="blogs_with_type"),
    # Posts filtered by date; `val` matches e.g. "2018-7" or "2018".
    url(r'^date/(?P<val>\w+-*\w*)/', views.blogs_with_date,name='blogs_with_date'),
]
from pyspark import SparkConf, SparkContext, RDD
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
import math
# BUG FIX: the second `conf = SparkConf()...` created a brand-new SparkConf,
# silently discarding the spark.executor.memory setting from the first line.
# Chain both settings on a single builder instead.
conf = (SparkConf()
        .setAppName("Recommender")
        .set("spark.executor.memory", "7g")
        .set("spark.storage.memoryFraction", "0.1"))
sc = SparkContext(conf=conf)
# get data, make rdd
weather_file = sc.textFile('proc_weather.csv')
weather_data = weather_file.map(lambda l: l.split(','))
# stat_nbr, (year, month, day, week, avg temp)
weather_data = weather_data.map(lambda l: (int(l[0]), (int(l[1]), int(l[2]), int(l[3]), int(l[4]), int(l[5]))))
key_file = sc.textFile('key_store_stat.csv')
key_data = key_file.map(lambda l: l.split(','))
# stat_nbr, store_nbr
key_data = key_data.map(lambda l: (int(l[1]), int(l[0])))
combined_data = key_data.join(weather_data)
store_date_temp = combined_data.map(lambda l: l[1])
# ^ now (store, (YY, MM, DD, week, avgTemp))
#store_date_temp = store_date_temp.map(lambda l: (str(l[0])+'-'+l[1][0], l[1][1]))
sales_file = sc.textFile('proc_sales.csv')
#[store number, year, month, day, item number, sales]
sales_data = sales_file.map(lambda l: l.split(','))
#[(store #, year, month, day), (item, sales)]
sales_data = sales_data.map(lambda l: ((int(l[0]), int(l[1]), int(l[2]), int(l[3])), (int(l[4]), int(l[5]))))
#[(store #, year, month, day), (week, temp)]
store_date_temp = store_date_temp.map(lambda l: ((l[0], l[1][0], l[1][1], l[1][2]), (l[1][3], l[1][4])))
sales_temp_data = sales_data.join(store_date_temp)
# ((store, year, month, date), ((item, sales), (week, temp))
ratings_RDD = sales_temp_data.map(lambda l: Rating(l[0][0]*1000+l[1][0][0], (l[1][1][1]//3)+100*l[1][1][0], l[1][0][1]))
# ((store*1000+item, temp//7+100*week, sales)
#print(ratings_RDD.take(3))
# train model
#training_RDD, validation_RDD = ratings_RDD.randomSplit([8, 2], 0)
#validation_for_predict_RDD = validation_RDD.map(lambda x: (x[0], x[1]))
#print(training_RDD.collect().take(3))
seed = 5
iterations = 12
regularization_parameter = 0.1
rank = 4
#errors = [0, 0, 0]
#err = 0
#tolerance = 0.02
training_RDD, test_RDD = ratings_RDD.randomSplit([8, 2], 0)
complete_model = ALS.train(training_RDD, rank, seed=None, iterations=iterations, lambda_=regularization_parameter,\
nonnegative = True)
test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1]))
predictions = complete_model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
mae = rates_and_preds.map(lambda r: (abs(r[1][0] - r[1][1]))).mean()
rmse = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
logs = rates_and_preds.map(lambda r: (math.log(r[1][1] + 1) - math.log(r[1][0] + 1)))
rmsle = math.sqrt(logs.map(lambda x: x**2).mean())
print("The MAE is {:G}".format(mae))
print("The RMSE is {:G}".format(rmse))
print("The RMSLE is {:G}".format(rmsle))
'''
total = ratings_RDD.count()
print(total)
testlog = ratings_RDD.filter(lambda r: (r[2] == 0)).count()
print(testlog)
negTemp = ratings_RDD.filter(lambda r: (r[1] < 0)).count()
print(negTemp)
'''
#print("negative is {:d}".format(negative))
'''
min_error = float('inf')
best_rank = -1
best_iteration = -1
for rank in ranks:
model = ALS.train(training_RDD, rank, seed=seed, iterations=iterations,
lambda_=regularization_parameter)
predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
errors[err] = error
err += 1
print("For rank {0} the RMSE is {1}".format(rank, error))
if error < min_error:
min_error = error
best_rank = rank
print("The best model was trained with rank {:d}".format(best_rank))
'''
#print(sales_temp_data.take(3))
#^[((1, 12, 1, 1), ((1, 0), 42)), ((1, 12, 1, 1), ((2, 0), 42)), ((1, 12, 1, 1), ((3, 0), 42))]
|
import os
from pathlib import Path
from PIL import Image,ImageDraw,ImageFont
import requests
import cairosvg
import time
import time
import shutil
import logging
from config import Ink_HEIGHT,Ink_WIDTH
picdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pic')  # bundled assets (fonts)
# Three sizes of the same typeface used for the rendered screens.
font16 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 16)
font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
font36 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 36)
imgdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'img')  # scratch dir for rendered PNGs
svgdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'weather_icons')  # SVG icons, one subdir per weather code
def getWeather(key, days):
    """Fetch the weatherapi.com forecast JSON for ZIP 10025.

    key: weatherapi.com API key; days: forecast horizon in days.
    """
    endpoint = ("https://api.weatherapi.com/v1/forecast.json"
                "?key={}&q=10025&days={}&aqi=no&alerts=no").format(key, days)
    resp = requests.get(endpoint)
    logging.info("Received Weather")
    return resp.json()
def getHeadlines(apiKey):
    """Return the list of top US headlines from newsapi.org."""
    endpoint = "https://newsapi.org/v2/top-headlines?country=us&apiKey={}".format(apiKey)
    payload = requests.get(endpoint).json()
    logging.info("Received News")
    return payload["articles"]
def getWeatherIcon(weatherReportJson, size):
    """Render the report's SVG icon into a size x size PNG and return it as a PIL Image.

    NOTE(review): the PNG is written under img/ with a time.time()-based name
    and never deleted here -- presumably makeImgDirIfNotExists() wiping the
    folder on each run is relied on; confirm.
    """
    iconPath = getWeatherIconPath(weatherReportJson)
    tmpImg = os.path.join(imgdir, str(time.time()) + ".png")
    cairosvg.svg2png(url=iconPath, write_to=tmpImg, parent_width=size, parent_height=size)
    return Image.open(tmpImg)
# Takes a 1hr report or a "currentDay" report
def getWeatherIconPath(weatherReportJson):
    """Map one weather report (hourly or current-day) to its SVG icon path."""
    code = weatherReportJson["condition"]["code"]
    icon_dir = mapWeatherCodeToWeatherIconDir(code)
    return getWeatherIconFromSVGs(icon_dir, weatherReportJson.get("is_day"))
def getWeatherIconFromSVGs(iconNum, dayNum):
    """Pick the concrete SVG file inside the icon directory *iconNum*.

    iconNum: subdirectory name under svgdir (one directory per weather code).
    dayNum: 1 for day, 0 for night; None (report without is_day) is treated
        as day.

    Returns the path of the matching day/night icon variant, or the generic
    "not available" icon when nothing matches.
    """
    weatherIconDir = os.path.join(svgdir, iconNum)
    icons = os.listdir(weatherIconDir)
    if dayNum is None:  # fixed: identity comparison instead of `== None`
        dayNum = 1
    if len(icons) == 1:
        return os.path.join(weatherIconDir, icons[0])
    for icon in icons:
        if icon == ".DS_Store":
            # macOS metadata, never a valid icon (the old branch only logged "skip").
            continue
        if dayNum == 0 and "night" in icon:
            return os.path.join(weatherIconDir, icon)
        if dayNum == 1 and "night" not in icon:
            return os.path.join(weatherIconDir, icon)
    return os.path.join(svgdir, "Extra/wi-na.svg")
def deleteFileIfExists(path):
    """Remove *path* from disk when it is present; otherwise do nothing."""
    if not os.path.exists(path):
        return
    os.remove(path)
def makeImgDirIfNotExists():
    """(Re)create the scratch img/ directory next to this module.

    Despite the name, an existing directory is removed first, so every run
    starts with an empty scratch area.
    """
    logging.info("makeImgDirIfNotExists")
    img_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'img')
    if os.path.isdir(img_path):
        shutil.rmtree(img_path)
    Path(img_path).mkdir(parents=True, exist_ok=True)
def emptyImage():
    """Return a blank 1-bit (black/white) canvas sized for the e-ink panel.

    Mode '1' with fill 255 yields an all-white Ink_WIDTH x Ink_HEIGHT image.
    """
    # The original bound the result to a local named `emptyImage`, shadowing
    # the function itself; return the new image directly instead.
    return Image.new('1', (Ink_WIDTH, Ink_HEIGHT), 255)
def getUniqueInfo(weatherJson):
    """Return a timestamp suffix such as "on Jan 5 @ 03:07 PM".

    The *weatherJson* argument is currently unused; an earlier revision
    derived a wind/humidity description from it instead.
    """
    stamp = time.strftime('%b %-d @ %I:%M %p', time.localtime())
    return "on " + stamp
def mapWeatherCodeToWeatherIconDir(code):
    """Translate a weatherapi.com condition code into an icon directory name.

    Returns the directory name (string) under ``svgdir``, or None for an
    unrecognized code.
    """
    codeToDir = {
        1000: "113", 1003: "116", 1006: "119", 1009: "122",
        1030: "143", 1063: "176", 1066: "179", 1069: "182",
        1072: "185", 1087: "200", 1114: "227", 1117: "230",
        1135: "248", 1147: "260", 1150: "263", 1153: "266",
        1168: "281", 1171: "284", 1180: "293", 1183: "296",
        1186: "299", 1189: "302", 1192: "305", 1195: "308",
        1198: "311", 1201: "314", 1204: "317", 1207: "320",
        1210: "323", 1213: "326", 1216: "329", 1219: "332",
        1222: "335", 1225: "338", 1237: "350", 1240: "353",
        1243: "356", 1246: "359", 1249: "362", 1252: "365",
        1255: "368", 1258: "371", 1261: "374", 1264: "377",
        1273: "386", 1276: "389", 1279: "392", 1282: "395",
    }
    return codeToDir.get(code)
def shortenWeatherText(desc):
    """Abbreviate a weather condition description so it fits the display."""
    # (long, short) pairs, applied in this order.
    replacements = (
        ("with", "w/"),
        ("Patchy", "Some"),
        ("Moderate or h", "H"),
    )
    for longForm, shortForm in replacements:
        desc = desc.replace(longForm, shortForm)
    return desc
from server import app
# import os
import unittest
# import tempfile
class BaseTestCase(unittest.TestCase):
    """Smoke test for the Flask application exported by server.py."""

    def test_index(self):
        # The index route should respond with HTTP 200.
        client = app.test_client(self)
        response = client.get('/')
        self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
    # unittest.main() never returns (it raises SystemExit), so the trailing
    # `BaseTestCase()` call that used to follow it was unreachable dead
    # code -- and would have raised ValueError anyway (no "runTest"
    # method). It has been removed.
    unittest.main()
from googleplaces import GooglePlaces, types, lang
from pprint import pprint
import sys
import utils
# WARNING(review): a live-looking Google API key is committed here in plain
# text -- it should be revoked and loaded from an environment variable or a
# config file outside version control instead.
YOUR_API_KEY = "AIzaSyCQENSbRFfd9_lGuqoXf2icRgtSvED-WHI"
RADIUS = 10000 # in meters -- search radius passed to the Places text search
google_places = GooglePlaces(YOUR_API_KEY)  # shared client for this module
def get_gplaces_results(place, city, state):
    """Look up a place via the Google Places text-search API.

    Parameters
    ----------
    place : str
        Search term (e.g. a business name).
    city, state : str
        City name and full state name; a result is accepted only when its
        formatted address contains both the city and the state's
        abbreviation (via utils.state_to_abbr).

    Returns
    -------
    dict or None
        Keys 'lat', 'long', 'google_places_id', 'address', 'name' for the
        first matching result, or None when nothing within RADIUS meters
        matches (or nothing matches inside the city).
    """
    # You may prefer to use the nearby_search API, instead.
    city_state = city + ", " + state
    query_result = google_places.text_search(query=place, location=city_state, radius=RADIUS)
    # If the types param contains only 1 item, the request to the Google
    # Places API will be sent as the type param to fulfil:
    # http://googlegeodevelopers.blogspot.com.au/2016/02/changes-and-quality-improvements-in_16.html
    if not query_result.places:
        return None  # No such places within RADIUS meters of city
    state_abbr = utils.state_to_abbr[state]
    # Fix: the loop variable used to shadow the `place` parameter, making
    # the original search term inaccessible inside the loop.
    for result in query_result.places:
        # The following method has to make a further API call.
        result.get_details()
        if state_abbr in result.formatted_address and city in result.formatted_address:
            # Same as the original: return the first result that is
            # actually located in the requested city.
            return {
                'lat': float(result.geo_location['lat']),
                'long': float(result.geo_location['lng']),
                'google_places_id': result.place_id,
                'address': result.formatted_address,
                'name': result.name,
            }
    return None  # Results exist within RADIUS meters, but none in the city
|
# -*- coding: utf-8 -*-
#########################################################
#
# Alejandro German
#
# https://github.com/seralexger/clothing-detection-dataset
#
#########################################################
from PIL import Image
import json
import glob
import random
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Pick one random annotation JSON from data/ and display its image with
# the labelled bounding boxes drawn on top.
files_list = glob.glob('data/*.json')
# NOTE(review): the open() handle is never closed -- tolerable for a
# one-shot script, but a `with` block would be cleaner.
img_data = json.loads(open(files_list[random.randint(0, len(files_list)-1)]).read())
normal_img = Image.open('dataset/' + img_data['file_name'])
#fig,ax = plt.subplots(1)
fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 5
for index,item in enumerate(img_data['arr_boxes']):
    # One rectangle per annotated box, each with a random edge colour,
    # labelled with its class so it appears in the legend.
    box = patches.Rectangle((item['x'], item['y']), item['width'], item['height'],linewidth=2,edgecolor=(random.uniform(0.0,1.0), random.uniform(0.0,1.0),random.uniform(0.0,1.0)) ,facecolor='none', label = item['class'])
    # NOTE(review): add_subplot(111) is called once per box -- presumably
    # it reuses the same axes here; `columns`/`rows` above appear unused.
    ax = fig.add_subplot(111)
    ax.add_patch(box)
# NOTE(review): `ax` is undefined here if 'arr_boxes' is empty -- confirm
# every annotation file has at least one box.
ax.axis('off')
plt.legend()
plt.imshow(normal_img)
plt.show()
"""
Asynchronous Learning Engine (ALE)
Supports PWS standard desktop (studio)
Mentor Queues
Load Balancing / Air Traffic Control
Courses / Flights
A mentor queue is a worker queue with
tasks pending, in process, complete.
The many subclasses of Task are only hinted
at in this overview.
Example Tasks (Transactions archiving to Chronofile):
Set start time on booked flight, notify students
Take Off
In-flight Services (the learning experience)
Land
Postmortem **/ Archive Stats
** sounds dire and we do try experimental courses
sometimes that "crash" but in this shoptalk it's
how we discuss any completed flight.
In-flight the students have a Call Bell for
special services. We run "shows" which in the
better schools are highly interactive and require
a lot of student activity. Passivism is a killer
when it comes to building confidence and competence
in one's tools, as Scott Gray would point out during
faculty meetings.
A normal / standard flight consists of working
through course materials in a PWS Studio with
asynchronous feedback from one or more mentors.
The "flight" (course) is also a unit of accounting
i.e. we containerize it in terms of fixed cost
overhead, tuition, compensation and so on. See
workflow diagrams.
ALE:
In the OO version, ALE is the root object, adding mixins as needed
Kirby Urner
Want graphics?
https://www.flickr.com/photos/kirbyurner/sets/72157654417641521
"""
# Class-hierarchy sketch for the ALE overview in the module docstring.
# All classes are placeholders.
# NOTE(review): `ALE`, `AWS` and `Oasis` are not defined in this chunk of
# the file; `Passenger(AWS)` may be a typo for `Passenger(ALE)` -- confirm.
class Flight(ALE):
    pass
class AirTrafficUtils(ALE):
    pass
class Passenger(AWS):
    pass
class PWS:
    # Standard desktop / studio mentioned in the module docstring.
    pass
class Dispatcher(AirTrafficUtils):
    pass
class Student(Passenger):
    pass
class Task:
    # Examples: Start Class, Submit Work, Annotate Materials, Return Work
    pass
class Mentor(Oasis): # # Example mixin (ways to "phone home")
    pass
class Course(Flight): # Expense Unit for accounting / bookkeeping
    pass
class Oversight(ALE):
    pass
class Admin(Oversight):
    pass
class Recruiting(Mentor):
    pass # Exhibited Mentors, free samples
class StudentSupport(Oversight):
    pass # guidance functions ("Travel Agency")
|
import argparse
from string import Template
import subprocess
# LDIF snippet deleting the user's entry under ou=people.
template_delete_user = Template("""
dn: uid=${ACCOUNT},ou=people,dc=tanaka,dc=lab
changetype: delete
""")
# LDIF snippet used for the "delete from group" step.
# NOTE(review): this deletes the entire cn=${ACCOUNT} group entry under
# ou=groups, rather than removing a member attribute from a shared group --
# confirm every user really has a personal group of the same name.
template_delete_from_group = Template("""
dn: cn=${ACCOUNT},ou=groups,dc=tanaka,dc=lab
changetype: delete
""")
def main(user):
    """Delete *user* from the LDAP directory via ldapmodify.

    Two temporary LDIF files are generated -- one deleting the user's
    entry under ou=people and one deleting the matching group entry under
    ou=groups -- each applied with ldapmodify and then removed.
    """
    # delete user
    content_delete_user = template_delete_user.safe_substitute(
        ACCOUNT=user,
    )
    fname_delete_user = f"delete-user.{user}.ldif"
    with open(fname_delete_user, 'w') as f:
        f.write(content_delete_user)
    subprocess.run([
        'ldapmodify',
        '-h', 'localhost',
        '-x',
        # Bug fix: the bind DN was wrapped in a literal extra pair of
        # single quotes ("'cn=admin,...'"). Argument lists bypass the
        # shell, so ldapmodify received the quotes verbatim and bound
        # with a wrong DN (the group-deletion call below had it right).
        '-D', 'cn=admin,dc=tanaka,dc=lab',
        '-w', 'admin',
        '-f', fname_delete_user,
    ])
    subprocess.run(['rm', fname_delete_user])
    # delete from group
    content_delete_from_group = template_delete_from_group.safe_substitute(
        ACCOUNT=user,
    )
    fname_delete_from_group = f"delete-from-group.{user}.ldif"
    with open(fname_delete_from_group, 'w') as f:
        f.write(content_delete_from_group)
    subprocess.run([
        'ldapmodify',
        '-h', 'localhost',
        '-x',
        '-D', 'cn=admin,dc=tanaka,dc=lab',
        '-w', 'admin',
        '-f', fname_delete_from_group,
    ])
    subprocess.run(['rm', fname_delete_from_group])
if __name__ == '__main__':
    # CLI entry point: delete the user named on the command line.
    argparser = argparse.ArgumentParser(description="Delete user on LDAP")
    argparser.add_argument('user', help='deleting user name')
    cli_args = argparser.parse_args()
    main(cli_args.user)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2022 shmilee
'''
Extend the matplotlib Axes3D class.
ref
---
1. https://gist.github.com/WetHat/1d6cd0f7309535311a539b42cccca89c
2. mpl_toolkits.mplot3d.art3d.Text3D
'''
from matplotlib.text import Annotation
from matplotlib.patches import FancyArrowPatch
from matplotlib.artist import allow_rasterization
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib import cbook
from matplotlib.axes import Axes
from mpl_toolkits.mplot3d.axes3d import Axes3D
class Annotation3D(Annotation):
    '''
    Annotation object with 3D position.

    Stores the annotated point as ``self.xyz`` and re-projects it into 2D
    display coordinates on every draw, so the label follows 3D rotations.
    '''
    def __str__(self):
        # NOTE(review): formats self.xy for x/y but self.xyz for z --
        # equivalent after __init__, where xy is taken from xyz[:2].
        return "Annotation3D(%g, %g, %g, %r)" % (
            self.xy[0], self.xy[1], self.xyz[2], self._text)
    def __init__(self, text, xyz, xyztext=None, **kwargs):
        # Initialize the 2D Annotation with z dropped; the true 3D
        # coordinates are kept via set_3d_properties().
        xy = xyz[:2]
        xytext = None if xyztext is None else xyztext[:2]
        Annotation.__init__(self, text, xy, xytext=xytext, **kwargs)
        self.set_3d_properties(xyz, xyztext=xyztext)
    def set_3d_properties(self, xyz, xyztext=None):
        # Remember the annotated 3D point; _z is the z of the text
        # position (or of the point itself when no separate text position
        # was given) and is re-projected on each draw.
        self.xyz = xyz
        self._z = xyz[2] if xyztext is None else xyztext[2]
        self.stale = True
    @allow_rasterization
    def draw(self, renderer):
        # Project the stored 3D coordinates through the Axes3D projection
        # matrix, then draw as a plain 2D Annotation with the projected
        # attributes temporarily substituted (cbook._setattr_cm is private
        # matplotlib API -- may break across matplotlib versions).
        xy = proj_transform(*self.xyz, self.axes.M)[:2]
        _x, _y, _ = proj_transform(self._x, self._y, self._z, self.axes.M)
        with cbook._setattr_cm(self, xy=xy, _x=_x, _y=_y):
            Annotation.draw(self, renderer)
        self.stale = False
def annotate3D(self, text, xyz, xyztext=None, **kwargs):
    '''
    Add annotation `text` to an `Axes3d` instance.

    Parameters
    ----------
    text: str
        The text of the annotation.
    xyz: (float, float, float)
        The point *(x, y, z)* to annotate.
    xyztext : (float, float, float), default: *xyz*
        The position *(x, y, z)* to place the text at.
    **kwargs
        Additional kwargs are passed to Axes.annotate.

    Returns
    -------
    Annotation3D
    '''
    xy = xyz[:2]
    xytext = None if xyztext is None else xyztext[:2]
    # Create a plain 2D Annotation via the base Axes class (bypassing the
    # Axes3D override installed below), then retype it in place to
    # Annotation3D and attach the 3D coordinates.
    annotation = super(Axes3D, self).annotate(
        text, xy, xytext=xytext, **kwargs)
    annotation.__class__ = Annotation3D
    annotation.set_3d_properties(xyz, xyztext=xyztext)
    return annotation
# Install the 3D-aware annotate on Axes3D; the original 2D implementation
# stays reachable as `annotate2D`, and `annotate` itself is rebound to the
# 3D version.
setattr(Axes3D, 'annotate3D', annotate3D)
setattr(Axes3D, 'annotate2D', Axes.annotate)
setattr(Axes3D, 'annotate', annotate3D)
class FancyArrowPatch3D(FancyArrowPatch):
    '''
    A fancy arrow patch with 3D positions.

    Keeps the full 3D tail/head coordinates and re-projects them into 2D
    display space on every draw.
    '''
    def __str__(self):
        (x1, y1, z1), (x2, y2, z2) = self._posA_posB_3D
        return f"{type(self).__name__}(({x1:g}, {y1:g}, {z1:g})->({x2:g}, {y2:g}, {z2:g}))"
    def __init__(self, posA, posB, **kwargs):
        # Initialize the 2D patch with z dropped; the full 3D endpoints
        # are kept via set_3d_properties().
        FancyArrowPatch.__init__(self, posA=posA[:2], posB=posB[:2], **kwargs)
        self.set_3d_properties(posA, posB)
    def set_3d_properties(self, posA, posB):
        # Store the 3D tail/head positions; re-projected on each draw.
        self._posA_posB_3D = [posA, posB]
        self.stale = True
    def do_3d_projection(self, renderer=None):
        # Called by Axes3D for depth sorting: return the smallest projected
        # z so the arrow is layered correctly among other 3D artists.
        (x1, y1, z1), (x2, y2, z2) = self._posA_posB_3D
        xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
        return min(zs)
    @allow_rasterization
    def draw(self, renderer):
        # Project both endpoints through the Axes3D projection matrix,
        # then draw as a regular FancyArrowPatch with the projected
        # endpoints temporarily substituted (cbook._setattr_cm is private
        # matplotlib API -- may break across matplotlib versions).
        (x1, y1, z1), (x2, y2, z2) = self._posA_posB_3D
        xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
        _posA_posB = [(xs[0], ys[0]), (xs[1], ys[1])]
        with cbook._setattr_cm(self, _posA_posB=_posA_posB):
            FancyArrowPatch.draw(self, renderer)
        self.stale = False
def arrow3D(self, posA, posB, **kwargs):
    '''
    Add an 3d `arrow` to an `Axes3d` instance.

    Parameters
    ----------
    posA, posB : (float, float, float)
        (x, y, z) coordinates of arrow tail and arrow head respectively.
    **kwargs
        Additional kwargs are passed to `~matplotlib.patches.FancyArrowPatch`.

    Returns
    -------
    FancyArrowPatch3D
        The arrow artist that was added to the axes.
    '''
    patch = FancyArrowPatch3D(posA, posB, **kwargs)
    self.add_artist(patch)
    return patch
setattr(Axes3D, 'arrow3D', arrow3D)
setattr(Axes3D, 'arrow2D', Axes.arrow)
# NOTE(review): unlike `annotate` above (rebound to the 3D version),
# `arrow` is left pointing at the 2D Axes.arrow -- confirm this asymmetry
# is intentional.
setattr(Axes3D, 'arrow', Axes.arrow)
|
import re
import tkinter as tk
import tkinter.filedialog
import tkinter.ttk as ttk
from pathlib import Path
from tkinter import font
from tkinter.messagebox import showerror, showinfo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import srtm_path as srtm
from p452 import p452_loss
version = 'version 1.0 2020'
def dmstodec(coords):
    """Convert a coordinate string to decimal degrees.

    Accepts either a plain decimal value ("23.5") or a
    degrees/minutes/seconds string ("23 30 0"; degree, quote and
    double-quote symbols also act as separators). An S/s/W/w anywhere in
    the string marks the value as negative.

    Raises
    ------
    ValueError
        If the string has neither one nor three numeric fields, or a
        field is not a number.
    """
    sign = -1 if re.search('[SsWw]', coords) else 1
    coords = coords.strip('SsWw ')
    fields = [f for f in re.split(r'[°\s\'\"]', coords) if f]
    if len(fields) == 1:
        return sign * float(fields[0])
    if len(fields) == 3:
        # Bug fix: the sign used to multiply only the degrees term, so
        # southern/western DMS values had their minutes and seconds added
        # with the wrong sign (e.g. "S10 30 0" gave -9.5 instead of -10.5).
        return sign * (float(fields[0]) + float(fields[1]) / 60 + float(fields[2]) / 3600)
    raise ValueError(f"Cannot parse coordinate string: {coords!r}")
def readme():
    """Display the program's usage instructions in a modal info dialog."""
    msg = "This is a program to compute the basic transmission loss according to ITU-R P.452-16 " \
          "recommendation (07/2015).\n" \
          "\nThe main module used is p452.py, and all the other subfunctions of it are located " \
          "in '/p452_modules' folder.\n\nThe input parameters for p452 calculations are passed through the GUI, but " \
          "regarding the path elevation profile and radio-climate zones they may be loaded either from " \
          "a xls(x) file, like the test examples provided by ITU, or calculated from SRTM 30m data (heights along the " \
          "path) as long as there are the necessary hgt files inside the /hgt folder.\n\n" \
          "The .xlsx path profile file should consist of 3 columns, describing the di, hi, and radio-climate zone " \
          "arrays respectively. When computing automatically given the SRTM data files, radio-climate zone for each " \
          "point can be set to a fixed 'A2' value (see p452 documentation for details) or you can set a rule to " \
          "calculate the zone depending on elevation of each point (hi) (File -> Settings).\n\nRx station can be " \
          "selected from a csv file 'radars.csv' that should be located in the root directory of the program, " \
          "with columns: [name, lat, lon, height above ground, gain], or alternativley can be defined manually. " \
          "A plot with the path elevation profile vs distance is showed, with earth curvature and refraction " \
          "impact on line of sight, if this is selected in settings. SRTM hgt files can be downloaded from:\n" \
          "https://search.earthdata.nasa.gov/search\n (an earthdata account should be created first)\nIf any hgt " \
          "is missing from directory, there is the choice to fill with 0s, or a warning appears with the list of all " \
          "missing hgt files.\n" \
          "\nFinally, transmitter gain can be calculated using the selected antenna radiation pattern, from a pandas " \
          "dataframe stored as 'database.hdf' in the root directory. The dataframe has columns " \
          "[antenna name, el.tilt, gain, pattern], where pattern is a (2, 361) shaped numpy array containing " \
          "the horizontal and vertical radiation pattern of antennas (relative gain loss (dB) at each angle). An " \
          "example database and antennas pattern are given, but you can use the module 'create_antenna_db.py' to use " \
          "other radiation patterns (.csv files) to create your own database (no gui tool for antenna database as of now)."
    showinfo('README', msg)
# '- The main implementation of the recommendation is MATLAB function'
# ' tl_p452.m placed in this folder that can be used independently'
# ' of this Graphical User Interface but needs the functions '
# ' defined in the folder ./src.'
def aboutcommand():
    """Show the version string and the license text in an 'About' dialog."""
    try:
        with open('LICENSE.md', 'r') as fh:
            body = fh.read()
    except FileNotFoundError:
        body = "License file cannot be loaded."
    showinfo('About', version + '\n\n' + body)
def openimage(fignumber, title):
    """Open a toplevel window showing the image file Fig<fignumber>.gif."""
    window = tk.Toplevel()
    window.title(title)
    canvas = tk.Canvas(window, width=1200, height=700)
    canvas.grid(row=0, column=0)
    photo = tk.PhotoImage(file=f'Fig{fignumber}.gif')
    canvas.create_image(10, 10, image=photo, anchor=tk.NW)
    # Keep a reference on the canvas so the image is not garbage-collected.
    canvas.img = photo
class ValidEntry(ttk.Entry):
    """ttk.Entry with built-in input validation.

    *validtype* selects the rule:

    - ``'coords'``: validated on focus-out; the text must parse via
      ``dmstodec()`` (decimal or DMS coordinates), otherwise an error
      dialog is shown and the field is reset to 0.
    - anything else: treated as a float field; keystrokes that would make
      the text non-numeric are rejected with a bell, and on focus-out a
      non-numeric value is replaced with 0.
    """
    def __init__(self, master, validtype, *args, **kwargs):
        super().__init__(master, *args, justify='center', **kwargs)
        # Tk validation substitutions: %d = action type, %P = prospective
        # value, %V = validation event name.
        vcmd = (self.register(self.onvalidate), '%d', '%P', '%V')
        if validtype == 'coords':
            self.configure(validate='focusout')
        else:
            self.configure(validate='all')
        self.configure(validatecommand=vcmd)
        self.validtype = validtype
    def onvalidate(self, action, value, event):
        # Tk validation callback; returning True accepts the edit.
        if self.validtype == 'coords':
            if not value:
                # Allow an empty field; it is validated again later.
                return True
            try:
                dmstodec(value)
                return True
            except ValueError:
                showerror('Error', 'Invalid coordinates format, '
                                   'must be decimal or DMS in format: D Min Secs(opt: N/S or E/W)')
                # Reset the field to 0 so it always holds a parseable value.
                self.delete(0, tk.END)
                self.insert(0, 0)
                return False
        else:
            if event == 'key':
                # action '1' is an insertion; deletions are always allowed.
                if action != '1':
                    return True
                try:
                    float(value)
                    return True
                except ValueError:
                    self.bell()
                    return False
            elif event == 'focusout':
                try:
                    float(value)
                    return True
                except ValueError:
                    self.delete(0, tk.END)
                    self.insert(0, 0)
                    showerror('Error', 'You have to enter a number to coordinates entry, temporary filled with 0 value')
                    return False
            else:
                # Other validation events (e.g. 'forced') are accepted.
                return True
class MainGUI(ttk.Frame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
master.title('Radar Interference Prediction (ITU P.472)')
master.geometry('1400x700')
master.iconbitmap('icon.ico')
try:
self.antennas = pd.read_hdf('database.hdf', key='db')
antennaslist = self.antennas.index.unique(level='Antenna').tolist()
self.antennasloaded = True
except FileNotFoundError:
self.antennasloaded = False
antennaslist = ['No dB file found']
try:
self.radars = pd.read_csv('radars.csv', delimiter=';')
radarslist = self.radars['Name'].tolist()
radarsloaded = True
except FileNotFoundError:
radarsloaded = False
radarslist = ['No .csv list found']
menubar = tk.Menu(master)
filemenu = tk.Menu(menubar, tearoff=0)
radiomapsmenu = tk.Menu(menubar, tearoff=0)
aboutmenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label='Settings', command=self.opensettings)
filemenu.add_command(label='Exit', command=quit_me)
radiomapsmenu.add_command(label='Median Annual values of ΔΝ',
command=lambda: openimage(1, 'Median Annual values of ΔΝ'))
radiomapsmenu.add_command(label='Average Annual values of ΔΝ',
command=lambda: openimage(2, 'Average Annual values of ΔΝ'))
radiomapsmenu.add_command(label='Median sea-level surface refractivity',
command=lambda: openimage(3, 'Median sea-level surface refractivity'))
radiomapsmenu.add_command(label='Sea-level surface refractivity',
command=lambda: openimage(4, 'sea-level surface refractivity'))
aboutmenu.add_command(label='About', command=aboutcommand)
aboutmenu.add_command(label='Help', command=readme)
menubar.add_cascade(label='File', menu=filemenu)
menubar.add_cascade(label='Radio-meteo maps', menu=radiomapsmenu)
menubar.add_cascade(label='Help', menu=aboutmenu)
master.config(menu=menubar)
master.rowconfigure(0, weight=0)
master.rowconfigure(1, weight=1)
master.rowconfigure(2, weight=0)
master.columnconfigure(0, weight=0)
master.columnconfigure(1, weight=0)
master.columnconfigure(2, weight=1)
master.columnconfigure(3, weight=0)
# default settings and settings variables, stores as class instances
self.zoneselvar = tk.StringVar()
self.earthcurvvar = tk.StringVar()
self.fillvoidsrtmvar = tk.IntVar()
self.zonesel = 'auto' # as opposed to 'fixed' (=A1, inland)
self.earthcurv = 'curved' # as opposed to 'flat' earth in path profile plot
self.fillvoidsrtm = 0 # don't fill with 0 missing srtm data, raise an error, set 1 to fill with 0s
self.seathreshvalue = 0 # when 'auto' fill radio-climate zones, set sea zone when h <= value
self.coastlandthreshvalue = 50 # when 'auto' fill climate zones, set coastland zone when h <= value
self.coordsamples = 700 # maximum number of samples, when generating path profile from SRTM files
self.validpathloaded = False # turned to valid if manual path is selected and valid xls file loaded
# Build frames
paramframe = ttk.LabelFrame(master, text='Parameters')
paramframe.grid(row=1, column=1, sticky='nsew', pady=(5, 20), padx=15)
frame2 = tk.Frame(master)
frame2.grid(row=1, column=2, sticky='nsew', pady=5, padx=15)
frame2.rowconfigure(0, weight=0) # transmitter frame
frame2.rowconfigure(1, weight=0) # receiver frame
frame2.rowconfigure(2, weight=0) # button frame
frame2.rowconfigure(3, weight=1) # results frame
frame2.rowconfigure(4, weight=0)
frame2.columnconfigure(0, weight=1)
# parametersframe
freqlabel = tk.Label(paramframe, text='Frequency (GHz):')
freqlabel.grid(row=0, column=0, padx=5, pady=(7, 4), sticky='nsw')
self.freqentry = ValidEntry(paramframe, 'float', width=6)
self.freqentry.insert(0, 2.6)
self.freqentry.grid(row=0, column=1, padx=7, pady=(7, 4), sticky='nse')
timelabel = tk.Label(paramframe, text='Time percentage (%):')
timelabel.grid(row=1, column=0, padx=5, pady=4, sticky='nsw')
self.timeentry = ValidEntry(paramframe, 'float', width=6)
self.timeentry.insert(0, 20)
self.timeentry.grid(row=1, column=1, padx=7, pady=4, sticky='nse')
self.poldict = {'Vertical polarization': 2, 'Horizontal polarization': 1, 'Slant polarization': 3}
self.polarbox = ttk.Combobox(paramframe, values=[*self.poldict.keys()], width=21, state='readonly')
self.polarbox.grid(row=2, column=0, columnspan=2, padx=5, pady=6, sticky='ns')
self.polarbox.current(0)
pathframe = tk.LabelFrame(paramframe, text='Path profile')
pathframe.grid(row=3, padx=5, pady=8, sticky='nsew', columnspan=2)
self.pathselectvar = tk.IntVar(0)
pathrbtn1 = ttk.Radiobutton(pathframe, text='Generate from SRTM data', variable=self.pathselectvar, value=0,
command=self.pathrbtn)
pathrbtn2 = ttk.Radiobutton(pathframe, text='Load from file:', variable=self.pathselectvar, value=1,
command=self.pathrbtn)
pathrbtn1.grid(row=0, column=0, sticky='nsw', padx=5, pady=(5, 3))
pathrbtn2.grid(row=1, column=0, sticky='nsw', padx=(5, 25), pady=(3, 5))
self.pathloadbtn = ttk.Button(pathframe, text='Load', command=self.loadpathfromfile, width=8, state='disabled')
self.pathloadbtn.grid(row=1, column=2, sticky='nsw', padx=(8, 5), pady=(3, 5))
meteoframe = tk.LabelFrame(paramframe, text='Meteorological data')
meteoframe.grid(row=4, column=0, columnspan=2, padx=5, pady=8, sticky='nsew')
meteoframe.columnconfigure(0, weight=0)
meteoframe.columnconfigure(1, weight=1)
pressurelabel = tk.Label(meteoframe, text='Pressure (hPa):')
pressurelabel.grid(row=0, column=0, sticky='nsw', padx=5, pady=4)
self.pressureentry = ValidEntry(meteoframe, 'float', width=7)
self.pressureentry.insert(0, 1013)
self.pressureentry.grid(row=0, column=1, sticky='nse', padx=7, pady=4)
templabel = tk.Label(meteoframe, text='Temperature (°C):')
templabel.grid(row=1, column=0, sticky='nsw', padx=5, pady=4)
self.tempentry = ValidEntry(meteoframe, 'float', width=7)
self.tempentry.insert(0, 25)
self.tempentry.grid(row=1, column=1, sticky='nse', padx=7, pady=4)
dnlabel = tk.Label(meteoframe, text='ΔΝ (N-units/km):')
dnlabel.grid(row=2, column=0, sticky='nsw', padx=5, pady=4)
self.dnentry = ValidEntry(meteoframe, 'float', width=7)
self.dnentry.insert(0, 50)
self.dnentry.grid(row=2, column=1, sticky='nse', padx=7, pady=4)
n0label = tk.Label(meteoframe, text='N0 (N-units/km):')
n0label.grid(row=3, column=0, sticky='nsw', padx=5, pady=4)
self.n0entry = ValidEntry(meteoframe, 'float', width=7)
self.n0entry.insert(0, 350)
self.n0entry.grid(row=3, column=1, sticky='nse', padx=7, pady=4)
othersframe = tk.LabelFrame(paramframe, text='Other parameters')
othersframe.grid(row=5, column=0, columnspan=2, sticky='nsew', padx=5, pady=7)
othersframe.columnconfigure(0, weight=0)
othersframe.columnconfigure(1, weight=1)
dctlabel = tk.Label(othersframe, text='dct (km):')
dctlabel.grid(row=0, column=0, sticky='nsw', padx=5, pady=4)
self.dctentry = ValidEntry(othersframe, 'float', width=7)
self.dctentry.insert(0, 500)
self.dctentry.grid(row=0, column=2, sticky='nse', padx=5, pady=4)
dcrlabel = tk.Label(othersframe, text='dcr (km):')
dcrlabel.grid(row=1, column=0, sticky='nsw', padx=5, pady=4)
self.dcrentry = ValidEntry(othersframe, 'float', width=7)
self.dcrentry.insert(0, 500)
self.dcrentry.grid(row=1, column=2, sticky='nse', padx=5, pady=4)
self.clutterdict = {'None': (0, 0), 'High crop fields': (4, 0.1), 'Park land': (4, 0.1),
'Irregularly spaced sparse trees': (4, 0.1), 'Orchard (regularly spaced)': (4, 0.1),
'Sparse houses': (4, 0.1), 'Village center': (5, 0.07), 'Deciduous trees': (15, 0.05),
'Mixed tree forest': (15, 0.05), 'Coniferous trees': (20, 0.05),
'Tropical rain forest': (20, 0.03), 'Suburban': (9, 0.025), 'Dense suburban': (12, 0.02),
'Urban': (20, 0.02), 'Dense Urban': (25, 0.02), 'High-rise urban': (35, 0.02),
'Industrial zone': (20, 0.05)}
txclutterlbl = tk.Label(othersframe, text='Tx clutter type:')
rxclutterlbl = tk.Label(othersframe, text='Rx clutter type:')
self.txclutterbox = ttk.Combobox(othersframe, values=[*self.clutterdict.keys()], width=27,
state='readonly', justify='center')
self.rxclutterbox = ttk.Combobox(othersframe, values=[*self.clutterdict.keys()], width=27,
state='readonly', justify='center')
txclutterlbl.grid(row=2, column=0, padx=5, pady=(6, 1), sticky='nsw')
self.txclutterbox.grid(row=3, column=0, columnspan=3, padx=5, pady=1, sticky='nsw')
rxclutterlbl.grid(row=4, column=0, padx=5, pady=(6, 1), sticky='nsw')
self.rxclutterbox.grid(row=5, column=0, columnspan=3, padx=5, pady=1, sticky='nsw')
self.txclutterbox.current(0)
self.rxclutterbox.current(0)
# frame2 subframes:
txframe = ttk.LabelFrame(frame2, text='Transmitter')
txframe.grid(row=0, column=0, padx=5, pady=(0, 6), sticky='nsew')
rxframe = ttk.LabelFrame(frame2, text='Receiver station')
rxframe.grid(row=1, column=0, padx=5, pady=6, sticky='nsew')
calcbtnframe = tk.Frame(frame2)
calcbtnframe.grid(row=2, column=0, pady=2, padx=5, sticky='nsew')
calcbtnframe.columnconfigure(0, weight=1)
resultsframe = ttk.LabelFrame(frame2, text='Results')
resultsframe.grid(row=3, column=0, pady=(2, 20), padx=5, sticky='nsew')
# rx frame
txposframe = tk.Frame(txframe)
txposframe.grid(row=0, column=0, columnspan=9, padx=5, pady=5, sticky='nsew')
txlatlbl = tk.Label(txposframe, text='Lat (φ):')
txlatlbl.grid(row=0, column=0, padx=(5, 2), pady=5, sticky='nsw')
self.txlatentry = ValidEntry(txposframe, 'coords', width=9)
self.txlatentry.grid(row=0, column=1, padx=2, pady=5, sticky='nsw')
txlonlbl = tk.Label(txposframe, text='Lon (λ):')
txlonlbl.grid(row=0, column=2, padx=(30, 2), pady=5, sticky='nsw')
self.txlonentry = ValidEntry(txposframe, 'coords', width=9)
self.txlonentry.grid(row=0, column=3, padx=2, pady=5, sticky='nsw')
txheightlbl = tk.Label(txposframe, text='Height (agl, m):')
txheightlbl.grid(row=0, column=4, padx=(35, 2), pady=5, sticky='nsw')
self.txheightentry = ValidEntry(txposframe, 'float', width=5)
self.txheightentry.grid(row=0, column=5, padx=2, pady=5, sticky='nsw')
txpowerlbl = tk.Label(txposframe, text='Tx Power (dBm):')
txpowerlbl.grid(row=0, column=6, padx=(35, 2), pady=5, sticky='nsw')
self.txpowerentry = ValidEntry(txposframe, 'float', width=6)
self.txpowerentry.grid(row=0, column=7, padx=2, pady=5, sticky='nsw')
self.txgainselectvar = tk.IntVar()
if self.antennasloaded:
self.txgainselectvar.set(0)
else:
self.txgainselectvar.set(1)
self.txgainrbtn1 = ttk.Radiobutton(txframe, text='Calculate Gain:', variable=self.txgainselectvar, value=0,
command=self.txgainrbtn)
self.txgainrbtn1.grid(row=2, column=0, padx=5, pady=3, rowspan=1, sticky='nsw')
self.txgainrbtn2 = ttk.Radiobutton(txframe, text='Fixed Gain (dBi):', variable=self.txgainselectvar, value=1,
command=self.txgainrbtn)
self.txgainrbtn2.grid(row=3, column=0, padx=5, pady=(10, 3), sticky='nsw')
antennalbl = tk.Label(txframe, text='Antenna type')
antennalbl.grid(row=1, column=1, sticky='ns', padx=(10, 2), pady=1)
eltiltlbl = tk.Label(txframe, text='El. Tilt (deg)')
eltiltlbl.grid(row=1, column=2, sticky='ns', padx=8, pady=1)
azimlbl = tk.Label(txframe, text='Azimuth (deg)')
azimlbl.grid(row=1, column=3, sticky='ns', padx=8, pady=1)
mechtiltlbl = tk.Label(txframe, text='Mech. Tilt (deg)')
mechtiltlbl.grid(row=1, column=4, sticky='ns', padx=8, pady=1)
self.txgainentry = ValidEntry(txframe, 'float', width=7)
self.txgainentry.grid(row=3, column=1, padx=15, pady=(10, 3), sticky='sw')
self.antennabox = ttk.Combobox(txframe, values=antennaslist, state='readonly', width=30, justify='center')
self.antennabox.grid(row=2, column=1, padx=(10, 2))
self.antennabox.current(0)
self.antennabox.bind("<<ComboboxSelected>>", self.updatetilts)
self.tiltsbox = ttk.Combobox(txframe, values=[], state='readonly', width=4, justify='center')
self.tiltsbox.grid(row=2, column=2, padx=8)
self.updatetilts("<<ComboboxSelected>>")
self.azimentry = ValidEntry(txframe, 'float', width=6)
self.azimentry.grid(row=2, column=3, sticky='ns', padx=8, pady=2)
self.mechtiltentry = ValidEntry(txframe, 'float', width=5)
self.mechtiltentry.grid(row=2, column=4, sticky='ns', padx=8, pady=2)
if self.antennasloaded:
self.txgainselectvar.set(0)
else:
self.txgainselectvar.set(1)
# rxframe:
self.rxposvar = tk.IntVar()
rxposbtn1 = ttk.Radiobutton(rxframe, text='Rx from list:', variable=self.rxposvar, value=0,
command=self.rxselection)
rxposbtn1.grid(row=0, column=0, padx=5, pady=5, sticky='nsw')
rxposbtn2 = ttk.Radiobutton(rxframe, text='Manual Rx position:', variable=self.rxposvar, value=1,
command=self.rxselection)
rxposbtn2.grid(row=1, column=0, padx=5, pady=(5, 5), sticky='nsw')
self.rxcombobox = ttk.Combobox(rxframe, values=radarslist, state='readonly', width=25, justify='center')
self.rxcombobox.grid(row=0, column=1, padx=15, pady=5, sticky='nsw')
self.rxcombobox.current(0)
if not radarsloaded:
self.rxposvar.set(1)
rxposframe = tk.Frame(rxframe)
rxposframe.grid(row=1, column=1, pady=(5, 5))
rxlatlbl = tk.Label(rxposframe, text='Lat (φ):')
rxlatlbl.grid(row=0, column=0, padx=(12, 2), pady=5, sticky='nsw')
self.rxlatentry = ValidEntry(rxposframe, 'coords', width=9, state='disabled')
self.rxlatentry.grid(row=0, column=1, padx=2, pady=5, sticky='nsw')
rxlonlbl = tk.Label(rxposframe, text='Lon (λ):')
rxlonlbl.grid(row=0, column=2, padx=(17, 2), pady=5, sticky='nsw')
self.rxlonentry = ValidEntry(rxposframe, 'coords', width=9, state='disabled')
self.rxlonentry.grid(row=0, column=3, padx=2, pady=5, sticky='nsw')
rxheightlbl = tk.Label(rxposframe, text='Height (agl, m):')
rxheightlbl.grid(row=0, column=4, padx=(20, 2), pady=5, sticky='nsw')
self.rxheightentry = ValidEntry(rxposframe, 'float', width=5, state='disabled')
self.rxheightentry.grid(row=0, column=5, padx=2, pady=5, sticky='nsw')
rxpowerlbl = tk.Label(rxposframe, text='Rx Gain (dBm):')
rxpowerlbl.grid(row=0, column=6, padx=(20, 2), pady=5, sticky='nsw')
self.rxgainentry = ValidEntry(rxposframe, 'float', width=6, state='disabled')
self.rxgainentry.grid(row=0, column=7, padx=(2, 5), pady=5, sticky='nsw')
# initialize radiobutton selections, just limit selections if preset lists for antenna or rx stations are loaded
self.rxselection()
self.txgainrbtn()
# button frame
self.calcbutton = ttk.Button(calcbtnframe, text='Run', command=self.calculateloss, width=10)
self.calcbutton.grid(row=0, column=0)
# results frame
resultsframe.rowconfigure(0, weight=0)
resultsframe.rowconfigure(1, weight=0)
resultsframe.rowconfigure(2, weight=1)
resultsframe.rowconfigure(3, weight=0)
resultsframe.columnconfigure(0, weight=1)
resultsframe.columnconfigure(1, weight=0)
resultsframe.columnconfigure(2, weight=0)
boxframe1 = ttk.LabelFrame(resultsframe, text='Transmission', width=200, height=140)
boxframe1.grid(row=0, column=1, padx=5, pady=5, sticky='nw')
boxframe1.grid_propagate(0)
boxframe2 = ttk.LabelFrame(resultsframe, text='Path profile', width=200, height=150)
boxframe2.grid(row=1, column=1, padx=5, pady=5, sticky='nw')
boxframe2.grid_propagate(0)
infoframe = tk.Frame(resultsframe)
infoframe.grid(row=2, column=1, padx=5, pady=5, sticky='nsew')
# plot frame
plt.style.use('seaborn')
fig, self.ax = plt.subplots()
fig.patch.set_facecolor((0.97, 0.97, 0.97))
self.ax.set_xlabel('d (km)')
self.ax.set_ylabel('Elevation (m)')
self.ax.set_title('Path elevation profile')
fig.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.92)
self.canvas = FigureCanvasTkAgg(fig, resultsframe)
self.canvas.get_tk_widget().grid(row=0, column=0, rowspan=3, sticky='nsew')
# transmission results
text1 = tk.Label(boxframe1, text='Rx level (dBm):')
text1.grid(row=0, column=0, sticky='nsw')
text2 = tk.Label(boxframe1, text='Transmission loss (dB):')
text2.grid(row=1, column=0, sticky='nsw')
text3 = tk.Label(boxframe1, text='FSP & gaseous atten.(dB):')
text3.grid(row=2, column=0, sticky='nsw')
text4 = tk.Label(boxframe1, text='Diff. loss (Ldp) (dB):')
text4.grid(row=3, column=0, sticky='nsw')
text5 = tk.Label(boxframe1, text='Tx Gain (dBi):')
text5.grid(row=4, column=0, sticky='nsw')
self.tlossvar = tk.StringVar()
self.rxlevelvar = tk.StringVar()
self.fspgaslossvar = tk.StringVar()
self.difflossvar = tk.StringVar()
self.txgainvar = tk.StringVar()
self.rxlevelvar.set(f'{np.NAN:9.2f}')
self.tlossvar.set(f'{np.NAN:9.2f}')
self.fspgaslossvar.set(f'{np.NAN:9.2f}')
self.difflossvar.set(f'{np.NAN:9.2f}')
self.txgainvar.set(f'{np.NAN:9.2f}')
rxlevellbl = tk.Label(boxframe1, textvariable=self.rxlevelvar)
rxlevellbl.grid(row=0, column=1, padx=2, sticky='e')
tlosslbl = tk.Label(boxframe1, textvariable=self.tlossvar)
tlosslbl.grid(row=1, column=1, padx=2, sticky='e')
fspgaslbl = tk.Label(boxframe1, textvariable=self.fspgaslossvar)
fspgaslbl.grid(row=2, column=1, padx=2, sticky='e')
difflosslbl = tk.Label(boxframe1, textvariable=self.difflossvar)
difflosslbl.grid(row=3, column=1, padx=2, sticky='e')
txgainlbl = tk.Label(boxframe1, textvariable=self.txgainvar)
txgainlbl.grid(row=4, column=1, padx=2, sticky='e')
# path profile frame
text4 = tk.Label(boxframe2, text='Distance (km): ')
text4.grid(row=0, column=0, sticky='nsw')
text5 = tk.Label(boxframe2, text='Bearing (deg): ')
text5.grid(row=1, column=0, sticky='nsw')
text6 = tk.Label(boxframe2, text='Path type:')
text6.grid(row=2, column=0, sticky='nsw')
text6 = tk.Label(boxframe2, text='Tx height (m, amsl):')
text6.grid(row=3, column=0, sticky='nsw')
text7 = tk.Label(boxframe2, text='Rx height (m, amsl):')
text7.grid(row=4, column=0, sticky='nsw')
text8 = tk.Label(boxframe2, text='theta_t (deg):')
text8.grid(row=5, column=0, sticky='nsw')
self.distvar = tk.StringVar()
self.bearingvar = tk.StringVar()
self.pathtypevar = tk.StringVar()
self.htsvar = tk.StringVar()
self.hrsvar = tk.StringVar()
self.thetatvar = tk.StringVar()
self.distvar.set(f'{np.NAN:20.2f}')
self.bearingvar.set(f'{np.NAN:20.2f}')
self.pathtypevar.set('NLOS')
self.htsvar.set(f'{np.NAN:20.2f}')
self.hrsvar.set(f'{np.NAN:20.2f}')
self.thetatvar.set(f'{np.NAN:20.2f}')
distlbl = tk.Label(boxframe2, textvariable=self.distvar)
distlbl.grid(row=0, column=1, padx=2, sticky='e')
bearinglbl = tk.Label(boxframe2, textvariable=self.bearingvar)
bearinglbl.grid(row=1, column=1, padx=2, sticky='e')
pathtypelbl = tk.Label(boxframe2, textvariable=self.pathtypevar)
pathtypelbl.grid(row=2, column=1, padx=2, sticky='e')
htslbl = tk.Label(boxframe2, textvariable=self.htsvar)
htslbl.grid(row=3, column=1, padx=2, sticky='e')
hrslbl = tk.Label(boxframe2, textvariable=self.hrsvar)
hrslbl.grid(row=4, column=1, padx=2, sticky='e')
thetatlbl = tk.Label(boxframe2, textvariable=self.thetatvar)
thetatlbl.grid(row=5, column=1, padx=2, sticky='e')
# clickable labels for plot info and detailed p452 output
plotinfo = tk.Label(infoframe, text='Plot info', cursor='hand2', fg='blue')
plotinfo.grid(row=0, column=1, padx=5, pady=2)
plotinfo.bind("<Button-1>", self.showplotinfo)
detailedoutput = tk.Label(infoframe, text='p452 output', cursor='hand2', fg='blue')
detailedoutput.grid(row=0, column=2, padx=5, pady=2)
detailedoutput.bind("<Button-1>", self.showdetailedoutput)
f = font.Font(plotinfo, plotinfo.cget("font"))
f.configure(underline=True)
plotinfo.configure(font=f)
detailedoutput.configure(font=f)
self.line1 = None
self.line2 = None
self.line3 = None
self.fill1 = None
self.fill2 = None
self.results = None
def pathrbtn(self):
if self.pathselectvar.get():
# manual path
self.pathloadbtn.configure(state='normal')
self.txgainselectvar.set(1)
self.txgainrbtn1.configure(state='disabled')
self.txgainrbtn2.configure(state='normal')
self.txgainrbtn()
# self.txlonentry.configure(state='disabled')
# self.rxlonentry.configure(state='disabled')
else:
# auto path selected:
self.pathloadbtn.configure(state='disabled')
self.txgainselectvar.set(0)
self.txgainrbtn1.configure(state='normal')
self.txgainrbtn()
# self.txlonentry.configure(state='normal')
if self.rxposvar.get():
self.rxlonentry.configure(state='normal')
def txgainrbtn(self):
if self.txgainselectvar.get():
self.txgainentry.configure(state='normal')
self.antennabox.configure(state='disabled')
self.tiltsbox.configure(state='disabled')
self.azimentry.configure(state='disabled')
self.mechtiltentry.configure(state='disabled')
else:
self.txgainentry.configure(state='disabled')
self.antennabox.configure(state='readonly')
self.tiltsbox.configure(state='readonly')
self.azimentry.configure(state='normal')
self.mechtiltentry.configure(state='normal')
def rxselection(self):
if self.rxposvar.get():
self.rxcombobox.configure(state='disabled')
self.rxlatentry.configure(state='normal')
self.rxlonentry.configure(state='normal')
self.rxheightentry.configure(state='normal')
self.rxgainentry.configure(state='normal')
else:
self.rxcombobox.configure(state='readonly')
self.rxlatentry.configure(state='disabled')
self.rxlonentry.configure(state='disabled')
self.rxheightentry.configure(state='disabled')
self.rxgainentry.configure(state='disabled')
def loadpathfromfile(self):
filepath = tk.filedialog.askopenfilename(initialdir=Path.cwd(), title="Select file",
filetypes=[("Excel file", ".xlsx .xls")])
pathdf = pd.read_excel(filepath, names=['d', 'h', 'zone'], header=None)
# fast check if path profile file contains header or not:
try:
float(pathdf.iloc[0, 0])
except ValueError:
pathdf.drop(0, inplace=True)
try:
data = pathdf.to_numpy()
d = data[:, 0].astype('float64')
h = data[:, 1].astype('float64')
zone = data[:, 2].astype('str')
except ValueError:
showerror('Invalid path profile', 'Invalid path profile. File must include 3 columns: '
'[d: number, h: number, zone: [A1/A2/B]')
return
if data.shape[0] < 4 or data.shape[1] != 3:
showerror('Invalid path profile', 'Invalid path profile. It should contain more than 3 rows-data '
'points and exactly 3 columns: [d, h, zonetype]')
return
if not set(zone).issubset({'A1', 'A2', 'B'}):
showerror('Error in climate zones', 'Error in climate zones. Zone column must contain '
'only "A1" (Coastal land), "A2 (inland)" or "B" (sea) values.')
return
self.di = d
self.hi = h
self.zonei = zone
self.validpathloaded = True
def getpathsrtm(self, phi_t, psi_t, phi_r, psi_r):
dist, atrdeg, di, hi, delta = srtm.get_path_profile(phi_t, psi_t, phi_r, psi_r,
coord_samples=self.coordsamples,
fill_missing=self.fillvoidsrtm)
zonei = np.asarray(['A2'] * self.coordsamples)
zonei[hi <= self.coastlandthreshvalue] = 'A1'
zonei[hi <= self.seathreshvalue] = 'B'
return dist, atrdeg, di, hi, zonei, delta
    def calculateloss(self):
        """Run the ITU-R P.452 prediction for the current inputs and update the GUI.

        Validates the form, builds (or loads) the path profile, calls
        ``p452_loss``, redraws the profile/line-of-sight plot and refreshes
        the result labels.  Shows an error dialog when validation fails.
        """
        # validateparam() returns (False, msg) or (True, None, params).
        *response, = self.validateparam()
        if response[0]:
            (f, p, pressure, temp, DN, N0, dct, dcr, phi_t, psi_t, txheight, txpower, azim, mechtilt,
             fixedtxgain, phi_r, psi_r, rxheight, rxgain) = response[2]
            # Polarization and clutter (height, distance) presets from comboboxes.
            pol = self.poldict[self.polarbox.get()]
            ha_t = self.clutterdict[self.txclutterbox.get()][0]
            dk_t = self.clutterdict[self.txclutterbox.get()][1]
            ha_r = self.clutterdict[self.rxclutterbox.get()][0]
            dk_r = self.clutterdict[self.rxclutterbox.get()][1]
            if self.pathselectvar.get():
                # Manual path mode: use the profile loaded from file.
                if not self.validpathloaded:
                    self.loadpathfromfile()  # sets di, hi, zonei arrays
                # NOTE(review): if the load is cancelled or fails, self.di may
                # be missing or stale here -- confirm intended behavior.
                dist = self.di[-1] - self.di[0]
                atrdeg = srtm.get_path_geometry(phi_t, psi_t, phi_r, psi_r, 0)[1]
            else:
                # Auto path: derive profile and climate zones from SRTM data.
                dist, atrdeg, di, hi, zonei, delta = self.getpathsrtm(phi_t, psi_t, phi_r, psi_r)
                self.di = di
                self.hi = hi
                self.zonei = zonei
            if self.txgainselectvar.get():
                # Fixed Tx gain supplied directly by the user (Gt=...).
                self.results = p452_loss(f, p, self.di, self.hi, self.zonei, txheight, rxheight, phi_t, phi_r, rxgain,
                                         pol, dct, dcr, DN, N0, pressure, temp, Gt=fixedtxgain,
                                         ha_t=ha_t, dk_t=dk_t, ha_r=ha_r, dk_r=dk_r)
                (Lb, Lbfsg, Lb0p, Lb0b, Ld50, Ldp, Lbs, Lba, theta_t, Gt, pathtype) = self.results
            else:
                # calculate gain by antenna pattern inside p452 function:
                eltilt = float(self.tiltsbox.get())
                antenna = self.antennabox.get()
                self.results = p452_loss(f, p, self.di, self.hi, self.zonei, txheight, rxheight, phi_t, phi_r, rxgain,
                                         pol, dct, dcr, DN, N0, pressure, temp, psi_t=psi_t, psi_r=psi_r,
                                         antennasdb=self.antennas, antennaname=antenna, azim=azim, eltilt=eltilt,
                                         mechtilt=mechtilt, ha_t=ha_t, dk_t=dk_t, ha_r=ha_r, dk_r=dk_r)
                (Lb, Lbfsg, Lb0p, Lb0b, Ld50, Ldp, Lbs, Lba, theta_t, Gt, pathtype) = self.results
            # Received level = Tx power + both gains - basic transmission loss.
            rxlevel = Gt + rxgain + txpower - Lb
            # convert theta_t mrad to degrees
            theta_t = np.rad2deg(theta_t / 1000)
            # transmitter and receiver heights amsl:
            hts = txheight + self.hi[0]
            hrs = rxheight + self.hi[-1]
            # plot path profile and line of sight;
            # remove artists from the previous run before redrawing:
            if self.line1:
                self.line1.remove()
                self.line2.remove()
                self.fill1.remove()
                self.fill2.remove()
            if self.line3:
                self.line3.remove()
            ec = np.full_like(self.di, 0)
            rc = ec
            # median effective Earth radius factor k50 for the path
            k50 = 157 / (157 - DN)
            Ro = 6371  # km
            ae = k50 * Ro
            # if self.pathselectvar.get() == 0 and self.earthcurv == 'curved':
            if self.earthcurv == 'curved':
                # earth curvature
                # setting reference point x0 as the center of path (x0 = dist/2)
                ec = -1000 * (self.di - dist / 2) ** 2 / (2 * Ro)
                ec -= min(ec)
                # line of sight curvature due to refraction
                rc = ec * (1 - 1 / k50)
                rc -= min(rc)
            # stragiht line los without accounting refraction
            los = self.di * (hrs - hts) / dist + hts
            # modify path heights and line of sight curvature:
            self.h = self.hi + ec
            los += rc
            # plot elevation along path:
            self.line1, = self.ax.plot(self.di, self.h, color='grey')
            if pathtype == 'los':
                pathtype = 'Line of Sight'
                # if los, blue color to line connecting tx and rx
                self.line2, = self.ax.plot(self.di, los, color='blue')
                self.line3 = None
            else:
                # no los:
                self.line2, = self.ax.plot(self.di, los, color='red')
                # find obstruction point (point with maximum theta_tx):
                dii = self.di[1:-1]
                hii = self.h[1:-1] - ec[1:-1]
                thetatx = np.arctan((hii - hts) / (1000 * dii) - dii / (2 * ae))
                idx = np.argmax(thetatx)
                dmax = self.di[idx]
                # NOTE(review): idx indexes the *sliced* arrays, so the height
                # uses self.h[idx + 1] but dmax takes self.di[idx] -- looks like
                # an off-by-one; confirm whether dmax should be self.di[idx + 1].
                self.line3, = self.ax.plot((0, dmax), (hts, self.h[idx + 1]), color='cyan', linewidth=1)
            self.fill1 = self.ax.fill_between(self.di, self.h, ec, color='darkgoldenrod', alpha=0.6)
            self.fill2 = self.ax.fill_between(self.di, ec, 0, color='silver')
            self.ax.set_xlim(0, dist)
            self.ax.set_ylim(0, max(2.5 * max(self.h), 1.7 * hts, 1.7 * hrs))
            self.canvas.draw()
            # update transmission results
            self.rxlevelvar.set(f'{rxlevel:9.2f}')
            self.tlossvar.set(f'{Lb:9.2f}')
            self.fspgaslossvar.set(f'{Lbfsg:9.2f}')
            self.difflossvar.set(f'{Ldp:9.2f}')
            self.txgainvar.set(f'{Gt:9.2f}')
            # update path parameters results
            self.distvar.set(f'{dist:20.2f}')
            self.bearingvar.set(f'{atrdeg:20.2f}')
            self.htsvar.set(f'{hts:20.2f}')
            self.hrsvar.set(f'{hrs:20.2f}')
            self.thetatvar.set(f'{theta_t:20.2f}')
            self.pathtypevar.set(f'{pathtype}')
        else:
            showerror('Error', response[1])
def validateparam(self):
freq = float(self.freqentry.get())
if freq < 0.1:
return False, 'Frequency should be greater than 0.1GHz'
timeper = float(self.timeentry.get())
if timeper > 50:
return False, 'Time percentage should be lower than 50%'
pressure = float(self.pressureentry.get())
if pressure < 0:
return False, 'Pressure can\'t be negative'
temp = float(self.tempentry.get())
DN = float(self.dnentry.get())
N0 = float(self.n0entry.get())
if N0 <= DN:
return False, 'N0 should be greater than ΔΝ value'
dct = float(self.dctentry.get())
dcr = float(self.dcrentry.get())
if len(self.txlatentry.get()) == 0:
return False, 'Please enter Tx latitude (decimal or DMS)'
else:
phi_t = dmstodec(self.txlatentry.get())
if len(self.txlonentry.get()) == 0:
return False, 'You need to enter Tx longitude value (decimal or DMS)'
else:
psi_t = dmstodec(self.txlonentry.get())
if len(self.txheightentry.get()) == 0 or len(self.txpowerentry.get()) == 0:
return False, 'You need to enter Tx antenna height (above ground) and power.'
else:
txheight = float(self.txheightentry.get())
if txheight <= 0:
return False, "Transmitter antenna height must be can't be 0"
txpower = float(self.txpowerentry.get())
if not self.txgainselectvar.get():
if len(self.azimentry.get()) == 0 or len(self.mechtiltentry.get()) == 0:
return False, 'You need to enter Tx antenna azimuth and mech. tilt.'
else:
azim = float(self.azimentry.get())
mechtilt = float(self.mechtiltentry.get())
fixedtxgain = None
else:
if len(self.txgainentry.get()) == 0:
return False, 'You need to enter a number for Tx antenna Gain (dBi).'
fixedtxgain = float(self.txgainentry.get())
azim = None
mechtilt = None
if self.rxposvar.get():
if len(self.rxlatentry.get()) == 0:
return False, 'You need to enter a valid Rx station latitude (decimal or DMS).'
phi_r = dmstodec(self.rxlatentry.get())
if len(self.rxlonentry.get()) == 0:
return False, 'You need to enter a valid Rx station longitude (decimal or DMS).'
psi_r = dmstodec(self.rxlonentry.get())
if len(self.rxheightentry.get()) == 0 or len(self.rxgainentry.get()) == 0:
return False, 'You need to enter Rx station height (above ground) and gain (dBi).'
rxheight = float(self.rxheightentry.get())
if rxheight <= 0:
return False, "Rx antenna height above ground must be greater than 0."
rxgain = float(self.rxgainentry.get())
else:
# get receiver station from preset list
try:
rxidx = self.rxcombobox.current()
phi_r = self.radars.loc[rxidx, 'lat']
psi_r = self.radars.loc[rxidx, 'lon']
rxheight = self.radars.loc[rxidx, 'h_agl']
rxgain = self.radars.loc[rxidx, 'gain']
except KeyError:
return False, 'Error when trying to load Rx station attributes'
# check if transmitter and receiver have the same coords:
if (phi_t, psi_t) == (phi_r, psi_r):
return False, 'Tx position is identical to Rx position.'
return True, None, (freq, timeper, pressure, temp, DN, N0, dct, dcr, phi_t, psi_t,
txheight, txpower, azim, mechtilt, fixedtxgain, phi_r, psi_r, rxheight, rxgain)
def updatetilts(self, evnt):
if not self.antennasloaded:
return
antennasel = self.antennabox.get()
tiltslist = self.antennas.loc[antennasel].index.tolist()
self.tiltsbox.configure(values=tiltslist)
self.tiltsbox.current(0)
def opensettings(self):
self.settingswindow = tk.Toplevel()
self.settingswindow.title('Settings')
x = self.master.winfo_x()
y = self.master.winfo_y()
self.settingswindow.geometry("+%d+%d" % (x + 100, y + 70))
self.settingswindow.iconbitmap('icon.ico')
# read saved variables
self.zoneselvar.set(self.zonesel)
self.earthcurvvar.set(self.earthcurv)
self.fillvoidsrtmvar.set(self.fillvoidsrtm)
self.settingswindow.columnconfigure(0, weight=0)
self.settingswindow.columnconfigure(1, weight=1)
zoneframe = ttk.LabelFrame(self.settingswindow, text='Radio-climate zones')
text = tk.Label(self.settingswindow, text='Radio-climate zone calculation options, '
'when the path is not loaded from file:')
text.grid(row=0, column=0, columnspan=2, padx=(10, 70), pady=(10, 2), sticky='nsew')
zoneframe.grid(row=1, column=0, columnspan=2, padx=10, pady=(2, 10), sticky='nsw')
zonerb1 = ttk.Radiobutton(zoneframe, text='Calculate zone based on heights:',
variable=self.zoneselvar, value='auto')
zonerb2 = ttk.Radiobutton(zoneframe, text='Fixed A2 (inland) zone', variable=self.zoneselvar, value='fixed')
seathreshlbl = tk.Label(zoneframe, text="Sea ('B') zone when h<=:")
coastlandthreshlbl = tk.Label(zoneframe, text="Coastland ('A2') zone when h<=:")
self.seathreshentry = ValidEntry(zoneframe, 'float', width=5)
self.coastlandthreshentry = ValidEntry(zoneframe, 'float', width=5)
self.seathreshentry.delete(0, tk.END)
self.coastlandthreshentry.delete(0, tk.END)
self.seathreshentry.insert(0, self.seathreshvalue)
self.coastlandthreshentry.insert(0, self.coastlandthreshvalue)
zonerb1.grid(row=0, column=0, padx=3, pady=1, columnspan=4, sticky='nsw')
seathreshlbl.grid(row=1, column=2, padx=(10, 5), pady=1, sticky='nsw')
self.seathreshentry.grid(row=1, column=3, padx=(2, 10), pady=1, sticky='nsw')
coastlandthreshlbl.grid(row=1, column=0, padx=(20, 2), pady=1, sticky='nsw')
self.coastlandthreshentry.grid(row=1, column=1, padx=2, pady=1, sticky='nsw')
zonerb2.grid(row=2, column=0, padx=3, pady=5, columnspan=4, sticky='nsw')
earthcurvbtn = ttk.Checkbutton(self.settingswindow, text='Consider earth curvature and refraction when '
'plotting path',
variable=self.earthcurvvar, onvalue='curved', offvalue='flat')
earthcurvbtn.grid(row=2, column=0, columnspan=2, pady=6, padx=15, sticky='nsw')
fillvoidsrtm = ttk.Checkbutton(self.settingswindow, text='Fill missing SRTM data with 0 heights',
variable=self.fillvoidsrtmvar)
fillvoidsrtm.grid(row=3, column=0, columnspan=2, pady=6, padx=15, sticky='nsw')
coordsampleslbl = tk.Label(self.settingswindow, text='Max number of SRTM samples in path profile generator:')
coordsampleslbl.grid(row=4, column=0, padx=(15, 2), pady=6, sticky='nsw')
self.coordsamplesentry = ValidEntry(self.settingswindow, 'float', width=6)
self.coordsamplesentry.grid(row=4, column=1, padx=2, pady=6, sticky='nsw')
self.coordsamplesentry.delete(0, tk.END)
self.coordsamplesentry.insert(0, self.coordsamples)
submitsettingsbtn = ttk.Button(self.settingswindow, text='Save', command=self.submitsettings)
submitsettingsbtn.grid(row=5, column=0, columnspan=2, pady=20)
def submitsettings(self):
sea = float(self.seathreshentry.get())
coast = float(self.coastlandthreshentry.get())
samples = int(self.coordsamplesentry.get())
if samples == 0:
self.coordsamples = 700
elif samples > 1500:
self.coordsamples = 1500
else:
self.coordsamples = samples
if self.zoneselvar.get() == 'auto':
if sea >= coast:
showerror('Error', 'Sea threshold value must be smaller than coast-land threshold value.')
else:
self.zonesel = 'auto'
self.seathreshvalue = sea
self.coastlandthreshvalue = coast
self.earthcurv = self.earthcurvvar.get()
self.fillvoidsrtm = self.fillvoidsrtmvar.get()
self.settingswindow.destroy()
else:
self.zonesel = 'fixed'
self.earthcurv = self.earthcurvvar.get()
self.fillvoidsrtm = self.fillvoidsrtmvar.get()
self.settingswindow.destroy()
def showdetailedoutput(self, e):
if self.results:
resultswd = tk.Toplevel()
resultswd.transient(self.master)
resultswd.grab_set()
resultswd.title('p452 output parameters')
x = self.master.winfo_x()
y = self.master.winfo_y()
resultswd.geometry(f"+{x + 500:d}+{y + 200:d}")
paramslbl = ['Lb', 'Lbfsg', 'Lb0p', 'Lb0b', 'Ld50', 'Ldp', 'Lbs', 'Lba']
paramsvl = self.results[:8]
text = ''
for i, lbl in enumerate(paramslbl):
text += f'{paramslbl[i]:5s} = {paramsvl[i]:8.4f} dB\n'
textwidget = tk.Text(resultswd, padx=10, pady=10, width=35, height=8)
textwidget.grid(row=0, column=0, sticky='nsew')
textwidget.insert(tk.END, text)
textwidget.config(state='disabled')
closebtn = ttk.Button(resultswd, text='Exit', command=lambda: resultswd.destroy())
closebtn.grid(row=1, column=0, pady=6, padx=5)
else:
showerror('No results', 'First run a prediction to get results')
def showplotinfo(self, e):
showinfo('Plot explained', 'Path elavation profile is plotted, traveling along a great circle arc, from '
'transmitter to receiver station. WGS84 reference ellipsoid is used.\n'
'When enabled in settings, earth curvature (Ro=6371km) and refraction is taken into '
'consideration, so what you actually see as elevation profile is the distance above '
'the chord running tx to rx station.\nRefraction is also considered, thus the '
'line-of-sight isn\'t actually a straight line, but a curved line with a '
'curvature depending on ΔΝ value (average radio-refractive index lapse-rate through '
'the lowest 1 km of the atmosphere). Refraction and curvature of Earth have opposite'
' impact of tx-rx line of sight determination, since the curvature of Earth makes '
'distant objects look lower, and refraction makes them look higher.')
def quit_me():
    """Terminate the Tk main loop and destroy the root window.

    Registered as the WM_DELETE_WINDOW handler so the process exits
    cleanly when the user closes the main window.
    """
    root.quit()
    root.destroy()
if __name__ == '__main__':
    root = tk.Tk()
    # Ensure closing the window stops the mainloop and destroys the root.
    root.protocol("WM_DELETE_WINDOW", quit_me)
    s = ttk.Style()
    # 'vista' exists only on Windows Tk builds; fall back to the platform
    # default theme elsewhere instead of raising TclError.
    if 'vista' in s.theme_names():
        s.theme_use('vista')
    mainapp = MainGUI(root)
    root.mainloop()
|
import segyio
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook as tqdm
from obspy.io.segy.core import _read_segy
# Mapping from segyio trace-header field names to the short column names
# used for the DataFrames built in this module.
SEGYIO_HEADER_ITEMS = {
    'EnergySourcePoint': "SPID",
    'SourceX': "SRCX",
    'SourceY': "SRCY",
    'GroupX': "GRPX",
    'GroupY': "GRPY",
    'offset': "OFFSET",
    'INLINE_3D': "INLINE",
    'CROSSLINE_3D': "XLINE",
}
# Mapping from SeiSee header-dump column descriptions to short column names.
# NOTE(review): "Clossline Number" looks misspelled, but presumably matches
# the literal text SeiSee writes -- verify against a real SeiSee export
# before "fixing" the key.
SEISEE_HEADER_ITEMS = {
    "Trace index in file": "IDX",
    "Trace number within field record": "TRCFLD",
    "SP - Energy source point number": "SPID",
    "CDP ensemble number": "CDP",
    "Distance from source point to receiv grp": "OFFSET",
    "Receiver group elevation": "GRPZ",
    "Surface elevation at source": "SRCZ",
    "Source X coordinate": "SRCX",
    "Source Y coordinate": "SRCY",
    "Group X coordinate": "GRPX",
    "Group Y coordinate": "GRPY",
    "CDP X": "CDPX",
    "CDP Y": "CDPY",
    "Inline Number": "ILINE",
    "Clossline Number": "XLINE",
}
# Columns used for source/receiver geometry plus 'FB' (presumably
# first-break picks -- TODO confirm against the consuming code).
PRIME_TIME_COLUMNS = ['SRCX', 'SRCY', 'SRCZ', 'GRPX', 'GRPY', 'GRPZ', 'FB']
# Convenience column groups: full 3-D source/group coordinates and the
# horizontal-only ("o") variants.
src_cols = ['SRCX', 'SRCY', 'SRCZ']
grp_cols = ['GRPX', 'GRPY', 'GRPZ']
src_o_cols = ['SRCX', 'SRCY']
grp_o_cols = ['GRPX', 'GRPY']
def _print_geometry_info(df, columns, title):
    """Print min/max summary lines for the given coordinate columns, if any exist in df."""
    info_min = '>>> min'
    info_max = '>>> max'
    info_line = ' ' * 7
    for c in columns:
        if c not in df.columns:
            continue
        info_min += " |'{}' {:12.2f}|".format(c[-1], df[c].min())
        info_max += " |'{}' {:12.2f}|".format(c[-1], df[c].max())
        info_line += '-' * 19
    # Print the section only when at least one of the columns is present.
    if any(np.intersect1d(columns, df.columns)):
        print('\n')
        print('>>>>>> {}'.format(title))
        print(info_min)
        print(info_line)
        print(info_max)


def header_info(df, name=None):
    """
    Print statistics of segy header
    :param df: sgy header DataFrame
    :param name: header name (stringified if not already a str)
    :return: None
    """
    if not isinstance(name, str):
        name = str(name)
    heading = "{} SGY HEADER: '{:^20}' {}".format('>' * 10, name, '<' * 10)
    print(heading)
    print('df columns: ', np.sort(df.columns.tolist()))
    print('>>> {:15} {:.0f}'.format('df len:', len(df)))
    if 'SRCID' in df.columns:
        print('>>> {:15} {:.0f}'.format('No of Shots:', df['SRCID'].nunique()))
    if 'GRPID' in df.columns:
        print('>>> {:15} {:.0f}'.format('No of Groups:', df['GRPID'].nunique()))
    # Per-coordinate min/max summaries (previously duplicated inline blocks).
    _print_geometry_info(df, ['SRCX', 'SRCY', 'SRCZ'], 'Source Geometry Info')
    _print_geometry_info(df, ['GRPX', 'GRPY', 'GRPZ'], 'Group Geometry Info')
    print('>' * 55)
def read_sgy_traces(filename, idx, verbose=True, ignore_geometry=True):
    """
    Reading set of traces by its ID from '.*sgy' file. Reading by 'segyio'.
    :param filename: str path to sgy file
    :param idx: 1D list or array of traces ID
    :param verbose: show reading progress bar True/False
    :param ignore_geometry: ignore geometry checking of 'sgy' True/False
    :return: 2D numpy array of traces (nr - num. of traces, ns - num. of samples)
    """
    # Wrap the index iterable with a progress bar when requested.
    iterator = tqdm(idx) if verbose else idx
    with segyio.open(filename, ignore_geometry=ignore_geometry) as src:
        traces = [src.trace[i] for i in iterator]
    return np.array(traces, ndmin=2, dtype=np.float32)
def read_segy_file_obspy(filename):
    """
    Read segy with obspy
    :param filename: path to the segy file
    :return: 2D float32 numpy array (one row per trace)
    """
    stream = _read_segy(filename)
    traces = [trace.data for trace in stream]
    return np.array(traces, ndmin=2, dtype=np.float32)
def read_header_segyio(filename, fields=None, ignore_geometry=True, converter=SEGYIO_HEADER_ITEMS, verbose=False):
    """
    Reading header of 'sgy' with 'segyio'.
    :param filename: str path to sgy file
    :param ignore_geometry: ignore geometry checking of 'sgy' True/False
    :param fields: list of 'sgy' headers to use. Defaults to the keys of
        ``converter`` (the segyio TraceField names).
    :param converter: mapping of segyio field name -> output column name
    :param verbose: unused, kept for interface compatibility
    :return: pandas DataFrame with one row per trace and an 'IDX' column
    """
    if not fields:
        # Derive the default field list from the converter so a custom
        # converter works without also passing `fields`.
        fields = list(converter.keys())
    head = {}
    with segyio.open(filename, ignore_geometry=ignore_geometry) as segyfile:
        for h in fields:
            column = converter[h]
            # getattr() does the same attribute lookup as the previous
            # eval('segyio.TraceField.{}') without executing arbitrary code.
            head[column] = segyfile.attributes(getattr(segyio.TraceField, h))[:]
    df = pd.DataFrame(head)
    df['IDX'] = df.index
    return df
def read_header_segyio_full(filename, ignore_geometry=True, drop_nonunique=True):
    """
    Read all fields of segy file by segyio to DataFrame
    :param filename: path to segy file
    :param ignore_geometry: ignore geometry checking of 'sgy' True/False
    :param drop_nonunique: if True, drop columns that hold a single constant
        value across all traces (previously this parameter was ignored and
        the filtering always happened)
    :return: pandas DataFrame, one row per trace, columns renamed via
        SEGYIO_HEADER_ITEMS
    """
    with segyio.open(filename, ignore_geometry=ignore_geometry) as segyfile:
        columns = [str(x) for x in segyfile.header[0].keys()]
        values = [dict(x.items()).values() for x in segyfile.header]
    header = pd.DataFrame(values, columns=columns)
    header['IDX'] = header.index
    if drop_nonunique:
        # Column selection via .loc keeps dtypes intact (the previous
        # .T[mask].T round-trip upcast everything to object).
        header = header.loc[:, header.nunique() > 1]
    header = header.rename(columns=SEGYIO_HEADER_ITEMS)
    return header
def read_seisee_header_info(filename):
"""
Read header of 'sgy' header written by Seisee
:param filename:
:return:
"""
header_info = []
i = 0
with open(filename, "r") as f:
while True:
line = f.readline()
if line.startswith("+-"):
break
line = line.replace("*", " ")
line = line.replace("+", " ")
line = line[8:]
line = " ".join(line.split())
header_info.append([i, line])
i += 1
return header_info
def read_header_seisee(filename, fields=None, converter=SEISEE_HEADER_ITEMS, verbose=False):
    """
    Read Seisee header to Pandas DataFrame
    :param filename: path to the Seisee text export
    :param fields: list of Seisee column descriptions to load (default: the
        keys of ``converter``)
    :param converter: mapping of Seisee description -> short column name
    :param verbose: unused, kept for interface compatibility
    :return: pandas DataFrame with renamed columns; 'IDX' is made 0-based
    """
    if not fields:
        fields = list(converter.keys())
    # Renamed from `header_info`, which shadowed the module-level function.
    info = read_seisee_header_info(filename)
    use_cols = [x[0] for x in info if x[1] in fields]
    names = [converter[x[1]] for x in info if x[1] in fields]
    skip_rows = len(info) + 1
    df = pd.read_csv(
        filename,
        skiprows=skip_rows,
        sep=r"\s+",  # raw string: "\s" is an invalid escape in a plain literal
        header=None,
        usecols=use_cols,
        names=names,
        dtype=int,
    )
    # Seisee numbers traces from 1; convert IDX to a 0-based index.
    df["IDX"] -= 1
    return df
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.