text stringlengths 38 1.54M |
|---|
import json
import os
import re
import shutil
# MutableSequence was removed from `collections` in Python 3.10; the abc
# module is its home since Python 3.3
from collections.abc import MutableSequence
from functools import total_ordering

from . import utils
from .constants import FORMATS, VALID_EXTENSIONS
from .contents import Contents
from .extract import _extract
from .parse import Parser
from .search import Searcher
from .slice import Filter, Interim
tqdm = utils._get_tqdm()
class Collection(object):
    """
    Model new-style corpus structure, with attributes for each data type

    On init, one attribute is created per entry in FORMATS (e.g. ``conllu``,
    ``txt``); each is a Corpus when that subfolder exists and is non-empty,
    otherwise None.

    todo: we can add parse methods here for example
    """

    def __init__(self, path=None, **data_paths):
        # NOTE(review): **data_paths is accepted but unused here; only
        # Collection.new() consumes it — confirm it can be dropped.
        path = os.path.expanduser(path)
        self.path = os.path.abspath(path)
        # the folder name (trailing slash stripped) is the collection name
        self.name = os.path.basename(os.path.abspath(path).rstrip("/"))
        for form in FORMATS:
            subpath = os.path.join(path, form)
            # only non-empty format folders become Corpus attributes
            if os.path.isdir(subpath) and len(os.listdir(subpath)):
                corpus = Corpus(subpath, in_collection=self)
            else:
                corpus = None
            setattr(self, form, corpus)

    def __repr__(self):
        sup = super().__repr__().rstrip(">")
        return f"{sup} ({self.name})>"

    @classmethod
    def new(cls, path, **data_paths):
        """
        Create a new collection. Provide a path for it, and the data to ingest

        coll = Collection.new("my-data", source="./source-files")

        Each keyword must be a known format name; its value is a directory
        whose contents are copied into the new collection.
        """
        path = os.path.expanduser(path)
        os.makedirs(path)
        for kind, subpath in data_paths.items():
            if kind not in FORMATS:
                err = f"{kind} not recognised. Must be one of: {','.join(FORMATS)}"
                raise ValueError(err)
            format_path = os.path.join(path, kind)
            print(f"Adding {subpath} --> {format_path} ...")
            shutil.copytree(subpath, format_path)
        return cls(path)

    def parse(
        self,
        language="en",
        multiprocess=False,
        constituencies=False,
        speakers=True,
        just_missing=False,
    ):
        """
        Parse the collection's text data, storing the result on self.conllu.

        :param language: language code; a regional variant like ``de_frak``
            is reduced to its base language
        :param just_missing: when True, parse only files lacking output
            instead of refusing to run when parsed data already exists
        :raises ValueError: parsed data exists and just_missing is False
        """
        language = language.split("_", 1)[0]  # de_frak to de
        parsed_path = os.path.join(self.path, "conllu")
        # refuse to clobber existing parsed output unless just_missing is set
        if self.conllu or os.path.isdir(parsed_path):
            if not just_missing:
                msg = f"Parsed data found at {parsed_path}. Move or delete the folder before parsing again, or parse with just_missing==True."
                raise ValueError(msg)
        self.parser = Parser(
            language=language,
            multiprocess=multiprocess,
            constituencies=constituencies,
            speakers=speakers,
            just_missing=just_missing,
        )
        parsed = self.parser.run(self)
        self.conllu = parsed
        return parsed

    def load(self, **kwargs):
        """
        Sensible helper for loading

        Prefers parsed (conllu) data, falling back to plain text.
        """
        if self.conllu:
            return self.conllu.load(**kwargs)
        return self.txt.load(**kwargs)

    def extract(self, language="en", multiprocess=False, coordinates=True, page_numbers=False, output="txt"):
        # delegate extraction to the module-level helper
        return _extract(
            self,
            language=language,
            multiprocess=multiprocess,
            coordinates=coordinates,
            page_numbers=page_numbers,
            output=output
        )
@total_ordering
class Corpus(MutableSequence):
    """
    Model a collection of plain text or CONLL-U files.

    Sequence operations (len, indexing, deletion, insert) are delegated to
    ``self.iterable``: the subcorpora when any exist, otherwise the files.
    """

    def __init__(self, path=None, in_collection=None):
        """
        Initialise the corpus, deteremine if parsed, hook up methods
        """
        path = os.path.expanduser(path)
        self.format = os.path.basename(path)
        # this is a temporary measure while corpora are being restructured.
        # self.format should eventually be one of a finite set of formats...
        if self.format not in FORMATS:
            if path.endswith("-parsed"):
                self.format = "conllu"
            else:
                self.format = "txt"
        self.in_collection = in_collection
        if not os.path.isdir(path):
            raise NotADirectoryError(f"Not a valid path: {path}")
        self.path = path
        # NOTE(review): the name comes from the PARENT directory (the
        # collection folder), not this folder — confirm this is intended.
        self.name = os.path.basename(os.path.dirname(path))
        self.is_parsed = os.path.basename(path) in {"conllu", "feather"} or path.endswith("-parsed")
        self.subcorpora, self.files = self._get_subcorpora_and_files()
        self.filepaths = Contents(
            [i.path for i in self.files], is_parsed=self.is_parsed, name=self.name
        )
        self.nlp = None
        # the sequence protocol operates on subcorpora when present, else files
        self.iterable = self.subcorpora if self.subcorpora else self.files

    def __len__(self):
        return len(self.iterable)

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from this + __eq__
        if not isinstance(other, self.__class__):
            raise TypeError(f"Not same class: {self.__class__} vs {other.__class__}")
        return self.name < other.name

    def __eq__(self, other):
        # NOTE(review): raising (rather than returning NotImplemented) makes
        # comparison against unrelated types an error instead of False
        if not isinstance(other, self.__class__):
            raise TypeError(f"Not same class: {self.__class__} vs {other.__class__}")
        return self.path == other.path

    def __repr__(self):
        sup = super().__repr__().rstrip(">")
        # fall back to the file extension when format was never set
        form = getattr(self, "format", os.path.splitext(self.path)[-1])
        return f"{sup} ({self.path}, {form})>"

    def __getitem__(self, i):
        """
        Customise what indexing/loopup does for Corpus objects
        """
        return self.iterable[i]

    def __delitem__(self, i):
        del self.iterable[i]

    def __setitem__(self, i, v):
        self.iterable[i] = v

    def insert(self, i, v):
        self.iterable.insert(i, v)

    def tgrep(self, query, **kwargs):
        """
        Search constituency parses using tgrep
        """
        return Searcher().run(self, "t", query, **kwargs)

    def table(self, show=["w"], subcorpora=["file"], **kwargs):
        """
        Generate a frequency table from the whole corpus

        Only the columns needed for the requested rows/columns are loaded,
        unless the caller overrides ``usecols``.
        """
        # NOTE(review): mutable default args are shared between calls; safe
        # here only while they are never mutated in place.
        if isinstance(show, str):
            show = [show]
        if isinstance(subcorpora, str):
            subcorpora = [subcorpora]
        needed = show + subcorpora
        usecols = kwargs.pop("usecols", needed)
        loaded = self.load(usecols=usecols)
        return loaded.table(show=show, subcorpora=subcorpora, **kwargs)

    def depgrep(self, query, **kwargs):
        """
        Search dependencies using depgrep
        """
        return Searcher().run(self, "d", query, **kwargs)

    def parse(self, language="en", multiprocess=False, constituencies=False, speakers=True):
        """
        Parse a plaintext corpus
        """
        # local import — presumably avoids a circular dependency; confirm
        from buzz.file import File
        language = language.split("_", 1)[0]  # de_frak to de
        files = []
        if isinstance(self, File):
            # single file: parsed output goes beside the txt folder
            parsed_path = self.path.split("/txt/", 1)[0] + "/conllu"
            files.append(self)
        else:
            parsed_path = os.path.join(os.path.dirname(self.path), "conllu")
        # never silently overwrite existing parsed data
        if os.path.isdir(parsed_path):
            msg = f"Parsed data found at {parsed_path}. Move or delete the folder before parsing again."
            raise ValueError(msg)
        self.parser = Parser(
            language=language,
            multiprocess=multiprocess,
            constituencies=constituencies,
            speakers=speakers,
        )
        return self.parser.run(self, files=files)

    def load(self, **kwargs):
        """
        Load a Corpus into memory

        If it's parsed, a Dataset is returned.
        If unparsed, return a dict mapping paths to strings (the file content)

        Multiprocessing is a bit complex here. You can pass in a keyword arg,
        `multiprocess`, which can be True (use your machine's number of cores),
        an integer (use that many processes), or false/None/0/1, which mean
        just one process.

        Multiprocess is not specified in the call signature, because the default
        should change based on whether or not your corpus is parsed. For parsed
        corpora, multiprocessing is switched on by default. For unparsed, it is
        switched off. This is for performance in both cases --- your unparsed
        corpus needs to be pretty huge to be loaded quicker via multiprocess.
        """
        # feather corpora are stored as a single file
        if self.format == "feather":
            return self.files[0].load()
        return utils._load_corpus(self, **kwargs)

    @property
    def vector(self):
        """
        Grab the spacy vector for this document
        """
        spac = self.to_spacy(concat=True)
        return spac.vector

    def to_spacy(self, language="en", concat=False):
        """
        Get spacy's model of the Corpus

        If concat is True, model corpus as one spacy Document, rather than a list
        """
        if concat:
            file_datas = [f.read() for f in self.files]
            # for parsed corpora, we have to pull out the "# text = " lines...
            if self.is_parsed:
                out = list()
                for data in file_datas:
                    out.append(utils._get_texts(data))
                file_datas = out
            # TODO: constituencies?
            self.nlp = utils._get_nlp(language=language)
            return self.nlp(" ".join(file_datas))
        models = list()
        for file in self.files:
            models.append(file.to_spacy(language=language))
        return models

    def _get_subcorpora_and_files(self):
        """
        Helper to set subcorpora and files
        """
        # local import — presumably avoids a circular dependency; confirm
        from .file import File
        info = dict(is_parsed=self.is_parsed, name=self.name)
        subcorpora = list()
        files = list()
        fullpaths = list()
        for root, dirnames, filenames in os.walk(self.path):
            for directory in sorted(dirnames):
                # skip hidden directories
                if directory.startswith("."):
                    continue
                directory = os.path.join(root, directory)
                directory = Subcorpus(directory)
                subcorpora.append(directory)
            for filename in filenames:
                # keep only extensions valid for this corpus format
                allowed = VALID_EXTENSIONS[self.format]
                if allowed and not filename.endswith(tuple(allowed)):
                    continue
                if filename.startswith("."):
                    continue
                fpath = os.path.join(root, filename)
                fullpaths.append(fpath)
        # File objects are built after the walk, in sorted path order
        for path in sorted(fullpaths):
            fpath = File(path)
            files.append(fpath)
        subcorpora = Contents(subcorpora, **info)
        files = Contents(files, **info)
        return subcorpora, files

    @property
    def just(self):
        """
        Allow corpus.just.word.the without loading everything into memory
        """
        return SliceHelper(self)

    @property
    def skip(self):
        """
        Allow corpus.skip.word.the without loading everything into memory
        """
        return SliceHelper(self, inverse=True)

    @property
    def see(self):
        """
        Allow corpus.see.word.by.speaker
        """
        return SliceHelper(self, inverse=True, see=True)
class Subcorpus(Corpus):
    """
    A Corpus nested inside another Corpus — identical behaviour, distinct
    type, so callers can tell subcorpora apart via isinstance.
    """

    def __init__(self, path, **kwargs):
        # delegate everything to Corpus
        super().__init__(path, **kwargs)
class SliceHelper(object):
    """
    This connects corpus.py and slice.py, so that Corpus and Dataset work
    the same way: attribute access (or a call with a column list) produces
    a Filter, or an Interim when ``see`` is requested.
    """

    def __init__(self, corpus, inverse=False, see=False):
        self._corpus = corpus
        self.inverse = inverse
        self.see = see

    def __getattr__(self, attr):
        maker = Interim if self.see else Filter
        return maker(self._corpus, attr, inverse=self.inverse)

    def __call__(self, column, *args, **kwargs):
        column = utils._ensure_list_of_short_names(column)
        # same dispatch as __getattr__; a list cannot travel through getattr
        maker = Interim if self.see else Filter
        return maker(self._corpus, column, inverse=self.inverse)
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LlcPPP(Base):
    """Auto-generated ixnetwork-restpy wrapper for the llcPPP header.

    Each SDM attribute is exposed as a read-only Multivalue property.
    """
    __slots__ = ()
    _SDM_NAME = "llcPPP"
    # maps Python property names to SDM attribute paths
    _SDM_ATT_MAP = {
        "LlcPPPHheaderLlcHeader": "llcPPP.llcPPPHheader.llcHeader-1",
        "LlcPPPHheaderNlpid": "llcPPP.llcPPPHheader.nlpid-2",
        "LlcPPPHheaderPid": "llcPPP.llcPPPHheader.pid-3",
    }

    def __init__(self, parent, list_op=False):
        super(LlcPPP, self).__init__(parent, list_op)

    @property
    def LlcPPPHheaderLlcHeader(self):
        """
        Display Name: Logical Link Control(LLC) Header
        Default Value: 0xfefe03
        Value Format: hex
        """
        # local import keeps module load light
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(
            self, self._get_attribute(self._SDM_ATT_MAP["LlcPPPHheaderLlcHeader"])
        )

    @property
    def LlcPPPHheaderNlpid(self):
        """
        Display Name: NLPID
        Default Value: 0xCF
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(
            self, self._get_attribute(self._SDM_ATT_MAP["LlcPPPHheaderNlpid"])
        )

    @property
    def LlcPPPHheaderPid(self):
        """
        Display Name: Protocol ID (PID)
        Default Value: 0x0021
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(
            self, self._get_attribute(self._SDM_ATT_MAP["LlcPPPHheaderPid"])
        )

    def add(self):
        # create the item on the server from the current local attribute values
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
import logging
import os
import numpy as np
import xrloc.map.covisible as covisible
# from solver import prior_guided_pnp
from xrprimer.data_structure import VectorDouble
from xrprimer.ops import prior_guided_pnp
from xrloc.features.extractor import Extractor
from xrloc.map.reconstruction import Reconstruction
from xrloc.matchers.matcher import Matcher
from xrloc.retrieval.image_database import ImageDatabase
from xrloc.retrieval.pairs_database import PairsDatabase
from xrloc.utils.miscs import head_logging, config_logging
class Localizer(object):
    """Hierarchical Localization

    Retrieval (global features or precomputed pairs) narrows the map to
    candidate scenes; 2D-3D or 2D-2D matching plus prior-guided PnP then
    estimates the camera pose.

    Args:
        config (dict): Configuration
    """
    default_config = {
        'mode': '2D3D',
        'local_feature': 'd2net',
        'global_feature': 'netvlad',
        'matcher': 'gam',
        'coarse': 'cluster',
        'retrieval_num': 20,
        'scene_size': 20,
        'max_inlier': 100,
        'max_scene_num': 2
    }

    def __init__(self, map_path, config=default_config):
        # NOTE(review): the default config dict is shared across instances
        # (mutable default argument); safe only while it is never mutated.
        head_logging('XRLocalization')
        self.config = config
        database_path = os.path.join(map_path, 'database.bin')
        if os.path.exists(database_path):
            self.database = ImageDatabase(database_path)
            self.database.create()
        else:
            # no image database: fall back to precomputed query pairs
            pairs = [name for name in os.listdir(map_path)
                     if name.startswith('pairs-query')]
            if len(pairs) == 0:
                raise ValueError(
                    'Not found database under map: {}'.format(map_path))
            else:
                self.pairs = PairsDatabase(os.path.join(map_path, pairs[0]))
        self.reconstruction = Reconstruction(map_path)
        # a global-feature extractor is only needed when retrieving from the db
        if hasattr(self, 'database'):
            self.gextractor = Extractor(self.config['global_feature'])
        self.lextractor = Extractor(self.config['local_feature'])
        self.matcher = Matcher(self.config['matcher'])
        config_logging(self.config)
        if self.config['mode'] == '2D2D' and self.config['matcher'] == 'gam':
            raise ValueError('Loc mode {} is not compatible with matcher {}'.format(
                self.config['mode'], self.config['matcher']))
        head_logging('Init Success')

    def coarse_localize(self, image_ids):
        """Coarse localization phase.

        Groups the retrieved images into candidate scenes according to
        config['coarse']: covisibility clustering, scene retrieval, or none.
        """
        if self.config['coarse'] == 'cluster':
            scenes = covisible.covisible_clustering(image_ids,
                                                    self.reconstruction)
        elif self.config['coarse'] == 'sr':
            scenes = covisible.scene_retrieval(image_ids, self.reconstruction,
                                               self.config['scene_size'])
        elif self.config['coarse'] == 'none':
            # treat the whole retrieval result as one scene
            scenes = [image_ids]
        else:
            raise ValueError('Not support coarse loc: {}'.format(
                self.config['coarse']))
        return scenes

    def geo_localize(self, data):
        """Perform geo localization

        Args:
            data (np.array or str): image data or image name
        Returns:
            list(int): image ids
        """
        if hasattr(self, 'database'):
            image_feature = self.gextractor.extract(data)
            image_ids = self.database.retrieve(image_feature,
                                               self.config['retrieval_num'])
            return image_ids
        elif hasattr(self, 'pairs') and isinstance(data, str):
            # pair-file mode: look up precomputed retrievals by image name;
            # -1 marks names missing from the reconstruction
            image_names = self.pairs.image_retrieve(data, self.config['retrieval_num'])
            image_ids = [self.reconstruction.name_to_id(name) for name in image_names]
            return np.array([id for id in image_ids if id != -1])
        # NOTE(review): falls through to an implicit None when neither branch
        # applies (e.g. pairs mode with array input) — confirm callers handle it

    def feature_match_2d3d(self, query_points, query_point_descriptors,
                           train_points, train_point_descriptors, width, height):
        """Feature matching phase."""
        query_feat = {
            'shape': np.array([height, width]),
            'points': query_points,
            'descs': query_point_descriptors,
        }
        train_feat = {
            'points': train_points,
            'descs': train_point_descriptors,
        }
        pred = self.matcher.match(query_feat, train_feat)
        return pred['matches'], pred['scores']

    def establish_correspondences_2d2d(self, query_feat, image_ids):
        '''Establish 2D-3D correspondences depend on 2D2D matching

        Matches the query against each reference image in turn, keeping for
        every query keypoint the highest-scoring 3D point seen so far.
        '''
        logging.info('Scene size: {0}'.format(len(image_ids)))
        # -1 marks query keypoints without a 3D correspondence yet
        match_indices = np.ones(len(query_feat['points']), dtype=int)*-1
        match_priors = np.zeros(len(query_feat['points']))
        for image_id in image_ids:
            ref_image = self.reconstruction.image_at(image_id)
            ref_feat = {
                'points': ref_image.xys,
                'descs': self.reconstruction.point3d_features(ref_image.point3D_ids),
                'scores': np.ones(len(ref_image.xys)),
                'shape': np.array([600, 600])  # TODO
            }
            pred = self.matcher.match(query_feat, ref_feat)
            matches, scores = pred['matches'], pred['scores']
            # keep matches that beat the best score recorded for that keypoint
            # (assumes matches is a 2xN array: row 0 query idx, row 1 ref idx)
            reserve_matches = matches[:, scores > match_priors[matches[0]]]
            reserve_scores = scores[scores > match_priors[matches[0]]]
            if len(reserve_scores) > 0:
                match_indices[reserve_matches[0]] = ref_image.point3D_ids[reserve_matches[1]]
                match_priors[reserve_matches[0]] = reserve_scores
            # enough correspondences for a stable pose — stop early
            if len(match_priors[match_indices != -1]) > 400:
                break
        point3d_ids = match_indices[match_indices != -1]
        points3d = self.reconstruction.point3d_coordinates(
            point3d_ids)
        points2d = query_feat['points'][match_indices != -1]
        priors = match_priors[match_indices != -1]
        logging.info('Match number: {0}'.format(len(priors)))
        return points2d, points3d, priors

    def establish_correspondences_2d3d(self, feat, image_ids):
        '''Establish 2D-3D correspondences depend on 2D3D matching
        '''
        # gather every 3D point visible from the scene's images
        point3d_ids = self.reconstruction.visible_points(image_ids)
        point3ds = self.reconstruction.point3d_coordinates(
            point3d_ids)
        point3d_descs = self.reconstruction.point3d_features(
            point3d_ids)
        logging.info('3d points size: {0}'.format(
            point3d_descs.shape[1]))
        # Matching
        matches, priors = self.feature_match_2d3d(feat['points'],
                                                  feat['descs'],
                                                  point3ds,
                                                  point3d_descs,
                                                  feat['shape'][1],  # width
                                                  feat['shape'][0])
        logging.info('Match number: {0}'.format(matches.shape[1]))
        points2d = feat['points'][matches[0]]
        points3d = point3ds[matches[1]]
        return points2d, points3d, priors

    def prior_guided_pose_estimation(self, point2Ds, point3Ds, priors, camera):
        """Pose estimation phase."""
        # PnP expects float32, column-major point layout
        point2Ds = point2Ds.transpose().astype('float32').copy()
        point3Ds = point3Ds.transpose().astype('float32').copy()
        priors = priors.astype('float32').copy()
        params = VectorDouble(camera[3])
        camera_config = {'model_name': camera[0], 'params': params}
        ransac_config = {
            'error_thres': 12,
            'inlier_ratio': 0.01,
            'confidence': 0.9999,
            'max_iter': 10000,
            'local_optimal': True
        }
        return prior_guided_pnp(point2Ds, point3Ds, priors, camera_config,
                                ransac_config)

    def refine_localize(self, image, camera, ref_image_ids):
        """Perform localization

        Args:
            image (np.array): RGB & WHC
            camera (tuple): (model, width, height, params)
            ref_image_ids: candidate map image ids (e.g. from geo_localize)
        Returns:
            dict: best result, keys ninlier/qvec/tvec/mask
        """
        feat = self.lextractor.extract(image)
        logging.info('Local feature number: {0}'.format(
            feat['points'].shape[0]))
        scenes = self.coarse_localize(ref_image_ids)
        logging.info('Coarse location number: {0}'.format(len(scenes)))
        # identity rotation / zero translation fallback when nothing matches
        best_ret = {
            'ninlier': 0, 'qvec': np.array([1, 0, 0, 0]),
            'tvec': np.array([0, 0, 0]), 'mask': None
        }
        for i, image_ids in enumerate(scenes[:self.config['max_scene_num']]):
            # Establish 2D-3D correspondences
            if self.config['mode'] == '2D3D':
                points2d, points3d, priors = self.establish_correspondences_2d3d(
                    feat, image_ids)
            elif self.config['mode'] == '2D2D':
                points2d, points3d, priors = self.establish_correspondences_2d2d(
                    feat, image_ids)
            # PnP needs at least three correspondences
            if len(priors) < 3: continue
            # Pose estimation
            ret = self.prior_guided_pose_estimation(points2d, points3d, priors,
                                                    camera)
            logging.info('Inlier number: {0}'.format(ret['ninlier']))
            # Return
            if ret['ninlier'] > best_ret['ninlier']:
                best_ret = ret
            # early exit once enough inliers are found
            if best_ret['ninlier'] > self.config['max_inlier']:
                break
        return best_ret
"""Determine whether a number is prime."""


def es_primo(x):
    """Return True when x is prime, False otherwise.

    The original counted every divisor in range(1, x) — O(n) work.  Trial
    division only needs to test candidates up to sqrt(x), because any
    divisor pair (d, x // d) has one member no larger than sqrt(x).
    """
    # numbers <= 1 are not prime by definition
    if x <= 1:
        return False
    i = 2
    while i * i <= x:
        if x % i == 0:
            # a divisor other than 1 and x itself: not prime
            return False
        i += 1
    return True
# the number itself is not counted as a divisor here; 4 is composite, so
# this prints False
print(es_primo(4))
|
import cPickle as pickle
def read_pickle(file_path, type):
    """
    Deserialize and return the object pickled in ``file_path``.

    :param file_path: path of the pickle file
    :param type: file-open mode (normally "rb"); the name shadows the
        builtin, but is kept for backward compatibility with keyword callers
    :return: the unpickled object
    """
    # ``with`` closes the file on exit; the old explicit f.close() inside
    # the with-block was redundant and has been removed
    with open(file_path, type) as f:
        return pickle.load(f)
#!/usr/bin/env python3
"""
Input your IISER email in format name-rollno@iiserkol.ac.in, extract the name,
roll no using split() and print them.
"""
def parse_email(email):
    """Split ``name-rollno@domain`` into (name, rollno).

    rsplit is used so names that themselves contain hyphens
    (e.g. "jean-paul-12345@...") still parse correctly.

    :raises ValueError: when the address does not match the expected format
    """
    name_roll, domain = email.split("@")
    name, rollno = name_roll.rsplit("-", 1)
    return name, rollno


# the __main__ guard keeps the prompt from firing on import
if __name__ == "__main__":
    email = input("Enter your email in the format name-rollno@iiserkol.ac.in : ")
    try:
        name, rollno = parse_email(email)
        print(f"Your name is {name}, your roll number is {rollno}.")
    except ValueError:
        print("Invalid format!")
|
from validate import validateOn, model
from algorithm.Methods import methods
from algorithm.decomposition import nameAssemble
# Run every validation combination: four n-gram settings crossed with each
# decomposition strategy and each method.
for gram in ('gram-1', 'gram-2','gram-3','gram-mix'):
    for decomp in nameAssemble:
        for method_name in methods:
            # presumably trains/evaluates one configuration; confirm it
            # carries no shared state between repeated calls
            validateOn(method_name, decomp, gram)
|
import os
import sys
import cv2
import panorama
import utils
if __name__ == '__main__':
    # Usage: script.py <image_dir> <focal_length> [scale]
    ROOT_DIR = sys.argv[1]
    focal_len = float(sys.argv[2])
    # optional third argument downscales the input images
    scale = 1 if len(sys.argv) <= 3 else float(sys.argv[3])
    pano = panorama.stitch_panorama(utils.load_series(ROOT_DIR, scale), focal_len)
    utils.show_image(pano)
    # the output file name is derived from the input directory's basename
    cv2.imwrite('%s_panorama.jpg' % os.path.basename(ROOT_DIR), pano)
#!/usr/bin/python
# hmmlearn.py HMM model data
# Usage:
# hmmlearn.py input_filename1
# This program reads the input file, tokenizes each input line, calculate transition and emission probability
# and write the model parameters into the file
import sys
from collections import Counter, defaultdict
from decimal import *
import pickle
import time
p_start_time=time.time()  # wall-clock start, reported at program exit
def read_file(filename):
    """
    Read the text file with the given filename and return a list of lines
    :param(string) filename:
    :return: list of strings, one per line, surrounding whitespace stripped
    """
    try:
        # ``with`` guarantees the handle is closed; the previous
        # open()/readlines() version leaked it until interpreter exit
        with open(filename, 'r', encoding="utf-8") as fp:
            return [line.strip() for line in fp]
    except IOError:
        print('Error opening or reading the file '+filename)
        sys.exit()
def get_vocab_tags(linestrings):
    """
    Collect vocabulary, tag inventory, and count statistics from tagged text.

    Each line is space-separated "token/TG" items: the token is everything
    except the last three characters, the tag the last two, uppercased —
    assumes every tag is exactly two characters (confirm against the data).

    :param linestrings(list of strings):
    :return: (vocab_set, tag_set, tag_frequency, word_tag_frequency,
              tag_out_frequency, tag_tag_frequency)
    """
    vocab_set = set()
    tag_set = set()
    tag_frequency = Counter()  # Dictionary
    word_tag_frequency = defaultdict(Counter)  # Dictionary of Dictionary
    tag_list=[]
    tag_out_frequency = Counter()  # Dictionary
    tag_tag_frequency = defaultdict(Counter)  # Dictionary of Dictionary
    for linestring in linestrings:
        words = linestring.split(' ')
        tag_line=[]
        for word in words:
            token = word[:-3]  # strip the trailing "/TG"
            tag = word[-2:].upper()
            vocab_set.add(token)
            tag_set.add(tag)
            tag_frequency[tag] += 1
            word_tag_frequency[tag][token] += 1
            tag_line.append(tag)
        tag_list.append(tag_line)
    for tag_line in tag_list:
        # NOTE(review): for single-tag lines this range is empty, so neither
        # the START transition nor any out-count is recorded — confirm intended
        for index in range(0,len(tag_line)-1):
            cur_tag = tag_line[index]  # Fetch the current tag
            next_tag = tag_line[index + 1]  # Fetch the next tag
            if (index == 0):  # Find the transition frequency from the start state
                tag_out_frequency['START'] += 1
                tag_tag_frequency['START'][cur_tag] += 1
            # Transition out frequency of a tag
            tag_out_frequency[cur_tag] += 1
            # Transition frequency from previous tag to current tag
            tag_tag_frequency[cur_tag][next_tag] += 1
    return (vocab_set, tag_set, tag_frequency, word_tag_frequency, tag_out_frequency, tag_tag_frequency)
def find_emission_probability(tag_frequency, word_tag_frequency):
    """
    Build log10 emission probabilities P(word | tag).

    :param tag_frequency(dictionary): Count of each tag
    :param word_tag_frequency(dictionary): per-tag counts of each word
    :return: dict mapping tag -> {word -> log10(count(word,tag)/count(tag))}
    """
    emission_probability = {}
    for tag in tag_frequency:
        tag_total = Decimal(tag_frequency[tag])
        emission_probability[tag] = {
            word: Decimal.log10(Decimal(count) / tag_total)
            for word, count in word_tag_frequency[tag].items()
        }
    return emission_probability
def find_transition_probability(tag_out_frequency, tag_tag_frequency,tags_set):
    """
    Build add-one-smoothed log10 transition probabilities P(tag2 | tag1).

    :param tag_out_frequency(dictionary): Transition out frequency of each tag
    :param tag_tag_frequency(dictionary): Transition frequency from tag to tag
    :return: dict mapping tag1 -> {tag2 -> smoothed log10 probability}
    """
    vocab_size = len(tags_set)
    transition_probability = {}
    for prev_tag in tags_set:
        out_count = Decimal(tag_out_frequency[prev_tag])
        row = {}
        for next_tag in tags_set:
            pair_count = Decimal(tag_tag_frequency[prev_tag][next_tag])
            # Laplace smoothing keeps unseen transitions above zero
            row[next_tag] = Decimal.log10((pair_count + 1) / (out_count + vocab_size))
        transition_probability[prev_tag] = row
    return transition_probability
def write_model(tag_set, emission_probability, transition_probability,vocab):
    """
    Writes the model into a text file using pickle
    :param tag_set: set of tags
    :param emission_probability(dictionary): Emission probability
    :param transition_probability(dictionary): Transition Probability
    :param vocab: vocabulary set
    :return:
    """
    # NOTE(review): main() calls write_model(vocab, tags, em, tp), which does
    # not match this parameter order — the pickled list order is therefore
    # [vocab, tags, emission, transition]. The decoder must agree with
    # whatever is written; verify before reordering either side.
    pickle_data=[tag_set,emission_probability,transition_probability,vocab]
    # ``with`` flushes and closes the file (the old open() leaked the handle)
    with open('hmmmodel.txt', 'wb') as fp:
        pickle.dump(pickle_data, fp)
def main():
    """Train the HMM from the file named on the command line and pickle it."""
    lines = read_file(sys.argv[1])
    (vocab,tags,tag_frequency, word_tag_frequency,tag_out_frequency, tag_tag_frequency)=get_vocab_tags(lines)
    emission_probability = find_emission_probability(tag_frequency, word_tag_frequency)
    # START is temporarily treated as a tag so transitions out of the start
    # state are smoothed alongside ordinary tag-to-tag transitions
    tags.add('START')
    transition_probability = find_transition_probability(tag_out_frequency, tag_tag_frequency, tags)
    tags.remove('START')
    # NOTE(review): argument order (vocab, tags, em, tp) does not match
    # write_model's signature (tag_set, em, tp, vocab); the decoder presumably
    # compensates — verify before changing either side.
    write_model(vocab, tags, emission_probability, transition_probability)


if __name__ == '__main__':
    main()
    # report total wall-clock runtime
    print("\n\n--- %s seconds for entire program ---" % (time.time() - p_start_time))
# This is a comment!
# Comment lines are not executed.
# For this tutorial, python3 (not python2) is used.
# For clarity, the E501 "line too long" warning is ignored (for advanced readers).
# Each line in a Python file is executed one after the other.
# To run a file "fichier.py", in the console:
#     python3 fichier.py
# To stop a running program, press Ctrl+C
# (the original said Ctrl+z, which only suspends the process on Unix)
print("Hello world!")
# The print() function displays text in the console.
# We will see more of it later.
# Try running this file!
# e.g.: python3 00-intro.py
|
from pyrosetta import *
import os
import sys
import argparse
from movemap import MOVEMAP
def exists(tag, dir):
    """
    Return True when a ".movemap" file for ``tag`` already exists in ``dir``.

    Callers pass ``tag`` as "<dir>/<stem>", so only its basename is compared
    against the movemap file stems.
    """
    # Bug fix: the old code compared the full ``tag`` (including the
    # directory prefix) against the bare filename stem, which never matched,
    # so existing movemaps were always regenerated.
    target = os.path.basename(tag)
    for filename in os.listdir(dir):
        if ".movemap" in filename:
            if target == filename.split(".movemap")[0]:
                return True
    return False
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='generate movemap')
    parser.add_argument("-pdb", help="provide pdb name", default=None)
    parser.add_argument("-dir", help="provide directory containing pdbs", default=None)
    parser.add_argument("-pattern", help="provide a pattern", default="low")
    parser.add_argument("-pep_start_index", help="provide peptide start index", type=int)
    parser.add_argument("-pep_length", help="provide peptide length", type=int)
    parser.add_argument("-groove_distance", help="provide groove distance", type=float, default=3.5)
    args = parser.parse_args()
    # start PyRosetta before any pose/movemap work
    init()
    # batch mode: scan a directory for pdbs whose names contain the pattern
    if (args.pdb == None and args.dir != None):
        for filename in os.listdir(args.dir):
            if args.pattern in filename and ".movemap" not in filename:
                tag = args.dir+"/"+filename.split(".pdb")[0]
                # skip pdbs that already have a movemap
                if not exists(tag, args.dir):
                    movemap = MOVEMAP(tag+".pdb", args.pep_start_index, args.pep_length, args.groove_distance, tag+".movemap")
                    movemap.apply()
                else:
                    print ("Ignored: ", tag)
    # single-file mode
    elif args.pdb != None:
        tag = args.pdb.split(".pdb")[0]
        movemap = MOVEMAP(tag+".pdb", args.pep_start_index, args.pep_length, args.groove_distance, tag+".movemap")
        movemap.apply()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-22 05:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django: removes the ``users`` field from the
    # ServerStats model in the ``server`` app.

    dependencies = [
        ('server', '0005_auto_20171022_0452'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='serverstats',
            name='users',
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 15:32:30 2017
@author: yansenxu
lesson 4 numpy
"""
#%%
import os
os.getcwd()  # show the current working directory
import numpy as np  # import numpy, aliased as np
np.array  # inspect the function object
c=np.array([[1,2,3,4],[5,6,7,8],[7,8,9,0]])
a=np.array([1,2,3,4])
b=np.array((1,2,3,4))
a.dtype
a.shape
b.shape
c.shape=4,3  # reshape in place to 4 rows x 3 columns
c.shape=2,-1  # -1 lets numpy infer the second dimension (2 rows fixed)
d=a.reshape(2,2)  # d is a 2x2 view sharing memory with a
# Fix: was a[21]=109, an IndexError on a 4-element array; the original
# comment said "set a[1] to 100"
a[1]=100
d  # d reflects the change because it shares a's buffer
np.array([1,2,3,4.4],dtype=float)    # np.float was removed in NumPy 1.24
np.array([1,2,3,4.4],dtype=complex)  # np.complex likewise removed
np.arange(0,1,0.1)  # arithmetic sequence
number=np.linspace(1,5,10)  # 10 evenly spaced points from 1 to 5
number
np.linspace(0,1,10,endpoint=False)  # excludes the endpoint 1
list(np.logspace(0,2,20))
np.empty((2,3),int)  # uninitialised matrix (np.int removed in NumPy 1.24)
np.zeros((4,3))  # zero matrix
#%%
# np.fromfunction builds an array by evaluating a function at each index
def func(i):
    return i%4+1
np.fromfunction(func,(10,))  # first arg: the function; second: output shape
def func2(i,j):
    return (i+1)*(j+1)
np.fromfunction(func2,(9,9))  # a 9x9 multiplication table
#%%
a=np.arange(10)
a[3:5]
a[2:4]=100,101  # slice assignment
a[1:-1:2]  # stride-2 slice
b=a[np.array([1,3,4,5])]  # fancy indexing copies: no memory sharing with a
# NOTE(review): this rebinds b to a plain list, discarding the fancy-indexed
# array above — likely a tutorial typo
b=[[1,2,3,4]]
#%%
# boolean-array indexing
x=np.arange(5,0,-1)
x
x[np.array([True,False,True,True,False])]  # new array of the True positions
x[[True,False,True,True,False]]  # list of booleans; treated as a mask
#%%
# generating boolean masks
x=np.random.rand(10)  # 10 uniform random numbers in [0, 1)
x>0.5  # elementwise comparison yields a boolean array
x[x>0.5]  # keep only the elements above 0.5
#%%
# multidimensional creation and indexing
a=np.arange(0,60,10).reshape(-1,1)+np.arange(0,6)  # broadcast column + row
a[(2,3,4,5),(1,2,3,4)]  # element-wise coordinate indexing
a[3:,[0,2,5]]
#%%
# structured (record) arrays
persontype=np.dtype({'names':['name','age','weight'],'formats':['S32','i','f']})  # field layout
a = np.array([("Zhang",32,75.5),("Wang",24,65.2)],dtype=persontype)
c=a[0]
c['name']='li'  # c is a view into a, so this mutates a[0] as well
b=a['age']  # field view, shares memory with a
b
b[0]=100  # writes through to a
b
#%%
# ufunc behaviour
# Fix: the original read np.linspace(0.2*np.pi, 10), i.e. 50 points between
# 0.628 and 10 — a missing comma; the comment promised "10 points", so
# np.linspace(0, 2*np.pi, 10) was intended.
x=np.linspace(0, 2*np.pi, 10)  # 10 points over one period
y=np.sin(x,x)  # out=x: the result overwrites x in place
id(y)  # same object identity as x
#%%
# speed comparison: pure-python math.sin vs vectorised numpy.sin
import time  # timing utilities
import math  # scalar math
# Fix: time.clock() was removed in Python 3.8 — perf_counter is its
# replacement — and the two loops used different sizes (10M vs 1M), making
# the comparison unfair; both now process 1,000,000 elements.
x=[i*0.001 for i in range(1000000)]
start=time.perf_counter()
for i,t in enumerate(x):
    x[i]=math.sin(t)
print('math.sin',time.perf_counter()-start)
# numpy timing
x = [i * 0.001 for i in range(1000000)]
x = np.array(x)
start = time.perf_counter()
np.sin(x,x)
print('numpy.sin:', time.perf_counter()-start)
#%%
# elementwise arithmetic
a=np.arange(0,4)
b=np.arange(1,5)
np.add(a,b)  # elementwise sum
np.add(a,b,a)  # out=a: the result overwrites a
a
#%%
x=np.arange(5)
np.true_divide(x,4)  # true (float) division
x/4
x//4  # floor division
#%%
a=np.arange(5)
b=np.arange(4,-1,-1)
a==b
a>b
np.logical_or(a==b,a>b)  # a==b or a>b, i.e. a>=b
#%%
def triangle_wave(x,c,c0,hc):
    """One period of a triangle wave.

    Rises linearly from 0 to ``hc`` on [0, c0), falls back to 0 on [c0, c),
    and stays 0 on [c, 1). ``x`` is folded into [0, 1) first.
    """
    x = x - int(x)  # keep x within one period [0, 1)
    if x >= c:
        return 0.0
    if x < c0:
        return x / c0 * hc
    return (c - x) / (c - c0) * hc


# sample the wave over [0, 2]
x = np.linspace(0, 2, 1000)
y1 = np.array([triangle_wave(t, 0.6, 0.4, 1.0) for t in x])
# wrap the scalar function as a ufunc (4 inputs, 1 output)
triangle_ufunc1 = np.frompyfunc(triangle_wave, 4, 1)
y2 = triangle_ufunc1(x, 0.6, 0.4, 1.0)
# bind the shape parameters first, then vectorise (1 input, 1 output)
triangle_ufunc2 = np.frompyfunc(lambda x: triangle_wave(x, 0.6, 0.4, 1.0), 1, 1)
y3 = triangle_ufunc2(x)
#%%
# shape and broadcasting
a=np.arange(0,60,10).reshape(-1,1)  # one column
a.shape
b=np.arange(0,5)  # one row
b.shape
c=a+b  # broadcasting combines the column and the row
c.shape
a.ndim
b.ndim
b.shape=1,5
b
#%%
# ogrid: open mesh grids
x,y=np.ogrid[0:5,0:5]
x
y
x,y=np.ogrid[0:1:4j,0:1:3j]  # an imaginary step means "number of points"
#%%
from mayavi import mlab  # 3-D plotting (third-party; must be installed)
x,y=np.ogrid[-2:2:20j,-2:2:20j]
z=x*np.exp(-x**2-y**2)
pl=mlab.surf(x,y,z,warp_scale="auto")  # 3-D surface plot
mlab.axes(xlabel='x',ylabel='y',zlabel='z')
mlab.outline(pl)
# Fix: ``mlab.show`` without parentheses was a no-op attribute access;
# the call is required to actually open the window.
mlab.show()
#%%
# ufunc methods
np.add.reduce([1,2,3])  # sum of the array
np.add.reduce([[1,2,3],[4,5,6]],axis=1)  # row sums
np.add.reduce([[1,2,3],[4,5,6]],axis=0)  # column sums
np.add.accumulate([1,2,3])  # running sum
np.add.accumulate([[1,2,3,4],[1,2,3,4]],axis=1)
a=np.array([1,2,3,4,5])
result=np.add.reduceat(a,indices=[0,1,0,2,0,3,0])  # segmented reductions
result
#%%
# matrix operations
A=np.array([[1,2,3],[3,4,5],[4,5,6]])
A.T  # transpose
A.T.T
c = np.array([[[ 0, 1, 2], [ 10, 12, 13]],[[100,101,102],[110,112,113]]])
#%%
a=np.random.rand(10,10)
b=np.random.rand(10)
x=np.linalg.solve(a,b)  # solve the linear system a @ x = b
x
#%%
np.mat([[1,2],[3,4]])  # matrix class (deprecated in favour of 2-D arrays)
#%%
# binary file round-trip
import numpy as np
a=np.arange(0,12)
a.shape=3,4
a
import os
os.getcwd()
a.tofile('mydata.bin')  # raw binary dump: no dtype or shape metadata
# Fix: np.float was removed in NumPy 1.24. Reading the integer bytes back
# as float64 demonstrates that tofile stores no dtype information.
b=np.fromfile('mydata.bin',dtype=float)
b
a.dtype
# Fix: the original hard-coded np.int32, which only matches the written
# bytes on platforms whose default integer is 32-bit (e.g. Windows);
# a.dtype restores the data portably.
b=np.fromfile('mydata.bin',dtype=a.dtype)
b
b.shape=3,4
b
#%%
np.save('mydata.npy',a)  # .npy keeps dtype and shape metadata
c=np.load('mydata.npy')
c
#%%
# saving several arrays at once
a = np.array([[1,2,3],[4,5,6]])
b = np.arange(0, 1.0, 0.1)
c = np.sin(b)
np.savez("result.npz", a, b, sin_array = c)  # keyword args become named entries
r=np.load('result.npz')
r['arr_0']  # array a (positional arrays are auto-named arr_0, arr_1, ...)
r['arr_1']  # array b
r['sin_array']  # the keyword-named array c
#%%
# text-file storage
a = np.arange(0,12,0.5).reshape(4,-1)
np.savetxt('mydata.txt',a)
np.loadtxt('mydata.txt')
np.savetxt('data.txt',a,fmt="%d",delimiter=',')  # as integers, comma-separated
np.loadtxt('data.txt')
#%%
# appending several arrays to one open file object
a=np.arange(8)
b=np.add.accumulate(a)
c=a+b
# Fix: ``file()`` is Python 2 only (use open()), and np.safe was a typo
# for np.save; ``with`` also guarantees the handle is closed.
with open("result.npy", "wb") as f:
    np.save(f, a)
    np.save(f, b)
    np.save(f, c)
# read them back in the same order with repeated np.load(f) calls
|
import requests
# http status code of 200 means positive response
# Will get a ConnectionError for invalid domain
def request(url):
    """
    GET http://<url> and return the Response, or None when the request fails.

    Returning None instead of raising lets the scanner treat unresolvable
    hosts as simply "not discovered".
    """
    try:
        # get simulates clicking on a link; the timeout keeps the scan from
        # hanging indefinitely on unresponsive hosts
        return requests.get("http://" + url, timeout=10)
    except requests.exceptions.RequestException:
        # narrowed from bare ``except Exception`` so genuine bugs
        # (TypeError etc.) still surface instead of being swallowed
        return None
target_url = "iitk.ac.in"
# Subdomain discovery: prefix each wordlist entry to the target domain and
# keep the ones that answer over HTTP.
with open("subdomains-wordlist.txt","r") as wordlist_file:
    for line in wordlist_file:
        word = line.strip()  # the words have \n appended in the file
        test_url = word + "." + target_url
        response = request(test_url)  # None when the request failed
        if response:
            print("[+] Discovered subdomain: "+test_url)
# Directory/file discovery: append each entry as a path under the target.
with open("files-and-dirs-wordlist.txt","r") as wordlist_file:
    for line in wordlist_file:
        word = line.strip()  # the words have \n appended in the file
        test_url = target_url + "/" + word
        response = request(test_url)  # None when the request failed
        if response:
            print("[+] Discovered subdirectory: "+test_url)
def count_flips(signs):
    """
    Minimum number of maximal-prefix flips to turn a pancake string
    (e.g. "--+-") into all '+' (Code Jam 2016 "Revenge of the Pancakes").

    Repeatedly flips (reverses and inverts) the leading run of equal
    characters, counting one flip each time, until no '-' remains.
    """
    flips = 0
    stack = list(signs)
    while '-' in stack:
        # measure the leading run of identical characters
        i = 0
        first = stack[0]
        while stack[i] == first:
            i += 1
            if i > len(stack) - 2:
                break
        prefix = stack[:i]
        rest = stack[i + 1:]
        # flip: reverse the prefix and invert each sign
        prefix = prefix[::-1]
        prefix = ['+' if ch == '-' else '-' for ch in prefix]
        stack = prefix + rest
        flips += 1
    return flips


if __name__ == '__main__':
    # Converted from Python 2: input()/print() replace raw_input()/print stmt
    n = int(input())
    ans = ''
    for case in range(1, n + 1):
        line = input()
        ans += 'Case #' + str(case) + ': ' + str(count_flips(line)) + '\n'
    print(ans)
|
# A tuple is a collection which is ordered and unchangeable. Allows duplicate members.
# Create tuple
fruits = ('Apples', 'Oranges', 'Grapes')
# fruits2 = tuple(('Apples', 'Oranges', 'Grapes'))
# A single-value tuple needs a trailing comma
fruits2 = ('Apples',)
# Delete tuple
del fruits2
# print(len(fruits2))  # would now raise NameError: fruits2 was deleted
# fruits[0] = 'Pears'  # would raise TypeError: tuples are immutable
# A set is a collection which is unordered and unindexed. No duplicate members.
# Create set
fruits_set = {'Apples', 'Oranges', 'Mango'}
# Check membership
print('Apples' in fruits_set)
# Add to set
fruits_set.add('Grape')
# Remove from set
# fruits_set.remove("Grape")
# # Clear set
# fruits_set.clear()
# Adding a duplicate is a silent no-op
fruits_set.add('Apples')
print(fruits_set)
|
from utils import fizzbuzz, check_fizzbuzz
def test_assert_true():
    """Sanity check that the test runner itself executes."""
    assert True
def test_string_len():
    """A one-character string has length one."""
    assert len("1") == 1
# TODO: remove this commented-out smoke test during the refactor phase
# def test_can_call_fizzbuzz():
# fizzbuzz(1)
def test_returns_1_with_1_passed():
    """fizzbuzz(1) should produce the string "1"."""
    check_fizzbuzz(1, "1")
def test_returns_2_with_2_passed():
    """fizzbuzz(2) should produce the string "2"."""
    check_fizzbuzz(2, "2")
def test_returns_fizz_with_3_passed():
    """Multiples of 3 (that are not multiples of 5) should give "Fizz"."""
    for number in (3, 6, 9, 12, 18):
        check_fizzbuzz(number, "Fizz")
def test_returns_buzz_with_5_passed():
    """Multiples of 5 (that are not multiples of 3) should give "Buzz"."""
    for number in (5, 25, 20, 35):
        check_fizzbuzz(number, "Buzz")
def test_returns_fizzbuzz_with_multiple3_or_5_passed():
    """Common multiples of 3 and 5 should give "FizzBuzz"."""
    for number in (30, 15, 45, 90):
        check_fizzbuzz(number, "FizzBuzz")
def test_assert():
    """Trailing sanity check mirroring test_assert_true."""
    assert True
|
import numpy as np
import tensorflow as tf
import setting as st
class model_def:
def __init__(self):
self.fm = 64
self.fcnode = 64
# Modules
def init_weight_bias(self, name, shape, filtercnt, trainable):
weights = tf.get_variable(name=name + "w", shape=shape,
initializer=tf.contrib.layers.xavier_initializer_conv2d(),
dtype=tf.float32, trainable=trainable)
biases = tf.Variable(initial_value=tf.constant(0.1, shape=[filtercnt], dtype=tf.float32),
name=name + "b", trainable=trainable)
return weights, biases
def conv_layer(self, data, weight, bias, padding):
conv = tf.nn.conv2d(input=data, filter=weight, strides=[1, 1, 1, 1], padding=padding)
return tf.nn.relu(tf.nn.bias_add(conv, bias))
def batch_norm(self, data):
return tf.nn.batch_normalization(x=data, mean=0, variance=1, offset=None, scale=None, variance_epsilon=0.00000001)
def dropout(self, data, dropout):
return tf.nn.dropout(data, dropout)
def pool_layer(self, data):
return tf.nn.max_pool(value=data, ksize=[1, 1, 4, 1], strides=[1, 1, 4, 1], padding="VALID")
def fc_layer(self, data, weight, bias):
shape = data.get_shape().as_list()
shape = [shape[0], np.prod(shape[1:])]
hidden = tf.nn.bias_add(tf.matmul(tf.reshape(data, shape), weight), bias)
hidden = tf.nn.relu(hidden)
return hidden
def output_layer(self, data, weight, bias, label):
shape = data.get_shape().as_list()
shape = [shape[0], np.prod(shape[1:])]
hidden = tf.nn.bias_add(tf.matmul(tf.reshape(data, shape), weight), bias)
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=hidden, labels=label)), tf.nn.softmax(hidden)
# Models
    def RCNN(self, train, channel_cnt, time_cnt):
        """Build the 3-stage recurrent-convolutional model.

        Each RCL stage is a 1x1 feed-forward conv followed by three 1x9
        recurrent convs whose outputs are residually summed with the stage's
        feed-forward output, each sum passed through the fixed (0,1)
        normalisation; every stage ends with a 1x4 max-pool and dropout.
        A conv spanning all channel_cnt rows then mixes channels, a
        fully-connected layer follows, and the output layer produces the
        mean cross-entropy and softmax.

        Returns: (cross_entropy, soft_max, data_node, label_node, wsc, bsc)
        -- the spatial-conv weight/bias pair is exposed to the caller.
        """
        if train:
            batch_size = st.batch_size
            dr = 0.5  # dropout keep-probability during training
        else:
            # NOTE(review): presumably one sliding window per time step of a
            # 700-sample trial -- confirm against the evaluation code.
            batch_size = 700-time_cnt+1
            dr = 1.0  # dropout disabled at evaluation time
        data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
        label_node = tf.placeholder(tf.int64, shape=batch_size)
        #RCL1
        w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
        conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
        bn1 = self.batch_norm(conv1)
        w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
        sum1a = conv1+conv1a
        bn1a = self.batch_norm(sum1a)
        w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
        sum1b = conv1+conv1b
        bn1b = self.batch_norm(sum1b)
        w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
        sum1c = conv1+conv1c
        bn1c = self.batch_norm(sum1c)
        # A fourth recurrent step was tried and disabled:
        # w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        # conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
        # sum1d = conv1+conv1d
        # bn1d = self.batch_norm(sum1d)
        p1 = self.pool_layer(bn1c)
        d1 = self.dropout(p1, dropout=dr)
        # RCL2
        w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
        bn2 = self.batch_norm(conv2)
        w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
        sum2a = conv2 + conv2a
        bn2a = self.batch_norm(sum2a)
        w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
        sum2b = conv2 + conv2b
        bn2b = self.batch_norm(sum2b)
        w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
        sum2c = conv2 + conv2c
        bn2c = self.batch_norm(sum2c)
        # Fourth recurrent step disabled here as well:
        # w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        # conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
        # sum2d = conv2 + conv2d
        # bn2d = self.batch_norm(sum2d)
        p2 = self.pool_layer(bn2c)
        d2 = self.dropout(p2, dropout=dr)
        #RCL3
        w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
        bn3 = self.batch_norm(conv3)
        w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
        sum3a = conv3+conv3a
        bn3a = self.batch_norm(sum3a)
        w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
        sum3b = conv3+conv3b
        bn3b = self.batch_norm(sum3b)
        w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
        sum3c = conv3+conv3c
        bn3c = self.batch_norm(sum3c)
        p3 = self.pool_layer(bn3c)
        d3 = self.dropout(p3, dropout=dr)
        #Spatial Convolutional layer
        # VALID padding with a channel_cnt-tall kernel collapses the channel
        # dimension to 1 while mixing information across all channels.
        wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
        spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
        bnsc = self.batch_norm(spatialconv)
        # psc = self.pool_layer(bnsc)
        #Fully Connected layer
        # NOTE(review): 8 * self.fm assumes the three pools reduced the time
        # axis to exactly 8 steps -- holds only for matching time_cnt; confirm.
        wfc, bfc = self.init_weight_bias(name="fclayer", shape=[8 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
        fclayer = self.fc_layer(data=bnsc, weight=wfc, bias=bfc)
        bnfclayer = self.batch_norm(fclayer)
        dfclayer = self.dropout(bnfclayer, dropout=dr)
        #Output layer
        wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
        cross_entropy, soft_max = self.output_layer(dfclayer, weight=wo, bias=bo, label=label_node)
        return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def Inception_RCNN(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
else:
batch_size = 700-time_cnt+1
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#Inception_RCL1
rcl1w1a, rcl1b1a = self.init_weight_bias(name="rcl1conv1a", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv1a = self.conv_layer(data=data_node, weight=rcl1w1a, bias=rcl1b1a, padding="SAME")
rcl1w1b, rcl1b1b = self.init_weight_bias(name="rlc1conv1b", shape=[1, 3, 1, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv1b = self.conv_layer(data=data_node, weight=rcl1w1b, bias=rcl1b1b, padding="SAME")
rcl1w1c, rcl1b1c = self.init_weight_bias(name="rcl1conv1c", shape=[1, 5, 1, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv1c = self.conv_layer(data=data_node, weight=rcl1w1c, bias=rcl1b1c, padding="SAME")
# rcl1w1d, rcl1b1d, = self.init_weight_bias(name="rcl1conv1d", shape=[1, 7, 1, self.fm], filtercnt=self.fm, trainable=train)
# rcl1conv1d = self.conv_layer(data=data_node, weight=rcl1w1d, bias=rcl1b1d, padding="SAME")
rcl1sum1 = rcl1conv1a + rcl1conv1b + rcl1conv1c# + rcl1conv1d
rcl1w1, rcl1b1 = self.init_weight_bias(name="rcl1conv1", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)###
rcl1conv1 = self.conv_layer(data=rcl1sum1, weight=rcl1w1, bias=rcl1b1, padding="SAME")####
rcl1w2a, rcl1b2a = self.init_weight_bias(name="rcl1conv2a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv2a = self.conv_layer(data=rcl1conv1, weight=rcl1w2a, bias=rcl1b2a, padding="SAME")
rcl1w2b, rcl1b2b = self.init_weight_bias(name="rcl1conv2b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv2b = self.conv_layer(data=rcl1conv1, weight=rcl1w2b, bias=rcl1b2b, padding="SAME")
rcl1w2c, rcl1b2c = self.init_weight_bias(name="conv2c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv2c = self.conv_layer(data=rcl1conv1, weight=rcl1w2c, bias=rcl1b2c, padding="SAME")
# rcl1w2d, rcl1b2d, = self.init_weight_bias(name="rcl1conv2d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl1conv2d = self.conv_layer(data=rcl1conv1, weight=rcl1w2d, bias=rcl1b2d, padding="SAME")
rcl1sum2 = rcl1conv2a + rcl1conv2b + rcl1conv2c# + rcl1conv2d
rcl1sum2 = rcl1sum1 + rcl1sum2
rcl1w2, rcl1b2 = self.init_weight_bias(name="rcl1conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)#######
rcl1conv2 = self.conv_layer(data=rcl1sum2, weight=rcl1w2, bias=rcl1b2, padding="SAME")########
rcl1w3a, rcl1b3a = self.init_weight_bias(name="rcl1conv3a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv3a = self.conv_layer(data=rcl1conv2, weight=rcl1w3a, bias=rcl1b3a, padding="SAME")
rcl1w3b, rcl1b3b = self.init_weight_bias(name="rcl1conv3b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv3b = self.conv_layer(data=rcl1conv2, weight=rcl1w3b, bias=rcl1b3b, padding="SAME")
rcl1w3c, rcl1b3c = self.init_weight_bias(name="rcl1conv3c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv3c = self.conv_layer(data=rcl1conv2, weight=rcl1w3c, bias=rcl1b3c, padding="SAME")
# rcl1w3d, rcl1b3d, = self.init_weight_bias(name="rcl1conv3d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl1conv3d = self.conv_layer(data=rcl1conv2, weight=rcl1w3d, bias=rcl1b3d, padding="SAME")
rcl1sum3 = rcl1conv3a + rcl1conv3b + rcl1conv3c# + rcl1conv3d
rcl1sum3 = rcl1sum1 + rcl1sum3
rcl1w3, rcl1b3 = self.init_weight_bias(name="rcl1conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl1conv3 = self.conv_layer(data=rcl1sum3, weight=rcl1w3, bias=rcl1b3, padding="SAME") ########
rcl1w4a, rcl1b4a = self.init_weight_bias(name="rcl1conv4a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv4a = self.conv_layer(data=rcl1conv3, weight=rcl1w4a, bias=rcl1b4a, padding="SAME")
rcl1w4b, rcl1b4b = self.init_weight_bias(name="rcl1conv4b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv4b = self.conv_layer(data=rcl1conv3, weight=rcl1w4b, bias=rcl1b4b, padding="SAME")
rcl1w4c, rcl1b4c = self.init_weight_bias(name="rcl1conv4c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl1conv4c = self.conv_layer(data=rcl1conv3, weight=rcl1w4c, bias=rcl1b4c, padding="SAME")
# rcl1w4d, rcl1b4d, = self.init_weight_bias(name="rcl1conv4d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl1conv4d = self.conv_layer(data=rcl1conv3, weight=rcl1w4d, bias=rcl1b4d, padding="SAME")
rcl1sum4 = rcl1conv4a + rcl1conv4b + rcl1conv4c# + rcl1conv4d
rcl1sum4 = rcl1sum1 + rcl1sum4
rcl1w4, rcl1b4 = self.init_weight_bias(name="rcl1conv4", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl1conv4 = self.conv_layer(data=rcl1sum4, weight=rcl1w4, bias=rcl1b4, padding="SAME") ########
rcl1bn1 = self.batch_norm(rcl1conv4)
rcl1p1 = self.pool_layer(rcl1bn1)
rcl1d1 = self.dropout(rcl1p1, dropout=self.dr)
#Inception_RCL2
rcl2w1a, rcl2b1a = self.init_weight_bias(name="rcl2conv1a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv1a = self.conv_layer(data=rcl1d1, weight=rcl2w1a, bias=rcl2b1a, padding="SAME")
rcl2w1b, rcl2b1b = self.init_weight_bias(name="rlc2conv1b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv1b = self.conv_layer(data=rcl1d1, weight=rcl2w1b, bias=rcl2b1b, padding="SAME")
rcl2w1c, rcl2b1c = self.init_weight_bias(name="rcl2conv1c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv1c = self.conv_layer(data=rcl1d1, weight=rcl2w1c, bias=rcl2b1c, padding="SAME")
# rcl2w1d, rcl2b1d, = self.init_weight_bias(name="rcl2conv1d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl2conv1d = self.conv_layer(data=rcl1d1, weight=rcl2w1d, bias=rcl2b1d, padding="SAME")
rcl2sum1 = rcl2conv1a + rcl2conv1b + rcl2conv1c# + rcl2conv1d
rcl2w1, rcl2b1 = self.init_weight_bias(name="rcl2conv1", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl2conv1 = self.conv_layer(data=rcl2sum1, weight=rcl2w1, bias=rcl2b1, padding="SAME") ########
rcl2w2a, rcl2b2a = self.init_weight_bias(name="rcl2conv2a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv2a = self.conv_layer(data=rcl2conv1, weight=rcl2w2a, bias=rcl2b2a, padding="SAME")
rcl2w2b, rcl2b2b = self.init_weight_bias(name="rcl2conv2b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv2b = self.conv_layer(data=rcl2conv1, weight=rcl2w2b, bias=rcl2b2b, padding="SAME")
rcl2w2c, rcl2b2c = self.init_weight_bias(name="rcl2conv2c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv2c = self.conv_layer(data=rcl2conv1, weight=rcl2w2c, bias=rcl2b2c, padding="SAME")
# rcl2w2d, rcl2b2d, = self.init_weight_bias(name="rcl2conv2d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl2conv2d = self.conv_layer(data=rcl2conv1, weight=rcl2w2d, bias=rcl2b2d, padding="SAME")
rcl2sum2 = rcl2conv2a + rcl2conv2b + rcl2conv2c #+ rcl2conv2d
rcl2sum2 = rcl2sum1 + rcl2sum2
rcl2w2, rcl2b2 = self.init_weight_bias(name="rcl2conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl2conv2 = self.conv_layer(data=rcl2sum2, weight=rcl2w2, bias=rcl2b2, padding="SAME") ########
rcl2w3a, rcl2b3a = self.init_weight_bias(name="rcl2conv3a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv3a = self.conv_layer(data=rcl2conv2, weight=rcl2w3a, bias=rcl2b3a, padding="SAME")
rcl2w3b, rcl2b3b = self.init_weight_bias(name="rcl2conv3b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv3b = self.conv_layer(data=rcl2conv2, weight=rcl2w3b, bias=rcl2b3b, padding="SAME")
rcl2w3c, rcl2b3c = self.init_weight_bias(name="rcl2conv3c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv3c = self.conv_layer(data=rcl2conv2, weight=rcl2w3c, bias=rcl2b3c, padding="SAME")
# rcl2w3d, rcl2b3d, = self.init_weight_bias(name="rcl2conv3d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl2conv3d = self.conv_layer(data=rcl2conv2, weight=rcl2w3d, bias=rcl2b3d, padding="SAME")
rcl2sum3 = rcl2conv3a + rcl2conv3b + rcl2conv3c# + rcl2conv3d
rcl2sum3 = rcl2sum1 + rcl2sum3
rcl2w3, rcl2b3 = self.init_weight_bias(name="rcl2conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl2conv3 = self.conv_layer(data=rcl2sum3, weight=rcl2w3, bias=rcl2b3, padding="SAME") ########
rcl2w4a, rcl2b4a = self.init_weight_bias(name="rcl2conv4a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv4a = self.conv_layer(data=rcl2conv3, weight=rcl2w4a, bias=rcl2b4a, padding="SAME")
rcl2w4b, rcl2b4b = self.init_weight_bias(name="rcl2conv4b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv4b = self.conv_layer(data=rcl2conv3, weight=rcl2w4b, bias=rcl2b4b, padding="SAME")
rcl2w4c, rcl2b4c = self.init_weight_bias(name="rcl2conv4c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
rcl2conv4c = self.conv_layer(data=rcl2conv3, weight=rcl2w4c, bias=rcl2b4c, padding="SAME")
# rcl2w4d, rcl2b4d, = self.init_weight_bias(name="rcl2conv4d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# rcl2conv4d = self.conv_layer(data=rcl2conv3, weight=rcl2w4d, bias=rcl2b4d, padding="SAME")
rcl2sum4 = rcl2conv4a + rcl2conv4b + rcl2conv4c# + rcl2conv4d
rcl2sum4 = rcl2sum1 + rcl2sum4
rcl2w4, rcl2b4 = self.init_weight_bias(name="rcl2conv4", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train) #######
rcl2conv4 = self.conv_layer(data=rcl2sum4, weight=rcl2w4, bias=rcl2b4, padding="SAME") ########
rcl2bn1 = self.batch_norm(rcl2conv4)
rcl2p1 = self.pool_layer(rcl2bn1)
rcl2d1 = self.dropout(rcl2p1, dropout=self.dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=rcl2d1, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
psc = self.pool_layer(bnsc)
# #Fully Connected layer
# wfc, bfc = self.init_weight_bias(name="fclayer", shape=[8 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
# fclayer = self.fc_layer(data = psc, weight=wfc, bias=bfc)
# bnfclayer = self.batch_norm(fclayer)
# dfclayer = self.dropout(bnfclayer, dropout=self.dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[8 * self.fm, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(psc, weight=wo, bias=bo, label=label_node)
# cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_out, labels=label_node))
# soft_max = tf.nn.softmax(l_out)
return cross_entropy, soft_max, data_node, label_node, wsc ,bsc
def Inception_RCNN2(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
else:
batch_size = 700-time_cnt+1
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1, = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
w1d, b1d = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
sum1d = conv1+conv1d
bn1d = self.batch_norm(sum1d)
# p1 = self.pool_layer(bn1d)
# d1 = self.dropout(p1, dropout=self.dr)
# First inception module
win1a, bin1a = self.init_weight_bias(name="convin1a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin1a = self.conv_layer(data=bn1d, weight=win1a, bias=bin1a, padding="SAME")
win1b, bin1b = self.init_weight_bias(name="convin1b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin1b = self.conv_layer(data=bn1d, weight=win1b, bias=bin1b, padding="SAME")
win1c, bin1c = self.init_weight_bias(name="convin1c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin1c = self.conv_layer(data=bn1d, weight=win1c, bias=bin1c, padding="SAME")
win1d, bin1d = self.init_weight_bias(name="convin1d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin1d = self.conv_layer(data=bn1d, weight=win1d, bias=bin1d, padding="SAME")
sumin1 = convin1a + convin1b + convin1c + convin1d
bnin1 = self.batch_norm(sumin1)
pin1 = self.pool_layer(bnin1)
din1 = self.dropout(pin1, dropout=self.dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=din1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
sum2d = conv2 + conv2d
bn2d = self.batch_norm(sum2d)
# p2 = self.pool_layer(bn2d)
# d2 = self.dropout(p2, dropout=self.dr)
# Second inception module
win2a, bin2a = self.init_weight_bias(name="convin2a", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin2a = self.conv_layer(data=bn2d, weight=win2a, bias=bin2a, padding="SAME")
win2b, bin2b = self.init_weight_bias(name="convin2b", shape=[1, 3, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin2b = self.conv_layer(data=bn2d, weight=win2b, bias=bin2b, padding="SAME")
win2c, bin2c = self.init_weight_bias(name="convin2c", shape=[1, 5, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin2c = self.conv_layer(data=bn2d, weight=win2c, bias=bin2c, padding="SAME")
win2d, bin2d = self.init_weight_bias(name="convin2d", shape=[1, 7, self.fm, self.fm], filtercnt=self.fm, trainable=train)
convin2d = self.conv_layer(data=bn2d, weight=win2d, bias=bin2d, padding="SAME")
sumin2 = convin2a + convin2b + convin2c + convin2d
bnin2 = self.batch_norm(sumin2)
pin2 = self.pool_layer(bnin2)
din2 = self.dropout(pin2, dropout=self.dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=din2, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
psc = self.pool_layer(bnsc)
#Output layer
wf, bf = self.init_weight_bias(name="fc", shape=[1 * 8 * self.fm, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(psc, weight=wf, bias=bf, label=label_node)
# cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_out, labels=label_node))
# soft_max = tf.nn.softmax(l_out)
return cross_entropy, soft_max, data_node, label_node, w1,b1
def RCNN2(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
else:
batch_size = 700-time_cnt+1
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
#channel_cnt(22) X time_cnt @ feature_map(64)
w1, b1, = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=self.dr)
# RCL2
#channel_cnt(22) X time_cnt/4 @ feature_map(64)
w2, b2, = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm,
trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm,
trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm,
trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm,
trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=self.dr)
#Spatial Convolutional layer
#channel_cnt(22) X time_cnt/16 @ feature_map(64)
w3, b3 = self.init_weight_bias(name="conv3", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="VALID")
bn3 = self.batch_norm(conv3)
p3 = self.pool_layer(bn3)
d3 = self.dropout(p3, dropout=self.dr)
#Sum feature map
#1 X time_cnt/16 @ 1
w4, b4 = self.init_weight_bias(name="conv4", shape=[1, 1, self.fm, 1], filtercnt=1, trainable=train)
conv4 = self.conv_layer(data = d3, weight=w4, bias=b4, padding="SAME")
bn4 = self.batch_norm(conv4)
d4 = self.dropout(bn4, dropout=self.dr)
#Fully connected layer
#1 X time_cnt/16 @ 1
wfc, bfc = self.init_weight_bias(name="fclayer", shape=[1 * 8 * 1, self.fcnode], filtercnt=self.fcnode, trainable=train)
fc = self.fc_layer(data=d4, weight=wfc, bias=bfc)
bnfc = self.batch_norm(fc)
#Output layer
wf, bf = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(bnfc, weight=wf, bias=bf, label=label_node)
# cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_out, labels=label_node))
# soft_max = tf.nn.softmax(l_out)
return cross_entropy, soft_max, data_node, label_node, w1,b1
def RCNN3(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#1st Fully Connected layer
wfc1, bfc1 = self.init_weight_bias(name="fclayer1", shape=[8 * self.fm, self.fcnode1], filtercnt=self.fcnode1, trainable=train)
fclayer1 = self.fc_layer(data=bnsc, weight=wfc1, bias=bfc1)
bnfclayer1 = self.batch_norm(fclayer1)
dfclayer1 = self.dropout(bnfclayer1, dropout=dr)
#2nd Fully Connected layer
wfc2, bfc2 = self.init_weight_bias(name="fclayer2", shape=[self.fcnode1, self.fcnode2], filtercnt=self.fcnode2, trainable=train)
fclayer2 = self.fc_layer(data=dfclayer1, weight=wfc2, bias=bfc2)
bnfclayer2 = self.batch_norm(fclayer2)
dfclayer2 = self.dropout(bnfclayer2, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode2, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer2, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, w1, b1
def RCNN4(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[3, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#Fully Connected layer
wfc, bfc = self.init_weight_bias(name="fclayer", shape=[8 * 20 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer = self.fc_layer(data=bnsc, weight=wfc, bias=bfc)
bnfclayer = self.batch_norm(fclayer)
dfclayer = self.dropout(bnfclayer, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN5(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
# w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
# bn3 = self.batch_norm(conv3)
#
# w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
# sum3a = conv3+conv3a
# bn3a = self.batch_norm(sum3a)
#
# w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
# sum3b = conv3+conv3b
# bn3b = self.batch_norm(sum3b)
#
# w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
# sum3c = conv3+conv3c
# bn3c = self.batch_norm(sum3c)
# p3 = self.pool_layer(bn3c)
# d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d2, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#Fully Connected layer
wfc, bfc = self.init_weight_bias(name="fclayer", shape=[8 * 4 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer = self.fc_layer(data=bnsc, weight=wfc, bias=bfc)
bnfclayer = self.batch_norm(fclayer)
dfclayer = self.dropout(bnfclayer, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN6(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
# w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
# bn3 = self.batch_norm(conv3)
#
# w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
# sum3a = conv3+conv3a
# bn3a = self.batch_norm(sum3a)
#
# w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
# sum3b = conv3+conv3b
# bn3b = self.batch_norm(sum3b)
#
# w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
# sum3c = conv3+conv3c
# bn3c = self.batch_norm(sum3c)
# p3 = self.pool_layer(bn3c)
# d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d2, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#Fully Connected layer1
wfc1, bfc1 = self.init_weight_bias(name="fclayer1", shape=[8 * 4 * self.fm, self.fcnode * 2], filtercnt=self.fcnode * 2, trainable=train)
fclayer1 = self.fc_layer(data=bnsc, weight=wfc1, bias=bfc1)
bnfclayer1 = self.batch_norm(fclayer1)
dfclayer1 = self.dropout(bnfclayer1, dropout=dr)
#Fully Connected layer2
wfc2, bfc2 = self.init_weight_bias(name="fclayer2", shape=[self.fcnode * 2, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer2 = self.fc_layer(data=dfclayer1, weight=wfc2, bias=bfc2)
bnfclayer2 = self.batch_norm(fclayer2)
dfclayer2 = self.dropout(bnfclayer2, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer2, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN7(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
# w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
# conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
# bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, 1, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=data_node, weight=w1a, bias=b1a, padding="SAME")
# sum1a = conv1+conv1a
bn1a = self.batch_norm(conv1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1a+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1a+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
# RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#Fully Connected layer1
wfc1, bfc1 = self.init_weight_bias(name="fclayer1", shape=[8 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer1 = self.fc_layer(data=bnsc, weight=wfc1, bias=bfc1)
bnfclayer1 = self.batch_norm(fclayer1)
dfclayer1 = self.dropout(bnfclayer1, dropout=dr)
#Fully Connected layer2
# wfc2, bfc2 = self.init_weight_bias(name="fclayer2", shape=[self.fcnode * 2, self.fcnode], filtercnt=self.fcnode, trainable=train)
# fclayer2 = self.fc_layer(data=dfclayer1, weight=wfc2, bias=bfc2)
# bnfclayer2 = self.batch_norm(fclayer2)
# dfclayer2 = self.dropout(bnfclayer2, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer1, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN8(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=data_node, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1 = self.conv_layer(data=bnsc, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
# RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
# d3 = self.dropout(p3, dropout=dr)
#Fully Connected layer1
wfc1, bfc1 = self.init_weight_bias(name="fclayer1", shape=[8 * self.fm, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer1 = self.fc_layer(data=p3, weight=wfc1, bias=bfc1)
bnfclayer1 = self.batch_norm(fclayer1)
dfclayer1 = self.dropout(bnfclayer1, dropout=dr)
#Fully Connected layer2
# wfc2, bfc2 = self.init_weight_bias(name="fclayer2", shape=[self.fcnode * 2, self.fcnode], filtercnt=self.fcnode, trainable=train)
# fclayer2 = self.fc_layer(data=dfclayer1, weight=wfc2, bias=bfc2)
# bnfclayer2 = self.batch_norm(fclayer2)
# dfclayer2 = self.dropout(bnfclayer2, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer1, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN9(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, self.fm/4, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1a = self.conv_layer(data=bn1, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm/4, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm/4, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm/4, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm/2, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm * 2], filtercnt=self.fm*2, trainable=train)
spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#Fully Connected layer
wfc, bfc = self.init_weight_bias(name="fclayer", shape=[8 * self.fm*2, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer = self.fc_layer(data=bnsc, weight=wfc, bias=bfc)
bnfclayer = self.batch_norm(fclayer)
dfclayer = self.dropout(bnfclayer, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
def RCNN10(self, train, channel_cnt, time_cnt):
if train:
batch_size = st.batch_size
dr = 0.5
else:
batch_size = 700-time_cnt+1
dr = 1.0
data_node = tf.placeholder(tf.float32, shape=(batch_size, channel_cnt, time_cnt, 1))
label_node = tf.placeholder(tf.int64, shape=batch_size)
#RCL1
#w1, b1 = self.init_weight_bias(name="conv1", shape=[1, 1, 1, self.fm], filtercnt=self.fm, trainable=train)
#conv1 = self.conv_layer(data=data_node, weight=w1, bias=b1, padding="SAME")
#bn1 = self.batch_norm(conv1)
w1a, b1a = self.init_weight_bias(name="conv1a", shape=[1, 9, 1, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1a = self.conv_layer(data=data_node, weight=w1a, bias=b1a, padding="SAME")
sum1a = conv1a #conv1+conv1a
bn1a = self.batch_norm(sum1a)
w1b, b1b = self.init_weight_bias(name="conv1b", shape=[1, 9, self.fm/4, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1b = self.conv_layer(data=bn1a, weight=w1b, bias=b1b, padding="SAME")
sum1b = sum1a + conv1b #conv1+conv1b
bn1b = self.batch_norm(sum1b)
w1c, b1c = self.init_weight_bias(name="conv1c", shape=[1, 9, self.fm/4, self.fm/4], filtercnt=self.fm/4, trainable=train)
conv1c = self.conv_layer(data=bn1b, weight=w1c, bias=b1c, padding="SAME")
sum1c = sum1a + conv1c #conv1+conv1c
bn1c = self.batch_norm(sum1c)
# w1d, b1d, = self.init_weight_bias(name="conv1d", shape=[1,9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv1d = self.conv_layer(data=bn1c, weight=w1d, bias=b1d, padding="SAME")
# sum1d = conv1+conv1d
# bn1d = self.batch_norm(sum1d)
p1 = self.pool_layer(bn1c)
d1 = self.dropout(p1, dropout=dr)
# RCL2
w2, b2 = self.init_weight_bias(name="conv2", shape=[1, 1, self.fm/4, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2 = self.conv_layer(data=d1, weight=w2, bias=b2, padding="SAME")
bn2 = self.batch_norm(conv2)
w2a, b2a = self.init_weight_bias(name="conv2a", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2a = self.conv_layer(data=bn2, weight=w2a, bias=b2a, padding="SAME")
sum2a = conv2 + conv2a
bn2a = self.batch_norm(sum2a)
w2b, b2b = self.init_weight_bias(name="conv2b", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2b = self.conv_layer(data=bn2a, weight=w2b, bias=b2b, padding="SAME")
sum2b = conv2 + conv2b
bn2b = self.batch_norm(sum2b)
w2c, b2c = self.init_weight_bias(name="conv2c", shape=[1, 9, self.fm/2, self.fm/2], filtercnt=self.fm/2, trainable=train)
conv2c = self.conv_layer(data=bn2b, weight=w2c, bias=b2c, padding="SAME")
sum2c = conv2 + conv2c
bn2c = self.batch_norm(sum2c)
# w2d, b2d, = self.init_weight_bias(name="conv2d", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
# conv2d = self.conv_layer(data=bn2c, weight=w2d, bias=b2d, padding="SAME")
# sum2d = conv2 + conv2d
# bn2d = self.batch_norm(sum2d)
p2 = self.pool_layer(bn2c)
d2 = self.dropout(p2, dropout=dr)
#RCL3
w3, b3 = self.init_weight_bias(name="conv3", shape=[1, 1, self.fm/2, self.fm], filtercnt=self.fm, trainable=train)
conv3 = self.conv_layer(data=d2, weight=w3, bias=b3, padding="SAME")
bn3 = self.batch_norm(conv3)
w3a, b3a = self.init_weight_bias(name="conv3a", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3a = self.conv_layer(data=bn3, weight=w3a, bias=b3a, padding="SAME")
sum3a = conv3+conv3a
bn3a = self.batch_norm(sum3a)
w3b, b3b = self.init_weight_bias(name="conv3b", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3b = self.conv_layer(data=bn3a, weight=w3b, bias=b3b, padding="SAME")
sum3b = conv3+conv3b
bn3b = self.batch_norm(sum3b)
w3c, b3c = self.init_weight_bias(name="conv3c", shape=[1, 9, self.fm, self.fm], filtercnt=self.fm, trainable=train)
conv3c = self.conv_layer(data=bn3b, weight=w3c, bias=b3c, padding="SAME")
sum3c = conv3+conv3c
bn3c = self.batch_norm(sum3c)
p3 = self.pool_layer(bn3c)
d3 = self.dropout(p3, dropout=dr)
#Spatial Convolutional layer
wsc, bsc = self.init_weight_bias(name="spatialconv", shape=[channel_cnt, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
spatialconv = self.conv_layer(data=d3, weight=wsc, bias=bsc, padding="VALID")
bnsc = self.batch_norm(spatialconv)
# psc = self.pool_layer(bnsc)
#1X1 Conv
wc, bc = self.init_weight_bias(name="finalconv", shape=[1, 1, self.fm, self.fm], filtercnt=self.fm, trainable=train)
finalconv = self.conv_layer(data=bnsc, weight=wc, bias=bc, padding="SAME")
bnfinal = self.batch_norm(finalconv)
#Fully Connected layer1
wfc1, bfc1 = self.init_weight_bias(name="fclayer1", shape=[8 * self.fm, 8 * self.fcnode], filtercnt=8 * self.fcnode, trainable=train)
fclayer1 = self.fc_layer(data=bnfinal, weight=wfc1, bias=bfc1)
bnfclayer1 = self.batch_norm(fclayer1)
dfclayer1 = self.dropout(bnfclayer1, dropout=dr)
#Fully Connected layer2
wfc2, bfc2 = self.init_weight_bias(name="fclayer2", shape=[8 * self.fcnode, self.fcnode], filtercnt=self.fcnode, trainable=train)
fclayer2 = self.fc_layer(data=dfclayer1, weight=wfc2, bias=bfc2)
bnfclayer2 = self.batch_norm(fclayer2)
dfclayer2 = self.dropout(bnfclayer2, dropout=dr)
#Output layer
wo, bo = self.init_weight_bias(name="output", shape=[self.fcnode, 4], filtercnt=4, trainable=train)
cross_entropy, soft_max = self.output_layer(dfclayer2, weight=wo, bias=bo, label=label_node)
return cross_entropy, soft_max, data_node, label_node, wsc, bsc
|
# Generated by Django 2.0 on 2018-02-06 02:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the workouts app.

    Creates the ``Food`` and ``Plan`` tables, then wires ``Food.plan`` as a
    cascading foreign key to ``Plan`` via a separate ``AddField`` (because
    ``Plan`` is created after ``Food`` in the operation list).

    NOTE: auto-generated by Django 2.0 on 2018-02-06; keep manual edits minimal.
    """

    # First migration in this app's history.
    initial = True

    # No dependencies: this schema does not build on other apps.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Plan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300)),
                ('reddit_link', models.URLField(unique=True)),
                ('user', models.CharField(max_length=200)),
                ('user_age', models.CharField(max_length=100)),
                ('user_sex', models.CharField(max_length=20)),
                ('user_height', models.IntegerField(default=0)),
                ('user_body_type_start', models.CharField(max_length=200)),
                ('user_body_type_end', models.CharField(max_length=200)),
                ('user_starting_weight', models.IntegerField()),
                ('user_ending_weight', models.IntegerField()),
                ('goals', models.CharField(max_length=100)),
                ('calories', models.IntegerField()),
                ('macro_protein', models.IntegerField()),
                ('macro_carb', models.IntegerField()),
                ('macro_fat', models.IntegerField()),
                ('period', models.FloatField()),
            ],
        ),
        # Added after both models exist so the FK target table is present.
        migrations.AddField(
            model_name='food',
            name='plan',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workouts.Plan'),
        ),
    ]
|
# Imports file from FIMS: Tools -> System Utilities -> Admin Utilities -> User Permissions Report.
# Parses file, writes results to a normalized, tab-delimited *.CSV file.
# Note: Import file structure: First 61 characters are the "Activity" column, remaining
# characters contain comma-separated list of user IDs.
# See spreadsheet at:
# C:\Users\Mitchell.Hollberg\OneDrive - Community Foundation for Greater Atlanta\MH\FIMS\ADMIN\CFGA - FIMS Procedures\fims-security-module-listing.xlsx
import os
import re
from collections import OrderedDict
# Pre-compiled pattern: a trailing comma marks a wrapped (continued) row.
re_endline = re.compile('[,]$')

file_in = os.path.abspath(r'FIMS_User_Permissions_report.txt')
file_out = 'FIMS_Security_20170906.csv'

# Maps Activity name -> list of user IDs, preserving report order.
mydict = OrderedDict()
mystr = ''

# Read FIMS Security report: skip irrelevant lines, build dict of
# "Activity: [user IDs]".
with open(file_in) as f:
    for line in f:
        # Skip 'filler' lines: the column-header line, separator rules
        # ('---...'), blank lines, and page breaks/form feeds ('\f\n').
        if line == '\n' or line[:8] == 'Activity' or line[:3] == '---' or line == '\f\n':
            continue
        # Replace (not remove) a LEADING comma so the first 61 characters
        # still line up with the fixed-width Activity column.
        if line[0] == ',':
            line = ' ' + line[1:]
        # Wrapped lines END with a comma: buffer this fragment and keep
        # accumulating until a line without a trailing comma completes it.
        # (Call .search() on the compiled pattern instead of re.search().)
        if re_endline.search(line):
            mystr = mystr + line.rstrip()
            continue
        mystr += line.rstrip()
        activity = mystr[:61].strip()             # fixed-width Activity column
        mydict[activity] = mystr[61:].split(',')  # comma-separated user IDs
        mystr = ''

# Write one row per Activity/UserId combination, tab-delimited.
with open(file_out, 'w') as f_out:
    f_out.write('Activity \t User \n')  # header row, tab delimited
    for key, values in mydict.items():
        for entry in values:
            # `key` is already a str; the original's ''.join(key) was a no-op.
            f_out.write(key + '\t' + entry.strip() + '\n')
|
from __future__ import absolute_import, division, print_function
import signal
from contextlib import contextmanager
class TimeoutError(Exception):
    """Raised by :func:`timeout` when the guarded block exceeds its limit.

    NOTE(review): shadows the Python 3 builtin ``TimeoutError``; kept for
    backward compatibility with existing ``except TimeoutError`` callers.
    """
    pass
@contextmanager
def timeout(seconds):
    """Context manager that raises :class:`TimeoutError` if the body runs
    longer than *seconds* seconds.

    ``seconds <= 0`` disables the alarm entirely (infinite timeout).
    Implemented with SIGALRM, so it works only in the main thread on Unix.
    """
    def signal_handler(signum, frame):
        raise TimeoutError("Timed out!")
    if seconds > 0:
        # Remember the previous handler so it can be restored afterwards
        # (the original code left its handler installed permanently).
        previous = signal.signal(signal.SIGALRM, signal_handler)
        signal.alarm(seconds)
        try:
            yield
        finally:
            # Always cancel the pending alarm and restore the old handler.
            signal.alarm(0)
            signal.signal(signal.SIGALRM, previous)
    else:  # infinite timeout
        yield
|
# coding: latin-1
'''
Created on 3 may. 2019
Propuesta 2 (con complicaciones adicionales explicadas en informe)
@author: Antonio Pérez Oviedo
Librerías extra:
- pygame (1.9.6): para la interfaz gráfica,en este caso para que nuestra clase herede de la
clase Sprites y así encapsular la imagen asociada a la carta en la clase
Biblioteca estándar:
- sys: para deterner la ejecución en caso de excepción con sys.exit()
y para obtener los datos de dónde se produce la excepción en el código
- os: para la gestión de ficheros (comprobar si existe, ver en qué fichero
se encuentra la ejecución cuando se produce una excepción...)
Módulos propios:
- PantallaError: Clase que controla la pantalla de error que mostramos en caso de excepción
'''
import sys
import pygame
from Cartas import Utiles as utiles
from Front.PantallaError import PantallaError
import os
"""
Esta clase representa a cada una de las cartas que componen la baraja
Como estas cartas tendrán asociada una representación gráfica (imagen)
hemos hecho que la clase herede de la clase Sprite de pygame (para
que así podemos unir la representación gráfica de la carta y su lógica)
"""
class Carta(pygame.sprite.Sprite):
    """A single card of the deck.

    Inherits from ``pygame.sprite.Sprite`` so that the card's graphical
    representation (its image) and its game logic live in one object.
    """

    def __init__(self, numero, palo):
        """Build a card from its face value *numero* and its suit *palo*.

        Both values are stored name-mangled (``__numero``/``__palo``) and
        exposed only through read-only properties: a card must not change
        its value once created.
        """
        pygame.sprite.Sprite.__init__(self)
        self.__palo = palo
        self.__numero = numero
        try:
            # utiles.convertir_Carta_A_Ruta maps (suit, value) to the path
            # where the card's image should live; load it via pygame.
            self.image = pygame.image.load(utiles.convertir_Carta_A_Ruta(self.palo, self.numero)).convert_alpha()
        except Exception as e:
            # FIX: the original had two byte-identical handlers (one for
            # FileNotFoundError, one for Exception); merged into a single
            # handler since both logged, showed the error screen and exited
            # in exactly the same way.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print("Ha ocurrido la siguiente excepción: {0}".format(e))
            print(exc_type, fname, exc_tb.tb_lineno)
            # Show the error screen and stop the program.
            pantallaError = PantallaError(r"Error al buscar las imágenes de las cartas")
            pantallaError.mostrarPantallaError()
            sys.exit()
        else:
            # No errors: position the rectangle used when drawing the image.
            self.rect = self.image.get_rect()
            self.rect.centery = 350

    # Read-only: no @palo.setter is defined, so the suit cannot be
    # modified from outside the class.
    @property
    def palo(self):
        return self.__palo

    # Read-only: no @numero.setter is defined, so the value cannot be
    # modified from outside the class.
    @property
    def numero(self):
        return self.__numero

    def __str__(self, *args, **kwargs):
        """Human-readable form, e.g. ``"7 de Oros"``."""
        return "{0} de {1}".format(self.numero, self.palo)
|
from django.contrib import admin
# Register your models here.
from mainapp.models import Colors
# Expose the Colors model in the Django admin interface.
admin.site.register(Colors)
|
from keras.layers import Conv2D, Input, MaxPooling2D, BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from baseModel import BaseModel
from keras.models import Model
class YoloV1Tiny(BaseModel):
    """Tiny YOLO v1 feature extractor built with Keras.

    Seven conv blocks (conv + batch-norm; LeakyReLU on the first six;
    2x2 max-pooling after the first five) forming the backbone model
    exposed as ``self.feature_extractor``.
    """

    def __init__(self, input_size):
        # TensorFlow data format: (None, w, h, c)
        input_image = Input(shape=(input_size, input_size, 3))
        # layer 1
        x = Conv2D(16, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=True)(input_image)
        x = BatchNormalization(name='norm_1')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(x)
        # layer 2
        x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=True)(x)
        x = BatchNormalization(name='norm_2')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(x)
        # layer 3
        x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=True)(x)
        x = BatchNormalization(name='norm_3')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(x)
        # layer 4
        x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_4', use_bias=True)(x)
        x = BatchNormalization(name='norm_4')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(x)
        # layer 5
        x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=True)(x)
        x = BatchNormalization(name='norm_5')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(x)
        # layer 6 (no pooling from here on)
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=True)(x)
        x = BatchNormalization(name='norm_6')(x)
        x = LeakyReLU(alpha=0.1)(x)
        # layer 7 (no activation after the final batch-norm)
        x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_7', use_bias=True)(x)
        x = BatchNormalization(name='norm_7')(x)
        # feature_extractor
        self.feature_extractor = Model(input_image, x, name="yolov1Tiny")

    def get_layers_info(self):
        """Print the model summary plus each layer's name, shape and tensor."""
        # BUG FIX: the original used Python 2 `print` statements, which are
        # syntax errors under Python 3. Also, summary() prints its report and
        # returns None, so `print summary()` emitted a spurious "None" line.
        self.feature_extractor.summary()
        for layer in self.feature_extractor.layers:
            print("{} output shape: {}".format(layer.name, layer.output_shape))
            print(layer.output)

    def get_layers_feauture(self):
        """Return the backbone Model. (Name typo kept for API compatibility.)"""
        return self.feature_extractor

    def extractor_output(self, input_image):
        """Apply the backbone to *input_image* and return the feature tensor."""
        return self.feature_extractor(input_image)
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
# Step 1: Get the data
movies_df = pd.read_csv("movies.csv")
ratings_df = pd.read_csv("ratings.csv")

# Printing options so we can see all of the data.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
# FIX: -1 is deprecated/removed for max_colwidth; None means "no limit".
pd.set_option('display.max_colwidth', None)

# See the data
print(movies_df.head(10))
print(ratings_df.head(10))

# Step 2: Preprocessing
# The year could also be used for recommendation, so separate it out.
# FIX: raw strings avoid invalid-escape-sequence warnings in the regexes.
movies_df['year'] = movies_df.title.str.extract(r'(\(\d\d\d\d\))', expand=False)
# Remove the parentheses.
movies_df['year'] = movies_df.year.str.extract(r'(\d\d\d\d)', expand=False)
# Remove the years from the 'title' column.
# FIX: pass regex=True explicitly (str.replace's default changed in pandas 1.4+).
movies_df['title'] = movies_df.title.str.replace(r'(\(\d\d\d\d\))', '', regex=True)
# Strip any trailing whitespace left behind by the removal.
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
print(movies_df.head())

# Hold the genres in a list for easier access, rather than '|'-separated text.
movies_df['genres'] = movies_df.genres.str.split('|')
print(movies_df.head())

# One-hot encode the genres.
moviesWithGenres_df = movies_df.copy()
# For every row, place a 1 in the column of each genre the movie has.
for index, row in movies_df.iterrows():
    for genre in row['genres']:
        moviesWithGenres_df.at[index, genre] = 1
# NaN -> 0: the movie does not have that column's genre.
moviesWithGenres_df = moviesWithGenres_df.fillna(0)
print(moviesWithGenres_df.head())

# For the ratings DF, the timestamp is not needed.
ratings_df.drop("timestamp", axis=1, inplace=True)
print(ratings_df.head())

# Simulated user input.
userInput = [
    {'title': 'Breakfast Club, The', 'rating': 5},
    {'title': 'Toy Story', 'rating': 3.5},
    {'title': 'Jumanji', 'rating': 2},
    {'title': "Pulp Fiction", 'rating': 5},
    {'title': 'Akira', 'rating': 4.5}
]
inputMovies = pd.DataFrame(userInput)

# Match these movies to their movie ID from movies_df.
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
inputMovies = pd.merge(inputId, inputMovies)
# FIX: positional `axis` in drop() was deprecated and removed in pandas 2.0;
# use the explicit columns= keyword (one call instead of a chain).
inputMovies = inputMovies.drop(columns=['genres', 'year'])
print(inputMovies.head())

# The user's movies with genre data.
userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(inputMovies['movieId'].tolist())]
# Reset the index to avoid alignment issues later.
userMovies = userMovies.reset_index(drop=True)
# Drop columns not needed for the genre matrix.
userGenreTable = userMovies.drop(columns=['movieId', 'title', 'genres', 'year'])
print(userGenreTable.head())
print(inputMovies["rating"])

# Step 3: Recommendation system — build the user profile.
# Dot product of genre matrix and ratings gives per-genre weights.
userProfile = userGenreTable.transpose().dot(inputMovies['rating'])
print(userProfile.head())

# Genres of every movie in the original dataframe, indexed by movieId.
genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId'])
genreTable = genreTable.drop(columns=['movieId', 'title', 'genres', 'year'])
print(genreTable.head())

# Weighted average of every candidate movie against the user profile.
recommendationTable_df = ((genreTable * userProfile).sum(axis=1)) / (userProfile.sum())
print(recommendationTable_df.head())
# Sort the recommendations in descending order.
recommendationTable_df = recommendationTable_df.sort_values(ascending=False)
print(recommendationTable_df.head())

# Final recommendation table: top 20 candidates.
print(movies_df.loc[movies_df['movieId'].isin(recommendationTable_df.head(20).keys())])
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 04 15:58:34 2017
@author: tolic
"""
import numpy as np
from sklearn.decomposition import PCA
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
iris = datasets.load_iris()
X_data = iris.data
y_data = iris.target

"""
Step 1. Standardize the features (zero mean, unit variance).
"""
scaler = StandardScaler()
scaler.fit(X_data)
X = scaler.transform(X_data)  # standardized data
m = float(X.shape[0])  # number of samples
n = int(X.shape[1])    # number of features

"""
Step 2. Covariance matrix of the standardized data.
"""
sigma = (1 / m) * X.transpose().dot(X)

"""
Step 3. SVD of the covariance matrix; choose the smallest k that
retains at least 99% of the total variance.
"""
U, S, V = np.linalg.svd(sigma)
total_variance = float(np.sum(S))
k = 0
s = 0
for i in range(0, n):
    s = s + S[i]  # accumulate explained variance to find the minimum dimension
    if s / total_variance >= 0.99:
        k = i
        break
u = U[:, 0:k + 1]

"""
Step 4. Project the data onto the first k+1 principal directions.
"""
x = X.dot(u)
# BUG FIX: `print x` is Python 2 syntax and a SyntaxError under Python 3.
# (Also removed the unused `diagnoal` matrix and stray semicolons.)
print(x)
|
from plotutils import *
# Three ttbar samples to compare: Spring15 powheg, Spring15 aMC@NLO,
# and Phys14 madgraph (colors used consistently in the plots below).
samples=[Sample('t#bar{t} spring15 powheg',ROOT.kBlue,'/nfs/dust/cms/user/kelmorab/Spring15_Base16thJuly/forTraining/ttbar/ttbar_nominal.root','') ,
         Sample('t#bar{t} spring15 amcNLO',ROOT.kRed,'/nfs/dust/cms/user/kelmorab/Spring15_Base16thJuly/forLimit/ttbar/ttbar_nominal.root',''),
         Sample('t#bar{t} phys14 madgraph',ROOT.kGreen-3,'/nfs/dust/cms/user/hmildner/trees0717/ttbar.root',''),
         ]

# CSVv2 IVF discriminator distributions, split by jet flavour:
# index 0 = b-jets, index 1 = light jets.
plots=[
       Plot(ROOT.TH1F("Jet_CSVb" ,"CSVv2 IVF b-jets",40,0,1),"Jet_CSV","abs(Jet_Flav)>4.5&&abs(Jet_Flav)<5.5"),
       Plot(ROOT.TH1F("Jet_CSVl" ,"CSVv2 IVF l-jets",40,0,1),"Jet_CSV","abs(Jet_Flav)<3.5||abs(Jet_Flav)>5.5"),
       ]

# Fill histograms for every plot/sample combination from the 'MVATree' tree.
listOfhistoLists=createHistoLists_fromTree(plots,samples,'MVATree')
writeListOfhistoLists(listOfhistoLists,samples,'btag_plots')

# ROC curves, powheg vs aMC@NLO (sample indices 0 and 1).
roc_p=getROC(listOfhistoLists[0][0],listOfhistoLists[1][0],False)
roc_a=getROC(listOfhistoLists[0][1],listOfhistoLists[1][1],False)
writeListOfROCs([roc_p,roc_a],['powheg', 'amcnlo'],[ROOT.kBlue,ROOT.kRed],'btag_roc_pa',True,False)

# Efficiency curves for the same two samples.
eff_p=getEff(listOfhistoLists[0][0])
eff_a=getEff(listOfhistoLists[0][1])
writeListOfROCs([eff_p,eff_a],['powheg', 'amcnlo'],[ROOT.kBlue,ROOT.kRed],'btag_eff_pa',False,True)

# Same comparison for Spring15 powheg (index 0) vs Phys14 madgraph (index 2).
roc_s15=getROC(listOfhistoLists[0][0],listOfhistoLists[1][0],False)
roc_p14=getROC(listOfhistoLists[0][2],listOfhistoLists[1][2],False)
writeListOfROCs([roc_s15,roc_p14],['spring15','phys14'],[ROOT.kBlue,ROOT.kRed],'btag_roc_sp',True,False)
eff_s15=getEff(listOfhistoLists[0][0])
eff_p14=getEff(listOfhistoLists[0][2])
writeListOfROCs([eff_s15,eff_p14],['spring15','phys14'],[ROOT.kBlue,ROOT.kRed],'btag_eff_sp',False,True)
|
import os
import shutil
import glob
masterdir = "/media/spl/D/MicroCT data/4th batch bone mets loading study/w0w0composite"
# STL files are collected in a sibling "STL files" directory.
mvstldir = os.path.join(masterdir, "..", "STL files")
if not os.path.exists(mvstldir):
    os.mkdir(mvstldir)

for fd in os.listdir(masterdir):
    if "week" in fd:
        # Sample ID: first 4 chars plus the middle chunk of the folder name.
        sampleID = fd[0:4] + fd[11:-14]
        sampledir = os.path.join(mvstldir, sampleID)
        if not os.path.exists(sampledir):
            os.mkdir(sampledir)
        stls = glob.glob(os.path.join(masterdir, fd, "*.stl"))
        # BUG FIX: the original had `stls.sort` without parentheses, which
        # only referenced the bound method and never called it, so the glob
        # results were processed in arbitrary filesystem order.
        stls.sort()
        newname = [sampleID + " week 0 " + "trab.stl",
                   sampleID + " week 1 " + "trab.stl",
                   sampleID + " week 0 " + "cort.stl",
                   sampleID + " week 1 " + "cort.stl"]
        # NOTE(review): the range starts at -1, so the LAST sorted STL gets
        # newname[0] and the rest follow in order — presumably this matches
        # the scanner's export order; confirm against a sample directory.
        for i in range(-1, len(stls) - 1):
            shutil.move(stls[i], os.path.join(sampledir, newname[i + 1]))
|
from flask import Flask
from flask_sqlalchemy_core import FlaskSQLAlchemy
import os
from flask_login import LoginManager
app = Flask(__name__)

# Folder where the script runs (useful for building file-based URIs/paths).
basedir = os.path.abspath(os.path.dirname(__file__))

# Enable debug mode.
# NOTE(review): debug must be disabled in production deployments.
app.config["DEBUG"] = True

# Secret key for session management. You can generate random strings here:
# https://randomkeygen.com/
# NOTE(review): hard-coded secret committed to source — should be loaded
# from the environment or a config file instead.
app.config["SECRET_KEY"] = '731958285'

# Connect to the database (earlier SQLite/MySQL URIs kept for reference).
#app.config["SQLALCHEMY_DATABASE_URI"] = 'sqlite:///' + os.path.join(basedir, 'database.db')
#app.config["SQLALCHEMY_DATABASE_URI"] = 'mysql://root:@localhost/socialdb'
db = FlaskSQLAlchemy('mysql+pymysql://root:@localhost/socialdb')

# Flask-Login setup: 'login' is the endpoint unauthenticated users are
# redirected to when they hit a protected view.
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'

# Imported last so the views module can import `app`/`db` from here
# without a circular-import failure.
from app import views
|
# Assignment: Checkerboard
# Write a program that prints a 'checkerboard' pattern to the console.
# Print an 8-row checkerboard: rows alternate between the two
# four-asterisk layouts, starting with the offset-left variant.
for row in range(8):
    if row % 2 == 0:
        print("* " * 4)
    else:
        print(' *' * 4)
|
""" A standard twisted tap file for an exposed service for txbonjour"""
from zope.interface import implements, implementer
from twisted.python import log, usage
from twisted.plugin import IPlugin
from twisted.internet import reactor
from twisted.application.service import IServiceMaker, MultiService
from txbonjour import version, name, description, service, discovery
class LoggingProtocol(discovery.BroadcastProtocol):
    """ I am a logging protocol. I do nothing but log what I receive. """

    def registerReceived(self, *args):
        # Log the registration callback (our service is now broadcasting).
        log.msg('now broadcasting: %r' % (args,))

    def addService(self, *args):
        # Log a service appearing on the network.
        log.msg('add service: %r' % (args,))

    def removeService(self, *args):
        # Log a service disappearing from the network.
        log.msg('remove service: %r' % (args,))

    def browseError(self, *args):
        # Log an error raised while browsing for services.
        log.msg('browseError: %r' % (args,))

    def resolveError(self, err, *args):
        # Log an error raised while resolving a discovered service.
        log.msg('resolveError: %s - %r' % (err, args,))
class Options(usage.Options):
    """Command-line options understood by the txbonjour twistd plugin."""

    # Boolean flags: [long name, short name, description].
    optFlags = [
        ['resolve-domains', 'n', 'Resolve FQDM to ip addresses before '\
            'reporting']
    ]

    # Valued parameters: [long, short, default, description(, coercion)].
    optParameters = [
        ['port', 'p', 9898, 'The port to broadcast to bonjour clients to '\
            'connect to you', int],
        ['registration', 'r', '_examples._tcp', 'The mDNS registry for bonjour'\
            ', ie. <domain>.<transport>'],
        ['service-name', 's', 'Example-Service', 'The name bonjour clients '\
            'will see for your service'],
    ]
@implementer(IServiceMaker, IPlugin)
class ServiceMaker(object):
    """Builds the txbonjour MultiService for twistd.

    FIX: replaced the deprecated class-body call ``implements(...)`` (which
    fails under Python 3) with the ``@implementer`` class decorator that the
    module already imports.
    """

    tapname = 'txbonjour'
    description = description
    version = version
    options = Options

    def makeService(self, options,
                    broadcast_protocol=None,
                    discovery_protocol=None):
        """ Accepts options as a regular twistd plugin does. Also accepts
            keyword arguments 'broadcast_protocol' for a protocol *instance*
            and 'discovery_protocol' for a protocol *instance*. Returns a
            twisted.application.service.IService implementor.
        """
        # Read the parsed command-line options. (The original re-read
        # 'service-name' twice; the duplicate has been removed.)
        service_name = options.get('service-name')
        resolve = options.get('resolve-domains')
        port = options.get('port')
        registry = options.get('registration')

        s = MultiService()
        s.setName('txbonjour-%s' % (service_name,))

        # Default both protocols to one shared logging protocol instance.
        logging_proto = LoggingProtocol()
        if broadcast_protocol is None:
            broadcast_protocol = logging_proto
        if discovery_protocol is None:
            discovery_protocol = logging_proto

        discover_service = discovery.listenBonjour(discovery_protocol,
                                                   registry,
                                                   resolve_ips=resolve,
                                                   )
        discover_service.setName('discovery')
        discover_service.setServiceParent(s)

        def broadcast():
            # Registered to run once the reactor starts (see callWhenRunning).
            broadcast_service = discovery.connectBonjour(broadcast_protocol,
                                                         registry,
                                                         port,
                                                         service_name,
                                                         )
            broadcast_service.setName('broadcast')
            broadcast_service.setServiceParent(s)

        reactor.callWhenRunning(broadcast)
        return s
# Module-level plugin hooks: twistd's plugin system looks these names up.
serviceMaker = ServiceMaker()
makeService = serviceMaker.makeService
|
from src.point import Point
class Triangle:
    """Triangle defined by three ``Point`` vertices ``a``, ``b``, ``c``."""

    def __init__(self, a, b, c):
        """
        :type: a: Point
        :type: b: Point
        :type: c: Point
        """
        self.a = a
        self.b = b
        self.c = c

    def __contains__(self, point):
        """Return True if *point* lies inside or on this triangle.

        :type point: Point
        """
        # Orientation of the point relative to each directed edge.
        o1 = Point.test_orientation(self.a, self.b, point)
        o2 = Point.test_orientation(self.b, self.c, point)
        o3 = Point.test_orientation(self.c, self.a, point)
        # All three orientations equal -> same side of every edge (inside);
        # any zero -> collinear with an edge.
        # NOTE(review): a zero orientation only proves collinearity with the
        # edge's infinite line, not containment within the edge segment — a
        # point on an edge's extension beyond the triangle would also return
        # True here. Confirm Point.test_orientation's intended semantics.
        return o1 == 0 or o2 == 0 or o3 == 0 or o1 == o2 == o3
|
from openload import OpenLoad
import csv
import random
# CSV header columns for the scraped video records.
line = ["Title","EmbedCod","Video Duration","Thumbnail","Categories","Tags"]
output = "openload_out.csv"

# Start the output file fresh with just the header row.
with open(output, 'w', newline='') as file1:
    writer = csv.writer(file1, delimiter=',')
    writer.writerow(line)

# Folder-name filter: only folders whose name contains this string are scraped.
user_folder = input("please input folder name : ")

# NOTE(review): placeholder credentials — must be replaced with real values.
username = 'your name'
key = 'key'
ol = OpenLoad(username, key)

# Walk the account's folder tree up to four levels below the root,
# collecting every folder dict into tree_result.
tree_result = []
fold_tree = ol.list_folder()
folders = fold_tree["folders"]
for folder in folders:
    tree_result.append(folder)
    tree = ol.list_folder(folder["id"])
    treelen = tree["folders"]
    for tree3 in treelen:
        tree_result.append(tree3)
        tree1 = ol.list_folder(tree3["id"])
        tree2en = tree1["folders"]
        for tree4 in tree2en:
            tree_result.append(tree4)
            tree2 = ol.list_folder(tree4["id"])
            tree5en = tree2["folders"]
            for tree6 in tree5en:
                tree_result.append(tree6)
                tree5 = ol.list_folder(tree6["id"])
                tree3en = tree5["folders"]
                for tree7 in tree3en:
                    tree_result.append(tree7)

# For each folder matching the filter, build one CSV row per video file.
for folder in tree_result:
    fold_name = folder["name"]
    if user_folder != "":
        if user_folder in fold_name:
            resp = ol.list_folder(folder["id"])
            files = resp["files"]
            results = []
            for file in files:
                fi = []
                # Title column: file name minus its 4-character extension.
                fi.append(str(file["name"])[:-4])
                file_id = file["linkextid"]
                file_info = ol.file_info(file_id)
                f_name = file_info[file_id]["name"]
                file_name = str(f_name).replace(" ", "_")
                # Embed code for the openload player iframe.
                embedcode = '<iframe src="https://openload.co/embed/' + str(
                    file_id) + '/' + file_name + '" scrolling="no" frameborder="0" width="700" height="430" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true"></iframe>'
                fi.append(embedcode)
                # Thumbnail splash image URL.
                splash = ol.splash_image(file_id)
                #fi.append("20:00")
                # Duration column: random value in [1400, 1500].
                fi.append(str(random.randint(1400,1500)))
                fi.append(splash)
                # Fixed category/tag columns.
                fi.append("Interracial")
                fi.append("creamy")
                results.append(fi)
                # NOTE(review): `output` is rebound here from the filename to
                # a display string — harmless only because the filename is
                # hard-coded again in the open() below.
                output = " , ".join(fi)
                print(output)
            # Append this folder's rows to the CSV.
            with open("openload_out.csv", 'a', newline='') as output_file:
                writer = csv.writer(output_file)
                for result in results:
                    writer.writerow(result)
print("-------done!--------")
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-11-19 11:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
class Migration(migrations.Migration):
    """Adds the UngleichHeader and UngleichHeaderItem CMS plugin models.

    NOTE: auto-generated by Django 1.9.4 on 2017-11-19; keep edits minimal.
    """

    dependencies = [
        ('filer', '0004_auto_20160328_1434'),
        ('cms', '0014_auto_20160404_1908'),
        ('ungleich_page', '0008_ungleichserviceitem'),
    ]

    operations = [
        migrations.CreateModel(
            name='UngleichHeader',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('carousel_data_interval', models.IntegerField(default=5000)),
                ('background_image', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ungleich_header_background_image', to='filer.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='UngleichHeaderItem',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
                ('image', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ungleich_header_item_image', to='filer.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
from contextlib import contextmanager
from ctypes import CDLL, c_int, c_int32, c_double, POINTER
from warnings import warn
from . import _dll
from .error import _error_handler
# Declare argument/return types for the libopenmc C entry points so ctypes
# marshals values correctly. Functions whose restype is c_int return an
# error code that _error_handler converts into a Python exception.
_dll.openmc_calculate_volumes.restype = None
_dll.openmc_finalize.restype = None
# openmc_find(xyz, rtype, id, instance): rtype 1 = cell, 2 = material
# (see find_cell/find_material below); id and instance are out-parameters.
_dll.openmc_find.argtypes = [POINTER(c_double*3), c_int, POINTER(c_int32),
                             POINTER(c_int32)]
_dll.openmc_find.restype = c_int
_dll.openmc_find.errcheck = _error_handler
_dll.openmc_hard_reset.restype = None
_dll.openmc_init.argtypes = [POINTER(c_int)]
_dll.openmc_init.restype = None
_dll.openmc_get_keff.argtypes = [POINTER(c_double*2)]
_dll.openmc_get_keff.restype = c_int
_dll.openmc_get_keff.errcheck = _error_handler
_dll.openmc_plot_geometry.restype = None
_dll.openmc_run.restype = None
_dll.openmc_reset.restype = None
def calculate_volumes():
    """Run stochastic volume calculation (libopenmc ``openmc_calculate_volumes``)."""
    _dll.openmc_calculate_volumes()
def finalize():
    """Finalize simulation and free memory (libopenmc ``openmc_finalize``)."""
    _dll.openmc_finalize()
def find_cell(xyz):
    """Find the cell at a given point

    Parameters
    ----------
    xyz : iterable of float
        Cartesian coordinates of position

    Returns
    -------
    int
        ID of the cell.
    int
        If the cell at the given point is repeated in the geometry, this
        indicates which instance it is, i.e., 0 would be the first instance.

    """
    # Out-parameters filled in by the C call; the literal 2nd argument (1)
    # selects a cell search (2 would select a material search).
    uid = c_int32()
    instance = c_int32()
    _dll.openmc_find((c_double*3)(*xyz), 1, uid, instance)
    return uid.value, instance.value
def find_material(xyz):
    """Find the material at a given point

    Parameters
    ----------
    xyz : iterable of float
        Cartesian coordinates of position

    Returns
    -------
    int or None
        ID of the material, or None if no material is found

    """
    # Out-parameters filled in by the C call; the literal 2nd argument (2)
    # selects a material search (1 would select a cell search).
    uid = c_int32()
    instance = c_int32()
    _dll.openmc_find((c_double*3)(*xyz), 2, uid, instance)
    # BUG FIX: the original tested `uid != 0`, which compares the ctypes
    # object itself to an int and is therefore always True — None was never
    # returned. Compare the wrapped .value instead.
    return uid.value if uid.value != 0 else None
def hard_reset():
    """Reset tallies, timers, and pseudo-random number generator state."""
    _dll.openmc_hard_reset()
def init(intracomm=None):
    """Initialize OpenMC

    Parameters
    ----------
    intracomm : mpi4py.MPI.Intracomm or None
        MPI intracommunicator

    """
    if intracomm is not None:
        # If an mpi4py communicator was passed, convert it to an integer to
        # be passed to openmc_init
        try:
            intracomm = intracomm.py2f()
        except AttributeError:
            # Not an mpi4py object (e.g. already an int): pass through as-is.
            pass
        _dll.openmc_init(c_int(intracomm))
    else:
        # No communicator given: initialize without MPI (NULL pointer).
        _dll.openmc_init(None)
def keff():
    """Return the calculated k-eigenvalue and its standard deviation.

    Returns
    -------
    tuple
        Mean k-eigenvalue and standard deviation of the mean

    """
    # Two-element double array filled by the C call: (mean, std. deviation).
    k = (c_double*2)()
    _dll.openmc_get_keff(k)
    return tuple(k)
def plot_geometry():
    """Plot geometry (libopenmc ``openmc_plot_geometry``)."""
    _dll.openmc_plot_geometry()
def reset():
    """Reset tallies and timers (keeps RNG state; see :func:`hard_reset`)."""
    _dll.openmc_reset()
def run():
    """Run simulation (libopenmc ``openmc_run``)."""
    _dll.openmc_run()
@contextmanager
def run_in_memory(intracomm=None):
    """Provides context manager for calling OpenMC shared library functions.

    This function is intended to be used in a 'with' statement and ensures that
    OpenMC is properly initialized/finalized. At the completion of the 'with'
    block, all memory that was allocated during the block is freed. For
    example::

        with openmc.capi.run_in_memory():
            for i in range(n_iters):
                openmc.capi.reset()
                do_stuff()
                openmc.capi.run()

    Parameters
    ----------
    intracomm : mpi4py.MPI.Intracomm or None
        MPI intracommunicator

    """
    init(intracomm)
    try:
        yield
    finally:
        # Guarantee library cleanup even if the with-body raises.
        finalize()
class _DLLGlobal(object):
    """Data descriptor that exposes global variables from libopenmc."""

    def __init__(self, ctype, name):
        # ctype: ctypes type of the library global; name: its exported symbol.
        self.ctype = ctype
        self.name = name

    def __get__(self, instance, owner):
        # Look the symbol up on every access so reads always reflect the
        # library's current value.
        return self.ctype.in_dll(_dll, self.name).value

    def __set__(self, instance, value):
        # Write through to the library's global directly.
        self.ctype.in_dll(_dll, self.name).value = value
class _FortranObject(object):
def __repr__(self):
return "{}[{}]".format(type(self).__name__, self._index)
class _FortranObjectWithID(_FortranObject):
    def __init__(self, uid=None, new=True, index=None):
        # Creating the object has already been handled by __new__. In the
        # initializer, all we do is make sure that the object returned has an ID
        # assigned. If the array index of the object is out of bounds, an
        # OutOfBoundsError will be raised here by virtue of referencing self.id
        # NOTE(review): `id` is expected to be provided by subclasses (as a
        # property/descriptor); it is not defined in this base class.
        self.id
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@file: JosnUtil.py
@time: 2020/5/20 23:24
@software: PyCharm
@desc: json tool
"""
import json
class JsonUtils:
    """Thin convenience wrapper around the standard :mod:`json` module."""

    def __init__(self):
        pass

    def json_dumps(self, data):
        """Encode a Python object into a JSON string.

        :param data: any JSON-serializable Python object
        :return: the JSON text
        """
        return json.dumps(data)

    def json_loads(self, data):
        """Decode a JSON string into the corresponding Python object.

        :param data: JSON text
        :return: the decoded Python object
        """
        return json.loads(data)
if __name__ == '__main__':
    # Quick smoke test: decode a sample login payload and show it.
    stus = '{"loginName":"13600000001","password":"123456","loginSysName":"bonus"}'
    s = JsonUtils().json_loads(stus)
    print(s)
"""operators in python
Arithmetic operators
Assignment operators
comparison operators
logical operators
Identity operators
bitwise operators"""
"""print("Arithmetic Operators")
print("5 + 7 is", 5 + 7)
print("5 - 7 is", 5 - 7)
print("5 * 7 is", 5 * 7)
print("5 / 7 is", 5 / 7)
print("5 // 7 is", 5 // 7)
print("5 % 7 is", 5 % 7)
print("5 ** 3 is", 5**3)
print("Assignment operators")
x = 1
x +=4
print(x)"""
"""print("Comparison operators")
i = 6
print(i==5)
print(i>5)
print(i>5)
print(i !=5)
print(i >= 5)
print(i <= 5)"""
"""print("Logical operators")
a = True
b = False
print(a and b)
print(a or b)"""
""""print("Identity operator")
print(3 is not 3)"""
"""print("membership operators")
list = [3,4,5,7,9,10,11,5,6]
print(34 not in list)""" |
#! usr/bin/env python3
# -*- coding: utf-8 -*-
# author: Kwinner Chen
# python: v 3.6.4
import re
import json
from lxml import etree
from urllib.parse import urljoin, urlsplit
from datetime import datetime
from downloader import page_downloader
# This function must return a follow-up (next-page) link and an iterable of news links.
# A None follow-up link is not used as a stop signal by itself (the queue may still hold usable links).
def news_list_parse(response):
    """Parse a news list page.

    :param response: downloaded page (falsy when the download failed).
    :return: (lazy iterable of absolute article URLs,
              absolute next-page URL or None when there is no "next" link)
    """
    if not response:
        return [], None
    tree = etree.HTML(response.text)
    hrefs = tree.xpath('//p[@class="tit blue"]/a/@href')
    urls = map(lambda href: urljoin(response.url, href), hrefs)
    next_href = ''.join(tree.xpath('//div[@class="pcauto_page"]//a[@class="next"]/@href'))
    next_url = urljoin(response.url, next_href) if next_href else None
    return urls, next_url
# Parses a news detail page into a dict; returns an empty dict when there is no content.
def news_info_parse(response, info_dict):
    """Parse a news detail page and fill *info_dict* with its fields.

    :param response: downloaded article page (falsy when the download failed).
    :param info_dict: dict to fill in place.
    :return: the filled dict, or a fresh empty dict when *response* is falsy.
    """
    if response:
        response.encoding = response.apparent_encoding
        tree = etree.HTML(response.text)
        current_url = response.url
        info_dict['URL'] = current_url
        info_dict['FLLJ'] = '/'.join(tree.xpath('//div[@class="pos-mark"]/a/text()'))
        info_dict['TITLE'] = ''.join(map(lambda x: x.strip(), tree.xpath('//div[@class="artDes"]//h1//text()')))
        publish_time = ''.join(map(lambda x: x.strip(), tree.xpath('//span[@class="pubTime"]/text()')))
        info_dict['PUBLISH_TIME'] = datetime.strptime(publish_time, '%Y-%m-%d %H:%M:%S') if publish_time else datetime.now()
        # BUGFIX: was '//div[class="pcauto_page"]...' (missing '@'). Without the
        # attribute axis the predicate looks for a <class> child element, so the
        # multi-page branch below could never be taken.
        nofollow = tree.xpath('//div[@class="pcauto_page"]//a[@rel="nofollow"]/@href')
        if nofollow:
            # BUGFIX: xpath() returns a list of hrefs; resolve the first one to
            # an absolute URL before handing it to total_page().
            content, image_url = total_page(urljoin(current_url, nofollow[0]))
            info_dict['CONTENT'] = content
            info_dict['IMAGE_URL'] = image_url
        else:
            info_dict['CONTENT'] = ''.join(map(lambda x: x.strip(), tree.xpath('//div[@class="artText clearfix"]/p//text()')))
            info_dict['IMAGE_URL'] = ', '.join(map(lambda x: urljoin(current_url, x.strip()), tree.xpath('//div[@class="artText clearfix"]//img/@src')))
        info_dict['KEY_WORDS'] = '/'.join(map(lambda x: x.strip(), tree.xpath('//div[@class="artExc"]/p/a/text()')))
        info_dict['DATA_SOURCE'] = re.sub(r'来源[:: ]+', '', ''.join(map(lambda x: x.strip(), tree.xpath('//span[@class="ownner"]//text()'))))
        read_num, comments_num = get_comments_num(current_url)
        info_dict['READ_NUM'] = read_num
        info_dict['COMMENTS_NUM'] = comments_num
        info_dict['CRAWLER_TIME'] = datetime.now()
        return info_dict
    else:
        return {}
def get_comments_num(current_url):
    """Fetch read/comment counts for an article from the comment service.

    :return: (read_num, comments_num) as strings; '' when unavailable.
    """
    base_url = 'https://cmt.pcauto.com.cn/action/topic/get_data.jsp?url=%s&callback=callbackFunc'
    resp = page_downloader(base_url % current_url[6:])  # [6:] strips the 'https:' scheme prefix
    if not resp:
        return '', ''

    def first_group(pattern):
        match = re.search(pattern, resp.text)
        return match.group(1) if match else ''

    return first_group(r'"commentRelNum":(\d*)'), first_group(r'"total":(\d*)')
def total_page(url):
    """Download the single-page ("view all") version of an article.

    :return: (article text, comma-joined absolute image URLs); ('', '') on failure.
    """
    resp = page_downloader(url)
    if not resp:
        return '', ''
    resp.encoding = resp.apparent_encoding
    tree = etree.HTML(resp.text)
    texts = tree.xpath('//div[@class="artText clearfix"]//text()')
    img_srcs = tree.xpath('//div[@class="artText clearfix"]//img/@src')
    content = ''.join(t.strip() for t in texts)
    image_url = ', '.join(urljoin(url, s.strip()) for s in img_srcs)
    return content, image_url
|
# Fibonacci-style rabbit-pair growth over 5 months: 1, 2, 3, 5, 8.
number_of_pair = 0
list_number_of_pair = []
for month in range(5):
    if month == 0:
        number_of_pair = 1
    elif month == 1:
        number_of_pair = 2
    else:
        # Each month's count is the sum of the previous two months.
        number_of_pair = list_number_of_pair[month - 1] + list_number_of_pair[month - 2]
    # BUGFIX(idiom): the append was duplicated in every branch; hoisted here.
    list_number_of_pair.append(number_of_pair)
for index, value in enumerate(list_number_of_pair):
    # Typo fix: "rabit" -> "rabbit".
    print("Month {0}: {1} pair(s) of rabbit".format(index, value))
'''
Functions:
1. Purposeful - a function always provides some capability
2. Hides detail - however complex the implementation, callers need not care
3. Reusable
'''
num = 1.244333
# round() keeps the requested number of decimal places.
result = round(num, 2)
print(result)
from machine import Pin, SPI
from micropython import const
class RN8302B():
    """MicroPython driver for the RN8302B three-phase energy-metering IC over SPI.

    Only the RMS-current registers and the startup/calibration registers are
    used here; constant values presumably follow the RN8302B datasheet —
    confirm against the datasheet before extending.
    """

    # RMS current registers for phases A/B/C and neutral.
    RN8302B_RMS_IA = const(0x0B)
    RN8302B_RMS_IB = const(0x0C)
    RN8302B_RMS_IC = const(0x0D)
    RN8302B_RMS_IN = const(0x0E)
    # Current-channel gain registers.
    RN8302B_REG_GSIA = const(0x16)
    RN8302B_REG_GSIB = const(0x17)
    RN8302B_REG_GSIC = const(0x18)
    RN8302B_REG_GSIN = const(0x19)
    RN8302B_REG_WRITEANBLE = const(0x80)  # write-enable register
    RN8302B_REG_WORKMODE = const(0x81)
    RN8302B_REG_MODSEL = const(0x86)
    RN8302B_CMD_WREN = const(0xE5)   # enable register writes
    RN8302B_CMD_WRDIS = const(0xDC)  # disable register writes
    RN8302B_CMD_GOEMM = const(0xA2)  # work-mode command used in start_up()
    RN8302B_CMD_33 = const(0x33)
    RN8302B_CMD_34 = const(0x00)  # NOTE(review): name says 34 but value is 0x00 — confirm

    def __init__(self, spi, cs=2, rst=4):
        # spi: configured machine.SPI bus; cs/rst: GPIO numbers.
        self.spi = spi
        self.cs = Pin(cs, Pin.OUT)
        self.rst = Pin(rst, Pin.OUT)
        self.FLOAT24 = 16777216.0  # 2^24
        # Scale factors from raw 24-bit register values to engineering units.
        self.VOLTAGE_MULTIPLIER = (1 / self.FLOAT24 * 367)
        self.CURRENT_MULTIPLIER = (1 / self.FLOAT24 * 5)
        self.POWER_MULTIPLIER = (1 / self.FLOAT24 * 1.024 * 367 * 5 * 2)
        self.reset()
        self.start_up()

    def read(self, LowAdd, HighAdd, count):
        """Read *count* data bytes plus a checksum byte from a register.

        Returns the raw buffer (data followed by the checksum byte) on
        success, or None when the checksum does not match.
        """
        HighAdd = HighAdd * 16
        buf = bytearray(count + 1)
        self.cs.value(0)
        self.spi.write(bytearray([LowAdd]))
        self.spi.write(bytearray([HighAdd]))
        check = LowAdd + HighAdd
        self.spi.write_readinto(buf, buf)  # buf
        self.cs.value(1)
        for i in range(count):
            check = buf[i] + check
        # NOTE(review): ~check is negative for any positive sum and CPython's
        # bytearray() rejects values outside 0..255; this is usually written
        # as bytearray([(~check) & 0xFF]). Confirm behavior on the target port.
        check = bytearray([~check])
        if check[0] == buf[count]:
            # print('read successful')
            # for i in range(count + 1):
            #     print(buf[i])
            return buf
        else:
            print('read error')
            return None

    def write(self, LowAdd, HighAdd, Data, count):
        """Write *count* bytes of *Data* to a register, appending a checksum.

        Always returns 0.
        """
        HighAdd = HighAdd * 16 + 0x80  # 0x80 marks a write transaction
        self.cs.value(0)
        self.spi.write(bytearray([LowAdd]))
        self.spi.write(bytearray([HighAdd]))
        check = LowAdd + HighAdd
        self.spi.write(Data)  # data
        for i in range(count):
            check += Data[i]
        # check = bytearray([~check])
        # NOTE(review): same 0..255 range concern as in read() above.
        self.spi.write(bytearray([~check]))  # send check bit
        self.cs.value(1)
        # print(b[0], b[1], b[2])
        # print(check[0])
        # print(b[count])
        return 0

    def reset(self):
        """Hardware-reset the chip by pulsing the RST line low."""
        import utime
        self.rst.value(0)
        utime.sleep_ms(500)
        self.rst.value(1)
        utime.sleep_ms(100)

    def start_up(self):
        """Unlock writes, set the work mode and wiring, and load channel gains."""
        import utime
        self.write(self.RN8302B_REG_WRITEANBLE, 1, bytearray([self.RN8302B_CMD_WREN]), 1)  # // write enable
        # RN8302_Write_Reg(0x62, 0x0000ff, 3); // channel enable
        self.write(self.RN8302B_REG_WORKMODE, 1, bytearray([self.RN8302B_CMD_GOEMM]), 1)  # // work mode
        self.write(self.RN8302B_REG_MODSEL, 1, bytearray([self.RN8302B_CMD_34]), 1)  # three-phase four-wire
        # RN8302_Write_Reg(0x13, GSUA, 2); // Ua channel gain
        # RN8302_Write_Reg(0x14, GSUB, 2); // Ub channel gain
        # RN8302_Write_Reg(0x15, GSUC, 2); // Uc channel gain
        # RN8302_Write_Reg(0x16, GSIA, 2); // Ia channel gain
        # RN8302_Write_Reg(0x17, GSIB, 2); // Ib channel gain
        # RN8302_Write_Reg(0x18, GSIC, 2); // Ic channel gain
        # PHSUA = G_EffectPar_Info.PHSUA;
        # PHSUB = G_EffectPar_Info.PHSUB;
        # PHSUC = G_EffectPar_Info.PHSUC;
        # RN8302_Write_Reg(0x0C, PHSUA, 1); // Ua phase correction
        # RN8302_Write_Reg(0x0D, PHSUB, 1); // Ub phase correction
        # RN8302_Write_Reg(0x0E, PHSUC, 1); // Uc phase correction
        self.write(self.RN8302B_REG_GSIA, 1, bytearray([0x06, 0x51]), 2)  # phase-A current calibration
        self.write(self.RN8302B_REG_GSIB, 1, bytearray([0x06, 0x51]), 2)  # phase-B current calibration
        self.write(self.RN8302B_REG_GSIC, 1, bytearray([0x06, 0x51]), 2)  # phase-C current calibration
        self.write(self.RN8302B_REG_GSIN, 1, bytearray([0x06, 0x51]), 2)  # neutral current calibration
        # Initial reads of the four RMS channels (results discarded).
        self.read(self.RN8302B_RMS_IA, 0, 4)
        self.read(self.RN8302B_RMS_IB, 0, 4)
        self.read(self.RN8302B_RMS_IC, 0, 4)
        self.read(self.RN8302B_RMS_IN, 0, 4)
        utime.sleep_ms(100)

    def read_i(self, current_channal):
        """Return the RMS current of *current_channal* in amperes, or None on
        checksum failure."""
        current = self.read(current_channal, 0, 4)
        if current:
            # Assemble the 4 data bytes, most significant first.
            temp = current[0] * 16777216 + current[1] * 65536 + current[2] * 256 + current[3]
            # 160 / 2^27 scaling — NOTE(review): differs from CURRENT_MULTIPLIER
            # above; confirm which scale is intended for this hardware.
            I = 160 * temp / 134217728
            return I
        else:
            return None
# rn8302b_write(0x82, 1, 0xFA, 1)  # // software reset
def unit_test():
    """Continuously sample and print the four RMS current channels."""
    import utime
    # -1 selects a software (bit-banged) SPI bus.
    bus = SPI(-1, polarity=0, phase=1, sck=Pin(5), mosi=Pin(23), miso=Pin(19), baudrate=200000)
    meter = RN8302B(bus)
    channels = (meter.RN8302B_RMS_IA, meter.RN8302B_RMS_IB,
                meter.RN8302B_RMS_IC, meter.RN8302B_RMS_IN)
    while True:
        for channel in channels:
            amps = meter.read_i(channel)
            if amps:
                print('current=%.1fA' % (amps))
        utime.sleep_ms(800)
if __name__ == '__main__':
    # Run the hardware smoke test when executed directly (loops forever).
    unit_test()
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import gzip
import random
import neural
def readfile(data):
    """Load a CSV file of numbers as a 2-D float array.

    :param data: path to a CSV file where every cell is numeric.
    :return: numpy array of shape (rows, cols) with float values.
    """
    with open(data, "r") as csvfile:
        # Comprehension replaces the original index-loop in-place conversion.
        rows = [[float(cell) for cell in row] for row in csv.reader(csvfile)]
    return np.array(rows)
if __name__ == '__main__':
    mnist_data_folder = 'pics/'  # NOTE(review): unused; paths below are hard-coded
    #Name of training and testing files in gunzip format
    # First CSV column is the label; remaining columns are the features.
    train_images = readfile("pics/pics_train.csv")
    train_labels = train_images[:,0]
    train_images= train_images[:,1:]
    test_images = readfile("pics/pics_test.csv")
    test_labels = test_images[:,0]
    test_images= test_images[:,1:]
    #visualize(np.expand_dims(X[0].reshape(28,28),axis=0))
    epoch=1500 #Setting training iterations
    lr=0.005 #Setting learning rate
    # Network layout: 19 inputs, two hidden layers, 7 output classes —
    # assumes it matches the CSV feature count; TODO confirm against data.
    layers=[19,45,30,7]
    w,b=neural.initial_weights_and_bias(layers)
    lw=len(w)
    leng=10  # mini-batch size
    X1=train_images
    Y1=train_labels
    for i1 in range(epoch):
        print(i1)
        # Reshuffle features and labels together each epoch.
        X1,Y1=neural.unison_shuffled_copies(X1, Y1)
        for l1 in range(0,len(X1)//leng):
            # Slice out one mini-batch.
            X=X1[(leng*l1):(leng*(l1+1))]
            y=Y1[(leng*l1):(leng*(l1+1))]
            y1=neural.givevalues(y,7)  # presumably one-hot over 7 classes — verify in neural module
            #Forward Propagation
            values=neural.forward(X,w,b)
            #Backpropagation
            error=neural.Error_calculation(w,values,y1)
            w,b=neural.update_weights(X,w,b,error,values,lr)
        neural.accuracy(X1,Y1,w,b)
    #Accuracy on the held-out test set
    neural.accuracy(test_images,test_labels,w,b)
|
import unittest
from unittest.mock import Mock
from rescan import scanner
class TestScanner(unittest.TestCase):
    """Unit tests for scanner.Scanner area grouping and strip creation."""

    def test_mergeAreas_handle_holes(self):
        """Adjacent same-color areas merge into one group even when passed out of order."""
        colorCode = 'BLACK'
        colorAreaA = Mock()
        colorAreaA.getBoundingRectangle.return_value = (0, -1, 10, -1)
        colorAreaA.colorCode = colorCode
        colorAreaB = Mock()
        colorAreaB.getBoundingRectangle.return_value = (10, -1, 10, -1)
        colorAreaB.colorCode = colorCode
        colorAreaC = Mock()
        colorAreaC.getBoundingRectangle.return_value = (20, -1, 10, -1)
        colorAreaC.colorCode = colorCode

        sut = scanner.Scanner()
        groups = sut.groupAreas([colorAreaA, colorAreaC, colorAreaB])

        self.assertEqual(len(groups), 1)
        self.assertEqual(groups[0].xMin, 0)
        self.assertEqual(groups[0].xMax, 30)

    def test_createStrips(self):
        """Strips come back sorted by each group's computed x position."""
        # FIX: removed an unused `colorCode` local left over from copy-paste.
        colorAreaGroupA = Mock()
        colorAreaGroupA.compute_xPos.return_value = 1
        colorAreaGroupB = Mock()
        colorAreaGroupB.compute_xPos.return_value = 2
        colorAreaGroupC = Mock()
        colorAreaGroupC.compute_xPos.return_value = 3
        colorAreaGroupD = Mock()
        colorAreaGroupD.compute_xPos.return_value = 4
        colorAreaGroupE = Mock()
        colorAreaGroupE.compute_xPos.return_value = 0

        sut = scanner.Scanner()
        strips = sut.createStrips([colorAreaGroupA, colorAreaGroupC, colorAreaGroupB, colorAreaGroupD, colorAreaGroupE])

        self.assertEqual(len(strips), 5)
        self.assertEqual(strips[0].xPos, 0)
        self.assertEqual(strips[1].xPos, 1)
        self.assertEqual(strips[2].xPos, 2)
        self.assertEqual(strips[3].xPos, 3)
        self.assertEqual(strips[4].xPos, 4)

    def test_createStrips_raise(self):
        """Two groups at the same x position must raise ValueError."""
        # FIX: removed an unused `colorCode` local left over from copy-paste.
        colorAreaGroupA = Mock()
        colorAreaGroupA.compute_xPos.return_value = 1
        colorAreaGroupB = Mock()
        colorAreaGroupB.compute_xPos.return_value = 1

        sut = scanner.Scanner()
        with self.assertRaises(ValueError):
            sut.createStrips([colorAreaGroupA, colorAreaGroupB])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
#%%
"Preamble for importing libraries"
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from scipy import stats
import seaborn as sns
import matplotlib as mlib
import matplotlib.pyplot as plt
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris
#%%
# Load the iris dataset bundled with scikit-learn.
iris = load_iris()
# Grab features (X) and the Target (Y)
X = iris.data
Y = iris.target
# Show the Built-in Data Description
# print(iris.DESCR)  # (Python-3 form; original comment used Py2 syntax)
#%%
model = GaussianNB()
# Split the data into Training and Testing sets.
# NOTE(review): train_test_split is unseeded, so the split (and the printed
# accuracy) varies between runs — pass random_state for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
model.fit(X_train,Y_train)
# Predicted outcomes
predicted = model.predict(X_test)
# Actual Expected Outcomes
expected = Y_test
print (accuracy_score(expected, predicted))
# %%
|
import plotly.tools as to
import plotly.graph_objs as go
from plotly.offline import plot
from random import randint
from collections import OrderedDict
from plotly import tools
import re
from screeninfo import get_monitors
class Plot:
    """Builds a multi-subplot Plotly HTML report of randomly generated memory data."""

    def __init__(self):
        # Four per-iteration memory series (values in KB, filled later).
        self.first = []
        self.second = []
        self.third = []
        self.fourth = []
        self.total_elements = 10  # number of stacked subplots
        self.total_values = 8     # samples per series
        # x-axis ticks: 5, 10, ..., 40 (one per sample)
        self.iterations = range(5, 5 * self.total_values + 1, 5)
        self.layout = None
        self.subplots_title = []
        self.memory_dict = OrderedDict()
        self.show_legend = False
        self.offset = 80  # horizontal pixels reserved when the legend is shown

    @staticmethod
    def set_credentials_file():
        # NOTE(review): hard-coded Plotly credentials committed to source —
        # rotate this key and load it from the environment instead.
        to.set_credentials_file(username='karandeep7', api_key='EUSyFaGFeFaiAmxcqVf0')

    def generate_random_data(self):
        """Populate the four series with random KB values, indexed by name."""
        for i in range(self.total_values):
            self.first.append(randint(5000, 20000))
            self.second.append(randint(25000, 35000))
            self.third.append(randint(2000, 7000))
            self.fourth.append(randint(45000, 60000))
        # NOTE(review): OrderedDict wraps a plain dict literal; insertion order
        # is only guaranteed on Python >= 3.7 — confirm target interpreter.
        self.memory_dict = OrderedDict(
            {'Native Alloc': self.first,
             'Native Heap': self.second,
             'Dalvik Alloc': self.third,
             'Dalvik Heap': self.fourth
             })

    def get_trace(self, memory_name, memory_data):
        """Return a Scatter trace of *memory_data* against the iteration axis."""
        trace = go.Scatter(
            x=self.iterations,
            y=memory_data,
            name=memory_name,
        )
        return trace

    @staticmethod
    def get_screen_resolution():
        """Best-effort primary-monitor resolution.

        Returns a (width, height) pair: strings parsed from screeninfo when a
        monitor is detected, otherwise the [1366, 768] fallback list. The
        caller maps int over the result either way.
        """
        default_resolution = [1366, 768]
        resolution = None
        for m in get_monitors():
            resolution = str(m)
            break
        if resolution:
            return re.findall(r'.*\((\d+)x(\d+).*', resolution)[0]
        else:
            return default_resolution

    def get_all_traces(self):
        """Assemble every subplot/trace and write the report to memory_dump.html."""
        all_traces = []
        for i in range(self.total_elements):
            # Each subplot holds one trace per named memory series.
            for memory_name, memory_data in self.memory_dict.items():
                all_traces.append(self.get_trace(memory_name, memory_data))
            self.subplots_title.append('Memory Variation {}'.format(i + 1))
        fig = tools.make_subplots(rows=self.total_elements, cols=1, subplot_titles=self.subplots_title)
        for annotation in fig.layout.annotations:
            annotation.font['size'] = 20
        trace_row = 1
        for trace_index, trace in enumerate(all_traces):
            fig.append_trace(trace, trace_row, 1)
            # Four traces per subplot; after each full row, label its axes
            # and advance to the next row.
            if ((trace_index + 1) % 4) == 0:
                fig['layout']['xaxis{}'.format(trace_row)].update(title='Iterations')
                fig['layout']['yaxis{}'.format(trace_row)].update(title='Memory Consumption (KB)')
                trace_row += 1
        width, height = map(int, Plot.get_screen_resolution())
        if self.show_legend:
            fig['layout'].update(width=width - self.offset, height=height * self.total_elements, title='Memory Report',
                                 showlegend=self.show_legend)
        else:
            fig['layout'].update(width=width, height=height * self.total_elements, title='Memory Report',
                                 showlegend=self.show_legend)
        fig['layout']['title']['font']['size'] = 28
        plot(fig, filename='memory_dump.html', auto_open=True)
def main():
    """Generate random memory data and render the subplot report."""
    plotter = Plot()
    plotter.generate_random_data()
    plotter.get_all_traces()
if __name__ == '__main__':
    # Build and open the memory report when executed directly.
    main()
|
# Minimal capture pipeline: take a picture, then refresh the JSON/JS index.
# The git sync steps are currently disabled.
#from git import git_pull, git_change
from capture import Camera
#git_pull()
# 'dl' camera, output set tagged with the capture date.
dl = Camera('dl', 'dl_2021_01_09')
dl.capture_image()
dl.create_json_from_images()
dl.add_json_to_js()
#git_change()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#--wxPython Imports.
import wx
from wx.lib.combotreebox import ComboTreeBox
#- wxPython Demo --------------------------------------------------------------
# Metadata consumed by the wxPython demo framework.
__wxPyOnlineDocs__ = 'https://wxpython.org/Phoenix/docs/html/wx.lib.combotreebox.html'
__wxPyDemoPanel__ = 'TestComboTreeBox'
# The demo's "Overview" tab shows the combotreebox module docstring.
overview = wx.lib.combotreebox.__doc__
class TestComboTreeBox(wx.Panel):
    """Demo panel showing a default-style and a read-only ComboTreeBox."""

    def __init__(self, parent, log):
        super(TestComboTreeBox, self).__init__(parent)
        self.log = log
        sizer = wx.FlexGridSizer(cols=2)
        sizer.AddGrowableCol(1)
        variants = [(0, 'Default style:'),
                    (wx.CB_READONLY, 'Read-only style:')]
        for style, caption in variants:
            sizer.Add(wx.StaticText(self, label=caption),
                      flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL, border=5)
            sizer.Add(self._createComboTreeBox(style),
                      flag=wx.EXPAND | wx.ALL, border=5)
        self.SetSizerAndFit(sizer)

    def _createComboTreeBox(self, style):
        """Build a 5x5x5 tree of demo items and hook the event handlers."""
        box = ComboTreeBox(self, style=style)
        self._bindEventHandlers(box)
        for i in range(5):
            top = box.Append('Item %d' % i)
            for j in range(5):
                middle = box.Append('Item %d.%d' % (i, j), top)
                for k in range(5):
                    box.Append('Item %d.%d.%d' % (i, j, k), middle)
        return box

    def _bindEventHandlers(self, comboBox):
        handlers = ((wx.EVT_COMBOBOX, self.OnItemSelected),
                    (wx.EVT_TEXT, self.OnItemEntered))
        for event_type, handler in handlers:
            comboBox.Bind(event_type, handler)

    def OnItemSelected(self, event):
        self.log.WriteText('You selected: %s\n' % event.GetString())
        event.Skip()

    def OnItemEntered(self, event):
        self.log.WriteText('You entered: %s\n' % event.GetString())
        event.Skip()
#- wxPy Demo -----------------------------------------------------------------
def runTest(frame, nb, log):
    """wxPy demo hook: create the demo panel inside notebook *nb*."""
    return TestComboTreeBox(nb, log)
#- __main__ Demo --------------------------------------------------------------
class printLog:
    """Stand-in logger that echoes every message to stdout."""

    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        # Same behavior as write(); wx demo loggers expect this method name.
        self.write(txt)
class TestFrame(wx.Frame):
    """Standalone top-level frame hosting the demo panel."""

    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)

        # Route panel log output to stdout.
        log = printLog()

        panel = TestComboTreeBox(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)

    def OnDestroy(self, event):
        self.Destroy()
class TestApp(wx.App):
    """Minimal application that bootstraps the demo frame."""

    def OnInit(self):
        frame = TestFrame(None)
        frame.SetTitle('Test Demo')
        frame.Show()
        return True
#- __main__ -------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    # Report interpreter and toolkit versions before starting the GUI.
    print('Python %s.%s.%s %s' % sys.version_info[0:4])
    print('wxPython %s' % wx.version())
    gApp = TestApp(redirect=False,
                   filename=None,
                   useBestVisual=False,
                   clearSigInt=True)
    gApp.MainLoop()
|
import bs4
import os
import pandas as pd
import output
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def get_source(semester):
    """Collect page sources for the four target departments in a semester.

    Reads every ``*_e*`` HTML file under ``<cwd>/<semester>/`` and keeps the
    ones whose <title> names one of the four departments.

    :param semester: semester folder name (e.g. 1081); converted to str.
    :return: list of matching HTML source strings.
    """
    semester = str(semester)
    folder = os.path.join(os.getcwd(), semester)
    source_lst = []
    dpt_lst = ["Information Engineering", "Finance and Banking",
               "Accounting and Information Technology", "Information Management"]
    for fname in os.listdir(folder):  # renamed from `dir` (shadowed a builtin)
        if "_e" not in fname:
            continue
        with open(os.path.join(folder, fname), 'r', encoding='utf-8') as file:
            source = file.read()
        soup = bs4.BeautifulSoup(source, 'html.parser')
        title = soup.select_one("head > title")
        # BUGFIX: pages without a <title> (or with an empty one) used to crash
        # on `in title.string`; skip them instead.
        if title is None or title.string is None:
            continue
        for dpt in dpt_lst:
            if dpt in title.string and "Department" in title.string:
                source_lst.append(source)
    return source_lst
# Order: Information Engineering (CS), Finance, Accounting, Information Management
def get_table(source_lst, semester):
    """Extract the course table from each page source, tagged with *semester*.

    :param source_lst: list of HTML source strings.
    :param semester: value stored in the new 'semester' column.
    :return: list of DataFrames, one per source.
    """
    keep_cols = ["Year Standing" , "Course ID" , "Course Title" , "Credit" , "Credit type" , "Day/Period" , "Remarks(Might contain Chinese due to course remarks which cannot be translated afterwards)"]
    short_cols = ["Year Standing" , "Course ID" , "Course Title" , "Credit" , "Credit type" , "Day/Period" , "Remarks"]
    df_lst = []
    for source in source_lst:
        table = pd.read_html(source)[0][keep_cols]
        table.columns = short_cols
        table['semester'] = semester
        df_lst.append(table)
    return df_lst
def concat_df_lst(df_lst1, df_lst2):
    """Pairwise-concatenate two equal-length lists of DataFrames.

    Element i of the result is df_lst1[i] stacked on top of df_lst2[i] with a
    fresh 0..n-1 index.

    :return: list of concatenated DataFrames.
    """
    merged = []
    for first, second in zip(df_lst1, df_lst2):
        # BUGFIX: the original called df.reset_index() and discarded the
        # result (a no-op), leaving duplicated index labels after the concat.
        merged.append(pd.concat([first, second], axis=0).reset_index(drop=True))
    return merged
def concat_inlist_df(df_lst):
    """Concatenate a list of DataFrames into one, with a fresh 0..n-1 index.

    :param df_lst: non-empty list of DataFrames with compatible columns.
    :return: a single concatenated DataFrame.
    """
    # One pd.concat over the whole list replaces the original O(n^2)
    # pairwise concatenation; the result is identical.
    return pd.concat(df_lst, axis=0, ignore_index=True)
# Merge courses taught in support by other departments into each department's own course table
def hard_insert() :
    """Merge support courses taught by other departments into the four
    department tables, then sort them and write the CSVs back out.

    Reads the raw 1081/1082 HTML timetables of the supporting departments
    (Mathematics 2104, Business Administration 5204, Economics 5104), picks
    out the courses whose Remarks name one of the four target departments,
    appends them, and hands the result to output.write_csv().
    """
    path = os.getcwd()
    financial_df , mis_df , ie_df , accounting_df = read_file()
    source_lst_1 = []
    source_lst_2 = []
    # Mathematics department
    with open(path + "/1081/2104_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_1.append(file.read())
    with open(path + "/1082/2104_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_2.append(file.read())
    # Business Administration department
    with open(path + "/1081/5204_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_1.append(file.read())
    with open(path + "/1082/5204_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_2.append(file.read())
    # Economics department
    with open(path + "/1081/5104_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_1.append(file.read())
    with open(path + "/1082/5104_e.html" , 'r' , encoding = 'utf-8') as file :
        source_lst_2.append(file.read())
    df_lst_1 = []
    df_lst_2 = []
    remine_cols = ["Year Standing" , "Course ID" , "Course Title" , "Credit" , "Credit type" , "Day/Period" , "Remarks(Might contain Chinese due to course remarks which cannot be translated afterwards)"]
    # Semester-1 tables.
    for i in source_lst_1 :
        df = pd.read_html(i)
        df = df[0]
        df = df[remine_cols]
        df.columns = ["Year Standing" , "Course ID" , "Course Title" , "Credit" , "Credit type" , "Day/Period" , "Remarks"]
        df['semester'] = 1
        df_lst_1.append(df)
    # Semester-2 tables.
    for i in source_lst_2 :
        df = pd.read_html(i)
        df = df[0]
        df = df[remine_cols]
        df.columns = ["Year Standing" , "Course ID" , "Course Title" , "Credit" , "Credit type" , "Day/Period" , "Remarks"]
        df['semester'] = 2
        df_lst_2.append(df)
    df_lst = concat_df_lst(df_lst_1 , df_lst_2)
    math_df = df_lst[0]
    manage_df = df_lst[1]
    eco_df = df_lst[2]
    math_df.reset_index(inplace = True)
    manage_df.reset_index(inplace = True)
    eco_df.reset_index(inplace = True)
    # Add courses supported by the Mathematics department
    mis_insert_df = math_df.loc[math_df["Course Title"].str.contains("Calculus") & math_df["Remarks"].str.contains("Information Management")]
    ie_insert_df = math_df.loc[math_df["Course Title"].str.contains("Calculus") & math_df["Remarks"].str.contains("Computer Science")]
    financial_insert_df = math_df.loc[math_df["Course Title"].str.contains("Calculus") & math_df["Remarks"].str.contains("Finance and Banking")]
    accounting_insert_df = math_df.loc[math_df["Course Title"].str.contains("Calculus") & math_df["Remarks"].str.contains("Accounting and Information Technology")]
    # Add courses supported by the Business Administration department
    financial_insert_df = pd.concat([financial_insert_df , manage_df.loc[manage_df["Course Title"].str.contains("Introduction to Business") & manage_df["Remarks"].str.contains("Finance and Banking")]] , axis = 0 , ignore_index = True)
    accounting_insert_df = pd.concat([accounting_insert_df , manage_df.loc[manage_df["Course Title"].str.contains("Seminar on Humanistic and Business Ethics") & manage_df["Remarks"].str.contains("Accounting and Information Technology")]] , axis = 0 , ignore_index = True)
    mis_insert_df = pd.concat([mis_insert_df , manage_df.loc[manage_df["Course Title"].str.contains("Introduction to Business") & manage_df["Remarks"].str.contains("Information Management")]] , axis = 0 , ignore_index = True)
    mis_insert_df = pd.concat([mis_insert_df , manage_df.loc[manage_df["Course Title"].str.contains("Business Ethics") & manage_df["Remarks"].str.contains("Information Management")]] , axis = 0 , ignore_index = True)
    # Add courses supported by the Economics department
    financial_insert_df = pd.concat([financial_insert_df , eco_df.loc[eco_df["Course Title"].str.contains("Principle of Economics") & eco_df["Remarks"].str.contains("Finance and Banking")]] , axis = 0 , ignore_index = True)
    financial_insert_df = pd.concat([financial_insert_df , eco_df.loc[eco_df["Course Title"].str.contains("Microeconomics") & eco_df["Remarks"].str.contains("Finance and Banking")]] , axis = 0 , ignore_index = True)
    accounting_insert_df = pd.concat([accounting_insert_df , eco_df.loc[eco_df["Course Title"].str.contains("Principle of Economics") & eco_df["Remarks"].str.contains("Accounting and Information Technology")]] , axis = 0 , ignore_index = True)
    mis_insert_df = pd.concat([mis_insert_df , eco_df.loc[eco_df["Course Title"].str.contains("Principle of Economics") & eco_df["Remarks"].str.contains("Information Management")]] , axis = 0 , ignore_index = True)
    # Add courses supported by the Information Management department
    accounting_insert_df = pd.concat([accounting_insert_df , mis_df.loc[mis_df["Course Title"].str.contains("Introduction to Computer") & mis_df["Remarks"].str.contains("Accounting and Information Technology")]] , axis = 0 , ignore_index = True)
    # Add support courses from the Finance department
    accounting_insert_df = pd.concat([accounting_insert_df , financial_df.loc[financial_df["Course Title"].str.contains("Statistics") & financial_df["Remarks"].str.contains("Accounting and Information Technology")]] , axis = 0 , ignore_index = True)
    mis_insert_df = pd.concat([mis_insert_df , financial_df.loc[financial_df["Course Title"].str.contains("Statistics") & financial_df["Remarks"].str.contains("Information Management")]] , axis = 0 , ignore_index = True)
    # Add courses supported by the Accounting department
    mis_insert_df = pd.concat([mis_insert_df , accounting_df.loc[accounting_df["Course Title"].str.contains("Accounting") & accounting_df["Remarks"].str.contains("Information Management")]] , axis = 0 , ignore_index = True)
    financial_insert_df.drop(['Remarks' , "index"] , axis = 1 , inplace = True)
    financial_df.drop("Remarks" , axis = 1 , inplace = True)
    financial_df = pd.concat([financial_df , financial_insert_df] , axis = 0 , ignore_index = True)
    mis_insert_df.drop(['Remarks' , 'index'] , axis = 1 , inplace = True)
    mis_df.drop("Remarks" , axis = 1 , inplace = True)
    mis_df = pd.concat([mis_df , mis_insert_df] , axis = 0 , ignore_index = True)
    ie_insert_df.drop(['Remarks' , 'index'] , axis = 1 , inplace = True)
    ie_df.drop("Remarks" , axis = 1 , inplace = True)
    ie_df = pd.concat([ie_df , ie_insert_df] , axis = 0 , ignore_index = True)
    accounting_insert_df.drop(['Remarks' , 'index'] , axis = 1 , inplace = True)
    # Manually drop Accounting's "Principle of Economics (II)"
    accounting_insert_df.drop(accounting_insert_df[accounting_insert_df["Course ID"] == 5101002].index , axis = 0 , inplace = True)
    accounting_df.drop("Remarks" , axis = 1 , inplace = True)
    accounting_df = pd.concat([accounting_df , accounting_insert_df] , axis = 0 , ignore_index = True , sort = True)
    # Sort by year standing and semester
    financial_df.sort_values(by = ["Year Standing" , "semester"] , ascending = True , inplace = True)
    mis_df.sort_values(by = ["Year Standing" , "semester"] , ascending = True , inplace = True)
    ie_df.sort_values(by = ["Year Standing" , "semester"] , ascending = True , inplace = True)
    accounting_df.sort_values(by = ["Year Standing" , "semester"] , ascending = True , inplace = True)
    # Special handling for individual courses that can be credited/waived:
    # Accounting's Calculus rename (Calculus (I) -> Calculus) was cancelled;
    # Accounting's "Statistics" is tentatively renamed "Statistics (I)" for easier matching.
    #accounting_df.loc[accounting_df["Course Title"] == "Calculus (I)" , "Course Title"] = "Calculus"
    accounting_df.loc[accounting_df["Course Title"] == "Statistics" , "Course Title"] = "Statistics (I)"
    new_df_lst = [ie_df , financial_df , accounting_df , mis_df]
    output.write_csv(new_df_lst)
# Drop electives, keeping only required courses; returns a list of course DataFrames
def drop_elective():
    """Return the department tables restricted to required courses.

    :return: list of DataFrames in the same order as read_file().
    """
    return [df.loc[df["Credit type"] == "Required"] for df in read_file()]
# Read the CSVs produced earlier. Order: Finance -> IM -> IE -> Accounting; returns a list of course DataFrames
def read_file():
    """Load the prepared department course CSVs (big5 encoded).

    :return: [Finance, Information Management, Information Engineering,
              Accounting] DataFrames, in that fixed order.
    """
    base = os.getcwd()
    names = ["Finance and Banking", "Information Management",
             "Information Engineering", "Accounting and Information Technology"]
    return [pd.read_csv(base + "/data/" + name + ".csv", encoding="big5")
            for name in names]
def read_time_conflict_file():
    """Load the per-department time-conflict CSVs (big5 encoded).

    :return: [Finance, Information Management, Information Engineering,
              Accounting] DataFrames, in that fixed order.
    """
    base = os.getcwd()
    names = ["Finance and Banking", "Information Management",
             "Information Engineering", "Accounting and Information Technology"]
    return [pd.read_csv(base + "/data/" + name + " time conflict.csv", encoding="big5")
            for name in names]
# Compute the course-overlap rate between Information Management and the other departments
def course_overlap() :
    """Compare IM's required courses against Finance / IE / Accounting.

    Returns [[union sizes], [overlap counts], [Jaccard-style ratios],
    number of IM courses], each inner list ordered Finance, IE, Accounting.
    """
    #dpt_name = ["financial" , "mis" , "ie" , "accounting"]
    dpt_df_lst = course_title_procedure(drop_elective())
    for i in range(len(dpt_df_lst)) :
        dpt_df_lst[i] = dpt_df_lst[i][['Course ID' , 'Course Title']]
        # NOTE(review): in-place drop_duplicates on a column slice may raise
        # pandas' SettingWithCopyWarning — confirm on the pandas version in use.
        dpt_df_lst[i].drop_duplicates("Course ID", inplace = True)
        dpt_df_lst[i].drop_duplicates("Course Title", inplace = True)
    financial_course = dpt_df_lst[0]['Course Title'].to_list()
    mis_course = dpt_df_lst[1]['Course Title'].to_list()
    ie_course = dpt_df_lst[2]['Course Title'].to_list()
    accounting_course = dpt_df_lst[3]['Course Title'].to_list()
    mis_financial_overlap = 0
    mis_ie_overlap = 0
    mis_accounting_overlap = 0
    # Count IM courses that also appear in each other department's list.
    for i in mis_course :
        if i in financial_course :
            mis_financial_overlap += 1
        if i in ie_course :
            mis_ie_overlap += 1
        if i in accounting_course :
            mis_accounting_overlap += 1
    # Union size = |A| + |B| - |A ∩ B| (inclusion-exclusion).
    mis_financial_course_len = len(mis_course) + len(financial_course) - mis_financial_overlap
    mis_ie_course_len = len(mis_course) + len(ie_course) - mis_ie_overlap
    mis_accounting_course_len = len(mis_course) + len(accounting_course) - mis_accounting_overlap
    mis_financial_ratio = mis_financial_overlap / mis_financial_course_len
    mis_ie_ratio = mis_ie_overlap / mis_ie_course_len
    mis_accounting_ratio = mis_accounting_overlap / mis_accounting_course_len
    return_data = [[mis_financial_course_len , mis_ie_course_len , mis_accounting_course_len] , [mis_financial_overlap , mis_ie_overlap , mis_accounting_overlap] ,
                   [mis_financial_ratio , mis_ie_ratio , mis_accounting_ratio] , len(mis_course)]
    return return_data
# Normalize course titles into a consistent format
def course_title_procedure(dpt_df_lst):
    """Normalize the 'Course Title' column across department tables.

    Lower-cases titles, strips spaces and dots, removes the
    "(programmingrelated)" / "(english-taught)" tags, maps the roman
    numeral "ⅱ" to "ii", full-width parentheses to ASCII, and "&" to "and".

    :param dpt_df_lst: list of DataFrames with a 'Course Title' column;
        modified in place and also returned.
    """
    for df in dpt_df_lst:
        normalized = []
        for raw in df['Course Title'].to_list():
            title = raw.replace(" ", "").lower().replace(".", "")
            # BUGFIX: the original sliced a fixed number of characters off the
            # END of the string, which corrupted any title where the tag was
            # not the suffix; replace() removes the tag wherever it appears.
            title = title.replace("(programmingrelated)", "")
            title = title.replace("(english-taught)", "")
            title = title.replace("ⅱ", "ii")
            title = title.replace("(", "(").replace(")", ")")
            title = title.replace("&", "and")
            normalized.append(title)
        df['Course Title'] = normalized
    return dpt_df_lst
def rough_time_conflict() :
    """Flag every non-MIS course row whose meeting time clashes with the
    MIS schedule for the same year/semester, then write the result to CSV
    via output.write_time_conflict_csv.
    """
    #department order in dpt_df_lst: finance, information management (MIS), computer science, accounting
    dpt_df_lst = course_title_procedure(drop_elective())
    time_str_lst = []
    for i in dpt_df_lst :
        time_str_lst.append(i['Day/Period'].to_list())
    #convert lettered periods (A-J) into numeric half-slot values
    for i in range(len(time_str_lst)) :
        for j in range(len(time_str_lst[i])) :
            #temporarily lower-case "Fri" so the "F" period letter is not replaced inside it
            #replace the original "." with "/" so it is not confused with the decimal points introduced below
            time_str_lst[i][j] = time_str_lst[i][j].replace("." , "/")
            time_str_lst[i][j] = time_str_lst[i][j].replace("Fri" , "fri")
            time_str_lst[i][j] = time_str_lst[i][j].replace("A" , str(1.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("B" , str(2.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("C" , str(4.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("D" , str(5.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("E" , str(7.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("F" , str(8.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("G" , str(10.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("H" , str(11.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("I" , str(13.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("J" , str(14.5))
            time_str_lst[i][j] = time_str_lst[i][j].replace("fri" , "Fri")
    #split space-separated double meeting times into two period columns
    first_period_lst = []
    second_period_lst = []
    for i in range(len(time_str_lst)) :
        for j in time_str_lst[i] :
            if " " in j :
                first_period_lst.append(j.split(" ")[0])
                second_period_lst.append(j.split(" ")[1])
            else :
                first_period_lst.append(j)
                second_period_lst.append(np.nan)
        dpt_df_lst[i]['period1'] = first_period_lst
        dpt_df_lst[i]['period2'] = second_period_lst
        first_period_lst = []
        second_period_lst = []
    mis_df = dpt_df_lst.pop(1)
    pre_conflict_lst = []
    #i : one department's dataframe
    #j : year standing
    #k : semester
    for i in range(len(dpt_df_lst)) :
        dpt_df_lst[i]['is_conflict'] = 0
    mis_df['is_conflict'] = 0
    for i in dpt_df_lst :
        for j in range(1 , 5) :
            for k in range(1 , 3) :
                mis_time_lst = mis_df.loc[(mis_df["Year Standing"] == j) & (mis_df["semester"] == k) , "period1"].to_list()
                other_time_lst = i.loc[(i["Year Standing"] == j) & (i["semester"] == k) , "period1"].to_list()
                result_lst1 = compare_time(mis_time_lst , other_time_lst)
                #where period1 did not clash, retry against the other course's second meeting time
                for q in range(len(result_lst1)) :
                    if result_lst1[q] == 0 :
                        result_lst1[q] = i.loc[(i["Year Standing"] == j) & (i["semester"] == k) , "period2"].to_list()[q]
                    else :
                        result_lst1[q] = "already_conflict"
                result_lst1 = compare_time(mis_time_lst , result_lst1)
                #repeat the same two comparisons against the MIS second meeting times
                mis_time_lst = mis_df.loc[(mis_df["Year Standing"] == j) & (mis_df["semester"] == k) , "period2"].to_list()
                result_lst2 = compare_time(mis_time_lst , other_time_lst)
                for q in range(len(result_lst2)) :
                    if result_lst2[q] == 0 :
                        result_lst2[q] = i.loc[(i["Year Standing"] == j) & (i["semester"] == k) , "period2"].to_list()[q]
                    else :
                        result_lst2[q] = "already_conflict"
                result_lst2 = compare_time(mis_time_lst , result_lst2)
                #a course is conflicting if it clashed in any of the pairings above
                for q in range(len(result_lst1)) :
                    temp = result_lst1[q] + result_lst2[q]
                    if temp < 1 :
                        pre_conflict_lst.append(0)
                    else :
                        pre_conflict_lst.append(1)
                i.loc[(i["Year Standing"] == j) & (i["semester"] == k) , "is_conflict"] = pre_conflict_lst
                pre_conflict_lst = []
    #debug dump of the flagged dataframes
    for i in dpt_df_lst :
        print(i)
    dpt_df_lst.insert(1 , mis_df)
    output.write_time_conflict_csv(dpt_df_lst)
def compare_time(mis_time_lst , other_time_lst) :
    """Return a 0/1 list, one entry per item of other_time_lst, marking the
    courses whose meeting time clashes with any MIS course time.

    Time strings look like "day/sec1,sec2"; NaN means no meeting time and
    the sentinel "already_conflict" marks entries already known to clash.
    """
    compare_result = []
    for i in mis_time_lst :
        #no meeting time on the MIS side -> cannot clash
        if i is np.nan :
            compare_result.append(0)
            continue
        #sentinel inserted by the caller for a known clash
        if i == "already_conflict" :
            compare_result.append(1)
            continue
        mis_sction_lst = []
        mis_day , mis_section = i.split("/")
        mis_sction_lst = mis_section.split(",")
        for j in other_time_lst :
            if j is np.nan :
                compare_result.append(0)
                continue
            if j == "already_conflict" :
                compare_result.append(1)
                continue
            other_section_lst = []
            other_day , other_section = j.split("/")
            other_section_lst = other_section.split(",")
            if mis_day == other_day :
                for k in mis_sction_lst :
                    flag = False
                    for q in other_section_lst :
                        #sections closer than one full slot overlap
                        #(letter periods were mapped to half slots)
                        if abs(float(k) - float(q)) < 1 :
                            compare_result.append(1)
                            flag = True
                            break
                    if flag :
                        break
                #for-else: no section pair overlapped on this shared day
                else :
                    compare_result.append(0)
            else :
                compare_result.append(0)
    #fold the flat result list back to one entry per other-course by column.
    #NOTE(review): the j % len(other_time_lst) mapping assumes each MIS entry
    #contributed exactly len(other_time_lst) results; the NaN/"already_conflict"
    #shortcuts above append a single value instead, which shifts the columns —
    #confirm this misalignment is intended (the appended 0s are harmless, the 1s
    #may land on the wrong course).
    return_result = [0] * len(other_time_lst)
    for i in range(len(other_time_lst)) :
        for j in range(len(compare_result)) :
            if j % len(other_time_lst) == i :
                return_result[i] += compare_result[j]
    #clamp accumulated counts to a 0/1 flag
    for i in range(len(return_result)) :
        if return_result[i] > 0 :
            return_result[i] = 1
    return return_result
def delicate_time_conflict() :
    """Count, per year standing (1-4), the courses of each non-MIS
    department whose EVERY offered section clashes with the MIS schedule.

    Returns (financial_counts, ie_counts, accounting_counts, conflict_lst),
    each counts list indexed by year standing.
    """
    dpt_df_lst = read_time_conflict_file()
    mis_df = dpt_df_lst.pop(1)
    #number of distinct course titles per year for each department
    year_course_len = []
    for i in range(len(dpt_df_lst)) :
        temp = []
        for j in range(1 , 5) :
            temp_df = dpt_df_lst[i].loc[dpt_df_lst[i]["Year Standing"] == j , "Course Title"]
            temp.append(len(temp_df.drop_duplicates().to_list()))
        year_course_len.append(temp)
    conflict_lst = []
    all_course_number_lst = []
    for i in range(len(dpt_df_lst)) :
        course_title_lst = dpt_df_lst[i]["Course Title"].drop_duplicates().to_list()
        all_course_number_lst.append(len(course_title_lst))
        temp = []
        for j in course_title_lst :
            #a course counts as conflicting only when ALL of its rows are flagged
            if sum(dpt_df_lst[i].loc[dpt_df_lst[i]["Course Title"] == j , "is_conflict"].to_list()) == len(dpt_df_lst[i].loc[dpt_df_lst[i]["Course Title"] == j , "is_conflict"].to_list()) :
                temp.append(1)
            else :
                temp.append(0)
        conflict_lst.append(temp)
    #convert the per-year counts into cumulative end indices into conflict_lst
    for i in range(3) :
        temp = []
        for j in range(4) :
            temp.append(year_course_len[i][j] + sum(year_course_len[i][0 : j]))
        year_course_len[i] = temp
    #sum the 0/1 flags of each year's slice of conflict_lst per department
    mis_financial_time_conflict = [sum(conflict_lst[0][0 : year_course_len[0][0]])]
    mis_ie_time_conflict = [sum(conflict_lst[1][0 : year_course_len[1][0]])]
    mis_accounting_time_conflict = [sum(conflict_lst[2][0 : year_course_len[2][0]])]
    for i in range(1 , 4) :
        mis_financial_time_conflict.append(sum(conflict_lst[0][year_course_len[0][i - 1] : year_course_len[0][i]]))
    for i in range(1 , 4) :
        mis_ie_time_conflict.append(sum(conflict_lst[1][year_course_len[1][i - 1] : year_course_len[1][i]]))
    for i in range(1 , 4) :
        mis_accounting_time_conflict.append(sum(conflict_lst[2][year_course_len[2][i - 1] : year_course_len[2][i]]))
    return mis_financial_time_conflict , mis_ie_time_conflict , mis_accounting_time_conflict , conflict_lst
def free_score():
    """Per-year "freedom" scores for each non-MIS department.

    Each course's score is 1 - (fraction of its sections that clash with
    the MIS schedule); scores are summed per year standing (1-4).
    Returns (financial, ie, accounting, free_score_lst).
    """
    dpt_df_lst = read_time_conflict_file()
    mis_df = dpt_df_lst.pop(1)
    #distinct-course counts per year (years 1-4) for each department
    year_course_len = [
        [len(df.loc[df["Year Standing"] == year, "Course Title"].drop_duplicates().to_list())
         for year in range(1, 5)]
        for df in dpt_df_lst
    ]
    #freedom score for every distinct course title
    free_score_lst = []
    all_course_lst = []  #kept for parity with the original (not returned)
    for df in dpt_df_lst:
        titles = df["Course Title"].drop_duplicates().to_list()
        all_course_lst.append(len(titles))
        scores = []
        for title in titles:
            flags = df.loc[df["Course Title"] == title, "is_conflict"].to_list()
            scores.append(1 - sum(flags) / len(flags))
        free_score_lst.append(scores)
    #convert per-year counts into cumulative end indices into free_score_lst
    for d in range(3):
        year_course_len[d] = [sum(year_course_len[d][:j + 1]) for j in range(4)]
    per_dept = []
    for d in range(3):
        bounds = year_course_len[d]
        totals = [sum(free_score_lst[d][0:bounds[0]])]
        for y in range(1, 4):
            totals.append(sum(free_score_lst[d][bounds[y - 1]:bounds[y]]))
        per_dept.append(totals)
    return per_dept[0], per_dept[1], per_dept[2], free_score_lst
#Compute the final score: the time_conflict and free_score variables are weighted by year standing (lower
#years get larger weights, because an early clash blocks more follow-up courses and so has a bigger impact).
#After weighting, each variable is min-max normalised and multiplied by 33 to give the final score.
def final_score():
    """Combine course overlap, time-conflict, and freedom scores into a
    final per-department score (financial, ie, accounting).

    Each of the three normalised variables contributes up to 33 points.
    """
    course_len_lst, overlap_lst, overlap_ratio_lst, mis_course_len = course_overlap()
    mis_financial_time_conflict, mis_ie_time_conflict, mis_accounting_time_conflict, conflict_lst = delicate_time_conflict()
    mis_financial_free_score, mis_ie_free_score, mis_accounting_free_score, free_score_lst = free_score()
    #lower years weigh more: an early clash blocks more downstream courses
    weight = [1.3, 1.2, 1.1, 1]
    for i in range(len(mis_financial_time_conflict)):
        #invert non-zero conflict counts so that fewer conflicts score higher.
        #BUG FIX: the original guarded all three inversions on the *financial*
        #count alone, which raised ZeroDivisionError whenever the financial
        #count was non-zero but the ie/accounting count was zero, and skipped
        #inverting non-zero ie/accounting counts when the financial count was 0.
        if mis_financial_time_conflict[i] != 0:
            mis_financial_time_conflict[i] = 1 / mis_financial_time_conflict[i]
        if mis_ie_time_conflict[i] != 0:
            mis_ie_time_conflict[i] = 1 / mis_ie_time_conflict[i]
        if mis_accounting_time_conflict[i] != 0:
            mis_accounting_time_conflict[i] = 1 / mis_accounting_time_conflict[i]
        mis_financial_time_conflict[i] = mis_financial_time_conflict[i] * weight[i]
        mis_ie_time_conflict[i] = mis_ie_time_conflict[i] * weight[i]
        mis_accounting_time_conflict[i] = mis_accounting_time_conflict[i] * weight[i]
        mis_financial_free_score[i] = mis_financial_free_score[i] * weight[i]
        mis_ie_free_score[i] = mis_ie_free_score[i] * weight[i]
        mis_accounting_free_score[i] = mis_accounting_free_score[i] * weight[i]
    time_conflict_ratio_lst = [sum(mis_financial_time_conflict), sum(mis_ie_time_conflict), sum(mis_accounting_time_conflict)]
    free_score_ratio_lst = [sum(mis_financial_free_score), sum(mis_ie_free_score), sum(mis_accounting_free_score)]
    all_data_lst = [overlap_ratio_lst, time_conflict_ratio_lst, free_score_ratio_lst]
    #min-max normalise each variable across the three departments
    mm = MinMaxScaler()
    for i in range(len(all_data_lst)):
        all_data_lst[i] = mm.fit_transform(np.array(all_data_lst[i]).reshape(-1, 1))
    financial_final_score = []
    ie_final_score = []
    accounting_final_score = []
    for i in all_data_lst:
        financial_final_score.append(float(i[0] * 33))
        ie_final_score.append(float(i[1] * 33))
        accounting_final_score.append(float(i[2] * 33))
    return financial_final_score, ie_final_score, accounting_final_score
import paddle
import paddle.nn as nn
import paddle.vision.transforms as T
from ppim.units import load_model
from ppim.models.vit import VisionTransformer
from ppim.models.common import add_parameter
from ppim.models.common import trunc_normal_
def get_transforms(resize, crop):
    """Build the standard ImageNet eval pipeline: bicubic resize to
    `resize`, center crop to `crop`, tensor conversion, normalization."""
    steps = [
        T.Resize(resize, interpolation='bicubic'),
        T.CenterCrop(crop),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return T.Compose(steps)
#shared eval transforms for the two supported input resolutions
transforms_224 = get_transforms(248, 224)
transforms_384 = get_transforms(384, 384)
#pretrained-weight download URLs, keyed by the factory-function names below
urls = {
    'deit_ti': r'https://bj.bcebos.com/v1/ai-studio-online/1e91e6ab967b4b0f9940891c6f77f98ca612d5a767b8482498c364c11d65b44b?responseContentDisposition=attachment%3B%20filename%3DDeiT_tiny_patch16_224.pdparams',
    'deit_s': r'https://bj.bcebos.com/v1/ai-studio-online/56fb3b56543d495aa36cc244e8f25e3e321747cfcedd48c28830ea3a22f4a82a?responseContentDisposition=attachment%3B%20filename%3DDeiT_small_patch16_224.pdparams',
    'deit_b': r'https://bj.bcebos.com/v1/ai-studio-online/38be4cdffc0240c18e9e4905641e9e8171277f42646947e5b3dbcd68c59a6d81?responseContentDisposition=attachment%3B%20filename%3DDeiT_base_patch16_224.pdparams',
    'deit_ti_distilled': r'https://bj.bcebos.com/v1/ai-studio-online/dd0ff3e26c1e4fd4b56698a43a62febd35bdc8153563435b898cdd9480cd8720?responseContentDisposition=attachment%3B%20filename%3DDeiT_tiny_distilled_patch16_224.pdparams',
    'deit_s_distilled': r'https://bj.bcebos.com/v1/ai-studio-online/5ab1d5f92e1f44d39db09ab2233143f8fd27788c9b4f46bd9f1d5f2cb760933e?responseContentDisposition=attachment%3B%20filename%3DDeiT_small_distilled_patch16_224.pdparams',
    'deit_b_distilled': r'https://bj.bcebos.com/v1/ai-studio-online/24692c628ab64bfc9bb72fc8a5b3d209080b5ad94227472f98d3bb7cb6732e67?responseContentDisposition=attachment%3B%20filename%3DDeiT_base_distilled_patch16_224.pdparams',
    'deit_b_384': r'https://bj.bcebos.com/v1/ai-studio-online/de491e7155e94ac2b13b2a97e432155ed6d502e8a0114e4e90ffd6ce9dce63cc?responseContentDisposition=attachment%3B%20filename%3DDeiT_base_patch16_384.pdparams',
    'deit_b_distilled_384': r'https://bj.bcebos.com/v1/ai-studio-online/0a84b9ea45d0412d9bebae9ea3404e679221c3d0c8e542bf9d6a64f810983b25?responseContentDisposition=attachment%3B%20filename%3DDeiT_base_distilled_patch16_384.pdparams'
}
class DistilledVisionTransformer(VisionTransformer):
    """DeiT backbone with knowledge distillation: adds a distillation
    token alongside the class token and a second classifier head fed by
    it; at inference the two head outputs are averaged."""
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4,
                 qkv_bias=False,
                 norm_layer=nn.LayerNorm,
                 epsilon=1e-5,
                 class_dim=1000,
                 **kwargs):
        super().__init__(
            img_size=img_size,
            patch_size=patch_size,
            class_dim=class_dim,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            norm_layer=norm_layer,
            epsilon=epsilon,
            **kwargs)
        #re-create the position embedding with num_patches + 2 slots:
        #one for the class token and one for the distillation token
        self.pos_embed = add_parameter(
            self, paddle.zeros(
                (1, self.patch_embed.num_patches + 2, self.embed_dim)
            )
        )
        self.dist_token = add_parameter(
            self, paddle.zeros((1, 1, self.embed_dim))
        )
        if class_dim > 0:
            #separate classifier head driven by the distillation token
            self.head_dist = nn.Linear(self.embed_dim, self.class_dim)
            self.head_dist.apply(self._init_weights)
        trunc_normal_(self.dist_token)
        trunc_normal_(self.pos_embed)
    def forward_features(self, x):
        """Run patch embedding + transformer blocks; return the
        (class-token, distillation-token) feature pair."""
        B = paddle.shape(x)[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand((B, -1, -1))
        dist_token = self.dist_token.expand((B, -1, -1))
        #token order: [cls, dist, patches], matching pos_embed above
        x = paddle.concat((cls_tokens, dist_token, x), axis=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0], x[:, 1]
    def forward(self, x):
        """Average the class-head and distillation-head logits.

        NOTE(review): when class_dim <= 0 this falls through and returns
        None — presumably the model is never built that way; confirm.
        """
        x, x_dist = self.forward_features(x)
        if self.class_dim > 0:
            x = self.head(x)
            x_dist = self.head_dist(x_dist)
            return (x + x_dist) / 2
def deit_ti(pretrained=False, **kwargs):
    """DeiT-Tiny/16 @224 (embed 192, depth 12, 3 heads).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_ti'])
    return net, transforms_224
def deit_s(pretrained=False, **kwargs):
    """DeiT-Small/16 @224 (embed 384, depth 12, 6 heads).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_s'])
    return net, transforms_224
def deit_b(pretrained=False, **kwargs):
    """DeiT-Base/16 @224 (embed 768, depth 12, 12 heads).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_b'])
    return net, transforms_224
def deit_ti_distilled(pretrained=False, **kwargs):
    """Distilled DeiT-Tiny/16 @224 (adds a distillation token/head).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = DistilledVisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_ti_distilled'])
    return net, transforms_224
def deit_s_distilled(pretrained=False, **kwargs):
    """Distilled DeiT-Small/16 @224 (adds a distillation token/head).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = DistilledVisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_s_distilled'])
    return net, transforms_224
def deit_b_distilled(pretrained=False, **kwargs):
    """Distilled DeiT-Base/16 @224 (adds a distillation token/head).
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = DistilledVisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_b_distilled'])
    return net, transforms_224
def deit_b_384(pretrained=False, **kwargs):
    """DeiT-Base/16 at 384x384 input resolution.
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12,
        num_heads=12, mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_b_384'])
    return net, transforms_384
def deit_b_distilled_384(pretrained=False, **kwargs):
    """Distilled DeiT-Base/16 at 384x384 input resolution.
    Returns (model, eval_transforms); loads released weights if pretrained."""
    net = DistilledVisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12,
        num_heads=12, mlp_ratio=4, qkv_bias=True, epsilon=1e-6, **kwargs)
    if pretrained:
        net = load_model(net, urls['deit_b_distilled_384'])
    return net, transforms_384
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-31 13:48
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: make `person.info` (JSONB) and
    `person.updated_at` nullable."""
    dependencies = [
        ('flickr', '0003_auto_20170731_0952'),
    ]
    operations = [
        #allow the stored API info blob to be absent
        migrations.AlterField(
            model_name='person',
            name='info',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
        ),
        #allow the update timestamp to be unset
        migrations.AlterField(
            model_name='person',
            name='updated_at',
            field=models.DateTimeField(null=True),
        ),
    ]
|
from sage.all import *
import ast, sys
#################################################################
#This file verifies Proposition 5.1 parts (1) and (2) of
#''A local-global principle for isogenies of composite degree''
#by Isabel Vogt
#Part (3) is verified by the computing the genus of each of the
#subgroups in Table A.1
#The following files must be in the subfolder data:
###'gl_2_N.txt' (for N = 4, 8, 16, 32, and 64)
###'label_N_cosets.txt' (for each of the groups H in appendix A.1)
#################################################################
#the residue characteristic: the whole verification runs for powers of 2
ell = 2
#################################################################
#Functions to create groups from their label by searching the file
#'gl2_N.txt'
#################################################################
def make_gens_from_label(n, search_string, search_file):
    """Scan '<search_file><ell^n>.txt' for the line whose quoted label
    matches search_string; return its generator list, or False if absent."""
    N = ell**n
    path = search_file + str(N) + ".txt"
    for row in open(path, "r"):
        pieces = row.split(":")
        label = pieces[0][1:-1]  #strip the surrounding quote characters
        if str(label) == str(search_string):
            return ast.literal_eval(pieces[1])
    return False
def make_list_elements(string):
    """Read '<string>.txt' and return its lines parsed as Python literals."""
    with open(string + ".txt", "r") as fh:
        return [ast.literal_eval(row) for row in fh]
#################################################################
#All matrix/group computations are performed as tuples of entries
#to avoid initializing matrix groups in Sage.
#################################################################
XXM1 = {}
P1 = {}
V = {}
#Precomputes the groups G[i] = GL(2, Integers(2**i)),
#the set P1[i] = P1(Integers(2**i)),
#the set V[i] = (Integers(2**i))^2
#and the solution set XXM1[i] used in has_solution function
#NOTE: P1, V and XXM1 are only populated when ell == 2
G = []
for i in range(7):
    RR = Integers(ell**i)
    G.append(GL(2, RR))
    if ell == 2:
        S = set([])
        L = []
        M = []
        for x in RR:
            #XXM1[i] collects all values x*(x-1); used for odd-trace roots
            S.add(x * (x - 1))
            #P1 representatives: [1, x] for all x, plus [x, 1] for x = 0 mod ell
            L.append([RR(1), RR(x)])
            if ZZ(x) % ell == 0:
                L.append([RR(x),RR(1)])
            for y in RR:
                M.append([RR(x),RR(y)])
        XXM1[i] = S
        P1[i] = L
        V[i] = M
#apply the 2x2 matrix M (flat row-major list [a, b, c, d]) to the vector v
def mvmult(M,v):
    a, b, c, d = M
    return [a * v[0] + b * v[1], c * v[0] + d * v[1]]
#product M*N of two 2x2 matrices stored as flat row-major lists
def mmmult(M,N):
    a, b, c, d = M
    e, f, g, h = N
    return [a*e + b*g, a*f + b*h, c*e + d*g, c*f + d*h]
#inverse of a 2x2 matrix M over its coefficient ring (det must be a unit)
def minv(M):
    dinv = mdet(M)**(-1)
    return [dinv * M[3], -dinv * M[1], -dinv * M[2], dinv * M[0]]
#reduces mod ell^n a matrix represented as a list
#with entries in Z/ell^mZ for some m >= n
def reduce_element(h, n):
    ring = Integers(ell**n)
    return [ring(entry) for entry in h]
def reduce_list(L, n):
    """Reduce every matrix in L mod ell^n (see reduce_element)."""
    return [reduce_element(mat, n) for mat in L]
#trace of a 2x2 matrix stored as a flat row-major list [a, b, c, d]
def mtrace(M):
    a, _b, _c, d = M
    return a + d
#determinant a*d - b*c of a 2x2 matrix stored as [a, b, c, d]
def mdet(M):
    a, b, c, d = M
    return a * d - b * c
#enumerates the elements of H as flat lists via a closure walk: repeatedly
#left-multiply already-reached elements by the (reduced) generators until
#no new elements appear
def enumerate_elements(Hgens, n):
    identity = reduce_element([1,0,0,1],n)
    yield identity
    traced = set([tuple(identity)])
    todo = [identity]
    while todo:
        g = todo.pop()
        for h in Hgens:
            #FIX: multiply by the reduced generator; the original computed
            #hred but then used the unreduced h, silently relying on ring
            #coercion to land the product in Z/ell^n.  (Also dropped the
            #original's unused local R = Integers(ell**n).)
            hred = reduce_element(h,n)
            hg = mmmult(hred,g)
            if not tuple(hg) in traced:
                yield hg
                traced.add(tuple(hg))
                todo.append(hg)
    return
#################################################################
#check_exceptional_subgroups is the main function that determines
#if each subgroup in 'data/gl2_N.txt' is exceptional. Exceptional
#groups must later be shown to lie in a maximal exceptional group.
#################################################################
def check_exceptional_subgroups(n, string="gl2_"):
    """Return [label, generators] for every exceptional subgroup listed
    in '<string><ell^n>.txt' (ALL exceptional groups, not just the
    minimally exceptional ones)."""
    N = ell**n
    bads = []
    for row in open(string + str(N) + ".txt", "r"):
        pieces = row.split(":")
        label = pieces[0]
        Hgens = ast.literal_eval(pieces[1])
        #exceptional = fails the LGP conclusions while satisfying its hypotheses
        if not check_conclusions(n, label, Hgens) and check_elements(Hgens, n):
            bads.append([label, Hgens])
    return bads
#################################################################
#Functions to check the hypotheses of the LGP
#################################################################
#hypothesis check: every element of H must have a characteristic
#polynomial x^2 - tr(h)x + det(h) with a rational root mod 2^n
def check_elements(Hgens, n):
    RR = Integers(2**n)
    return all(has_solution(mtrace(h), mdet(h), RR, n)
               for h in enumerate_elements(Hgens, n))
#determines whether x^2 - a*x + b = 0 has a root in RR
def has_solution(a, b, RR, n):
    if ell != 2:
        #odd characteristic: the quadratic formula applies, test the discriminant
        return (a**2 - 4 * b).is_square()
    if a % 2 != 0:
        #odd trace, write a = 2*alpha + 1; a root exists exactly when
        #alpha^2 + alpha - b lies in the precomputed set XXM1[n] = {x(x-1)}
        alpha = RR(ZZ(a - 1) / 2)
        return (alpha**2 + alpha - b) in XXM1[n]
    #even trace, a = 2*alpha: the equation becomes (x - alpha)^2 = alpha^2 - b
    alpha = RR(ZZ(a) / 2)
    return (alpha**2 - b).is_square()
#################################################################
#Functions to check the conclusions of the LGP
#################################################################
def check_conclusions(n, label, Hgens):
    """True iff the ell^n-torsion subgroup generated by Hgens is conjugate
    into some A_{a,c,b}(ell^n) with a <= b <= c and b + c = n.
    (label is unused here but kept for the caller's interface.)"""
    return in_A(Hgens, n)
#is v a simultaneous eigenvector for the group generated by Hgens?
#(it suffices to check each generator)
def is_eigenvector(Hgens, v):
    #v must be nonzero mod ell so that it represents a point of P^1
    assert(v[0] % ell != 0 or v[1] % ell != 0)
    for h in Hgens:
        image = mvmult(h, v)
        #h*v is parallel to v exactly when the determinant |v, h*v| vanishes
        if v[0] * image[1] != v[1] * image[0]:
            return False
    return True
#v has entries in Z/ell^(n-1)Z and is nonzero mod ell; yield all lifts
#of v to Z/ell^nZ, up to rescaling
def lifts(v, n):
    ring = Integers(ell**n)
    x, y = ring(v[0]), ring(v[1])
    bump = ell**(n - 1)
    if x % ell != 0:
        #first coordinate is a unit: fix it and perturb the second
        for i in range(ell):
            yield [x, y + bump * i]
    else:
        for i in range(ell):
            yield [x + bump * i, y]
#computes the Borel subgroups of GL_2(Z/ell^nZ) containing H, indexed by
#a representative of the stable line; previousB (used when previous is
#True) caches the answer computed mod ell^(n-1)
def borels(Hgens, n, previous=False, previousB = None):
    if n == 1:
        return [v for v in P1[n] if is_eigenvector(Hgens, v)]
    B = previousB if previous else borels(modell(n - 1, Hgens), n - 1)
    #only lifts of lower-level stable lines can be stable at level n
    return [w for v in B for w in lifts(v, n) if is_eigenvector(Hgens, w)]
#checks whether the group generated by Hgens lies in a Cartan subgroup of
#GL_2(Z/ell^nZ), i.e. fixes two lines that are independent mod ell
def in_cartan(Hgens, n):
    found = []
    for v in P1[n]:
        #once one eigenvector is known, skip candidates dependent on it mod ell
        if len(found) == 1:
            w = found[0]
            if (v[0] * w[1] - w[0] * v[1]) % ell == 0:
                continue
        if is_eigenvector(Hgens, v):
            found.append(v)
            if len(found) == 2:
                return True
    return False
#reduces every element of Hgens (presented as a flat 4-entry list)
#mod ell^n
def modell(n, Hgens):
    #consistency fix: use ell**n like reduce_element instead of a hard-coded
    #2**n; identical at runtime since ell == 2 throughout this file
    R = Integers(ell**n)
    return [[R(h[0]), R(h[1]), R(h[2]), R(h[3])] for h in Hgens]
def independent_vectors(v, n):
    """Return all w in (Z/ell^n)^2 such that det(v | w) is a unit mod ell,
    i.e. w is independent from v modulo ell.

    BUG FIX: the original condition ``v[0]*w[1] - v[1]*w[0] % ell != 0``
    parses as ``v[0]*w[1] - ((v[1]*w[0]) % ell)`` because ``%`` binds
    tighter than ``-``; the whole determinant must be reduced mod ell,
    otherwise dependent vectors slip through and later break change_basis
    (whose change-of-basis matrix must be invertible).
    """
    ind_vecs = []
    for w in V[n]:
        if (v[0] * w[1] - v[1] * w[0]) % ell != 0:
            ind_vecs.append(w)
    return ind_vecs
#rewrite each generator in the coordinates of new_basis = [v, w]
def change_basis(Hgens, new_basis):
    v, w = new_basis[0], new_basis[1]
    COB = [v[0], w[0], v[1], w[1]]
    COB_inv = minv(COB)
    return [mmmult(COB_inv, mmmult(h, COB)) for h in Hgens]
#ell-adic valuation of b viewed in Z/ell^m Z, capped at m
def val(b,m):
    #the first r with b not divisible by ell^r gives valuation r - 1;
    #if every power divides b, report the cap m
    return next((r - 1 for r in range(1, m + 1) if b % ell**r != 0), m)
#determines if the group generated by Hgens is contained in a group
#of the form A_{a, c, b}(ell^n) for some choice of a, c, b with
#a <= c <= b, and b+c = n; this forces a+b >= ceiling(n/2)
def in_A(Hgens, n):
    """Check containment in some A_{a,c,b}(ell^n), trying each possible
    Borel level m = a + b from 1 to n and caching Borel data per level."""
    #m = a+b, the power of ell mod which it is Borel
    previous = False
    previousB = None
    for m in range(1, n+1):
        Hgens_m = modell(m,Hgens)
        B = borels(Hgens_m, m, previous, previousB)
        #first check condition of being Borel mod ell^m and Cartan mod ell^{n-m}
        if len(B) != 0:
            if (m == n) or in_cartan(modell(n-m, Hgens), n-m):
                #print('borel mod ell to the ',m,', cartan mod ell to the ',n-m)
                return True
        #then check if contained in another A_{a,c,b} group that's not Borel + Cartan
        for v in B:
            iv = independent_vectors(v, m)
            for w in iv:
                #work in the basis [v, w]; v spans the stable line, so the
                #lower-left entry of every generator vanishes mod ell^m
                CB_Hgens = change_basis(Hgens_m, [v, w])
                beta_min_val = m
                delta_minus_alpha_min_val = m
                for h in CB_Hgens:
                    assert h[2] % ell**m == 0
                    beta_min_val = min(beta_min_val, val(h[1],m))
                    delta_minus_alpha_min_val = min(delta_minus_alpha_min_val, val(h[3] - h[0],m))
                #candidate parameters a <= c <= b with b + c = n
                a = delta_minus_alpha_min_val - beta_min_val
                b = m - a
                c = n - b
                if (c > b) or (a > c) or (a < 0):
                    break
                #membership condition: delta - alpha + ell^a * beta = 0 mod ell^c
                contained = True
                for h in CB_Hgens:
                    if (h[3] - h[0] + ell**a * h[1]) % ell**c != 0:
                        contained = False
                        break
                if contained:
                    #print('a,c,b,[v,w] are ', a, c, b, [v,w])
                    return True
        previous = True
        previousB = B
    return False
#################################################################
#Function to check if an exceptional group is contained in a
#maximal exceptional group
#################################################################
def is_subgroup_from_list_cosets(n, label1, H1_gen_list, label2, H2_elem_list, cosreps_list):
    """True iff H1 (generated by H1_gen_list) is conjugate into H2 (given
    by its full element list H2_elem_list): for some coset representative
    g of H2 in G, every conjugate g^-1 h g of a generator lies in H2.
    (label1/label2 are unused but kept for the caller's interface.)"""
    H2_set = set(tuple(e) for e in H2_elem_list)
    for g in cosreps_list:
        g_inv = minv(g)
        if all(tuple(mmmult(g_inv, mmmult(h, g))) in H2_set for h in H1_gen_list):
            return True
    return False
#gens generates the group with label 'label'; Max_data is a tuple
#[max_label, elements, cosreps] describing a maximal exceptional group.
#Decide whether the labelled group is (conjugate to) a subgroup of it.
def is_subgroup_of_max(n, label, gens, Max_data):
    max_label, max_elements, cosreps = Max_data[0], Max_data[1], Max_data[2]
    return is_subgroup_from_list_cosets(n, label, gens, max_label, max_elements, cosreps)
#ExceptionalList holds [label, generators] pairs of exceptional groups;
#MaxList holds [max_label, generators, cosreps] for each maximal group.
#Sieve out every exceptional group contained in some maximal group:
#return True when all are accounted for, else the leftover list.
def sieve_maxs(n, ExceptionalList, MaxList):
    remaining = ExceptionalList
    for Max_data in MaxList:
        print('Sieving out subgroups of ', Max_data[0])
        #expand the maximal group's generators into its full element list once
        expanded = [Max_data[0], list(enumerate_elements(Max_data[1], n)), Max_data[2]]
        remaining = [[l, l_gens] for l, l_gens in remaining
                     if not is_subgroup_of_max(n, l, l_gens, expanded)]
    if len(remaining) == 0:
        return True
    else:
        return remaining
#################################################################
#The maximal exceptional subgroups of Appendix A.1
#################################################################
#labels of the maximal exceptional subgroups mod 8, 32 and 64
Max3_labels = [
    '2147',
    '2177',]
Max5_labels = [
    '189551',
    '189605',
    '189621',
    '189785',
    '189892',
    '189979',
    '189981',
    '189995',
    '190318',
    '190435',
    '190487',
    '190525',]
Max6_labels = [
    '876594',
    '878116',
    '881772',
    '885865',
    '890995',
    '891525',
    '891526',
    '891735',
    '891737',
    '893009',
    '893011',
    '893326',
    '894711']
#Precompute a list consisting of tuples [label, gens, cosreps], where
#label is one of the maximal exceptional labels, and gens is a list
#of generators of the corresponding subgroup, and cosreps is a list of
#coset representatives of the group in GL2(Z/NZ)
Max3 = []
Max5 = []
Max6 = []
print('Precomputing the data for maximal subgroups mod 8.')
for l in Max3_labels:
    gens = make_gens_from_label(3, l, 'data/gl2_')
    Max3.append([l, gens, reduce_list(make_list_elements("data/" + l + "_" + str(8) + "_cosets"),3)])
print('Precomputing the data for maximal subgroups mod 32.')
for l in Max5_labels:
    gens = make_gens_from_label(5, l, 'data/gl2_')
    Max5.append([l, gens, reduce_list(make_list_elements("data/" + l + "_" + str(32) + "_cosets"),5)])
print('Precomputing the data for maximal subgroups mod 64.')
for l in Max6_labels:
    gens = make_gens_from_label(6, l, 'data/gl2_')
    Max6.append([l, gens, reduce_list(make_list_elements("data/" + l + "_" + str(64) + "_cosets"),6)])
#maximal-group data indexed by n (ell^n-torsion); empty for n = 2, 4
Maxs = {2: [], 3: Max3, 4: [], 5: Max5, 6:Max6}
#################################################################
#Verify Proposition 5.1 parts (1) and (2)
#################################################################
print('This file verifies the computational assertions in Proposition 5.1 of "A local-global principle for isogenies of composite degree". It will take approximately 3 hours to run.')
for n in range(2,7):
    N = ell**n
    print('Checking which subgroups are exceptional for ', N, 'isogenies')
    Max_list = Maxs[n]
    Max_labels = [Max_list_item[0] for Max_list_item in Max_list]
    exceptionals = check_exceptional_subgroups(n, string="data/gl2_")
    print('All done checking for exceptional groups mod ', N, '.')
    New = sieve_maxs(n, exceptionals, Max_list)
    #BUG FIX: sieve_maxs returns True on success and a NON-EMPTY LIST of
    #leftover groups on failure; a non-empty list is also truthy, so the
    #original ``if New:`` could never reach the failure branch.  Test for
    #the True sentinel explicitly.
    if New is True:
        print('Every exceptional group is contained in one of: ', Max_labels, '.')
    else:
        print('The exceptional groups: ', New, ' are not contained in one of ', Max_labels, '.')
|
#!/usr/bin/python
# Gibson Vector Designer 2015, an open source project for biologists
import sys
import re
import time
import os
from Bio import SeqIO
import sqlite3 as lite
from PySide.QtCore import *
from PySide import QtGui
from PySide import QtCore
import collections
import itertools
import inputmessage
# Reverse-complement of a DNA sequence; characters outside ACGT pass through unchanged
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
def reverse_complement(seq):
    """Return the reverse complement of the DNA string `seq`."""
    return ''.join(complement.get(base, base) for base in reversed(seq))
# Globals shared across the GUI handlers
gibson_dict = {}  # NOTE(review): appears to map sequence names to data; confirm against the handlers
gibson_sequence_names = []  # names of loaded sequences, in load order (presumably)
global_parts = collections.OrderedDict()  # ordered collection of assembly parts — confirm key/value types
# Class for the graphical user interface
class Ui_Gibson(object):
    def setupUi(self, Gibson):
        """Build the main window (Qt Designer style): construct widgets,
        set geometry and static text, wire signals, and preload the
        inventory list from the SQLite database ./test.db.

        `Gibson` is the QMainWindow being populated.
        """
        Gibson.setObjectName("Gibson")
        Gibson.resize(1061, 667)
        self.centralwidget = QtGui.QWidget(Gibson)
        self.centralwidget.setObjectName("centralwidget")
        # --- Part-entry grid (name + sequence line edits) ---
        self.gridLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(40, 60, 361, 66))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
        self.lineEdit_2 = QtGui.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.gridLayout.addWidget(self.lineEdit_2, 1, 1, 1, 1)
        self.lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
        # --- Section header labels (images from a Qt resource file) ---
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(180, 20, 66, 21))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtGui.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(520, 20, 121, 21))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(150, 200, 131, 21))
        self.label_5.setObjectName("label_5")
        # Inventory list (current vector parts)
        self.listWidget = QtGui.QListWidget(self.centralwidget)
        self.listWidget.setGeometry(QtCore.QRect(450, 61, 251, 421))
        self.listWidget.setObjectName("listWidget")
        # --- Parameters grid (Tm, min/max primer length) ---
        self.gridLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(40, 240, 361, 151))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_7 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_7.setObjectName("label_7")
        self.gridLayout_2.addWidget(self.label_7, 1, 0, 1, 1)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem, 4, 0, 1, 1)
        self.pushButton_2 = QtGui.QPushButton(self.gridLayoutWidget_2)
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridLayout_2.addWidget(self.pushButton_2, 5, 0, 1, 1)
        self.pushButton_10 = QtGui.QPushButton(self.gridLayoutWidget_2)
        self.pushButton_10.setObjectName("pushButton_10")
        self.gridLayout_2.addWidget(self.pushButton_10, 5, 1, 1, 1)
        self.label_8 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_8.setObjectName("label_8")
        self.gridLayout_2.addWidget(self.label_8, 2, 0, 1, 1)
        self.label_9 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_9.setObjectName("label_9")
        self.gridLayout_2.addWidget(self.label_9, 3, 0, 1, 1)
        self.lineEdit_4 = QtGui.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.gridLayout_2.addWidget(self.lineEdit_4, 2, 1, 1, 1)
        self.lineEdit_3 = QtGui.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.gridLayout_2.addWidget(self.lineEdit_3, 3, 1, 1, 1)
        self.lineEdit_5 = QtGui.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.gridLayout_2.addWidget(self.lineEdit_5, 1, 1, 1, 1)
        # --- Add-part / load-from-file button row ---
        self.horizontalLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(40, 130, 361, 51))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.pushButton_6 = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.pushButton_6.setObjectName("pushButton_6")
        self.horizontalLayout.addWidget(self.pushButton_6)
        # Database list (persisted sequences)
        self.listWidget_2 = QtGui.QListWidget(self.centralwidget)
        self.listWidget_2.setGeometry(QtCore.QRect(760, 60, 251, 421))
        self.listWidget_2.setObjectName("listWidget_2")
        self.label_6 = QtGui.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(830, 10, 111, 41))
        self.label_6.setObjectName("label_6")
        # --- Database-side button column ---
        self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(760, 500, 251, 95))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButton_3 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.verticalLayout.addWidget(self.pushButton_3)
        self.pushButton_4 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.verticalLayout.addWidget(self.pushButton_4)
        self.pushButton_9 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_9.setObjectName("pushButton_9")
        self.verticalLayout.addWidget(self.pushButton_9)
        # --- Inventory-side button column ---
        self.verticalLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(450, 500, 251, 95))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.pushButton_8 = QtGui.QPushButton(self.verticalLayoutWidget_2)
        self.pushButton_8.setObjectName("pushButton_8")
        self.verticalLayout_2.addWidget(self.pushButton_8)
        self.pushButton_7 = QtGui.QPushButton(self.verticalLayoutWidget_2)
        self.pushButton_7.setObjectName("pushButton_7")
        self.verticalLayout_2.addWidget(self.pushButton_7)
        self.pushButton_5 = QtGui.QPushButton(self.verticalLayoutWidget_2)
        self.pushButton_5.setObjectName("pushButton_5")
        self.verticalLayout_2.addWidget(self.pushButton_5)
        # Status/output log
        self.textBrowser = QtGui.QTextBrowser(self.centralwidget)
        self.textBrowser.setGeometry(QtCore.QRect(40, 440, 361, 151))
        self.textBrowser.setObjectName("textBrowser")
        self.label_11 = QtGui.QLabel(self.centralwidget)
        self.label_11.setGeometry(QtCore.QRect(170, 410, 81, 21))
        self.label_11.setObjectName("label_11")
        Gibson.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(Gibson)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1061, 23))
        self.menubar.setObjectName("menubar")
        Gibson.setMenuBar(self.menubar)
        QtCore.QMetaObject.connectSlotsByName(Gibson)
        # --- Static text / translations ---
        Gibson.setWindowTitle(QtGui.QApplication.translate("Gibson", "Gibson Primer Designer", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("Gibson", "Part Name:", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Gibson", "Sequence: ", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p><img src=\":/newPrefix/Inputs.png\"/></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p><img src=\":/newPrefix/Inventory.png\"/></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p><img src=\":/newPrefix/parameters.png\"/></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.label_7.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p>Tm (<span style=\" vertical-align:super;\">o</span>C)</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_2.setText(QtGui.QApplication.translate("Gibson", "Build Vector Primers", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_10.setText(QtGui.QApplication.translate("Gibson", "View vector sequence", None, QtGui.QApplication.UnicodeUTF8))
        self.label_8.setText(QtGui.QApplication.translate("Gibson", "Min primer length (bp)", None, QtGui.QApplication.UnicodeUTF8))
        self.label_9.setText(QtGui.QApplication.translate("Gibson", "Max primer length (bp)", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton.setText(QtGui.QApplication.translate("Gibson", "Add Part", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_6.setText(QtGui.QApplication.translate("Gibson", "Load from file", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p><img src=\":/newPrefix/database.png\"/></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_3.setText(QtGui.QApplication.translate("Gibson", "Transfer part to inventory", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_4.setText(QtGui.QApplication.translate("Gibson", "View sequence of part", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_9.setText(QtGui.QApplication.translate("Gibson", "Delete part from database", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_8.setText(QtGui.QApplication.translate("Gibson", "Transfer part to database", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_7.setText(QtGui.QApplication.translate("Gibson", "View sequence of part", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_5.setText(QtGui.QApplication.translate("Gibson", "Reset vector parts", None, QtGui.QApplication.UnicodeUTF8))
        self.label_11.setText(QtGui.QApplication.translate("Gibson", "<html><head/><body><p><img src=\":/newPrefix/System.png\"/></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        # Initialise objects to hold data
        self.number_of_parts = None
        self.size_of_vector = None
        # These are two lists for the primer building algorithm
        self.sequences = []
        self.sequences_names = []
        # This is a dictionary for the inventory name&sequence to hold them for transfer to DB
        self.inventory = collections.OrderedDict()
        # Connect everything up (button -> handler)
        self.pushButton.clicked.connect(self.addpart)
        self.pushButton_5.clicked.connect(self.clearparts)
        self.pushButton_6.clicked.connect(self.loadpart)
        self.pushButton_2.clicked.connect(self.buildprimers)
        self.pushButton_8.clicked.connect(self.addtodict)
        self.pushButton_3.clicked.connect(self.exportfromdb)
        self.pushButton_9.clicked.connect(self.deletefromdb)
        self.pushButton_10.clicked.connect(self.view_vector_sequence)
        # For toolbar
        # NOTE(review): `m` appears unused in this method — confirm before removing.
        m = 45
        # Initialise the database: flatten all (Id, Seq) rows and show the
        # Ids (every other element) in the database list widget.
        con = lite.connect('./test.db')
        l = []
        with con:
            cur = con.cursor()
            cur.execute("SELECT * FROM Seqs")
            rows = cur.fetchall()
            for row in rows:
                for item in row:
                    l.append(item)
            print l
            for i in range(0,len(l),2):
                x = l[i]
                self.listWidget_2.addItem(x)
######################## For functionality (non database)
def quit(self):
sys.exit()
    def addpart(self):
        """Read the name/sequence line edits and register a new part.

        Appends to the parallel ``sequences``/``sequences_names`` lists,
        shows the part in the inventory list widget, and records the pair
        in the ``inventory`` ordered dict (used for DB transfer).
        Rejects empty sequences with a message in the log.
        """
        toaddname = self.lineEdit.text()
        toaddsequence = self.lineEdit_2.text()
        if str(toaddsequence) == "":
            self.textBrowser.append("error, you cant join an empty part")
        else:
            # to add to the list
            self.sequences.append(str(toaddsequence))
            self.sequences_names.append(str(toaddname)) # add the names back in later on for primer visualisation
            self.listWidget.addItems([toaddname])
            self.textBrowser.append("parts added")
            self.update_statistics()
            # to add to the dictionary
            self.inventory[str(toaddname)] = str(toaddsequence)
            print self.inventory
print self.inventory
def clearparts(self):
# for list
self.listWidget.clear()
self.sequences = []
self.textBrowser.append("parts cleared")
self.update_statistics()
# for dict
self.inventory.clear()
    def loadpart(self): # test this, also upgrade so user can input multi .fasta file
        """Load parts from a FASTA file chosen via a file dialog.

        Each record's id/sequence is appended to the part lists, the
        inventory dict, and the inventory list widget.
        NOTE(review): `mySW` is the module-level main-window global set in
        __main__ — this method fails if the module is imported without it.
        """
        path, _ = QtGui.QFileDialog.getOpenFileName(mySW, "Open File", os.getcwd())
        inputfasta = SeqIO.parse(path, 'fasta')
        for record in inputfasta:
            # for list
            self.sequences.append(record.seq.tostring())
            self.sequences_names.append(record.id)
            # for dict
            self.inventory[record.id] = record.seq.tostring()
            self.listWidget.addItems([record.id])
        self.update_statistics()
        print self.sequences
        print self.sequences_names
def update_statistics(self):
self.number_of_parts = self.listWidget.count()
self.textBrowser.append("no. of parts: " + str(self.number_of_parts))
vector = ''.join(self.sequences)
self.size_of_vector = str(len(vector)/1000) + " Kbp"
self.textBrowser.append("vector size: " + self.size_of_vector)
def view_vector_sequence(self):
global global_parts
global_parts = self.inventory
self.vector_window = vector_window()
self.vector.window.show()
######################## Database functions
    def addtodict(self):
        """Persist the currently selected inventory part to the database.

        Looks up the selected name in ``self.inventory``, inserts the
        (Id, Seq) pair into table Seqs (created on demand), and mirrors
        the name into the database list widget.
        NOTE(review): repeated transfers insert duplicate rows — no
        uniqueness check is performed; confirm whether that is intended.
        """
        userselection1 = self.listWidget.currentItem()
        userselection = userselection1.text()
        print userselection
        seq4db = {}
        for k, v in self.inventory.iteritems():
            if k == userselection: # replace with user selection
                seq4db[k] = v
        seq4db2 = [[k, v] for k, v in seq4db.iteritems()]
        con = lite.connect('./test.db')
        # Add to database
        with con:
            cur = con.cursor()
            #cur.execute("DROP TABLE IF EXISTS Seqs")
            cur.execute("CREATE TABLE IF NOT EXISTS Seqs(Id TEXT, Seq TEXT)")
            cur.executemany("INSERT INTO Seqs VALUES(?, ?)", seq4db2)
        # Display in list in GUI
        for k in seq4db.keys():
            self.listWidget_2.addItems([k])
    def exportfromdb(self):
        """Copy the selected database entry back into the inventory.

        Fetches rows matching the selected Id, flattens them into ``l``
        (alternating Id, Seq), then adds the first pair to the inventory
        dict and the inventory list widget.
        NOTE(review): the inner loop reuses the name ``item`` from the
        outer loop, and only l[0]/l[1] are used even if several rows
        match — confirm multi-row behavior is acceptable.
        """
        con = lite.connect('./test.db')
        userselection1 = self.listWidget_2.currentItem()
        userselection = userselection1.text()
        var = [userselection] # replace with user selected input from database list
        l = []
        for item in var:
            with con:
                cur = con.cursor()
                cur.execute("SELECT * FROM Seqs WHERE Id=?", (item,))
                rows = cur.fetchall()
                for row in rows:
                    for item in row:
                        l.append(item)
        print self.inventory
        self.inventory[l[0].encode("utf-8")] = l[1].encode("utf-8")
        self.listWidget.addItem(l[0])
        print self.inventory
def deletefromdb(self):
con = lite.connect('./test.db')
userselection1 = self.listWidget_2.currentItem()
userselection = userselection1.text()
with con:
cur = con.cursor()
cur.execute("DELETE FROM Seqs WHERE Id=?", (userselection,))
listItems = self.listWidget_2.selectedItems()
for item in listItems:
self.listWidget_2.takeItem(self.listWidget_2.row(item))
###################### Gibson primer selection algorithm
    def buildprimers(self):
        """Design Gibson assembly primers for all parts in the inventory.

        For each part: grow 5' and 3' primers from the minimum length
        until the GC-content Tm formula reaches the target Tm, then
        prepend/append 20 bp overlap ends taken from the neighbouring
        parts (circular assembly), reverse-complement the reverse primer,
        and store results keyed by part name in the module-global
        ``gibson_dict`` for the output window.
        """
        # Re-define self.sequences and self.sequences_names from self.inventory
        # You can remove all non dictionary code earlier on at somepoint
        print self.sequences_names
        print self.sequences
        self.sequences_names = []
        self.sequences = []
        for k in self.inventory.keys():
            #print k
            self.sequences_names.append(k)
        for k, v in self.inventory.items():
            #print k, v
            self.sequences.append(v)
        #
        ERROR = 0 # set the error code to control the output window
        self.textBrowser.append("building gibson primers")
        # define possible errors
        if (self.lineEdit_5.text() == "") or (self.lineEdit_4.text() == "") or (self.lineEdit_3.text()) == "":
            ERROR = 1
            self.textBrowser.append("error, parameters were not set")
        # NOTE(review): execution continues even when ERROR == 1, so the
        # int() conversions below raise ValueError on empty fields —
        # an early return here looks intended.
        self.tm = int(self.lineEdit_5.text())
        self.min_size = int(self.lineEdit_4.text())
        self.max_size = int(self.lineEdit_3.text())
        # Creating two lists of sequence IDs
        seqnumber = len(self.sequences)
        self.sequences1 = []
        self.sequences2 = []
        for i in range(1,seqnumber+1,1):
            theid = "sequence" + str(i)
            self.sequences1.append(theid)
            self.sequences2.append(theid)
        # sequences1 = all but the last id; sequences2 = all but the first.
        self.sequences1 = self.sequences1[:seqnumber-1]
        self.sequences2 = self.sequences2[1:seqnumber+1]
        #print self.sequences1
        #print self.sequences2
        # NOTE(review): ``dict`` shadows the builtin of the same name.
        dict = collections.OrderedDict() # this creates a dictionary that maintains the initial order
        m = 1
        # Get primer sequences 5' and 3' without overlaps
        for sequence in self.sequences:
            n = self.min_size
            n2 = self.min_size
            primer = sequence[:n]
            primer2 = sequence[-n:]
            Tm = 0
            Tm2 = 0
            # Grow forward primer until Tm target (GC-content formula:
            # 64.9 + 41*(GC - 16.4)/len).
            while (Tm < self.tm):
                n += 1
                primer = sequence[:n]
                Tm = 64.9 + 41*(primer.count("G") + primer.count("C") - 16.4)/(primer.count("A") + primer.count("T") + primer.count("G") + primer.count("C"))
                if n >= self.max_size:
                    print "warning maximum size reached for forward primer part", m
                    break
            while (Tm2 < self.tm):
                n2 += 1
                primer2 = sequence[-n2:]
                Tm2 = 64.9 + 41*(primer2.count("G") + primer2.count("C") - 16.4)/(primer2.count("A") + primer2.count("T") + primer2.count("G") + primer2.count("C"))
                # NOTE(review): this tests ``n`` but the reverse primer grows
                # ``n2`` — looks like a copy-paste slip; confirm.
                if n >= self.max_size:
                    print "warning maximum size reached for reverse primer part", m
                    break
            # NOTE(review): ``(Tm and Tm2) >= self.tm`` evaluates as
            # ``Tm2 >= self.tm`` (when Tm is truthy), and ``(n and n2 <
            # self.max_size)`` as ``n2 < self.max_size`` — the intended
            # check was probably Tm >= tm and Tm2 >= tm and n < max and
            # n2 < max.  Confirm before changing.
            if (Tm and Tm2) >= self.tm and (n and n2 < self.max_size):
                x = "sequence" + str(m)
                m += 1
                fiveprime = sequence[:20]
                threeprime = sequence[-20:]
                dict[x] = [primer, primer2, Tm, Tm2, fiveprime, threeprime]
            else:
                self.textBrowser.append("cannot find primers within the parameter range")
                ERROR = 1
        # 5' ENDS extract and integrate
        ends = {}
        counter = 2
        for key, value in sorted(dict.items()):
            if key in self.sequences1:
                name = "sequence" + str(counter)
                ends[name] = [value[5]]
                counter += 1
            else:
                name = "sequence" + str(1)
                ends[name] = [value[5]]
        for k, v in sorted(dict.items()):
            for k2, v2 in sorted(ends.items()):
                if k == k2:
                    dict[k] = [v2[0]+v[0], v[1], v[2], v[3], v[4], v[5]] # modify dict with the 5' ends
        # 3' ENDS extract and integrate
        ends2 = {}
        counter2 = 1
        for key, value in sorted(dict.items()): # This loop is not correct
            if key in self.sequences2:
                # for everything apart from sequence 1
                name = "sequence" + str(counter2)
                ends2[name] = [value[4]]
                counter2 += 1
            else:
                name = "sequence" + str(seqnumber) # This variable will alter based on number of sequences added
                ends2[name] = [value[4]]
        for k, v in sorted(dict.items()):
            for k2, v2 in sorted(ends2.items()):
                if k == k2:
                    dict[k] = [v[0], v[1]+v2[0], v[2], v[3], v[4], v[5]]
        # Add a loop that reverse complements the second primer
        for k, v in sorted(dict.items()):
            dict[k] = [v[0], reverse_complement(v[1]), v[2], v[3], v[4], v[5]]
        # Put back in the original sequence names into the dict or newdict
        # (safe in Python 2 because .keys() returns a list snapshot;
        # would raise RuntimeError in Python 3)
        i = 0
        for k in dict.keys():
            newid = self.sequences_names[i]
            dict[newid] = dict.pop(k)
            i += 1
        # Add in primer lengths (annealing portion = total minus 20 bp overlap)
        for k, v in sorted(dict.items()):
            lenforward = len(v[0]) - 20
            lenreverse = len(v[1]) - 20
            dict[k] = [v[0], v[1], v[2], v[3], v[4], v[5], lenforward, lenreverse]
        # print out results in new window in .csv format with a header
        global gibson_dict
        gibson_dict = dict # I could not get the other class to recognise the dictionary any other way
        global gibson_sequence_names
        gibson_sequence_names = self.sequences_names
        if ERROR == 0:
            self.gibsonoutputwindow = gibsonoutputwindow()
            self.gibsonoutputwindow.show()
            self.textBrowser.append("primers built!")
class ControlGibsonWindow(QtGui.QMainWindow):
    """Main application window: hosts the Ui_Gibson widget tree."""
    def __init__(self, parent=None):
        super(ControlGibsonWindow, self).__init__(parent)
        # Build the generated UI onto this window and show it immediately.
        self.ui = Ui_Gibson()
        self.ui.setupUi(self)
        self.show()
##################################### OUTPUT WINDOWS (I dont think you can import these as modules?)
class gibsonoutputwindow(QtGui.QMainWindow):
    """Window that renders primer results as CSV text and offers saving.

    Reads the module-global ``gibson_dict`` populated by
    Ui_Gibson.buildprimers (name -> [fwd, rev, TmF, TmR, 5', 3',
    lenF, lenR]).
    """
    def __init__(self, parent=None):
        super(gibsonoutputwindow, self).__init__(parent)
        self.setWindowTitle('Gibson primer builder output')
        self.setGeometry(500, 500, 1500, 500)
        self.grid = QtGui.QGridLayout()
        self.grid.setSpacing(10)
        self.gibson_output = QtGui.QTextEdit(self)
        self.grid.addWidget(self.gibson_output, 0, 0)
        self.toolbar = self.addToolBar("Quit")
        SaveAction = QtGui.QAction('Save as .csv file', self)
        SaveAction.triggered.connect(self.save)
        self.toolbar.addAction(SaveAction)
        self.mainWidget = QtGui.QWidget()
        self.mainWidget.setLayout(self.grid)
        self.setCentralWidget(self.mainWidget)
        self.show()
        # change text to include gibson primer output # how to get dictionary from other class?
        global gibson_dict
        print gibson_dict
        global gibson_sequence_names
        print gibson_sequence_names
        # Flatten each entry into 7 CSV fields: name, primers, Tms, lengths
        # (indices 4 and 5 — the raw 20 bp ends — are not shown).
        gibson_list = []
        for k, v in gibson_dict.items():
            gibson_list.append(k)
            gibson_list.extend((v[0], v[1], str(v[2]), str(v[3]), str(v[6]), str(v[7])))
        print gibson_list
        empty = "part name, primer forward, primer reverse, tm forward, tm reverse, length forward, length reverse\n"
        for i in range(0, len(gibson_list),7):
            empty += ','.join(gibson_list[i:i+7]) + "\n"
        self.gibson_output.setText(empty)
        # Keep the rendered CSV for the Save action.
        self.towrite = empty
    def save(self):
        """Prompt for a path and write the CSV (``.csv`` is always appended)."""
        path2, _ = QtGui.QFileDialog.getSaveFileName(self, "Save file", "", ".csv")
        towrite = self.towrite
        with open(path2+".csv", "w") as text_file:
            text_file.write(towrite)
class vector_window(QtGui.QMainWindow):
    """Window showing the full concatenated vector sequence.

    Reads the module-global ``global_parts`` (name -> sequence) set by
    Ui_Gibson.view_vector_sequence.
    """
    def __init__(self, parent=None):
        super(vector_window, self).__init__(parent)
        self.setWindowTitle('Vector sequence viewer')
        self.setGeometry(500, 500, 500, 500)
        self.grid = QtGui.QGridLayout()
        self.grid.setSpacing(10)
        self.vector_viewer = QtGui.QTextEdit(self)
        self.grid.addWidget(self.vector_viewer, 0, 0)
        self.mainWidget = QtGui.QWidget()
        self.mainWidget.setLayout(self.grid)
        self.setCentralWidget(self.mainWidget)
        self.show()
        # print out vector sequence
        # NOTE(review): ``list`` shadows the builtin of the same name.
        list = []
        global global_parts
        for k,v in global_parts.iteritems():
            list.append(v)
        string = ''.join(list)
        self.vector_viewer.setText(string)
############################################## End of classes
if __name__ == '__main__':
    # Start the Qt event loop.  Note: ``mySW`` is read as a module-level
    # global by Ui_Gibson.loadpart (file-dialog parent).
    app = QtGui.QApplication(sys.argv)
    mySW = ControlGibsonWindow()
    mySW.show()
    sys.exit(app.exec_())
|
from threading import Thread
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.filters import SearchFilter
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from rest_framework.response import Response
from api.apps import ApiCli
from api.models import Answer, Message, Activity
from api.serializers import AnswerSerializer, CommentSerializer, AnswerCreateSerializer, AnswerUpdateSerializer
from api.utils.heat import HeatQueue
from api.utils import mixins
from api.utils.views import error, success, GenericViewSet
class AnswerViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin):
filter_backends = (SearchFilter,)
search_fields = ('detail',)
permission_classes = (IsAuthenticatedOrReadOnly,)
queryset = Answer.objects.all()
def get_serializer_class(self):
if self.action == 'create':
return AnswerCreateSerializer
if self.action == 'get_comments':
return CommentSerializer
if self.action == 'update':
return AnswerUpdateSerializer
return AnswerSerializer
    def list(self, request, *args, **kwargs):
        """Return one page of answers, post-processed for the current user.

        ApiCli.process_answer is applied to each instance before
        serialization; responses are wrapped in the project's
        success/error envelope.
        """
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            for instance in page:
                ApiCli.process_answer(instance, request.user)
            serializer = self.get_serializer(page, many=True)
            temp = self.get_paginated_response(serializer.data)
            return success(temp.data)
        # Pagination exhausted (paginate_queryset returned None).
        return error('no more data')
    def create(self, request, *args, **kwargs):
        """Create an answer; a user may answer each question only once.

        On success the answer is decorated with transient display
        attributes (userSummary, has_* flags, comment_count) before
        being re-serialized with the read serializer.
        """
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            question = serializer.validated_data['question']
            # One answer per user per question.
            if question.answers.filter(author=request.user).exists():
                return error('你已经回答过该问题了')
            answer = self.perform_create(serializer)
            # Transient (non-model) attributes consumed by AnswerSerializer.
            answer.userSummary = request.user.profile
            answer.has_favorite = False
            answer.has_approve = False
            answer.has_against = False
            answer.comment_count = answer.comments.count()
            seri = AnswerSerializer(answer, context={'request': request})
            return success(seri.data)
        # Surface the first validation error as "field: message".
        key = list(serializer.errors.keys())[0]
        return error(key+': '+serializer.errors[key][0])
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
if instance:
ApiCli.process_answer(instance, request.user)
serializer = self.get_serializer(instance)
return success(serializer.data)
return error('没有找到该回答')
    @detail_route(methods=['GET'])
    def get_comments(self, request, pk=None):
        """Return one page of comments belonging to this answer."""
        answer = self.get_object()
        if answer is None:
            return error("没有找到该评论所在的回答")
        queryset = self.filter_queryset(answer.comments.all())
        page = self.paginate_queryset(queryset)
        if page is not None:
            for comment in page:
                ApiCli.process_comment(comment)
            serializer = self.get_serializer(page, many=True)
            temp = self.get_paginated_response(serializer.data)
            return success(temp.data)
        # Pagination exhausted.
        return error('没有更多了')
    def perform_create(self, serializer):
        """Save the answer, bump the author's answer count, and notify.

        Notification fan-out runs in a fire-and-forget Thread via
        ``msg_thread`` (defined later in this module); the thread is not
        joined, so the response does not wait for message creation.
        Returns the saved Answer instance.
        """
        user = self.request.user
        user.profile.answerCount += 1
        user.profile.save()
        question = serializer.validated_data['question']
        answer = serializer.save(author=user)
        thread = Thread(target=msg_thread, args=(question, user, answer))
        thread.start()
        return answer
    @detail_route(methods=['GET'],
                  permission_classes=[IsAuthenticated])
    def agree(self, request, pk=None):
        """Upvote an answer (at most once per user)."""
        profile = request.user.profile
        answer = self.get_object()
        if answer is None:
            return error('没有找到你想赞的回答')
        # Idempotence guard: refuse duplicate upvotes.
        if profile.agreed.filter(id=answer.id).exists():
            return error('你已经赞过了')
        answer.approve += 1
        answer.save()
        profile.agreed.add(answer)
        profile.save()
        # NOTE(review): always true under IsAuthenticated permission —
        # the check looks redundant but is kept as-is.
        if request.user.is_authenticated:
            Activity.agreeAnswer(request.user.profile, answer)
        return success()
@detail_route(methods=['GET'],
permission_classes=[IsAuthenticated])
def cancel_agree(self, request, pk=None):
profile = request.user.profile
answer = self.get_object()
if answer is None:
return error('没有找到你想取消赞的回答')
answer.approve -= 1
answer.save()
profile.agreed.remove(answer)
profile.save()
return success()
@detail_route(methods=['GET'],
permission_classes=[IsAuthenticated])
def disagree(self, request, pk=None):
profile = request.user.profile
answer = self.get_object()
if answer is None:
return error('没有找到你想踩的回答')
answer.against += 1
answer.save()
profile.disagreed.add(answer)
profile.save()
return success()
@detail_route(methods=['GET'],
permission_classes=[IsAuthenticated])
def cancel_disagree(self, request, pk=None):
profile = request.user.profile
answer = self.get_object()
if answer is None:
return error('没有找到你想取消踩的回答')
answer.against -= 1
answer.save()
profile.disagreed.remove(answer)
profile.save()
return success()
@detail_route(methods=['GET'], permission_classes=[IsAuthenticated])
def favorite(self, request, pk=None):
profile = request.user.profile
answer = self.get_object()
if answer is None:
return error('没有找到你想收藏的回答')
profile.favorites.add(answer)
profile.save()
return success()
@detail_route(methods=['GET'], permission_classes=[IsAuthenticated])
def cancel_favorite(self, request, pk=None):
profile = request.user.profile
answer = self.get_object()
if answer is None:
return error('没有找到你想取消收藏的回答')
profile.favorites.remove(answer)
profile.save()
return success()
def add_message(rec, question, answer, author):
    """Create and persist a notification Message for receiver *rec*.

    ``Message.objects.create`` already saves the row, so the previous
    trailing ``message.save()`` issued a redundant second database write.
    Returns the created Message.
    """
    return Message.objects.create(receiver=rec,
                                  question=question,
                                  answer=answer,
                                  author=author)
def msg_thread(question, user, answer):
    """Fan out new-answer notifications (runs in a background thread).

    Notifies everyone watching the question plus everyone watching the
    author, deduplicated by user id, then bumps the heat of each topic
    the question belongs to.
    """
    watchers = question.watchedUser.all() | user.watchedBy.all()  # queryset union
    notified_ids = set()
    for watcher in watchers:
        uid = watcher.user.id
        if uid in notified_ids:
            continue
        notified_ids.add(uid)
        add_message(watcher.user, question, answer, user.profile)
    for topic in question.topics.all():
        HeatQueue.put_answered(topic)
|
import sys, os
import numpy as np
import cv2
from sklearn.decomposition import IncrementalPCA
from sklearn.cluster import MiniBatchKMeans
import cPickle as pickle
import math
############################################
# Autor: Pascual Andres Carrasco Gomez
# Entorno: python2.7
# Descripcion: Deteccion facial
# Algoritmo Schneiderman and Kanade
# Nota: El programa requiere:
# apt-get install python-pip
# pip install -U scikit-learn
# pip install -U scipy
# apt-get install python-opencv
############################################
# Load the trained model parameters from disk.
def _load_pickle(path):
    """Unpickle and return the single object stored at *path*.

    Opens in binary mode — pickle streams are byte data, not text
    (the previous code repeated open/load/close seven times in text
    mode).
    """
    with open(path, "rb") as fh:
        return pickle.load(fh)
test = _load_pickle("datos_test.dat")            # labelled test images
umbral = _load_pickle("lambda.dat")              # decision threshold (lambda)
pca = _load_pickle("pca.dat")                    # fitted IncrementalPCA
kmeans = _load_pickle("kmeans.dat")              # fitted MiniBatchKMeans
q_caras = _load_pickle("q_caras.dat")            # P(q | face)
q_no_caras = _load_pickle("q_no_caras.dat")      # P(q | non-face)
m_pos_q_caras = _load_pickle("m_pos_q_caras.dat")  # P(position, q | face)
# Size of the analysis sub-window (one sixteenth of an image vector).
# NOTE(review): Python 2 ``/`` floors for ints — confirm image length is
# divisible by 16.
l_sr = len(test[0][0])/16
# Evaluation: classify every test image and tally the confusion counts.
vp = 0
vn = 0
fp = 0
fn = 0
total_caras = 0
total_no_caras = 0
for i in range(0,len(test)):
    img = test[i][0]
    clase = test[i][1]
    if clase == 1:
        total_caras += 1
    else:
        total_no_caras += 1
    # Quantize each sub-region: project with PCA, assign a k-means label.
    # NOTE(review): the inner loops reuse the name ``i``; the outer for
    # still advances correctly from its own iterator, but the shadowing
    # is fragile.
    labels = []
    for i in range(0,len(img),l_sr):
        if i+l_sr <= len(img):
            subregion = img[i:i+l_sr]
            X = pca.transform([subregion])[0]
            label = kmeans.predict([X])
            labels.append(label[0])
    # Naive-Bayes-style likelihood ratio over the sub-region labels.
    aux_producto = 1
    for i in range(0,len(labels)):
        p_p_q_cara = m_pos_q_caras[i][labels[i]]
        p_q_cara = q_caras[labels[i]]
        p_q_no_cara = q_no_caras[labels[i]]
        aux = (p_p_q_cara*p_q_cara)/(p_q_no_cara/16.0)
        aux_producto = aux_producto * aux
    if aux_producto > umbral: # classified as face
        if clase == 1:
            vp += 1
        else:
            fp += 1
    else:
        if clase == 0:
            vn += 1
        else:
            fn += 1
# Results: confusion-matrix summary (Python 2 print statements).
print "-----------------------------------"
print "\t\tRESULTADOS"
print "-----------------------------------"
print "Verdaderos positivos (VP): ",vp
print "Falsos positivos (FP): ",fp
print "Verdaderos negativos (VN)",vn
print "Falsos negativos (FN)",fn
print "Caras analizadas: ", total_caras
print "No caras analizadas: ", total_no_caras
print "-----------------------------------"
|
import yaml, json
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    # Fall back to the pure-Python loader/dumper when the libyaml
    # bindings are unavailable.  (Previously this branch called ``ic()``,
    # which raised NameError because icecream is only imported below.)
    from yaml import Loader, Dumper
import pretty_errors
import sys, os
from icecream import ic
import re
from contextlib import contextmanager
class Configurator():
    """Thin wrapper around a YAML (eventually also JSON) config file.

    Parsed data is exposed as a mapping via ``[]`` access, the ``data``
    property, and the ``setenv`` context manager.
    """
    def __init__(self, config_path: str, autoparse: bool=True):
        self.config_path = config_path
        self._data = None  # populated by parse()
        if autoparse:
            self.parse()
    def parse(self):
        """Read and deserialize the config file into ``self._data``.

        Raises NotImplementedError for JSON and ValueError for an
        unrecognized extension.  Previously a substring regex search
        ('json'/'yml' anywhere in the path) chose the format and an
        unknown extension silently left the object unparsed, causing a
        confusing AttributeError on first access.
        """
        with open(self.config_path, 'r') as f:
            if self.config_path.endswith('.json'):
                raise NotImplementedError
            elif self.config_path.endswith(('.yml', '.yaml')):
                self._data = yaml.load(f, Loader=Loader)
            else:
                raise ValueError(f"Unsupported config format: {self.config_path}")
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    @property
    def data(self):
        """The parsed configuration mapping (None before a successful parse)."""
        return self._data
    @contextmanager
    def setenv(self):
        """Context manager yielding the parsed configuration mapping."""
        yield self.data
def showconf(cfg=None):
    """Pretty-print every top-level key/value of a config mapping via ic().

    *cfg* defaults to the module-global ``config`` (set up inside the
    __main__ block), preserving the original zero-argument call style
    while allowing an explicit mapping to be passed.
    """
    data = config if cfg is None else cfg
    for key in data.keys():
        ic(key, data[key])
if __name__ == "__main__":
    # Demo: parse RAW.yml and dump every top-level key via icecream.
    conf = Configurator(config_path='RAW.yml', autoparse=True)
    with conf.setenv() as config:
        showconf()
|
# Unpacking Argument Lists...!
def arbitraryArgument(*args):
    """Demonstrate *args packing: print the tuple, its type, and each value."""
    print(f"*args : {args}")
    print(f"type(*args) : {type(args)}")
    print(f"iterable value : ",end="")
    for value in args:
        print(value, end=" ")
arbitraryArgument(1, 2, 3, 4, 5)
print("\n__________________________________________")
# Renamed to avoid shadowing the built-in names ``tuple`` and ``list``.
sample_tuple = (1, 2, 3, 4, 5)
sample_list = [1, 2, 3, 4, 5]
arbitraryArgument(sample_tuple)  # packed: the whole tuple is one argument
print("\n__________________________________________")
arbitraryArgument(*sample_tuple)  # unpacked: each element is an argument
print("\n")
arbitraryArgument(*sample_list)  # unpacked: each element is an argument
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class Languages(models.Model):
    """A language that users work in and datasets are recorded in."""
    # Short language code/name, e.g. a locale tag (max 5 chars).
    name = models.CharField(max_length=5)
    def __str__(self):
        return self.name
class User(AbstractUser):
    """Project user with role flags and an assigned working language."""
    # Role flags: a user may be a labeler, a manager, both, or neither.
    is_labeler = models.BooleanField(default=False)
    is_manager = models.BooleanField(default=False)
    # Deleting a language deletes its users (CASCADE) — deliberate per schema.
    language = models.ForeignKey(Languages, on_delete=models.CASCADE)
class Datasets(models.Model):
    """A dataset with per-stage progress counters (download/diarize/label)."""
    name = models.CharField(max_length=50)
    language = models.ForeignKey(Languages, on_delete=models.CASCADE)
    countOfDownloadeds = models.IntegerField(default=0)
    countOfDiarized = models.IntegerField(default=0)
    countOfLabeleds = models.IntegerField(default=0)
    def __str__(self):
        return f"{self.name} - {self.language.name}"
|
# Use the file name mbox-short.txt as the file name
# (Python 2 script: raw_input / print statements.)
fname = raw_input("Enter file name: ")
# NOTE(review): the file handle is never closed; harmless for a short
# script but a `with` block would be cleaner.
fh = open(fname)
def getConfidence(line):
    """Return the numeric value from an 'X-DSPAM-Confidence: <float>' line.

    Parses everything after the first colon.  The previous version
    searched for the literal character '0' (``line.find("0")``), which
    broke for values not starting with 0 — e.g. '1.0000' parsed as 0.0.
    """
    colon_pos = line.find(":")
    return float(line[colon_pos + 1:])
# Running sum and count of confidence values.
s = 0.0;
c = 0;
for line in fh:
    # Only header lines carry a confidence value.
    if not line.startswith("X-DSPAM-Confidence:") : continue
    s += getConfidence(line);
    c += 1;
# NOTE(review): raises ZeroDivisionError if the file contains no
# X-DSPAM-Confidence lines.
print "Average spam confidence: %.12g" %(s/c);
print "Done"
|
#!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return a new set of the elements present in both set_1 and set_2.

    Returns None when either argument is None.

    Bug fix: the original nested loop tested ``dat2 in dat`` — membership
    *inside* an element (substring match for strings, TypeError for ints) —
    instead of element equality. Use set intersection.
    """
    if set_1 is None or set_2 is None:
        return None
    return set(set_1) & set(set_2)
|
# A list is a sequence of values; the elements may be of any type.
list1 = [1, 2, 3, 4, 5, 6]
list2 = ['banana', 'pera', 'maca', 'uva']
list3 = ['uva', 0.1, 3, 'hello']
list4 = ['vi', 21, [17.123]]
empty_list = []
# unlike strings, lists are mutable: you can change the value of an element
print(list1[0])
list1[0] = 0
print(list1)
# Operator IN: membership test
cheeses = ['Cheddar', 'Edam', 'Gouda']
'Edam' in cheeses
# element by element
for cheese in cheeses:
    print(cheese)
# double every element in place
for i in range(len(list1)):
    list1[i] = list1[i] * 2
# Operations in Lists
a = [1, 2, 3, 4]
b = [5, 6, 7, 8]
c = a + b  # concatenation
print(c)
repeat = [0] * 4  # repetition
print(repeat)
# Slice List
t = ['a', 'b', 'c', 'd', 'e', 'f']
print(t[1:3])
# update multiple elements with slice assignment
t = ['a', 'b', 'c', 'd', 'e', 'f']
t[1:3] = ['x', 'y']
print(t)
# Methods for Lists
t.append('d')  # add at the end
t1 = ['a', 'b']
t2 = ['c', 'd']
t1.extend(t2)
print(t1)
t1.sort()  # sort list in place
# delete element
x = t.pop(1)  # pop returns the removed element
t = ['a', 'b', 'c']
del t[1]
y = ['a', 'b', 'c']
y.remove('b')  # remove by value
t = ['a', 'b', 'c', 'd', 'e', 'f']
del t[1:5]  # delete a whole slice
t = ['a', 'b', 'c', 'd', 'e', 'f']
# Lists and Functions
len(t)
max(t)
min(t)
sum(a)
sum(a) / len(a)
# sum() works only on a list of numbers
# Compute the average of the numbers entered by the user (stop on 'done').
count = 0
total = 0.0  # bug fix: 'total' was read before ever being assigned (NameError)
while True:
    inp = input('Enter a number: ')
    if inp == 'done':
        break
    value = float(inp)
    total = total + value
    count = count + 1
if count:
    average = total / count
    print('Average:', average)
else:
    # bug fix: avoid ZeroDivisionError when 'done' is entered immediately
    print('Average: no numbers entered')
# Lists and Strings
s = 'spam'
t = list(s)  # list() splits a string into its characters
print(t)
s = 'sentindo falta dos fiordes'
v = s.split()  # split on whitespace into words
print(v)
s = 'spam-spam-spam'
delimiter = '-'
s.split(delimiter)
# JOIN is the inverse of split
t = ['sentindo', 'falta', 'dos', 'fiordes']
delimiter = ' '
delimiter.join(t)
# Print the third word of every line starting with 'From ' in the mailbox file
fhand = open('mbox-short.txt')
for line in fhand:
    line = line.rstrip()
    if not line.startswith('From '): continue
    words = line.split()
    print(words[2])
# Objects and Values
a = 'banana'
b = 'banana'
a is b  # identity test (not equality)
a = [1, 2, 3]
b = [1, 2, 3]
a is b
# these two lists are equivalent (equal values) but not identical (distinct objects)
# Aliasing
# Binding a variable to an object is called a reference.
# An object with more than one reference has more than one name: it is aliased.
# Lists as arguments
# When you pass a list to a function, the function receives a reference to that list
def remove_primeiro_elemento(t):
    """Remove the first element of list *t* in place (the caller sees the change)."""
    t.pop(0)
# Read a file, collect its unique words, and print them sorted.
try:
    # Bug fixes: raw_input() is Python 2 only, and the error message plus
    # exit() below originally ran unconditionally (the script always aborted);
    # they belong inside an error handler for the open() call.
    fname = open(input("Enter file name: "))
except OSError:
    print("ERROR: Invalid file name")
    exit()
lst = list()
for line in fname:
    line = line.rstrip()
    l = line.split(" ")
    for words in l:
        if words in lst:
            continue
        else:
            lst.append(words)
lst.sort()
# Bug fix: Python 3 print function (was a Python 2 print statement).
print(lst)
|
import os
from net import Net
def make_softmax_net():
    """Build a minimal softmax classifier: MNIST data -> FC(3) -> softmax loss."""
    params = [
        {"batch": 200, "name": "Data", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"]},
        {"output": 3,  # class num
         "name": "fc", "type": "FCLayer",
         "bottom": ["data"], "top": ["fc1"]},
        {"class_num": 3, "name": "softmax", "type": "SoftmaxLossLayer",
         "bottom": ["fc1", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_2layer_mlp():
    """Build a 2-layer MLP: data -> FC(256) -> ReLU -> FC(10) -> softmax loss."""
    params = [
        {"batch": 128, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 256, "name": "FC1", "type": "FCLayer",
         "bottom": ["data"], "top": ["fc1"]},
        {"name": "RELU1", "type": "ReLULayer",
         "bottom": ["fc1"], "top": ["relu1"]},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer",
         "bottom": ["relu1"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_3layer_mlp():
    """Build a 3-layer MLP: data -> FC(256)+ReLU -> FC(256)+ReLU -> FC(10) -> loss."""
    params = [
        {"batch": 128, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"]},
        # (the "path" entry was commented out in the original)
        {"output": 256, "name": "FC1", "type": "FCLayer",
         "bottom": ["data"], "top": ["fc1"]},
        {"name": "RELU1", "type": "ReLULayer",
         "bottom": ["fc1"], "top": ["relu1"]},
        {"output": 256, "name": "FC2", "type": "FCLayer",
         "bottom": ["relu1"], "top": ["fc22"]},
        {"name": "RELU2", "type": "ReLULayer",
         "bottom": ["fc22"], "top": ["relu2"]},
        # NOTE(review): this output layer reuses the name "FC2" (duplicate of
        # the hidden layer above) — confirm Net tolerates duplicate layer names.
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer",
         "bottom": ["relu2"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_2layer_cnn():
    """Build a small CNN: data -> Conv(16,3x3,pad1) -> ReLU -> FC(10) -> loss."""
    params = [
        {"batch": 8, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 16, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 3, "pad": 1, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer",
         "bottom": ["conv1"], "top": ["relu1"]},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer",
         "bottom": ["relu1"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_pooling_cnn():
    """Build a CNN with one pooling stage: Conv(128,3x3) -> ReLU -> pool -> FC(10) -> loss."""
    params = [
        {"batch": 32, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 128, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 3, "pad": 1, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer",
         "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "POOLING1", "type": "PoolingLayer",
         "bottom": ["relu1"], "top": ["pooling1"]},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer",
         "bottom": ["pooling1"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_LeNet():
    """Build a LeNet-style MNIST net: conv(20,5x5) -> pool -> conv(50,5x5)
    -> pool -> FC(500)+ReLU -> FC(10) -> softmax loss."""
    params = [
        {"batch": 64, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 20, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 5, "pad": 0, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer", "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "POOL1", "type": "PoolingLayer", "bottom": ["relu1"], "top": ["pooling1"]},
        {"name": "CONV2", "type": "ConvLayer", "output": 50,
         "kernel_size": 5, "pad": 0, "bottom": ["pooling1"], "top": ["conv2"]},
        {"name": "RELU2", "type": "ReLULayer", "bottom": ["conv2"], "top": ["relu2"]},
        {"name": "POOL2", "type": "PoolingLayer", "bottom": ["relu2"], "top": ["pool2"]},
        {"name": "FC1", "type": "FCLayer", "output": 500,
         "bottom": ["pool2"], "top": ["fc1"]},
        {"name": "FC1_RELU", "type": "ReLULayer", "bottom": ["fc1"], "top": ["fc1_relu"]},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer", "bottom": ["fc1_relu"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_LeNet9():
    """Build a 9-weight-layer MNIST CNN: conv1(24,5x5,pad2) -> pool ->
    3x[conv(48,3x3)+ReLU] -> pool -> 3x[conv(96,3x3)+ReLU] -> FC(10) -> loss."""
    params = [
        {"batch": 64, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 24, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 5, "pad": 2, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer", "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "POOL1", "type": "PoolingLayer", "bottom": ["relu1"], "top": ["pooling1"]},
    ]

    def conv_relu(idx, n_out, bottom):
        # Append a 3x3 pad-1 conv + ReLU pair; return the ReLU output blob name.
        conv_top, relu_top = "conv%d" % idx, "relu%d" % idx
        params.append({"name": "CONV%d" % idx, "type": "ConvLayer", "output": n_out,
                       "kernel_size": 3, "pad": 1, "bottom": [bottom], "top": [conv_top]})
        params.append({"name": "RELU%d" % idx, "type": "ReLULayer",
                       "bottom": [conv_top], "top": [relu_top]})
        return relu_top

    top = "pooling1"
    for idx in (2, 3, 4):
        top = conv_relu(idx, 48, top)
    params.append({"name": "POOL2", "type": "PoolingLayer",
                   "bottom": [top], "top": ["pool2"]})
    top = "pool2"
    for idx in (5, 6, 7):
        top = conv_relu(idx, 96, top)
    params.append({"output": 10,  # class num
                   "name": "FC", "type": "FCLayer",
                   "bottom": [top], "top": ["fc"]})
    params.append({"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
                   "bottom": ["fc", "label"], "top": ["loss"]})
    net = Net()
    net.init(params)
    return net
def make_LeNet7():
    """Build a 7-weight-layer MNIST CNN: conv1(24,5x5) -> pool ->
    3x[conv(48,3x3)+ReLU] -> pool -> dropout -> FC(256)+ReLU -> dropout
    -> FC(10) -> softmax loss."""
    params = [
        {"batch": 32, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl"},
        {"output": 24, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 5, "pad": 0, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer", "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "POOL1", "type": "PoolingLayer", "bottom": ["relu1"], "top": ["pooling1"]},
    ]
    top = "pooling1"
    for idx in (2, 3, 4):
        # 3x3 pad-1 conv (48 maps) followed by ReLU
        params.append({"name": "CONV%d" % idx, "type": "ConvLayer", "output": 48,
                       "kernel_size": 3, "pad": 1, "bottom": [top],
                       "top": ["conv%d" % idx]})
        params.append({"name": "RELU%d" % idx, "type": "ReLULayer",
                       "bottom": ["conv%d" % idx], "top": ["relu%d" % idx]})
        top = "relu%d" % idx
    params += [
        {"name": "POOL2", "type": "PoolingLayer", "bottom": [top], "top": ["pool2"]},
        {"name": "DROP1", "type": "DropoutLayer", "bottom": ["pool2"],
         "top": ["drop1"], "keep_rate": 0.5},
        {"name": "FC1", "type": "FCLayer", "output": 256,
         "bottom": ["drop1"], "top": ["fc1"]},
        {"name": "FC1_RELU", "type": "ReLULayer", "bottom": ["fc1"], "top": ["fc1_relu"]},
        {"name": "DROP2", "type": "DropoutLayer", "bottom": ["fc1_relu"],
         "top": ["drop2"], "keep_rate": 0.5},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer", "bottom": ["drop2"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    net.init(params)
    return net
def make_LeNet5(net_params=None):
    """Build a LeNet-5-style MNIST CNN with data augmentation and dropout.

    If net_params is not None it is forwarded to Net.init() — presumably
    pre-trained weights or solver settings; confirm against Net.init().
    """
    params = [
        {"batch": 64, "name": "DATA", "type": "MnistDataLayer",
         "bottom": [], "top": ["data", "label"], "path": "./data/mnist.pkl",
         "rotate": 10, "shift": 2},
        {"output": 32, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 5, "pad": 2, "bottom": ["data"], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer", "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "CONV2", "type": "ConvLayer", "output": 32,
         "kernel_size": 5, "pad": 2, "bottom": ["relu1"], "top": ["conv2"]},
        {"name": "RELU2", "type": "ReLULayer", "bottom": ["conv2"], "top": ["relu2"]},
        {"name": "POOL1", "type": "PoolingLayer", "bottom": ["relu2"], "top": ["pooling1"]},
        {"name": "CONV3", "type": "ConvLayer", "output": 64,
         "kernel_size": 3, "pad": 1, "bottom": ["pooling1"], "top": ["conv3"]},
        {"name": "RELU3", "type": "ReLULayer", "bottom": ["conv3"], "top": ["relu3"]},
        {"name": "CONV4", "type": "ConvLayer", "output": 64,
         "kernel_size": 3, "pad": 1, "bottom": ["relu3"], "top": ["conv4"]},
        {"name": "RELU4", "type": "ReLULayer", "bottom": ["conv4"], "top": ["relu4"]},
        {"name": "POOL2", "type": "PoolingLayer", "bottom": ["relu4"], "top": ["pool2"]},
        {"name": "DROP1", "type": "DropoutLayer", "bottom": ["pool2"],
         "top": ["drop1"], "keep_rate": 0.25},
        {"name": "FC1", "type": "FCLayer", "output": 256,
         "bottom": ["drop1"], "top": ["fc1"]},
        {"name": "FC1_RELU", "type": "ReLULayer", "bottom": ["fc1"], "top": ["fc1_relu"]},
        {"name": "DROP2", "type": "DropoutLayer", "bottom": ["fc1_relu"],
         "top": ["drop2"], "keep_rate": 0.5},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer", "bottom": ["drop2"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": ["loss"]},
    ]
    net = Net()
    # equivalent to the original `type(net_params) != type(None)` test
    if net_params is not None:
        net.init(params, net_params)
    else:
        net.init(params)
    return net
def make_LeNet5_test(net_params=None):
    """Build the LeNet-5 inference graph wired to externally supplied blob names.

    net_params must provide "data", "label" and "output" blob names (the
    default of None fails at the first subscript, so a dict is effectively
    required) and is also forwarded to Net.init().

    NOTE(review): label_name is extracted but the loss layer below is wired
    to the literal "label" — confirm whether it should use label_name.
    """
    data_name = net_params["data"]
    label_name = net_params["label"]
    output_name = net_params["output"]
    params = [
        {"output": 32, "name": "CONV1", "type": "ConvLayer",
         "kernel_size": 5, "pad": 2, "bottom": [data_name], "top": ["conv1"]},
        {"name": "RELU1", "type": "ReLULayer", "bottom": ["conv1"], "top": ["relu1"]},
        {"name": "CONV2", "type": "ConvLayer", "output": 32,
         "kernel_size": 5, "pad": 2, "bottom": ["relu1"], "top": ["conv2"]},
        {"name": "RELU2", "type": "ReLULayer", "bottom": ["conv2"], "top": ["relu2"]},
        {"name": "POOL1", "type": "PoolingLayer", "bottom": ["relu2"], "top": ["pooling1"]},
        {"name": "CONV3", "type": "ConvLayer", "output": 64,
         "kernel_size": 3, "pad": 1, "bottom": ["pooling1"], "top": ["conv3"]},
        {"name": "RELU3", "type": "ReLULayer", "bottom": ["conv3"], "top": ["relu3"]},
        {"name": "CONV4", "type": "ConvLayer", "output": 64,
         "kernel_size": 3, "pad": 1, "bottom": ["relu3"], "top": ["conv4"]},
        {"name": "RELU4", "type": "ReLULayer", "bottom": ["conv4"], "top": ["relu4"]},
        {"name": "POOL2", "type": "PoolingLayer", "bottom": ["relu4"], "top": ["pool2"]},
        {"name": "DROP1", "type": "DropoutLayer", "bottom": ["pool2"],
         "top": ["drop1"], "keep_rate": 0.25},
        {"name": "FC1", "type": "FCLayer", "output": 256,
         "bottom": ["drop1"], "top": ["fc1"]},
        {"name": "FC1_RELU", "type": "ReLULayer", "bottom": ["fc1"], "top": ["fc1_relu"]},
        {"name": "DROP2", "type": "DropoutLayer", "bottom": ["fc1_relu"],
         "top": ["drop2"], "keep_rate": 0.5},
        {"output": 10,  # class num
         "name": "FC2", "type": "FCLayer", "bottom": ["drop2"], "top": ["fc2"]},
        {"class_num": 10, "name": "SOFTMAXLOSS", "type": "SoftmaxLossLayer",
         "bottom": ["fc2", "label"], "top": [output_name]},
    ]
    net = Net()
    # equivalent to the original `type(net_params) != type(None)` test
    if net_params is not None:
        net.init(params, net_params)
    else:
        net.init(params)
    return net
# Databricks notebook source
# MAGIC %md
# MAGIC # 01_04_Analysis_Whole_Data
# COMMAND ----------
# MAGIC %md
# MAGIC The goal of this notebook is to conduct some basic analysis on the whole dataset.<br>
# MAGIC It is based on the code from https://github.com/HansjoergW/bfh_cas_bgd_fs2020_sa/blob/master/01_04_Analysis_Whole_Data.ipynb
# COMMAND ----------
# imports
from pathlib import Path
from typing import List, Tuple, Union, Set
from pyspark.sql.dataframe import DataFrame
import pandas as pd
import shutil # provides high level file operations
import time # used to measure execution time
import os
import sys
# COMMAND ----------
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
%matplotlib inline
sns.set()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Basic definitions
# COMMAND ----------
# folder with the whole dataset as a single parquet
all_parquet_folder = "/usr/parquet/"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Loading the dataset
# COMMAND ----------
df_all = spark.read.parquet(all_parquet_folder).cache()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Print all the contained column names
# COMMAND ----------
_ = [print(x, end=", ") for x in df_all.columns] # print the name of the columns for convenience
# COMMAND ----------
# MAGIC %md
# MAGIC ## Total number of records
# COMMAND ----------
print("Entries in Test: ", "{:_}".format(df_all.count()))
# COMMAND ----------
# MAGIC %md
# MAGIC To load the data initially into the cache and execute the count()-operation took about 9 minutes.<br>
# MAGIC Executing the same operation another time, after everything was cached, took about 42 seconds.<br>
# COMMAND ----------
# MAGIC %md
# MAGIC ## Number of different CIKs
# COMMAND ----------
print("{:_}".format(df_all.select("cik").distinct().count()))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Number of reports by type
# COMMAND ----------
p_df_all_reports_by_type = df_all.select(["adsh","form"]).distinct().toPandas()
# COMMAND ----------
ct = pd.crosstab(index=p_df_all_reports_by_type['form'], columns='count')
print(ct[:5])
# COMMAND ----------
# Bar chart of report counts per form type, ordered by descending frequency.
my_order = p_df_all_reports_by_type.groupby(by=['form'])['form'].count().sort_values(ascending=False).index
g = sns.catplot(x='form', kind='count', data=p_df_all_reports_by_type, order=my_order, color='skyblue')
g.set_xticklabels( ## rotate the x-axis tick labels
rotation=90, ha='center') ## alias set_horizontalalignment
g.fig.set_figwidth(12)
g.fig.set_figheight(4)
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Number of companies per exchange
# COMMAND ----------
p_df_all_ciks_at_echange = df_all.select(["cik","exchange"]).distinct().toPandas() # convert to pandas in order to visualize the data
# COMMAND ----------
print("not traded at an exchange: ", p_df_all_ciks_at_echange['exchange'].isnull().sum(axis = 0))
print("traded at an exchange : ", p_df_all_ciks_at_echange['exchange'].count())
# COMMAND ----------
ct = pd.crosstab(index=p_df_all_ciks_at_echange['exchange'], columns='count')
print(ct)
# COMMAND ----------
# Bar chart of distinct CIK counts per exchange, ordered by descending frequency.
my_order = p_df_all_ciks_at_echange.groupby(by=['exchange'])['exchange'].count().sort_values(ascending=False).index
g = sns.catplot(x='exchange', kind='count', data=p_df_all_ciks_at_echange, order=my_order, color='skyblue')
g.set_xticklabels( ## rotate the x-axis tick labels
rotation=90, ha='center') ## alias set_horizontalalignment
g.fig.set_figwidth(12)
g.fig.set_figheight(4)
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Filer status
# MAGIC
# MAGIC There is the field "afs" which defines the "Filer status with the SEC at the time of submission". This field can have the values
# MAGIC * LAF=Large Accelerated,
# MAGIC * ACC=Accelerated,
# MAGIC * SRA=Smaller Reporting Accelerated,
# MAGIC * NON=Non-Accelerated,
# MAGIC * SML=Smaller Reporting Filer,
# MAGIC * NULL=not assigned
# COMMAND ----------
p_df_filer_state = df_all.select(["cik","afs"]).distinct().toPandas() # convert to pandas in order to visualize the data
# COMMAND ----------
print("without filer state: ", p_df_filer_state['afs'].isnull().sum(axis = 0))
print("with filer state : ", p_df_filer_state['afs'].count())
# COMMAND ----------
ct = pd.crosstab(index=p_df_filer_state['afs'], columns='count')
print(ct)
# COMMAND ----------
# Bar chart of distinct CIK counts per filer status (afs), ordered by frequency.
my_order = p_df_filer_state.groupby(by=['afs'])['afs'].count().sort_values(ascending=False).index
g = sns.catplot(x='afs', kind='count', data=p_df_filer_state, order=my_order, color='skyblue')
g.set_xticklabels( ## rotate the x-axis tick labels
rotation=90, ha='center') ## alias set_horizontalalignment
g.fig.set_figwidth(12)
g.fig.set_figheight(4)
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Primary financial statement
# COMMAND ----------
print("with filer state : ", "{:_}".format(df_all.select('stmt').where("stmt is not null").count()))
# COMMAND ----------
p_df_prim_fin_rep_count = df_all.cube('stmt').count().toPandas()
print(p_df_prim_fin_rep_count)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Tags
# COMMAND ----------
df_all_report_lines = df_all.select(['stmt', 'tag', 'version']).where("stmt is not null and version NOT LIKE '00%'").cache()
# COMMAND ----------
# MAGIC %md
# MAGIC ### How many different tag/version combinations are there in the whole dataset
# COMMAND ----------
p_df_stmt_tags = df_all_report_lines.distinct().toPandas()
# COMMAND ----------
p_df_stmt_tags[:10]
# COMMAND ----------
tag_version_per_type = pd.crosstab(index=p_df_stmt_tags['stmt'], columns='count')
tag_version_per_type
# COMMAND ----------
# MAGIC %md
# MAGIC ### Check if there are different versions of the same tag
# COMMAND ----------
version_per_tag = pd.crosstab(index=p_df_stmt_tags[p_df_stmt_tags['stmt'] == 'BS']['tag'], columns='count')
version_per_tag.sort_values('count', ascending=False)[:15]
# COMMAND ----------
p_df_stmt_tags[p_df_stmt_tags['tag'] == 'Cash']['version'].unique()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Ignoring the version - how many different tags are there
# COMMAND ----------
p_df_stmt_tags_no_Version = p_df_stmt_tags[['stmt', 'tag']].drop_duplicates()
tag_per_type = pd.crosstab(index=p_df_stmt_tags_no_Version['stmt'], columns='count')
tag_per_type
# COMMAND ----------
# MAGIC %md
# MAGIC ### Find the most important tags for the BalanceSheet
# COMMAND ----------
# check in how many reports a tag from a BS appears
p_all_BS_rows = df_all.select(['adsh','stmt', 'tag', 'version']).where("stmt like 'BS' and version NOT LIKE '00%'").distinct().cache()
# COMMAND ----------
p_all_BS_rows.count()
# COMMAND ----------
count_per_BS_tag = p_all_BS_rows.select(['tag']).cube('tag').count().toPandas()
# COMMAND ----------
sorted_count_per_BS_tag = count_per_BS_tag.sort_values('count', ascending=False)
sorted_count_per_BS_tag.reset_index(drop = True, inplace = True)
sorted_count_per_BS_tag = sorted_count_per_BS_tag[1:] # ignoring the first row, which contains the "None" Tag
sorted_count_per_BS_tag[:10]
# COMMAND ----------
display(sorted_count_per_BS_tag)
# COMMAND ----------
display(sorted_count_per_BS_tag[:100])
# COMMAND ----------
sorted_count_per_BS_tag.to_csv("/dbfs/usr/bs_tags.csv",index=False,header=True)
# COMMAND ----------
|
import sys
import Class.SeleniumBrowser
import Module.Algorithms
import Module.Utility
import Module.logger
import Module.getObject
import Module.CleanUp
import Module.Report
import Class.UserDefinedException
def clickOnLink(driverObject, lnkName):
    """Click the link named *lnkName* in the browser driven by *driverObject*.

    Tries the object repository first; if the link is missing there or not
    clickable, falls back to the algorithmic locator.  When both attempts
    fail, reports the failure, kills stray processes and raises a
    UserDefinedException.
    """
    Excep = Class.UserDefinedException.UserDefinedException()
    success = 0
    if lnkName is None:
        # Bail out early: continuing with None would crash later on
        # string concatenation ("Link " + None).
        Module.logger.ERROR("Link name not provided")
        return
    obj = Module.getObject.getObjByRepo(driverObject, "link", lnkName)
    if obj is not None:
        try:
            obj.click()
            Module.logger.INFO("Link " + lnkName + " is selected")
            success = 1
        except Exception:
            # Found in the repository but not clickable -- fall through
            # to the algorithmic lookup below.
            Module.logger.ERROR("Link " + lnkName + " is not clickable")
    else:
        Module.logger.INFO("Object " + lnkName + " is not found in Repository")
    if success == 0:
        obj = Module.getObject.getObjByAlgo(driverObject, "link", lnkName)
        if obj is not None:
            try:
                obj.click()
                Module.logger.INFO("Link " + lnkName + " is selected")
                success = 1
            except Exception:
                Module.Report.Failure(driverObject, "Link " + lnkName + " is not clickable")
                # Clean up before raising exception
                Module.CleanUp.killAllProcess()
                Excep.raiseException("Link " + lnkName + " is not clickable")
        else:
            Module.Report.Failure(driverObject, "No link found " + lnkName)
            Excep.raiseException("No Object found for link " + lnkName)
|
from django import forms
from models import ManPower,Employee,Project,Salary,Shift
from django.contrib.admin import widgets
# NOTE(review): appears unused in this module -- placeholder choices?
SHIFT_CHOICES=(('abc','def'),('ghi','gkl'))
class ManPowerForm(forms.ModelForm):
    """Create/edit form for a ManPower record.

    Uses the Django admin's split date/time widget for the in/out
    timestamps and model-backed choice fields for the relations.
    """
    employee=forms.ModelChoiceField(queryset=Employee.objects.all())
    project=forms.ModelChoiceField(queryset=Project.objects.all())
    time_in=forms.DateTimeField(widget=widgets.AdminSplitDateTime())
    time_out=forms.DateTimeField(widget=widgets.AdminSplitDateTime())
    # Defaults to one lunch break.
    lunch=forms.IntegerField(initial=1)
    # NOTE(review): `initial` for ModelChoiceField is normally a pk or
    # instance, not a dict -- verify this preselects the 'D1' shift.
    shift=forms.ModelChoiceField(queryset=Shift.objects.all(),initial={'name':'D1'})
    class Meta:
        model=ManPower
        fields=['employee','project','time_in','time_out','lunch','shift','working_time']
|
# Read the vehicle speed and fine the driver R$7.00 for every km/h
# above the 80 km/h limit.
vel = float(input('Digite a velocidade do veiculo em km/h: '))
LIMITE = 80
if vel <= LIMITE:
    print('Você está andando na velocidade correta')
else:
    print('Você foi multado devido a está acima do limite de 80 km/h')
    excesso = vel - LIMITE
    print('A multa vai custar R${:.2f}'.format(excesso * 7))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: GreenJedi
# @Date: 2018-01-31 12:40:10
# @Last Modified by: JJLasher
# @Last Modified time: 2018-01-31 13:12:49
from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
    # Serve the site landing page for both "/" and "/index".
    return render_template("index.html")
|
#!/usr/bin/env python
#
# simple test of classes and gamestate
import sys
import os
import time
import unittest
from Distances import *
class Camp:
    """A camp on the map at (x, y), held by player `owner` (0 = neutral)."""

    def __init__(self, x, y, owner):
        # Stored under the original capitalised attribute names so any
        # external readers of .X/.Y keep working.
        self.X, self.Y, self.owner = x, y, owner

    def getX(self):
        """Return the camp's x coordinate."""
        return self.X

    def getY(self):
        """Return the camp's y coordinate."""
        return self.Y

    def getOwner(self):
        """Return the id of the player owning this camp."""
        return self.owner
def createCamps():
    """Build the fixed 36-camp starting layout.

    Each entry is (x, y, owner); owner 0 means neutral.  The order of the
    returned list is significant (tests index into it).
    """
    layout = [
        (4, 4, 1), (8, 4, 0), (27, 4, 3), (23, 4, 0),
        (6, 7, 1), (2, 7, 0), (29, 7, 0), (25, 7, 3),
        (11, 9, 0), (20, 9, 0),
        (3, 10, 0), (25, 10, 0), (6, 10, 0), (28, 10, 0),
        (24, 14, 0), (22, 14, 0), (9, 14, 0), (7, 14, 0),
        (9, 17, 0), (24, 17, 0), (22, 17, 0), (7, 17, 0),
        (3, 21, 0), (6, 21, 0), (25, 21, 0), (28, 21, 0),
        (11, 22, 0), (20, 22, 0),
        (2, 24, 0), (6, 24, 4), (25, 24, 2), (29, 24, 0),
        (4, 27, 4), (23, 27, 0), (27, 27, 2), (8, 27, 0),
    ]
    return [Camp(x, y, owner) for (x, y, owner) in layout]
class DistanceTest(unittest.TestCase):
    """Sanity checks for the fixed camp layout and the Distances helper."""

    def testCamps(self):
        # 36 camps total; spot-check the third entry.
        # assertEqual replaces the long-deprecated failUnlessEqual alias
        # (removed from unittest in Python 3.12).
        camps = createCamps()
        self.assertEqual(len(camps), 36)
        c = camps[2]
        self.assertEqual(c.getX(), 27)
        self.assertEqual(c.getY(), 4)
        self.assertEqual(c.getOwner(), 3)

    def testDistance(self):
        camps = createCamps()
        dists = Distances(camps)
        self.assertEqual(dists.getMinDistance(4), 9)
        self.assertEqual(dists.getClosestId(4), 12)
        # Walk the four nearest neighbours of camp 4 by repeatedly
        # masking out the current minimum.  Single-argument print(...)
        # behaves the same on Python 2 and 3.
        reihe = dists.getList(4)
        print(reihe)
        for _ in range(4):
            i = reihe.index(min(reihe))
            print(i)
            reihe[i] = 99999999


if __name__ == "__main__":
    unittest.main()
|
import socket
import threading
from debugging import print
class CommunicationThread(threading.Thread):
    """Per-client socket handler for the game server.

    Receives UTF-8 messages from one client, evaluates each message as a
    Python tuple literal of the form "'header', arg1, ..." and dispatches
    it either to the shared game state (admin commands 'reset'/'start') or
    to this client's player object.  Non-None results are sent back on the
    same connection.
    """
    def __init__(self, connection, addr, shareGameData, id):
        super().__init__(name="CommunicationThread to " + str(addr))
        # todo : check id limit and print error
        self.connection = connection        # connected socket for this client
        self.address = addr                 # peer address, used for logging
        self.shareGameData = shareGameData  # game state shared across threads
        self.id = id                        # index into shareGameData.players
    ##############
    ## OVERRIDE ##
    ##############
    def run(self):
        # Blocking receive loop; each message is answered from a fresh
        # worker thread so a slow command does not stall the socket.
        print('start handling gameClientConnection', self.address)
        while True:
            data = self.connection.recv(1024)
            data = data.decode('utf-8')
            print('received data from', self.address, ':', data)
            threading.Thread(target=self.respondClient,args=[data]).start()
    def respondClient(self,data):
        # Execute the command; echo a stringified result back when there is one.
        result = self.__executeCommand(data)
        if result is not None:
            self.connection.send(str.encode(str(result)))
    ############
    ## PUBLIC ##
    ############
    def getConnection(self):
        return self.connection
    def getAddress(self):
        return self.address
    def getPlayerData(self):
        # Player object owned by this client.
        return self.shareGameData.players[self.id]
    #####################
    ## PRIVATE HELPERS ##
    #####################
    def __executeCommand(self, strDataIn):
        # Parse and dispatch one request; any failure is logged and
        # answered with None instead of killing the worker thread.
        try: #to call method according to header
            dataIn = self.__convertToTuple(strDataIn) #convert to tuple
            header = self.__extractHeader(dataIn)
            values = self.__extractParameters(dataIn)
            actionMethod = self.__getCorrespondingMethod(header)
            output = self.__performAction(actionMethod, values)
            return output
        except Exception as e:
            print('error found for request:', strDataIn)
            print('with exception:', e)
            return None
    def __convertToTuple(self, strData):
        # <--example--> convert "'getGameState'," to ('gameState',)
        # SECURITY NOTE(review): eval() executes arbitrary expressions
        # received from the network peer -- ast.literal_eval would be the
        # safe drop-in replacement here.
        manipulatedString = '('+strData+')'
        return eval(manipulatedString)
    def __extractHeader(self, tupleData):
        # First element names the command.
        return tupleData[0]
    def __extractParameters(self, tupleData):
        # Remaining elements are positional arguments for the command.
        return tupleData[1:]
    def __getCorrespondingMethod(self, header):
        #admin actions
        if header == 'reset':
            return self.shareGameData.reset
        if header == 'start':
            return self.shareGameData.start
        #client actions
        # Any other header is looked up as a method on this client's
        # player object.
        playerData = self.shareGameData.players[self.id]
        return getattr(playerData, header)
    def __performAction(self, actionMethod, values):
        return actionMethod(*values)
|
import argparse
import ast
import csv
import math
import os
import sys
from operator import add
import tqdm
from protgraph.graph_statistics import _add_lists
# Statistics rows can contain huge list literals; lift the csv field cap.
csv.field_size_limit(sys.maxsize)


def _check_if_file_exists(s: str):
    """Argparse ``type=`` helper: return *s* unchanged if it names an
    existing file, otherwise raise an Exception."""
    # TODO copied from prot_graph.py
    if not os.path.isfile(s):
        raise Exception("File '{}' does not exists".format(s))
    return s
def parse_args():
    """Build the CLI and return the parsed arguments namespace."""
    arg_parser = argparse.ArgumentParser(
        description="Graph-Generator for Proteins/Peptides and Exporter to various formats"
    )

    # Statistics file (positional, must exist on disk).
    arg_parser.add_argument(
        "input_csv", type=_check_if_file_exists, nargs=1,
        help="File containing the statistics output from ProtGraph "
             "(E.G.: Generated via '-cnp', '-cnpm' or '-cnph', ...). "
             "All columns beginning with 'num' or 'list' can be used."
    )

    # Optional row count -- only used to give tqdm an ETA.
    arg_parser.add_argument(
        "--num_entries", "-n", type=int, default=None,
        help="Number of entries in csv. If provided, an estimate of needed time can be reported. Defaults to none"
    )

    # Which column to aggregate.
    arg_parser.add_argument(
        "--column_index", "-cidx", type=int, default=2,
        help="The Index of the column of the graph-statistics file which should be summed up. "
             "Defaults to 2 (num_var_seq (isoforms)) (The column for counting can be different depending on the layout)"
    )

    return arg_parser.parse_args()
def main():
    """Sum one column of a ProtGraph statistics CSV.

    Determines from the first data row whether the chosen column holds
    ints or lists, sums every row's value (element-wise for lists) and
    prints per-index and cumulative totals.
    """
    args = parse_args()

    # Parameters
    statistics_file = args.input_csv[0]
    num_entries = args.num_entries  # may be None; only drives the tqdm ETA
    column_index = args.column_index

    with open(statistics_file, "r") as in_file:
        # Initialize CSV reader
        csv_in = csv.reader(in_file)

        # Skip the header row, but keep the names for error messages.
        headers = next(csv_in)

        # Peek at the first data row to decide how to sum (int vs list).
        first = next(csv_in)[column_index]
        if not first:
            raise Exception("Column '{}' (on index {}) contains no entries".format(headers[column_index], column_index))
        try:
            parsed_entry = ast.literal_eval(first)
        except Exception:
            raise Exception("Column '{}' (on index {}) cannot be evaluated".format(headers[column_index], column_index))
        if isinstance(parsed_entry, int):
            exe_func = add
            # BUGFIX: seed the total with the peeked value -- previously
            # the first data row was silently excluded from the sum.
            summation = parsed_entry
        elif isinstance(parsed_entry, list):
            exe_func = _add_lists
            summation = list(parsed_entry)
        else:
            raise Exception("Column '{}' (on index {}) cannot be summed, type is not list or int".format(
                headers[column_index], column_index)
            )

        # Sum all remaining rows; empty cells are skipped.
        try:
            for row in tqdm.tqdm(csv_in, unit="rows", total=num_entries):
                if row[column_index]:
                    summation = exe_func(
                        summation, ast.literal_eval(row[column_index])
                    )
        except Exception:
            raise Exception("Column '{}' (on index {}) contains different typed/corrupted entries".format(
                headers[column_index], column_index)
            )

    # Print results (normalize the int case to a one-element list first).
    if isinstance(summation, int):
        summation = [summation]
    # Column widths for aligned output; clamp so log10 never sees <= 0
    # (e.g. a total of 0, or an all-zero list).
    idx_len = str(int(math.log10(max(len(summation), 1))) + 1)
    entry_len = str(int(math.log10(max(max(summation, default=0), 1))) + 1)
    print("Results from column '{}':\n".format(headers[column_index]))
    print("Sum of each entry")
    for idx, entry in enumerate(summation):
        print(("{:>" + idx_len + "}: {:>" + entry_len + "}").format(idx, entry))
    print("\n\n\nCummulative sum")
    k = 0
    for idx, entry in enumerate(summation):
        k += entry
        print(("{:>" + idx_len + "}: {:>" + entry_len + "}").format(idx, k))
|
#!/usr/bin/env python
import ROOT
import sys
import os
import glob
import re
from array import array
from math import sqrt
def process_time(rootFile):
    """Collect detection times (branch `time` where det==1) from every
    tree in *rootFile* into a single "arrival_time" TNtuple and return it.
    """
    times_tree = ROOT.TNtuple("arrival_time", "Detection Time", "time")
    tf = ROOT.TFile(rootFile, "READ")
    tf.ReadAll()
    # NOTE(review): assumes every object in the file's top-level list is
    # a TTree with `time` and `det` branches -- confirm with the producer.
    trees = list(tf.GetList())
    for run, tree in enumerate(trees):
        # "goff" suppresses graphics; Draw only fills the internal V1 buffer.
        n = tree.Draw("time",
                      "det==1", "goff")
        # GetV1 returns a raw double* -- SetSize bounds it so Python
        # iteration stops after the n selected entries.
        times = tree.GetV1()
        times.SetSize(n)
        for t in list(times):
            times_tree.Fill(t)
    tf.Close()
    return times_tree
if __name__ == '__main__':
    # Plain style / default palette, then drop into a ROOT prompt so the
    # returned ntuple can be inspected interactively.
    ROOT.gROOT.SetStyle("Plain")
    ROOT.gStyle.SetPalette(1)
    tree = process_time(sys.argv[1])
    ROOT.TPython.Prompt()
|
# -*- coding: utf-8 -*-
"""Definition of the Exam content type.
"""
from zope.interface import implements
from Products.Archetypes import atapi
from Products.ATContentTypes.content import folder
from Products.ATContentTypes.content.schemata import finalizeATCTSchema
from eduintelligent.evaluation.utility import hideMetadataSchema
from eduintelligent.evaluation.interfaces import IExam, IEvaluation
from eduintelligent.evaluation.config import PROJECTNAME
from eduintelligent.evaluation import evaluationMessageFactory as _
from eduintelligent.evaluation.content.evaluation import Evaluation
from eduintelligent.evaluation.content.schemas import quiz_schema, exam_schema, message_schema
# Compose the Exam schema: base AT folder fields plus the shared quiz,
# exam and message field groups.
ExamFolderSchema = folder.ATFolderSchema.copy() + quiz_schema.copy() + exam_schema.copy() + message_schema.copy()
# Store title/description as annotations (required for the
# ATFieldProperty accessors defined on the class below).
ExamFolderSchema['title'].storage = atapi.AnnotationStorage()
ExamFolderSchema['description'].storage = atapi.AnnotationStorage()
finalizeATCTSchema(ExamFolderSchema, folderish=True, moveDiscussion=False)
hideMetadataSchema(ExamFolderSchema, excludeFromNav=True)
class Exam(Evaluation):
    """Contains multiple questions.
    """
    implements(IExam, IEvaluation)
    portal_type = "Exam"
    # Rename the object from its id after creation (Archetypes behaviour).
    _at_rename_after_creation = True
    schema = ExamFolderSchema
    # Accessors backed by the annotation storage configured on the schema.
    title = atapi.ATFieldProperty('title')
    description = atapi.ATFieldProperty('description')
# Register the content type with Archetypes under this product's name.
atapi.registerType(Exam, PROJECTNAME)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Fit a cubic polynomial regression of case counts over day index and
# extrapolate a few days ahead.
data = pd.read_csv('coronacases.csv', sep=',')
data = data[['id','cases']]
print(data.head())
#prepare data
x = np.array(data['id']).reshape(-1,1)
y = np.array(data['cases']).reshape(-1,1)
plt.plot(y,'-m')
#plt.show()
from sklearn.preprocessing import PolynomialFeatures
# Expand the day index into [1, x, x^2, x^3] features.
polyfature = PolynomialFeatures(degree=3)
x = polyfature.fit_transform(x)
print(x)
#training data
from sklearn import linear_model
model = linear_model.LinearRegression()
model.fit(x,y)
# NOTE(review): score() is R^2 on the training data, not accuracy.
accuracy = model.score(x,y)
print(f'accuracy:{round(accuracy*100,3)}%')
y0 = model.predict(x)
plt.plot(y0,'--b')
plt.show()
#prediction
days = 2
# NOTE(review): 242 is presumably the last day id in the CSV -- confirm;
# extrapolation starts from there.
print(f'preciction - cases after{days} days:',end='')
print(round(int(model.predict(polyfature.fit_transform([[242+days]])))/1000000,2),'millions')
|
import tensorflow
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import backend as K
from numpy.random import seed
from tensorflow import set_random_seed
from trainer.checkpointers import *
from trainer.utils import *
from trainer.accuracy import *
from tensorflow.python.keras.callbacks import TensorBoard
# TF1-style session setup: grow GPU memory on demand instead of
# grabbing it all up front, and pin seeds for reproducibility.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
seed(11)
set_random_seed(12)
# Training hyper-parameters and data locations.
job_dir = 'output'
data_path = 'dataset'
train_csv = "train_triplet_pairs_reduced.csv"
val_csv = "val_triplet_pairs_reduced.csv"
train_epoch = 25
batch_size = 16
lr = 0.001
img_width, img_height = 224, 224
# Triple the batch: samples are consumed as (query, positive, negative)
# triplets by the loss/accuracy functions below.
batch_size *= 3
def accuracy_function(y_true, y_pred):
    """Triplet-ranking accuracy in percent.

    `y_pred` holds embeddings laid out as consecutive (query, positive,
    negative) triplets -- hence the stride-3 loop; a triplet scores 1
    when the positive is closer to the query than the negative.
    `y_true` is unused (Keras requires the signature).
    """
    def euclidian_dist(p, q):
        return K.sqrt(K.sum((p-q)**2))
    y_pred = K.clip(y_pred, K.epsilon(), 1-K.epsilon())
    accuracy = 0
    for i in range(0, batch_size, 3):
        try:
            query_embed = y_pred[i+0]
            pos_embed = y_pred[i+1]
            neg_embed = y_pred[i+2]
            dist_query_pos = euclidian_dist(query_embed, pos_embed)
            dist_query_neg = euclidian_dist(query_embed, neg_embed)
            # 1 if the negative is farther away than the positive, else 0.
            accuracy += tf.cond(dist_query_neg >
                                dist_query_pos, lambda: 1, lambda: 0)
        except:
            # NOTE(review): bare except silently skips incomplete triplets
            # (e.g. a short final batch) -- presumably intentional.
            continue
    accuracy = tf.cast(accuracy, tf.float32)
    # batch_size/3 triplets per batch.
    return accuracy*100/(batch_size/3)
def loss_function(y_true, y_pred):
    """Contrastive loss over (query, positive, negative) triplets.

    For each stride-3 triplet in `y_pred`, the query-positive pair is
    pulled together (label 1) and the query-negative pair pushed apart up
    to a unit margin (label 0).  `y_true` is unused.
    """
    def euclidian_dist(p, q):
        return K.sqrt(K.sum((p - q)**2))
    def contrastive_loss(y, dist):
        # Hadsell-style: y==1 penalises distance; y==0 penalises being
        # inside the margin of 1.
        one = tf.constant(1.0, shape=[1], dtype=tf.float32)
        square_dist = K.square(dist)
        square_margin = K.square(K.maximum(one - dist, 0))
        return K.mean(y * square_dist + (one - y) * square_margin)
    y_pred = K.clip(y_pred, K.epsilon(), 1-K.epsilon())
    loss = tf.convert_to_tensor(0, dtype=tf.float32)
    pos = tf.constant(1.0, shape=[1], dtype=tf.float32)
    neg = tf.constant(0.0, shape=[1], dtype=tf.float32)
    for i in range(0, batch_size, 3):
        try:
            query_embed = y_pred[i+0]
            pos_embed = y_pred[i+1]
            neg_embed = y_pred[i+2]
            dist_query_pos = euclidian_dist(query_embed, pos_embed)
            dist_query_neg = euclidian_dist(query_embed, neg_embed)
            loss_query_pos = contrastive_loss(pos, dist_query_pos)
            loss_query_neg = contrastive_loss(neg, dist_query_neg)
            loss = (loss + loss_query_pos + loss_query_neg)
        except:
            # Best-effort: skip incomplete triplets at the batch tail.
            continue
    # Average over the two pair-losses of each of the batch_size/3 triplets.
    loss *= 1/(batch_size*2/3)
    zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
    return tf.maximum(loss, zero)
def ranknet():
    # ranknet: uses pre-trained VGG19 + 2 shallow CNNs
    """Build the RankNet-style embedding model.

    Combines a pre-trained VGG19 trunk (pooled and passed through two
    4096-d dense layers) with two shallow subsample streams taken
    directly from the input image; all parts are L2-normalised and fused
    into a final 4096-d L2-normalised embedding.
    """
    vgg_model = VGG19(weights="imagenet", include_top=False,
                      input_shape=(224, 224, 3))
    # Deep stream: pooled VGG19 features -> 4096 -> 4096, L2-normalised.
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Lambda(
        lambda x: K.l2_normalize(x, axis=1))(convnet_output)
    # Shallow stream 1: coarse 4x4 subsample of the raw input.
    s1 = MaxPool2D(pool_size=(4, 4), strides=(4, 4),
                   padding='valid')(vgg_model.input)
    s1 = ZeroPadding2D(padding=(4, 4), data_format=None)(s1)
    s1 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s1)
    s1 = ZeroPadding2D(padding=(2, 2), data_format=None)(s1)
    s1 = MaxPool2D(pool_size=(7, 7), strides=(4, 4), padding='valid')(s1)
    s1 = Flatten()(s1)
    # Shallow stream 2: even coarser 8x8 subsample.
    s2 = MaxPool2D(pool_size=(8, 8), strides=(8, 8),
                   padding='valid')(vgg_model.input)
    s2 = ZeroPadding2D(padding=(4, 4), data_format=None)(s2)
    s2 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s2)
    s2 = ZeroPadding2D(padding=(1, 1), data_format=None)(s2)
    s2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(s2)
    s2 = Flatten()(s2)
    # Fuse the shallow streams, normalise, then fuse with the deep stream.
    merge_one = concatenate([s1, s2])
    merge_one_norm = Lambda(lambda x: K.l2_normalize(x, axis=1))(merge_one)
    merge_two = concatenate([merge_one_norm, convnet_output], axis=1)
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)
    final_model = Model(inputs=vgg_model.input, outputs=l2_norm_final)
    return final_model
def Mildnet_vgg19():
    """MILDNet variant on VGG19: freeze the first ten layers, pool the
    four intermediate block outputs together with the final feature map,
    and project to an L2-normalised 2048-d embedding."""
    backbone = VGG19(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

    # Keep the early, generic filters fixed during training.
    for frozen in backbone.layers[:10]:
        frozen.trainable = False

    skip_outputs = get_layers_output_by_name(
        backbone, ["block1_pool", "block2_pool", "block3_pool", "block4_pool"])

    # Global-average-pool the final feature map and each skip output,
    # concatenating them into one wide descriptor.
    embedding = GlobalAveragePooling2D()(backbone.output)
    for _name, skip in skip_outputs.items():
        embedding = concatenate([embedding, GlobalAveragePooling2D()(skip)])

    embedding = Dense(2048, activation='relu')(embedding)
    embedding = Dropout(0.6)(embedding)
    embedding = Dense(2048, activation='relu')(embedding)
    embedding = Lambda(lambda t: K.l2_normalize(t, axis=1))(embedding)

    return tf.keras.models.Model(inputs=backbone.input, outputs=embedding)
def Mildnet_all_trainable():
    """MILDNet on VGG16 with every layer trainable: pool the four
    intermediate block outputs together with the final feature map and
    project to an L2-normalised 2048-d embedding."""
    backbone = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

    skip_outputs = get_layers_output_by_name(
        backbone, ["block1_pool", "block2_pool", "block3_pool", "block4_pool"])

    # Global-average-pool the final feature map and each skip output,
    # concatenating them into one wide descriptor.
    embedding = GlobalAveragePooling2D()(backbone.output)
    for _name, skip in skip_outputs.items():
        embedding = concatenate([embedding, GlobalAveragePooling2D()(skip)])

    embedding = Dense(2048, activation='relu')(embedding)
    embedding = Dropout(0.6)(embedding)
    embedding = Dense(2048, activation='relu')(embedding)
    embedding = Lambda(lambda t: K.l2_normalize(t, axis=1))(embedding)

    return tf.keras.models.Model(inputs=backbone.input, outputs=embedding)
# Build the model and the augmented triplet data generators, then train.
model = ranknet()
dg = DataGenerator({
    "rescale": 1. / 255,
    "horizontal_flip": True,
    "vertical_flip": True,
    "zoom_range": 0.2,
    "shear_range": 0.2,
    "rotation_range": 30
}, data_path, train_csv, val_csv, target_size=(img_width, img_height))
train_generator = dg.get_train_generator(batch_size, False)
test_generator = dg.get_test_generator(batch_size)
_loss_tensor = loss_function
accuracy = accuracy_function
# Nesterov SGD with the custom triplet loss/accuracy defined above.
model.compile(loss=_loss_tensor, optimizer=tf.train.MomentumOptimizer(
    learning_rate=lr, momentum=0.9, use_nesterov=True), metrics=[accuracy])
# Checkpoint only improvements in validation loss; log to TensorBoard.
model_checkpoint_path = "weights-improvement-{epoch:02d}-{val_loss:.2f}.h5"
model_checkpointer = ModelCheckpoint(job_dir, model_checkpoint_path, save_best_only=True, save_weights_only=True,
                                     monitor="val_loss", verbose=1)
tensorboard = TensorBoard(log_dir=job_dir + '/logs/',
                          histogram_freq=0, write_graph=True, write_images=True)
callbacks = [model_checkpointer, tensorboard]
model.fit_generator(train_generator,
                    steps_per_epoch=(train_generator.n //
                                     (train_generator.batch_size)),
                    validation_data=test_generator,
                    epochs=train_epoch,
                    validation_steps=(test_generator.n //
                                      (test_generator.batch_size)),
                    callbacks=callbacks)
# Final weights regardless of validation performance.
model.save_weights('output/model.h5')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# bucketstorage.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Apr 2019
# License: MIT - see the LICENSE file for the full text.
"""This contains functions that handle AWS S3/Digital Ocean
Spaces/S3-compatible bucket operations.
"""
#############
## LOGGING ##
#############
import logging
from vizinspect import log_sub, log_fmt, log_date_fmt
# Flip to True for verbose module-level logging.
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
LOGGER = logging.getLogger(__name__)
# Uses the vizinspect-wide log format/style constants imported above.
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)
# Short aliases used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import os
import boto3
#######################
## SPACES OPERATIONS ##
#######################
def client(
        keyfile,
        region='sfo2',
        endpoint='https://sfo2.digitaloceanspaces.com'
):
    '''This makes a new bucket client.
    Requires a keyfile containing the access token and the secret key in the
    following format::
        access_token secret_key
    Alternatively, `keyfile` may be a (access_token, secret_key) tuple or
    list.
    The default `region` and `endpoint` assume you're using Digital Ocean
    Spaces.
    If you're using S3, see:
    https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region to figure
    out the values for `region` and `endpoint`.
    '''
    if isinstance(keyfile, str):
        with open(keyfile, 'r') as infd:
            access_token, secret_key = infd.read().strip('\n').split()
    elif isinstance(keyfile, (tuple, list)):
        access_token, secret_key = keyfile
    else:
        # Previously an unsupported type fell through to a confusing
        # NameError on access_token; fail with a clear message instead.
        raise TypeError(
            "keyfile must be a path string or an "
            "(access_token, secret_key) tuple/list"
        )
    session = boto3.Session()
    client = session.client(
        's3',
        region_name=region,
        endpoint_url=endpoint,
        aws_access_key_id=access_token,
        aws_secret_access_key=secret_key
    )
    return client
def list_buckets(
        client=None,
        keyfile=None,
        region=None,
        endpoint=None,
        raiseonfail=False,
):
    '''
    This lists all buckets in the region.
    Returns the list of bucket dicts, or None on failure (unless
    `raiseonfail` is True, in which case the exception propagates).
    '''
    if not client:
        # BUGFIX: the `client` parameter shadows this module's client()
        # factory, so the factory must be fetched via globals() --
        # calling client(...) here would invoke None and raise TypeError.
        client = globals()['client'](keyfile, region=region, endpoint=endpoint)
    try:
        return client.list_buckets().get('Buckets')
    except Exception:
        LOGEXCEPTION("Could not list buckets.")
        if raiseonfail:
            raise
    return None
def list_bucket_contents(
        bucket,
        maxobjects=100,
        startingkey=None,
        keyprefix=None,
        client=None,
        keyfile=None,
        region=None,
        endpoint=None,
        raiseonfail=False,
):
    '''
    This lists a bucket's contents.
    Returns up to `maxobjects` object dicts starting after `startingkey`
    and restricted to `keyprefix`, or None on failure (unless
    `raiseonfail` is True, in which case the exception propagates).
    '''
    if not client:
        # BUGFIX: the `client` parameter shadows this module's client()
        # factory, so the factory must be fetched via globals() --
        # calling client(...) here would invoke None and raise TypeError.
        client = globals()['client'](keyfile, region=region, endpoint=endpoint)
    try:
        if not startingkey:
            startingkey = ''
        if not keyprefix:
            keyprefix = ''
        # DO uses v1 of the list_objects protocol
        ret = client.list_objects(
            Bucket=bucket,
            MaxKeys=maxobjects,
            Prefix=keyprefix,
            Marker=startingkey
        )
        return ret.get('Contents')
    except Exception:
        # BUGFIX: message previously said "Could not list buckets."
        LOGEXCEPTION("Could not list bucket contents.")
        if raiseonfail:
            raise
    return None
def get_file(
        bucket,
        filename,
        local_file,
        client=None,
        keyfile=None,
        region=None,
        endpoint=None,
        raiseonfail=False
):
    """This gets a file from a bucket.
    Parameters
    ----------
    bucket : str
        The bucket name.
    filename : str
        The full filename of the file to get from the bucket
    local_file : str
        Path to where the downloaded file will be stored.
    client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its operations. Alternatively, pass in an existing `boto3.Client`
        instance to re-use it here.
    raiseonfail : bool
        If True, will re-raise whatever Exception caused the operation to fail
        and break out immediately.
    Returns
    -------
    str
        Path to the downloaded filename or None if the download was
        unsuccessful.
    """
    if not client:
        # BUGFIX: the `client` parameter shadows this module's client()
        # factory, so the factory must be fetched via globals() --
        # calling client(...) here would invoke None and raise TypeError.
        client = globals()['client'](keyfile, region=region, endpoint=endpoint)
    try:
        client.download_file(bucket, filename, local_file)
        return local_file
    except Exception:
        LOGEXCEPTION('could not download %s from bucket: %s' %
                     (filename, bucket))
        if raiseonfail:
            raise
    return None
def put_file(
        local_file,
        bucket,
        client=None,
        keyfile=None,
        region=None,
        endpoint=None,
        raiseonfail=False
):
    """This uploads a file to a bucket.
    The object key in the bucket is the basename of `local_file`.
    Parameters
    ----------
    local_file : str
        Path to the file to upload to bucket.
    bucket : str
        The bucket to upload the file to.
    client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its operations. Alternatively, pass in an existing `boto3.Client`
        instance to re-use it here.
    raiseonfail : bool
        If True, will re-raise whatever Exception caused the operation to fail
        and break out immediately.
    Returns
    -------
    bool
        True if the upload succeeded, False otherwise.
    """
    if not client:
        # BUGFIX: the `client` parameter shadows this module's client()
        # factory, so the factory must be fetched via globals() --
        # calling client(...) here would invoke None and raise TypeError.
        client = globals()['client'](keyfile, region=region, endpoint=endpoint)
    try:
        client.upload_file(local_file, bucket, os.path.basename(local_file))
        return True
    except Exception:
        LOGEXCEPTION('could not upload %s to bucket: %s' % (local_file,
                                                            bucket))
        if raiseonfail:
            raise
    return False
def delete_file(
        bucket,
        filename,
        client=None,
        keyfile=None,
        region=None,
        endpoint=None,
        raiseonfail=False
):
    """This deletes a file from a bucket.
    Parameters
    ----------
    bucket : str
        The bucket to delete the file from.
    filename : str
        The full file name of the file to delete, including any prefixes.
    client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its operations. Alternatively, pass in an existing `boto3.Client`
        instance to re-use it here.
    raiseonfail : bool
        If True, will re-raise whatever Exception caused the operation to fail
        and break out immediately.
    Returns
    -------
    bool or None
        True if the file was successfully deleted (HTTP 204); None when
        the service returned no response or an exception was swallowed.
    """
    if not client:
        # BUGFIX: the `client` parameter shadows this module's client()
        # factory, so the factory must be fetched via globals() --
        # calling client(...) here would invoke None and raise TypeError.
        client = globals()['client'](keyfile, region=region, endpoint=endpoint)
    try:
        resp = client.delete_object(Bucket=bucket, Key=filename)
        if not resp:
            LOGERROR('could not delete file %s from bucket %s' % (filename,
                                                                  bucket))
        else:
            meta = resp.get('ResponseMetadata')
            return meta.get('HTTPStatusCode') == 204
    except Exception:
        LOGEXCEPTION('could not delete file %s from bucket %s' % (filename,
                                                                  bucket))
        if raiseonfail:
            raise
    return None
|
# Generated by Django 2.2.5 on 2019-10-15 09:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Project.supervisor nullable with SET_NULL so
    deleting a Supervisor keeps its projects (related_name='projects')."""
    dependencies = [
        ('home', '0005_auto_20191015_0952'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='supervisor',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='projects', to='home.Supervisor'),
        ),
    ]
|
def swap_case(s):
    """Return *s* with lowercase letters uppercased, uppercase letters
    lowercased and every other character left untouched."""
    # Single pass with a generator + join instead of mutating a list copy
    # character by character.
    return ''.join(
        ch.upper() if ch.islower() else ch.lower() if ch.isupper() else ch
        for ch in s
    )
if __name__ == '__main__': |
import lxml.etree
import lxml.builder
# Build an XML document listing one <component> per exported Android
# component of the battery app, then print it and save it to actions.txt.
E = lxml.builder.ElementMaker()
ROOT = E.root
DOC = E.doc
component = E.component
packagename = E.packagename
# NOTE(review): "conponentname" is misspelled, but the attribute name is
# emitted as the XML tag, so it must stay as-is to keep the output stable.
conponentname = E.conponentname
action = E.action

_PACKAGE = 'ch.smalltech.battery.free'

# (component class, intent action) pairs, in emission order.
_COMPONENTS = [
    ('ch.smalltech.battery.free.HomeFree',
     'android.intent.action.MAIN'),
    ('ch.smalltech.battery.core.widgets.WidgetConfigureActivityCompact',
     'android.appwidget.action.APPWIDGET_CONFIGURE'),
    ('ch.smalltech.battery.core.widgets.WidgetConfigureActivityHorizontal',
     'android.appwidget.action.APPWIDGET_CONFIGURE'),
    ('ch.smalltech.battery.core.widgets.WidgetConfigureActivityVertical',
     'android.appwidget.action.APPWIDGET_CONFIGURE'),
]

# Every widget provider reacts to the same four app-widget broadcasts,
# so generate those stanzas instead of copy-pasting them.
_WIDGET_ACTIONS = [
    'android.appwidget.action.APPWIDGET_UPDATE',
    'android.appwidget.action.APPWIDGET_ENABLED',
    'android.appwidget.action.APPWIDGET_DELETED',
    'android.appwidget.action.APPWIDGET_DISABLED',
]
for _provider in ('WidgetProviderCompact',
                  'WidgetProviderHorz',
                  'WidgetProviderVert'):
    for _act in _WIDGET_ACTIONS:
        _COMPONENTS.append(
            ('ch.smalltech.battery.core.widgets.' + _provider, _act))

_COMPONENTS.append(
    ('ch.smalltech.battery.core.widgets.BootCompletedReceiver',
     'android.intent.action.BOOT_COMPLETED'))

the_doc = ROOT(DOC(*[
    component(packagename(_PACKAGE), conponentname(_name), action(_act))
    for (_name, _act) in _COMPONENTS
]))

# Serialize once; print and persist the same bytes.  print(x) with a
# single argument behaves identically on Python 2 and 3.
xml_bytes = lxml.etree.tostring(the_doc, pretty_print=True)
print(xml_bytes)
# `with` guarantees the file handle is closed even on write errors.
with open('actions.txt', 'wb') as fo:
    fo.write(xml_bytes)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 14:08:54 2020
@author: andyb
dist, ss, phi, psi = {domain}_Y.pt
dist: tensor of size LxL containing integers from 0 to 32
ss: tensor of size 1xL containing integers from 0 to 7
phi: tensor of size 1xL containing integers from 0 to 36
psi: tensor of size 1xL containing integers from 0 to 36
ss:
H = 1
B = 2
E = 3
G = 4
I = 5
T = 6
S = 7
L = - = 8
missing data = 0
phi, psi:
[-180, -170) = 1
[-170, -160) = 2
...
[-10, 0) = 18
[0, 10) = 19
...
[160, 170) = 35
[170, 180) = 36
missing data = 0
"""
# %%
import numpy as np
import argparse
import torch
# %%
# args = argparse.Namespace()
# args.dist_file = "/faststorage/project/deeply_thinking_potato/data/"\
# "our_input/distance_maps/distance_maps32/139lA00.pt"
# args.ss_file = "/faststorage/project/deeply_thinking_potato/data/"\
# "our_input/secondary/139lA00.sec"
# args.phi_file = "/faststorage/project/deeply_thinking_potato/data/"\
# "our_input/torsion/phi/139lA00_phi.pt"
# args.psi_file = "/faststorage/project/deeply_thinking_potato/data/"\
# "our_input/torsion/psi/139lA00_psi.pt"
# args.output_file = "/faststorage/project/deeply_thinking_potato/data/"\
# "our_input/Y_tensors/139lA00_Y.pt"
# %%
# Command-line interface: all five file paths are mandatory.
parser = argparse.ArgumentParser()
parser.add_argument('--dist_file', required=True)
parser.add_argument('--ss_file', required=True)
parser.add_argument('--phi_file', required=True)
parser.add_argument('--psi_file', required=True)
parser.add_argument('--output_file', required=True)
args = parser.parse_args()
# %%
# Assemble the target tuple Y = (dist, ss, phi, psi) described in the
# module docstring and save it to the output file.
dist = torch.load(args.dist_file)
# %%
# Secondary structure: one line of DSSP letters -> integer codes 1..8.
with open(args.ss_file, "r") as ss_fh:
    ss = ss_fh.readlines()
assert len(ss) == 1
translation = {
    'H': 1,
    'B': 2,
    'E': 3,
    'G': 4,
    'I': 5,
    'T': 6,
    'S': 7,
    '-': 8,
}
# BUGFIX: strip a trailing newline (if present) -- it is not a DSSP
# symbol and previously raised a KeyError in the lookup below.
sequence = ss[0].rstrip('\n')
out = np.zeros((len(sequence)))
for i, letter in enumerate(sequence):
    out[i] = translation[letter]
ss = torch.tensor(out)
# %%
# build with assumption that the angles are from [-180, 180]
# (-np.inf, -180) -> 0
# [-180, -170) -> 1
# [-170, -160) -> 2
# ...
# [160, 170) -> 35
# [170, np.inf) -> 36
bins = np.linspace(-180, 170, 36)
phi = torch.load(args.phi_file).numpy()
phi = torch.tensor(np.digitize(phi, bins))
# %%
psi = torch.load(args.psi_file).numpy()
psi = torch.tensor(np.digitize(psi, bins))
# %%
Y = (dist, ss, phi, psi)
torch.save(Y, args.output_file)
|
import reactor
import atom
import copy
def genEmptyMap():
    """Return a fresh 10x8 grid filled with "E" (empty) cells.

    Each row is a distinct list, so mutating one cell never leaks into
    another row.
    """
    return [["E"] * 8 for _ in range(10)]
# Reactor simulation setup (SpaceChem-style puzzle) with verbose tracing.
r = reactor.Reactor()
r.setVerbose(True)
# Four 10x8 instruction grids ("E" = empty): one arrow map and one action
# map per waldo colour. Arrows (U/D/L/R) redirect the waldo at a cell;
# actions (grab/drop, bond, sync, output, ...) fire when the waldo passes.
blueArrowMap = genEmptyMap()
redArrowMap = genEmptyMap()
blueActionMap = genEmptyMap()
redActionMap = genEmptyMap()
# gen of puzzle
# Red waldo path (direction changes at [row][col]).
redArrowMap[1][6] = "D"
redArrowMap[1][2] = "R"
redArrowMap[3][2] = "U"
redArrowMap[3][4] = "R"
redArrowMap[6][4] = "D"
redArrowMap[6][1] = "L"
redArrowMap[2][1] = "U"
redArrowMap[2][5] = "R"
redArrowMap[7][5] = "U"
redArrowMap[7][6] = "L"
# Red waldo actions along that path.
redActionMap[4][6] = "SL"
redActionMap[3][6] = "INA"
redActionMap[1][6] = "GD"
redActionMap[1][4] = "B-"
redActionMap[1][3] = "SY"
redActionMap[1][2] = "SY"
redActionMap[3][4] = "B-"
redActionMap[5][4] = "SY"
redActionMap[6][4] = "SY"
redActionMap[2][4] = "B+"
redActionMap[7][6] = "GD"
redActionMap[6][6] = "OUTY"
# Blue waldo path and actions.
blueArrowMap[3][1] = "U"
blueArrowMap[3][7] = "D"
blueArrowMap[2][3] = "U"
blueArrowMap[2][5] = "R"
blueArrowMap[7][5] = "D"
blueArrowMap[7][1] = "L"
blueActionMap[3][1] = "SU"
blueActionMap[3][2] = "SY"
blueActionMap[3][3] = "FLL0"
blueActionMap[3][4] = "GD"
blueActionMap[3][6] = "SY"
blueActionMap[3][7] = "SY"
blueActionMap[2][3] = "B+"
blueActionMap[2][4] = "GD"
blueActionMap[7][3] = "GD"
blueActionMap[7][1] = "OUTW"
#Input A
# Input molecule A: two carbons bound together, each carrying hydrogens.
a = atom.Atom("H",False)
a.setLocation(0,6)
b = atom.Atom("H",False)
b.setLocation(1,7)
c = atom.Atom("H",False)
c.setLocation(1,5)
d = atom.Atom("H",False)
d.setLocation(2,7)
e = atom.Atom("H",False)
e.setLocation(2,5)
f = atom.Atom("H",False)
f.setLocation(3,6)
g = atom.Atom("C",False)
g.setLocation(1,6)
h = atom.Atom("C",False)
h.setLocation(2,6)
a.bindWith(g,"R")
b.bindWith(g,"D")
c.bindWith(g,"U")
d.bindWith(h,"D")
e.bindWith(h,"U")
f.bindWith(h,"L")
g.bindWith(h,"R")
#Output Y
# Expected output molecule Y (no locations needed for output templates).
aa = atom.Atom("H",False)
bb = atom.Atom("H",False)
cc = atom.Atom("H",False)
dd = atom.Atom("H",False)
ee = atom.Atom("C",False)
ff = atom.Atom("C",False)
aa.bindWith(ee,"D")
bb.bindWith(ee,"U")
cc.bindWith(ff,"D")
dd.bindWith(ff,"U")
ee.bindWith(ff,"R")
# NOTE(review): the bind below duplicates the previous line. It may be
# intentional (a second bindWith could model a double C=C bond) or a
# copy/paste slip — confirm against atom.bindWith semantics.
ee.bindWith(ff,"R")
#Output w
# Expected output molecule W: H-H.
aaa = atom.Atom("H",False)
bbb = atom.Atom("H",False)
aaa.bindWith(bbb,"R")
# Wire molecules, goals, instruction maps and bonder positions into the
# reactor, then run up to 999 cycles.
r.setMolecule("A",a)
r.setMolecule("Y",aa)
r.setMolecule("W",aaa)
r.setGoal("Y",10)
r.setGoal("W",10)
r.setArrowMapBlue(blueArrowMap)
r.setActionMapBlue(blueActionMap)
r.setArrowMapRed(redArrowMap)
r.setActionMapRed(redActionMap)
r.setBonders([[2,4],[3,4]])
for i in range(1,1000):
    # doCycle returns truthy on a crash (collision/invalid operation).
    if(r.doCycle(i)):
        print("Reactor has crash")
        quit()
    if(r.isFinished()):
        print("Reactor has completed is goal in " + str(i) + " cycles")
        quit()
|
import random
import datetime
class Task:
    """A unit of work whose weightage is derived from its instruction count.

    Attributes:
        taskid: identifier of the task.
        instructionlength: number of instructions the task executes.
        taskWeightage: 3 for lengths in [10000, 20000), 2 for
            [20000, 30000), and 1 for everything else (original comments
            map 1=high, 2=medium, 3=low weightage).
    """

    def __init__(self, taskid, instructionlength):
        self.taskid = taskid
        self.instructionlength = instructionlength
        if 10000 <= instructionlength < 20000:
            self.taskWeightage = 3
        elif 20000 <= instructionlength < 30000:
            self.taskWeightage = 2
        else:
            self.taskWeightage = 1
|
# Daydream
S = "erasedream"
add_d = ['dream', 'dreamer', 'erase', 'eraser']
T = []
# Sからadd_dを確認し
# add_dに該当した場合、文字数分Sから削除する
# 文字数が減ったSについて再度 add_dに該当するか確認する。
for i in range(len(S)):
if S[-3] == add_d[i][-3]:
T.insert(0, add_d[i])
if len(S) == len(T):
print('YES')
exit()
elif len(S) < len(T):
print('NO')
exit()
else:
T.insert(0, add_d[i])
if S[-len(T)-3] == T[-len(T)-3]:
if len(S) == len(T):
print('YES')
exit()
elif len(S) < len(T):
print('NO')
exit()
else:
T.insert(0, add_d[i])
else:
print('NO')
# T = T + add_d[i]
else:
pass
# Tにadd_dを任意の順で結合させる
print(len(S))
# for i in range(4):
# T = T + add_d[i]
# if S == T :
# print('YES')
# exit()
# else:
# pass
# print('NO')
# 文字数がS<Tなら 一致しないので確認を止まる
len(S) < len(T)
print(len(T))
print(T)
# if S == T :
# print(YES)
# else:
# print(NO)
# print(add_d) |
# Drill 1: print a decorated counter ten times.
for n in range(10):
    print("————————————————++++++++++%d" % n)

# Drill 2: sum the integers from 1 through 100.
num = 0
for n in range(1, 101):
    num += n
print(f"1~100的和是: {num}")

# Drill 3: a 7-row left-aligned star triangle (row k shows k stars).
for row in range(1, 8):
    for _ in range(row):
        print(f"*", end=" ")
    print()
# Loops execute an instruction until a given condition is satisfied.
# Print a greeting three times.
for count in range(1, 4):
    print("Hello")

# Equivalent of the classic "continue" demo: walk 2..100 and only print
# the values from 50 upward (smaller values are skipped).
for i in range(2, 101):
    if i >= 50:
        print(i)
from typing import List
from collections import Counter
class Solution:
    def countCharacters(self, words: List[str], chars: str) -> int:
        """Return the total length of words spellable from `chars`.

        A word counts when every character it needs is available in
        `chars`, using each occurrence at most once.
        """
        if not words or not chars:
            return 0
        available = Counter(chars)
        # Counter subtraction keeps only positive leftovers, so a word is
        # spellable exactly when Counter(word) - available is empty.
        return sum(
            len(word) for word in words if not (Counter(word) - available)
        )
# Demo driver; guarded so importing this module has no side effects.
if __name__ == "__main__":
    words = ["hello", "world", "leetcode"]
    chars = "welldonehoneyr"
    print(Solution().countCharacters(words, chars))
|
import requests
from bs4 import BeautifulSoup
from src import config
from src.RentalObject import RentalObject
from src.dao import base, write_dao
def parse_main_page(str):
    """Log in to the housing site, scrape rental objects and persist them.

    str
        Page type: "bostad" (scrape and store) or "nationsgardarna"
        (not implemented yet).

    NOTE(review): the parameter shadows the ``str`` builtin; renaming it
    would break keyword callers, so it is only flagged here.
    """
    if str == "bostad":
        session_requests = requests.session()
        # Fetch the login page to harvest the anti-CSRF token.
        response = session_requests.get(config.BU_LOGIN_URL)
        soup = BeautifulSoup(response.text, features="lxml")
        # NOTE(review): if no __RequestVerificationToken input is present,
        # authenticity_token stays unbound and the print below raises
        # NameError — confirm the page always carries the token.
        for n in soup('input'):
            if n['name'] == '__RequestVerificationToken':
                authenticity_token = n['value']
                break
        print(authenticity_token)
        # Credentials come from src.config; token must be echoed back.
        payload = {
            "LoginDetails.PersonalIdentityNumberOrEmail": config.USERNAME,
            "LoginDetails.Password": config.PASSWORD,
            "__RequestVerificationToken": authenticity_token
        }
        response = session_requests.post(
            config.BU_LOGIN_URL,
            data=payload,
            headers=dict(referer=config.BU_LOGIN_URL)
        )
        print(response)
        # Authenticated fetch of the listing page.
        response = session_requests.get(
            config.BU_URL,
            headers=dict(referer=config.BU_LOGIN_URL)
        )
        soup = BeautifulSoup(response.text, features="lxml")
        results = soup.find_all("div", "rentalobject")
        # NOTE(review): `rentals` is created but never appended to — each
        # object is written straight to the DAO instead.
        rentals = []
        for result in results:
            rental = RentalObject(result)
            write_dao.save_object(rental.get_insert_list())
        base.write_commit()
        base.close_connection()
        '''
        for span in spans.items():
            print(span)
        print(len(spans))
        '''
    elif str == "nationsgardarna":
        raise NotImplementedError
|
#10-3
# Guest book exercise: collect guest names until 'q', assign each a unique
# random room number, write "name - Room #n" lines to a file, then echo
# the file back.
filename = "guest_book.txt"

import os
import random

# Start from a clean file each run.
if os.path.isfile(filename):
    os.remove(filename)

takenRoomNumbers = []  # room numbers already handed out
newUserName = input("Please let us know your name (q to quit):\n")
with open(filename, "w") as guestList:
    while newUserName != 'q':
        # Draw random rooms until we hit one that is still free.
        # (With 50 rooms, more than 50 guests would loop forever.)
        roomNumber = random.randint(1, 50)
        while roomNumber in takenRoomNumbers:
            roomNumber = random.randint(1, 50)
        # BUG FIX: the taken list was never updated, so the uniqueness
        # check was a no-op and duplicate rooms could be assigned.
        takenRoomNumbers.append(roomNumber)
        guestAndRoom = f"{newUserName} - Room #{roomNumber}\n"
        guestList.write(guestAndRoom)
        print(f"Hello, {newUserName}. Your room number is {roomNumber}")
        newUserName = input("Please let us know your name (q to quit):\n")

# Echo the saved guest list.
with open(filename) as guestList:
    for guest in guestList:
        print(f"{guest}")
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-10 19:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10.4): drop the ``total`` field
    from the ``Register`` model of app ``fb``."""

    dependencies = [
        ('fb', '0002_register_total'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='register',
            name='total',
        ),
    ]
|
from __future__ import unicode_literals
from builtins import str as text
from .utils import concat_args, concat_commands, zpl_bool
class Command(bytes):
    """
    Convenience class for generating bytes to be sent to a ZPL2 printer.

    Note: ``__new__`` returns the already-concatenated ``bytes`` value
    (prefix + name + args), so the classmethods below yield plain bytes.
    """
    ENCODING = 'cp1252'

    COMMAND_TYPE_CONTROL = b'~'
    COMMAND_TYPE_FORMAT = b'^'

    # ZPL2 command mnemonics.
    FIELD_BLOCK = 'FB'
    FIELD_DATA = 'FD'
    FIELD_INVERT = 'FR'
    FIELD_ORIGIN = 'FO'
    FIELD_SEPARATOR = 'FS'
    FONT = 'A'
    GRAPHIC_BOX = 'GB'
    GRAPHIC_CIRCLE = 'GC'
    LABEL_END = 'XZ'
    LABEL_HOME = 'LH'
    LABEL_START = 'XA'
    BARCODE_EAN = 'BE'
    BARCODE_FIELD_DEFAULT = 'BY'

    def __new__(cls, command_name, *args, **options):
        """
        Constructor for a command returning bytes in an acceptable encoding.

        The following options are available:

        type
            Either Command.COMMAND_TYPE_FORMAT or
            Command.COMMAND_TYPE_CONTROL.
            (Defaults to Command.COMMAND_TYPE_FORMAT.)
        """
        # DOC FIX: the option key actually popped by get_command_type is
        # 'type'; the old docstring referred to a non-existent
        # 'command_type' option and constants.
        command_type = cls.get_command_type(options)
        command_args = concat_args(args).encode(cls.ENCODING, errors='replace')
        obj = (
            command_type +
            command_name.encode(cls.ENCODING) +
            command_args
        )
        return obj

    ######################
    # Command primitives #
    ######################

    @classmethod
    def field_block(
            cls, width, n_lines='',
            line_spacing='', justify='', left_margin=''):
        "A bounding box for the printed field data."
        return cls(
            cls.FIELD_BLOCK,
            width, n_lines, line_spacing, justify, left_margin,
        )
    block = field_block

    @classmethod
    def field_data(cls, data):
        "The content data of the field."
        return cls(cls.FIELD_DATA, data)

    @classmethod
    def field_invert(cls):
        "Prints the field black-on-white."
        return cls(cls.FIELD_INVERT)

    @classmethod
    def field_origin(cls, x, y):
        "Sets the origin coordinates of a field."
        return cls(cls.FIELD_ORIGIN, x, y)

    @classmethod
    def field_separator(cls):
        "Must be placed between separate field definitions."
        return cls(cls.FIELD_SEPARATOR)

    @classmethod
    def graphic_box(cls, x, y, width=None, height=None, thickness=1, color='B', r=0):
        """
        Graphic box.

        R is the degree of corner rounding, such that 0 is unrounded and 8 is max rounding.
        """
        # width/height default to the line thickness (a minimal box).
        return concat_commands(
            cls.field_origin(x, y),
            cls(
                cls.GRAPHIC_BOX,
                width or thickness,
                height or thickness,
                thickness,
                color,
                r,
            ),
            cls.field_separator(),
        )

    @classmethod
    def graphic_circle(cls, x, y, diameter=50, border=3, color='B'):
        "Graphic circle at (x, y) with the given diameter and border width."
        return concat_commands(
            cls.field_origin(x, y),
            cls(cls.GRAPHIC_CIRCLE, diameter, border, color),
            cls.field_separator(),
        )

    @classmethod
    def label_end(cls):
        "Ends a series of label print commands."
        return cls(cls.LABEL_END)

    @classmethod
    def label_home(cls, x, y):
        "Sets the origin for an entire label."
        return cls(cls.LABEL_HOME, x, y)

    @classmethod
    def label_start(cls):
        "Starts a series of label print commands."
        return cls(cls.LABEL_START)

    @classmethod
    def font(cls, name, height, width=None, orientation=''):
        """
        Sets the current font.

        name
            The name of a font available to the printer. Typically
            indicated by 0-9 or A-Z.
        height
            The height of the font in points.
        width
            The width of the font in points. Defaults to matching
            the height.
        orientation
            The direction of the text. See ORIENTATION_*.
        """
        if width is None:
            width = height
        # Font name and orientation are not comma-separated
        name_with_orientation = ''.join((str(name), orientation))
        return cls(cls.FONT, name_with_orientation, height, width)

    @classmethod
    def barcode_ean(
            cls, orientation='', height='',
            interpretation_line=True, interpretation_line_is_above=True):
        """
        Indicates that the current field is an EAN-13 barcode.

        orientation
            The direction of the barcode. See ORIENTATION_*.
        height
            The height of the barcode.
        interpretation_line
            Indicates whether the barcode numbers should be printed.
        interpretation_line_is_above
            Indicates that the barcode numbers should be printed above
            the barcode, not below.
        """
        return cls(
            cls.BARCODE_EAN, orientation, height,
            zpl_bool(interpretation_line),
            zpl_bool(interpretation_line_is_above),
        )

    @classmethod
    def barcode_field_default(cls, width, heigth='', ratio=''):
        "Sets barcode field defaults. Applied until new defaults are applied."
        # NOTE(review): arguments are emitted in ZPL ^BY order
        # (width, ratio, height), which differs from this signature's
        # parameter order; the misspelled ``heigth`` is kept so existing
        # keyword callers do not break.
        return cls(cls.BARCODE_FIELD_DEFAULT, width, ratio, heigth)

    ####################
    # Complex commands #
    ####################

    @classmethod
    def field(
            cls, text, x=0, y=0, font=None,
            block=None, barcode=None, invert=False):
        """
        Convenience method for generating a field, which is basically
        a concatenation of several related subcommands.

        text
            Field data -- e.g. text or a barcode value.
        x, y
            Placement within the label.
        invert
            Inverts the colors of the printed text.
        """
        invert_cmd = cls.field_invert() if invert else None
        return concat_commands(
            font,
            cls.field_origin(x, y),
            block,
            barcode,
            invert_cmd,
            cls.field_data(text),
            cls.field_separator(),
        )

    #############
    # Utilities #
    #############

    @classmethod
    def get_command_type(cls, options):
        """
        Gets the command type from the options, verifying that it is valid.

        Pops 'type' from *options* (mutating the dict) and defaults to
        COMMAND_TYPE_FORMAT. Raises TypeError for unknown values.
        """
        valid_command_types = [
            cls.COMMAND_TYPE_CONTROL,
            cls.COMMAND_TYPE_FORMAT,
        ]

        # Get the command type from options
        command_type = options.pop('type', cls.COMMAND_TYPE_FORMAT)
        if command_type not in valid_command_types:
            # BUG FIX: ', '.join() over bytes values raised TypeError
            # ("expected str instance, bytes found"), masking the intended
            # error message; decode the prefixes for display instead.
            raise TypeError('command_type must be in {}'.format(
                ', '.join(t.decode(cls.ENCODING)
                          for t in valid_command_types)
            ))
        return command_type

    @classmethod
    def to_bytes(cls, data):
        """
        Encodes the given data in an encoding understood by the printer.
        """
        return text(data).encode(cls.ENCODING)
class Font(text):
    """
    Utility object for defining fonts, then scaling them in later use.

    Use in the following way:

        # Create a default font and a scaled variation
        font_default = Font('T', height=20, width=15)
        font_header = font_default(50, 50)

        # Pass as font parameter
        header = Command.field('Header',
                               font=font_header)
        body = Command.field('Body', y=header_font.height + 10,
                             font=font_default)
    """

    def __new__(cls, name, height=30, width=None, orientation=''):
        # Compile the ^A font command once; the str value of the instance
        # IS that command text.
        cmd = cls.as_command(
            name,
            height=height,
            width=width,
            orientation=orientation,
        )
        # The returned object is a compiled font command as a string.
        obj = text.__new__(
            cls, cmd.decode(Command.ENCODING)
        )
        # Attach the properties for use in `with_props`
        obj.name = name
        obj.height = height
        obj.width = width
        obj.orientation = orientation
        return obj

    def with_props(self, **props):
        """
        Returns a modified version of current font, scaled to the given height
        and width.
        """
        # Default known props to the same as self
        # (setdefault only fills in keys the caller did not pass at all).
        props.setdefault('name', self.name)
        props.setdefault('height', self.height)
        props.setdefault('width', self.width)
        props.setdefault('orientation', self.orientation)
        return self.__class__(**props)

    def __call__(self, height=None, width=None, **kwargs):
        # NOTE(review): calling a Font with no arguments passes
        # height=None explicitly, which setdefault in with_props will NOT
        # replace with self.height — the resulting command gets a None
        # height. Confirm whether that is intended.
        return self.with_props(height=height, width=width, **kwargs)

    @staticmethod
    def as_command(name, height, width, orientation):
        """
        Returns the font as a Command.
        """
        return Command.font(
            name=name,
            height=height,
            width=width,
            orientation=orientation,
        )
|
import socket
from django.contrib import admin
from app.RecursosHumanos.models import Persona
from app.RecursosHumanos.models import PersonaTelefono
from app.RecursosHumanos.models import Trabajador
from app.RecursosHumanos.models import Area
from app.RecursosHumanos.models import Puesto
# from app.RecursosHumanos.models import TrabajadorPuesto
# Expose the HR models in the Django admin with the default ModelAdmin.
admin.site.register(Persona)
admin.site.register(PersonaTelefono)
admin.site.register(Trabajador)
admin.site.register(Area)
admin.site.register(Puesto)
# admin.site.register(TrabajadorPuesto)
|
"""
finger_bing.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import w3af.core.controllers.output_manager as om
import w3af.core.data.parsers.parser_cache as parser_cache
from w3af.core.controllers.plugins.infrastructure_plugin import InfrastructurePlugin
from w3af.core.controllers.exceptions import BaseFrameworkException, ScanMustStopOnUrlError
from w3af.core.controllers.exceptions import RunOnce
from w3af.core.controllers.misc.decorators import runonce
from w3af.core.controllers.misc.is_private_site import is_private_site
from w3af.core.data.search_engines.bing import bing as bing
from w3af.core.data.options.opt_factory import opt_factory
from w3af.core.data.options.option_list import OptionList
from w3af.core.data.kb.info import Info
class finger_bing(InfrastructurePlugin):
    """
    Search Bing to get a list of users for a domain.

    :author: Andres Riancho (andres.riancho@gmail.com)
    """
    # NOTE: this module uses Python 2 exception syntax ("except E, var").

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._accounts = []  # emails already reported, to avoid duplicates

        # User configured
        self._result_limit = 300

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request, debugging_id):
        """
        :param debugging_id: A unique identifier for this call to discover()
        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        # Skip private/internal hosts — Bing cannot have indexed them.
        if not is_private_site(fuzzable_request.get_url().get_domain()):
            bingSE = bing(self._uri_opener)
            self._domain = fuzzable_request.get_url().get_domain()
            self._domain_root = fuzzable_request.get_url().get_root_domain()
            # Query "@root-domain" to surface pages containing emails.
            results = bingSE.get_n_results('@' + self._domain_root,
                                           self._result_limit)
            # Send the requests using threads:
            self.worker_pool.map(self._find_accounts, results)

    def _find_accounts(self, page):
        """
        Finds emails in bing result.

        :return: A list of valid accounts
        """
        try:
            url = page.URL
            om.out.debug('Searching for emails in: %s' % url)
            # Only grep responses that belong to the target domain itself.
            grep = True if self._domain == url.get_domain() else False
            response = self._uri_opener.GET(page.URL, cache=True,
                                            grep=grep)
        except ScanMustStopOnUrlError:
            # Just ignore it
            pass
        except BaseFrameworkException, w3:
            msg = 'ExtendedUrllib exception raised while fetching page in' \
                  ' finger_bing, error description: "%s"'
            om.out.debug(msg % w3)
        else:
            # I have the response object!
            get_document_parser_for = parser_cache.dpc.get_document_parser_for
            try:
                document_parser = get_document_parser_for(response, cache=False)
            except BaseFrameworkException:
                # Failed to find a suitable parser for the document
                pass
            else:
                # Search for email addresses
                for mail in document_parser.get_emails(self._domain_root):
                    if mail not in self._accounts:
                        self._accounts.append(mail)

                        desc = 'The mail account: "%s" was found at: "%s".'
                        desc = desc % (mail, page.URL)

                        i = Info('Email account', desc, response.id,
                                 self.get_name())
                        i.set_url(page.URL)
                        i['mail'] = mail
                        i['user'] = mail.split('@')[0]
                        i['url_list'] = {page.URL}

                        self.kb_append('emails', 'emails', i)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d1 = 'Fetch the first "result_limit" results from the Bing search'
        o = opt_factory('result_limit', self._result_limit, d1, 'integer')
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param options_list: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._result_limit = options_list['result_limit'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
        This plugin finds mail addresses in Bing search engine.

        One configurable parameter exist:
            - result_limit

        This plugin searches Bing for : "@domain.com", requests all search
        results and parses them in order to find new mail addresses.
        """
|
import math
from qlazy import QState
def measure(phase):
    """Measure H|0> (the |+> state) along an axis set by *phase*.

    *phase* is in units of PI. Prints the measured probability of up-spin
    over 100 shots next to the theoretical value cos(phase*PI/2)^2.
    """
    qs = QState(1)
    qs.h(0)  # prepare |+> from |0>
    # angle/phase select the measurement direction (qlazy m() API);
    # .frq is the per-outcome shot histogram.
    freq_list = qs.m([0], shots=100, angle=0.5, phase=phase).frq
    prob = freq_list[0] / 100
    print("===")
    print("phase = {0:.4f} PI".format(phase))
    print("[measured] prob. of up-spin = {0:.4f}".format(prob))
    print("[theoretical] cos(phase/2)^2 = {0:.4f}".format((math.cos(phase*math.pi/2))**2))
def main():
    """Run the spin-measurement demo for phases 0..PI in quarter steps."""
    for ph in (0.0, 0.25, 0.5, 0.75, 1.0):
        measure(ph)


if __name__ == '__main__':
    main()
|
"""Created on Fri Oct 30 10:07:23 2020.
@author: Guilherme Bresciani de Azevedo
"""
# TO DO
# Try to format value got from excel based on 'CELL.NumberFormat' info.
import os
import time
import sys
import tkinter as tk
from tkinter import filedialog
import logging
from PIL import ImageGrab
import win32com.client as client
from utils import utils_general as u_gen
from utils import utils_files as u_fil
from utils import utils_excel as u_exc
# Global declaration
TIME_TO_SLEEP = 5  # seconds to keep the console readable before sys.exit

# Window app declaration
# Hidden Tk root used only as a parent for the file dialogs.
APP_ROOT = tk.Tk()
APP_ROOT.attributes("-topmost", True)  # to open dialogs in front of all
#APP_ROOT.lift()
APP_ROOT.withdraw()  # hide application main window
try:
    # Optional custom window icon; silently ignored when icon.ico is absent.
    APP_ROOT.iconbitmap(os.getcwd() + '\\icon.ico')
except Exception:
    pass
# Get email Template .htm* file
HTML_FILE_PATH = filedialog.askopenfilename(parent=APP_ROOT,
                                            title="Select email template",
                                            filetypes=[('HTML files',
                                                        ['*.htm', '*.html'])])
try:  # try to read html file on path informed by user
    with open(HTML_FILE_PATH) as USER_FILE:
        HTML_BODY = USER_FILE.read()  # read as one string
except Exception:
    # Abort after a pause so the user can read the console message.
    print("Error reading HTML file '{}' !!!".format(HTML_FILE_PATH))
    time.sleep(TIME_TO_SLEEP)
    sys.exit()
if len(HTML_BODY) == 0:
    print("File empty '{}' !!!".format(HTML_FILE_PATH))
    time.sleep(TIME_TO_SLEEP)
    sys.exit()
# Define files folder
# Browsers save "page.htm" together with a sibling "page_files"-style
# folder; probe the localized suffixes to find it.
LIST_POSSIBLE_FOLDERS = ['_arquivos', '_files', '_file', '_fichiers']
FILES_DIR = ''
for POSSIBLE_FOLDER in LIST_POSSIBLE_FOLDERS:
    # check if files folder exists in same path o HTML file
    if os.path.exists(HTML_FILE_PATH.split('.htm')[0] + POSSIBLE_FOLDER):
        FILES_DIR = HTML_FILE_PATH.split('.htm')[0] + POSSIBLE_FOLDER
        break
if FILES_DIR == '':  # folder don't exists with expected names in the folder
    print("No file folder corresponding to file '{}' was found in path '{}'"
          " !!!".format('/'.join(HTML_FILE_PATH.split('/')[0:-1]),
                        HTML_FILE_PATH.split('/')[-1]))
    time.sleep(TIME_TO_SLEEP)
    sys.exit()
# Log declaration
u_fil.delete_file(FILES_DIR + '/' + 'log.log')  # logging only last time run
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# File handler captures INFO+; console only shows WARNING+.
file_formatter = logging.Formatter('%(levelname)s : %(message)s')
file_handler = logging.FileHandler(FILES_DIR + '/' + 'log.log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(file_formatter)
stream_formatter = logging.Formatter('%(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.WARNING)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.propagate = False  # avoid duplication of registers
# Check Manual values setting file .txt
# Each line of this file names a template tag whose value the user types in.
TXT_FILE_PATH = FILES_DIR + "/setting_value_manually.txt"
USER_VAL_MAN_LIST = u_fil.get_list_from_txt_file_by_line(TXT_FILE_PATH,
                                                         remove_header=True,
                                                         raise_not_found=False)
# Start dict of values to replace
VAL_DICT = {}
for ID in USER_VAL_MAN_LIST:  # IDs defined in setting_value_manually.txt
    USER_MAN_VALUE = str(input("Write a value for the ID tag named '{}':\n"
                               .format(ID)))  # Ask for value of a key
    VAL_DICT[ID] = USER_MAN_VALUE
# Check Image setting file .txt
# Remaining setting files may reference the manual values via {tag} markers,
# resolved with SafeDict (unknown keys pass through untouched).
TXT_FILE_PATH = FILES_DIR + "/setting_image.txt"
USER_IMG_LIST = u_fil.get_list_from_txt_file_by_line(TXT_FILE_PATH, True, [],
                                                     u_gen.SafeDict(VAL_DICT))
# Check Value setting file .txt
TXT_FILE_PATH = FILES_DIR + "/setting_value.txt"
USER_VAL_LIST = u_fil.get_list_from_txt_file_by_line(TXT_FILE_PATH, True, [],
                                                     u_gen.SafeDict(VAL_DICT))
# Check Send To setting file.txt
TXT_FILE_PATH = FILES_DIR + "/setting_send_to.txt"
USER_SEND_TO_LIST = u_fil.get_list_from_txt_file_by_line(
    TXT_FILE_PATH, True, [], u_gen.SafeDict(VAL_DICT))
# Check Subject setting file.txt
TXT_FILE_PATH = FILES_DIR + "/setting_subject.txt"
USER_SUBJECT_LIST = u_fil.get_list_from_txt_file_by_line(
    TXT_FILE_PATH, True, [], u_gen.SafeDict(VAL_DICT))
USER_SUBJECT = USER_SUBJECT_LIST[0]  # only the first line is the subject
# Ask for Excel files to get information from
DIALOG_TITLE = "Select one Excel file to get information"
DIALOG_2ND_TITLE = "Select another Excel file to get information or Cancel"
# BUG FIX: '*.xlsx' was listed twice; the second entry was presumably meant
# to be the legacy '*.xls' extension.
DIALOG_FILE_TYPES = [('Excel files', ['*.xlsx', '*.xls', '*.xlsm'])]
XLS_FILE_LIST = u_fil.ask_for_files(APP_ROOT, DIALOG_TITLE,
                                    DIALOG_2ND_TITLE, DIALOG_FILE_TYPES,
                                    more_than_one=True)
# Check if at least one Excel file was selected
if XLS_FILE_LIST is None:  # user closed the first ask dialog
    u_fil.safe_exit(handler=[file_handler, stream_handler],
                    app_root=APP_ROOT)  # exit and log intentional exit
# List names of Worsheets for indexing data collection by name
XLS_FILE_NAMES = [FILE_PATH.split('/')[-1] for FILE_PATH in XLS_FILE_LIST]
################################# ↓ EXCEL ↓ #################################
# New instance of Excel
# DispatchEx starts a dedicated Excel process instead of reusing one.
EXCEL = client.DispatchEx('Excel.Application')
logger.info("Opened Excel application.")
if int(EXCEL.Version[0:2]) < 16:
    #EXCEL.ScreenUpdating = True
    #EXCEL.DisplayAlerts = True
    EXCEL.Visible = True  # necessary to .CopyPicture() in Version 15.0 (2013)
# Open workbooks
WORKBOOK = []
for PATH in XLS_FILE_LIST:
    try:
        WORKBOOK.append(EXCEL.Workbooks.Open(PATH))
        logger.info("Opened Workbook '{}'.".format(PATH))
    except Exception:
        # Any unreadable workbook aborts the run (cleanly closing Excel).
        logger.error("Error opening the Workbook '{}' !!!".format(PATH))
        u_fil.safe_exit(to_Close=WORKBOOK, to_Quit=EXCEL,
                        handler=[file_handler, stream_handler],
                        app_root=APP_ROOT)
# List worksheets in same index order of WORKBOOK list
SHEET_LISTS = []
for BOOK in WORKBOOK:
    SHEET_LIST = []
    for SHEET in BOOK.Worksheets:
        SHEET_LIST.append(SHEET.Name)
    SHEET_LISTS.append(SHEET_LIST)
logger.info("Listed Worsheets by Workbook '{}'.".format(SHEET_LISTS))
######################## ↓ GET VALUES ↓ ########################
# Each line of setting_value.txt is a tab-separated "ID Book Sheet Cell"
# setup; the cell's value is collected into VAL_DICT under ID.
for SETUP in USER_VAL_LIST:
    # format setup with values previously got manually and automatically
    SETUP = SETUP.format_map(u_gen.SafeDict(VAL_DICT))
    KEY = str(SETUP.split(sep='\t')[0])  # dict key to format_map
    BOOK_INDEX = u_gen.get_index_by_str(SETUP.split(sep='\t')[1],
                                        XLS_FILE_NAMES)
    if BOOK_INDEX is None:
        logger.warning("Could not found Workbook for setup '{}' !"
                       .format(SETUP))
        # BUG FIX: a bare `next` expression is a no-op; `continue` is
        # required, otherwise WORKBOOK[None] below raises TypeError.
        continue
    BOOK = WORKBOOK[BOOK_INDEX]  # user Book=1 will access by index 0
    SHEET_INDEX = u_gen.get_index_by_str(SETUP.split(sep='\t')[2],
                                         SHEET_LISTS[BOOK_INDEX])
    if SHEET_INDEX is None:
        logger.warning("Could not found Worksheet for setup '{}' !"
                       .format(SETUP))
        continue  # BUG FIX: was a bare `next` (no-op)
    SHEET = BOOK.Sheets[SHEET_INDEX]  # user Sheet=1 will access by index 0
    LOCATION = SETUP.split(sep='\t')[3]
    CELL = u_exc.get_cell(SHEET, LOCATION)  # decode LOCATION if logic present
    if CELL is not None:  # successfully got cell
        try:  # try to get .Value attribute of cell
            VAL_DICT[KEY] = str(CELL.Value)
        except Exception:
            logger.warning("Value not found for Cell in setup '{}' !"
                           .format(SETUP))
    else:
        logger.warning("Cell not found for Value setup '{}' !".format(SETUP))
# Log and print values found to user
logger.info("Values found for replacement are '{}'.".format(VAL_DICT))
print("\n")
print("Values found for replacement are:")
# Plain loop (was a side-effect list comprehension).
for KEY in VAL_DICT:
    print(KEY + ": " + VAL_DICT[KEY])
print("\n")
######################## ↑ GET VALUES ↑ ########################
######################## ↓ GET IMAGES ↓ ########################
# Each line of setting_image.txt is a tab-separated
# "ID Book Sheet Type Cell/Num" setup; a table range or chart is exported
# to FILES_DIR as the image file named by ID.
for SETUP in USER_IMG_LIST:
    # format setup with values previously got manually and automatically
    SETUP = SETUP.format_map(u_gen.SafeDict(VAL_DICT))
    IMAGE_ID = SETUP.split(sep='\t')[0]  # image file with extension
    BOOK_INDEX = u_gen.get_index_by_str(SETUP.split(sep='\t')[1],
                                        XLS_FILE_NAMES)
    if BOOK_INDEX is None:
        logger.warning("Could not found Workbook for setup '{}' !"
                       .format(SETUP))
        u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete wrong old image
        # BUG FIX: a bare `next` expression is a no-op; `continue` is
        # required, otherwise WORKBOOK[None] below raises TypeError.
        continue
    BOOK = WORKBOOK[BOOK_INDEX]  # user Book=1 will access by index 0
    SHEET_INDEX = u_gen.get_index_by_str(SETUP.split(sep='\t')[2],
                                         SHEET_LISTS[BOOK_INDEX])
    if SHEET_INDEX is None:
        logger.warning("Could not found Worksheet for setup '{}' !"
                       .format(SETUP))
        u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete wrong old image
        continue  # BUG FIX: was a bare `next` (no-op)
    SHEET = BOOK.Sheets[SHEET_INDEX]  # user Sheet=1 will access by index 0
    TYPE = SETUP.split(sep='\t')[3]  # Type = 'table' or 'chart'
    LOCATION = SETUP.split(sep='\t')[4]
    if TYPE == 'table':
        BGN_LOCATION = str(LOCATION.split(sep=':')[0])  # get A1 of range A1:B2
        END_LOCATION = str(LOCATION.split(sep=':')[1])  # get B2 of range A1:B2
        BGN_CELL = u_exc.get_cell(SHEET, BGN_LOCATION)  # decode logic present
        END_CELL = u_exc.get_cell(SHEET, END_LOCATION)  # decode logic present
        if (BGN_CELL is not None and
                END_CELL is not None):  # location could be recognized
            try:  # try to select and copy range
                RANGE = SHEET.Range(BGN_CELL, END_CELL)
                RANGE.CopyPicture(Appearance=1, Format=2)
                try:  # try to save image copied (from the clipboard)
                    ImageGrab.grabclipboard().save(FILES_DIR + '/' + IMAGE_ID)
                except Exception:
                    logger.warning("Could not save image for setup '{}' !"
                                   .format(SETUP))
                    u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
            except Exception:
                logger.warning("Range not found for setup '{}' !"
                               .format(SETUP))
                u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
        else:
            logger.warning("Cell not found for Value setup '{}' !"
                           .format(SETUP))
            u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
    elif TYPE == 'chart':
        try:
            CHART = SHEET.ChartObjects(int(LOCATION))  # select chart by number
            CHART.Activate()  # avoid exporting an image with nothing inside
            try:
                CHART.Chart.Export(FILES_DIR + '/' + IMAGE_ID)
            except Exception:
                logger.warning("Could not save image for setup '{}' !"
                               .format(SETUP))
                u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
        except Exception:
            logger.warning("Chart no reachable for setup '{}' !".format(SETUP))
            u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
    else:  # user defined type not recognizable
        logger.warning("Type not recognized for setup '{}' !".format(SETUP))
        u_fil.delete_file(FILES_DIR + '/' + IMAGE_ID)  # delete old
# Image not reachable do not raise error, only warn and assure no wrong image
######################## ↑ GET IMAGES ↑ ########################
# Close Workbooks and Excel
# opt_exit=False keeps the script running — the Outlook stage follows.
u_fil.safe_exit(to_Close=WORKBOOK, to_Quit=EXCEL, opt_exit=False)
logger.info("Closed Excel applications.")
################################# ↑ EXCEL ↑ #################################
################################ ↓ OUTLOOK ↓ ################################
# Instance of Outlook
# NOTE: Dispatch (unlike DispatchEx above) may attach to a running Outlook.
OUTLOOK = client.Dispatch('Outlook.Application')
logger.info("Opened Outlook application.")
# Create a message
MESSAGE = OUTLOOK.CreateItem(0)  # 0 = olMailItem (a new mail item)
# Format Send To
# Resolve {tag} markers in each recipient line; unresolvable lines are
# kept verbatim so a bad tag never drops a recipient.
SEND_TO_LIST = []
for LINE in USER_SEND_TO_LIST:
    try:  # try to format 'recipient' with 'value' found in Excel
        SEND_TO_LIST.append(LINE.format_map(u_gen.SafeDict(VAL_DICT)))
    except (KeyError, ValueError):  # a value was not found for a key
        SEND_TO_LIST.append(LINE)  # mantaining original line
        logger.warning("Key/Value error formating '{}' user 'sent to' settings"
                       " !".format(LINE))
        pass
    except Exception:
        logger.exception("Error formating '{}' user 'send to' settings !!!"
                         .format(LINE))
        pass
# Format Subject
# Same policy for the subject: fall back to the raw template on error.
SUBJECT = ''
try:  # try to format 'subject' with 'value' found in Excel
    SUBJECT = USER_SUBJECT.format_map(u_gen.SafeDict(VAL_DICT))
except (KeyError, ValueError):  # a value was not found for a key
    SUBJECT = USER_SUBJECT  # mantaining original line
    logger.warning("Key/Value error formating '{}' user 'subject' settings"
                   " !".format(USER_SUBJECT))
    pass
except Exception:
    logger.exception("Error formating '{}' user 'subject' settings !!!"
                     .format(USER_SUBJECT))
    pass
# List files
try:
    LIST_DIR = os.listdir(FILES_DIR)
except Exception:
    logger.exception("Error listing files in folder '{}' !!! "
                     .format(FILES_DIR))
    u_fil.safe_exit([*WORKBOOK, MESSAGE], [EXCEL, OUTLOOK],
                    [file_handler, stream_handler], APP_ROOT)
# Filter list of files by image
LIST_IMG = [IMG for IMG in LIST_DIR
            if IMG.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))]
if len(LIST_IMG) == 0:  # no image in folder
    logger.warning("No image in files folder '{}' !".format(FILES_DIR))
# Set properties to Image files
for IMAGE in LIST_IMG:
    try:  # set microsoft properties to use CID to insert image in HTML
        attachment = MESSAGE.Attachments.Add(FILES_DIR + '/' + IMAGE)
        # MAPI PR_ATTACH_CONTENT_ID: names the attachment so the HTML body
        # can reference it by Content-ID (the file name minus extension).
        attachment.PropertyAccessor.SetProperty(
            "http://schemas.microsoft.com/mapi/proptag/0x3712001F",
            IMAGE.split(sep='.')[0])
        logger.info("Properties set to image '{}'.".format(IMAGE))
        # To add <img src="cid:IMAGE"> inside HTML file
    except Exception:
        logger.exception("Error setting properties to image '{}' !!!"
                         .format(IMAGE))
        pass
# Format HTML Body
# Re-read the template line by line so one bad {marker} (HTML is full of
# literal braces) only skips that line instead of failing the whole body.
HTML_LIST = u_fil.get_list_from_txt_file_by_line(HTML_FILE_PATH)  # List lines
for INDEX, LINE in enumerate(HTML_LIST):
    try:  # format line by line to not be stoped by errors
        HTML_LIST[INDEX] = LINE.format_map(u_gen.SafeDict(VAL_DICT))  # format
    except (KeyError, ValueError):  # error caused by false dict key '{' read
        HTML_LIST[INDEX] = LINE  # mantaining original line
        logger.debug("False Key got in line '{}' in text '{}' ###"
                     .format(INDEX, LINE))
        pass
    except Exception:
        logger.exception("Error formating line '{}' !!!".format(LINE))
        u_fil.safe_exit([*WORKBOOK, MESSAGE], [EXCEL, OUTLOOK],
                        [file_handler, stream_handler], APP_ROOT)
HTML_BODY = '\n'.join(HTML_LIST)  # join separated lines in one string
# Set the message properties
MESSAGE.To = ';'.join(SEND_TO_LIST)  # str
MESSAGE.Subject = SUBJECT  # str
MESSAGE.HTMLBody = HTML_BODY  # str
# Ask for attachment files to email
DIALOG_TITLE = "Select one file to attach to the email or Cancel"
DIALOG_2ND_TITLE = "Select another file to attach to the email or Cancel"
ATT_FILE_LIST = u_fil.ask_for_files(APP_ROOT, DIALOG_TITLE, DIALOG_2ND_TITLE,
                                    at_least_one=False, more_than_one=True)
# BUG FIX: ask_for_files returns None when the user cancels (see the
# XLS_FILE_LIST check earlier in this script); iterating None would raise
# TypeError. Attachments are optional here, so treat None as "no files".
if ATT_FILE_LIST is None:
    ATT_FILE_LIST = []
# Attach files to email
for ATT_FILE_PATH in ATT_FILE_LIST:
    try:
        MESSAGE.Attachments.Add(ATT_FILE_PATH)  # Attach
        logger.info("Attached file '{}' to the email".format(ATT_FILE_PATH))
    except Exception:
        logger.exception("Error attaching file '{}' to the email"
                         .format(ATT_FILE_PATH))
# Display the message to user review
MESSAGE.Display()
# Send the message
#MESSAGE.Send()
################################ ↑ OUTLOOK ↑ ################################
u_fil.safe_exit(to_Close=WORKBOOK, to_Quit=EXCEL,
handler=[file_handler, stream_handler], app_root=APP_ROOT)
|
from flask import Blueprint, render_template, abort, current_app, request, redirect, url_for, jsonify, session
# NOTE(review): flask.ext.* was removed in modern Flask; this import only
# works on old versions -- the modern form is `from flask_misaka import Misaka`.
from flask.ext.misaka import Misaka
from models import *
import hashlib, json
import Routes, Config

# Blueprint holding every public, admin and API route of the blog.
blog = Blueprint("blog", __name__, template_folder=Config.template_folder, static_folder=Config.static_folder)
def init(app):
    """Attach the Misaka Markdown renderer to *app* and set its session secret."""
    options = dict(autolink=True, tables=True, fenced_code=True,
                   no_intra_emphasis=True, strikethrough=True,
                   escape=True, wrap=True, toc=True)
    renderer = Misaka(**options)
    renderer.init_app(app)
    app.secret_key = Config.secret_key
def generate_json_error(error=None, **kwargs):
    """Build a JSON error response ({"status": "ERROR", ...}).

    Accepts the message either positionally (generate_json_error("msg")) or
    as a keyword (error="msg"): both call styles appear in this module, and
    the original **kwargs-only signature raised TypeError on the positional
    calls (e.g. in admin_add_post and validate_post_form).
    """
    if error is not None:
        kwargs.setdefault('error', error)
    return jsonify(status="ERROR", **kwargs)


def generate_json_success(**kwargs):
    """Build a JSON success response ({"status": "OK", ...plus payload})."""
    return jsonify(status="OK", **kwargs)
def get_posts_at_page(page):
    """Return (posts for *page*, total visible post count), newest first."""
    visible = Post.select().where(Post.visible == True)
    page_slice = visible.order_by(Post.time.desc()).paginate(page, Config.post_per_page)
    return page_slice, visible.count()
######
# # # #### ####
# # # # # # #
###### # # # #
# # # # # # ###
# # # # # # #
###### ###### #### ####
@blog.route(Routes.index)
def index():
    """Front page: just the first page of posts."""
    return show_page(1)
@blog.route(Routes.show_page)
def show_page(page):
    """Render one page of visible posts; non-positive page numbers 404."""
    if page <= 0:
        return "404", 404
    posts, total = get_posts_at_page(page)
    return render_posts(posts, page, total)
@blog.route(Routes.view_post_only_id)
def view_post_only_id(post_id):
    """Permalink without a slug: delegate to view_post with an empty URL part."""
    return view_post(post_id,"")
@blog.route(Routes.view_post)
def view_post(post_id,post_url):
    """Render a single visible post; only the id matters, the slug is cosmetic."""
    try:
        found = Post.get(Post.id == post_id, Post.visible)
    except Post.DoesNotExist:
        return "404", 404
    tag_names = string_to_tag_list(found.tags)
    return render_template("post.html", post=found, tags=tag_names)
@blog.route(Routes.view_tag)
def view_tag(tag):
    """List visible posts carrying *tag* (case-insensitive), newest first."""
    normalized = tag.lower()
    try:
        tag_row = Tag.get(Tag.tag == normalized)
    except Tag.DoesNotExist:
        return render_posts(None, tag=normalized)
    links = (Post_To_Tag.select()
             .where(Post_To_Tag.tag == tag_row)
             .join(Post)
             .where(Post.visible == True)
             .order_by(Post.time.desc()))
    return render_posts([link.post for link in links], tag=normalized)
def render_posts(posts, page=None, total_posts=None, tag=None):
    """Render posts.html for a collection of posts, with paging links.

    *posts* may be a list or a peewee query.  An empty/None collection
    renders an error message instead (tag-specific when *tag* is given).
    show_prev/show_next are only computed when both *page* and
    *total_posts* are supplied.
    """
    # Decide emptiness: lists use len(), peewee queries use count().
    if not posts:
        empty = True
    elif isinstance(posts, list):
        empty = len(posts) == 0
    else:
        empty = posts.count() == 0
    if empty:
        if tag:
            return render_template("posts.html", tag=tag, error="No post tagged with " + tag)
        return render_template("posts.html", error="No Post Found")
    show_prev = False
    show_next = False
    if page and total_posts:
        show_prev = page != 1
        # there is a next page only if posts exist past this page's slice
        show_next = total_posts > page * Config.post_per_page
    return render_template("posts.html", tag=tag, posts=posts, page=page,
                           show_prev=show_prev, show_next=show_next)
#
# # ##### # # # # #
# # # # ## ## # ## #
# # # # # ## # # # # #
####### # # # # # # # #
# # # # # # # # ##
# # ##### # # # # #
@blog.route(Routes.admin_login, methods=['GET', 'POST'])
def admin_login():
    """Admin login: GET shows the form, POST checks the credentials.

    The password is compared as a hex sha512 digest against Config.password.
    """
    if is_logged_in():
        return redirect(url_for('blog.admin_panel'))
    if request.method == 'POST':
        if request.form.get('username', None) and request.form.get('password', None):
            username = request.form.get('username')
            # BUG FIX: hashlib needs bytes -- str.decode() raises
            # AttributeError on Python 3 (and on Python 2 it produced
            # unicode that hashlib had to re-encode as ASCII).
            password = hashlib.sha512(request.form.get('password').encode('utf-8')).hexdigest()
            if username == Config.username and password == Config.password:
                session_login()
                return redirect(url_for('blog.admin_panel'))
            else:
                return render_template('login.html', error='Wrong username or password')
        else:
            return render_template('login.html', error='Username or password missing')
    else:
        return render_template('login.html')
@blog.route(Routes.admin_logout)
def admin_logout():
    """Drop the whole session (including the login flag) and go back to login."""
    session.clear()
    return redirect(url_for('blog.admin_login'))
def session_login():
    """Mark the current session as an authenticated admin session."""
    session['logged_in']=True
def is_logged_in():
    """Return True when the current session carries the admin login flag.

    (Idiom: the membership test already yields a bool; the original
    `True if ... else False` was redundant.)
    """
    return 'logged_in' in session
@blog.route(Routes.admin_panel)
def admin_panel():
    """Admin dashboard: every post (hidden ones included), newest first."""
    if not is_logged_in():
        return redirect(url_for("blog.admin_login"))
    all_posts = Post.select().order_by(Post.time.desc())
    return render_template("admin.html", posts=all_posts)
@blog.route(Routes.admin_add_post, methods=["GET", "POST"])
def admin_add_post():
    """GET: render the add-post form; POST: validate, save and tag a new post."""
    if request.method == "GET":
        if is_logged_in():
            return render_template("addPost.html")
        else:
            return redirect(url_for("blog.admin_login"))
    else:
        if not is_logged_in():
            # BUG FIX: generate_json_error's signature takes keyword
            # arguments only; the positional call raised TypeError.
            return generate_json_error(error="Not logged in")
        try:
            post = validate_post_form()
            if not post.isError:
                post.save()
                tag(post)
                # use the shared success helper for consistency with the API
                return generate_json_success(url=url_for("blog.view_post", post_id=post.id, post_url=post.url))
            else:
                # post.error already holds a ready jsonify() response
                return post.error
        except Exception as e:
            return generate_json_error(error=str(e))
@blog.route(Routes.admin_edit_post, methods=["GET", "POST"])
def admin_edit_post(post_id):
    """GET: render the edit form for *post_id*; POST: validate and save edits."""
    if request.method == "GET":
        if is_logged_in():
            try:
                post = Post.get(Post.id == post_id)
                return render_template("editPost.html", post=post)
            except Post.DoesNotExist:
                # BUG FIX: there is no 'blog.admin' endpoint; the dashboard
                # is registered as 'blog.admin_panel'.
                return redirect(url_for('blog.admin_panel'))
        else:
            return redirect(url_for("blog.admin_login"))
    else:
        if not is_logged_in():
            return generate_json_error(error="Not logged in")
        try:
            post = validate_post_form(post_id)
            if post:
                if not post.isError:
                    post.save()
                    tag(post)
                    return generate_json_success(url=url_for("blog.view_post", post_id=post.id, post_url=post.url))
                else:
                    # BUG FIX: post.error is already a full JSON response;
                    # wrapping it in generate_json_error double-wrapped it
                    # (and the positional argument raised TypeError anyway).
                    return post.error
            else:
                # BUG FIX: post_id may be an int; concatenating it to a str
                # raised TypeError.
                return generate_json_error(error="Can't find post with ID " + str(post_id))
        except Exception as e:
            return generate_json_error(error=str(e))
@blog.route(Routes.admin_delete_post, methods=["GET", "DELETE"])
def admin_delete_post(post_id):
    """DELETE: remove *post_id*; any other verb redirects to the admin panel."""
    if request.method == "DELETE":
        if not is_logged_in():
            # BUG FIX: keyword form -- the positional call raised TypeError
            # against generate_json_error's **kwargs-only signature.
            return generate_json_error(error="Not logged in")
        try:
            post = Post.get(Post.id == post_id)
            post.delete_instance()
            return generate_json_success(postRemoved=post_id)
        except Post.DoesNotExist:
            return generate_json_error(error="Can't find post with Id " + str(post_id))
    else:
        # BUG FIX: 'blog.admin' does not exist; the endpoint is 'blog.admin_panel'.
        return redirect(url_for('blog.admin_panel'))
def string_to_tag_list(string):
    """Split a comma-separated tag string into a list of tag tokens.

    Inner whitespace becomes underscores; blank entries are dropped.
    Returns [] when the string holds no content besides commas/whitespace.
    """
    if not string.replace(",", "").strip():
        return []
    return [part.strip().replace(" ", "_")
            for part in string.split(",") if part.strip()]
def tag(post):
    """Attach the tags from the submitted form to *post*.

    Falls back to a single "untagged" tag when the form field is blank
    or contains nothing besides commas/whitespace.
    """
    if not validate_form_field(request.form, "tags"):
        post.addTag("untagged")
        return
    raw = request.form["tags"]
    names = [part.strip() for part in raw.split(",") if part.strip()]
    if names:
        for name in names:
            post.addTag(name)
    else:
        post.addTag("untagged")
def validate_post_form(post_id = None):
    """Build (or update) a Post from the current request.form.

    With *post_id* the existing post is loaded (returns None when it does
    not exist); otherwise a fresh Post is created.  The returned post
    carries an ``isError`` flag; on validation failure ``error`` holds a
    ready JSON error response and ``isError`` stays True.

    NOTE(review): generate_json_error is called positionally below --
    confirm its signature accepts a positional message.
    NOTE(review): request.form["customUrl"] / ["tags"] are indexed directly;
    a missing field raises KeyError, relied on to be caught by the caller.
    """
    if post_id:
        try:
            post = Post.get(Post.id == post_id)
        except Post.DoesNotExist:
            return None
    else:
        post = Post()
    # Pessimistic default: flipped to False only when every check passes.
    post.isError = True
    if validate_form_field(request.form,"title"):
        post.title = clean_string(request.form["title"])
    else:
        post.error = generate_json_error("Title is required.")
        return post
    if validate_form_field(request.form,"shortDescription"):
        post.short_description = clean_string(request.form["shortDescription"])
    else:
        post.error = generate_json_error("Description is required.")
        return post
    if validate_form_field(request.form,"body"):
        post.body = clean_string(request.form["body"])
    else:
        post.error = generate_json_error("Body is required.")
        return post
    # Optional custom URL slug; defaults to a slug built from the title.
    if len(clean_string(request.form["customUrl"]))>0:
        post.url = post.create_url(clean_string(request.form["customUrl"]))
    else:
        post.url = post.create_url(post.title)
    # "hide" checkbox inverts visibility.
    if request.form.get('hide'):
        post.visible = False
    else:
        post.visible = True
    post.tags = request.form["tags"].strip().lower()
    post.isError = False
    return post
def validate_form_field(form, field):
    """Return True when form[field] contains non-whitespace text.

    Raises KeyError when the field is absent (same as the original).
    (Idiom: the comparison already yields a bool; the original
    `True if ... else False` was redundant.)
    """
    return len(form[field].strip()) > 0
def clean_string(string):
    """Normalize a form value by trimming surrounding whitespace."""
    return string.strip()
# ###### ###
# # # # #
# # # # #
# # ###### #
####### # #
# # # #
# # # ###
#https://github.com/coleifer/flask-peewee/blob/master/flask_peewee/utils.py#L70-L89
def model_to_dictionary(model, fields=None, exclude=None):
    """Serialize a peewee model instance into a plain dict.

    Adapted from flask-peewee's get_dictionary_from_model.  *fields* /
    *exclude* map model classes to field-name lists; foreign keys recurse
    only when the related model class appears in *fields*.
    """
    model_class = type(model)
    data = {}
    fields = fields or {}
    exclude = exclude or {}
    curr_exclude = exclude.get(model_class, [])
    curr_fields = fields.get(model_class, model._meta.get_field_names())
    for field_name in curr_fields:
        if field_name in curr_exclude:
            continue
        field_obj = model_class._meta.fields[field_name]
        field_data = model._data.get(field_name)
        if isinstance(field_obj, ForeignKeyField) and field_data and field_obj.rel_model in fields:
            rel_obj = getattr(model, field_name)
            # BUG FIX: the recursive call kept the helper's original
            # flask-peewee name (get_dictionary_from_model), which does not
            # exist in this module and raised NameError for related models.
            data[field_name] = model_to_dictionary(rel_obj, fields, exclude)
        else:
            data[field_name] = field_data
    return data
@blog.route(Routes.api_get_page)
def api_get_page(page):
    """JSON API: one page of visible posts plus the total post count."""
    if page <= 0:
        return generate_json_error(error="Invalid page number")
    page_posts, total = get_posts_at_page(page)
    serialized = [model_to_dictionary(p) for p in page_posts]
    return generate_json_success(total_posts=total, posts=serialized)
@blog.route(Routes.api_get_post)
def api_get_post(post_id):
    """JSON API: a single visible post, or a JSON error with HTTP 404."""
    try:
        found = Post.get(Post.id == post_id, Post.visible)
    except Post.DoesNotExist:
        return generate_json_error(error="Can't find post with that id"), 404
    return generate_json_success(post=model_to_dictionary(found))
@blog.route(Routes.api_get_post_with_tag)
def api_get_post_with_tag(tag):
    """JSON API: every visible post carrying *tag*, newest first."""
    normalized = tag.lower()
    try:
        tag_row = Tag.get(Tag.tag == normalized)
    except Tag.DoesNotExist:
        return generate_json_success(tag=normalized, total_posts=0)
    links = (Post_To_Tag.select()
             .where(Post_To_Tag.tag == tag_row)
             .join(Post)
             .where(Post.visible == True)
             .order_by(Post.time.desc()))
    serialized = [model_to_dictionary(link.post) for link in links]
    return generate_json_success(tag=normalized, posts=serialized)
# Demonstrations of Python's chained comparisons: a > b > c means
# (a > b) and (b > c), evaluated left to right with short-circuiting.
print(1>3>4)
# False: 1 > 3 already fails, so the whole chain is False
print(4>2>3)
# False: 4 > 2 holds, but 2 > 3 does not, so the chain is False
print(4>3>2)
# True: both 4 > 3 and 3 > 2 hold
from pathlib import Path
import random
# Generate a synthetic expense ledger "account.csv" with N rows of
# "YYYY-MM-DD,category,amount", seeded so the output is reproducible.
# NOTE: the sequence of random.* calls below determines the output;
# reordering them changes the generated file.
random.seed(0)
N = 400 * 3  # total number of rows
# Fraction of rows drawn from each spending category (ratios sum to 1.0).
category_ratio = {
    "food": 0.3,
    "utility": 0.1,
    "transport": 0.1,
    "hobby": 0.2,
    "socializing": 0.2,
    "daily_miscellaneous": 0.1,
}
# Fraction of rows at each order of magnitude of expense (sums to 1.0).
expense_ratio = {
    100: 0.6,
    1000: 0.3,
    10000: 0.1,
}
# Expand the category ratios into N concrete labels, then shuffle.
categories = []
for k, v in category_ratio.items():
    categories += [k] * int(N * v)
random.shuffle(categories)
# Amounts are 1-9 times the bucket's magnitude, then shuffled.
expenses = []
for k, v in expense_ratio.items():
    expenses += [random.randint(1, 9) * k for _ in range(int(N * v))]
random.shuffle(expenses)
# Random dates in 2020-2022; day capped at 28 so every month is valid.
dates = [
    f"202{random.randint(0, 2)}-{random.randint(1, 12):02d}-{random.randint(1, 28):02d}" for _ in range(N)]
# Sort chronologically (string sort works for zero-padded ISO dates).
lines = sorted([f"{dates[i]},{categories[i]},{expenses[i]}\n" for i in range(N)])
with open(Path("account.csv"), "w") as f:
    f.writelines(lines)
|
#!/usr/bin/env python3
"""
Author : Yukun Feng
Date : 2018/07/01
Email : yukunfg@gmail.com
Description : Misc utils
"""
import logging
import torch
import numpy as np
def get_logger(log_file=None):
    """
    Configure and return the root logger (format borrowed from OpenNMT).

    A console handler is always attached; a file handler is added as well
    when *log_file* is a non-empty path.  NOTE: handlers accumulate if
    called more than once.
    """
    formatter = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if log_file:  # truthiness already excludes None and ''
        fh = logging.FileHandler(log_file)
        fh.setFormatter(formatter)
        root.addHandler(fh)
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    root.addHandler(sh)
    return root
def word_ids_to_sentence(id_tensor, vocab, word_len=20):
    """Convert a 2-D tensor (or nested sequence) of word ids to aligned text.

    Each id is looked up in vocab.itos, truncated/padded to *word_len*
    characters plus one separating space; rows end with a newline.
    """
    rows = []
    for ids in id_tensor:
        cells = [vocab.itos[i][:word_len].ljust(word_len) + " " for i in ids]
        rows.append("".join(cells) + "\n")
    return "".join(rows)
def probability_lookup(id_tensor, field, most_n_word=30):
    """
    Return the top *most_n_word* logits, their softmax probabilities, the
    corresponding vocabulary words, and their indices.

    *id_tensor* holds raw logits (pre-softmax).  Probabilities are the
    softmax over the full sorted logit vector, truncated to the top words.
    (The original computed softmax(id_tensor) first and immediately
    overwrote it -- that dead statement is removed.)
    """
    softmax = torch.nn.Softmax(dim=0)
    # sort logits descending so the most probable words come first
    numbers, indexs = id_tensor.sort(descending=True)
    probabilities = softmax(numbers)[0:most_n_word]
    numbers = numbers[0:most_n_word]
    indexs = indexs[0:most_n_word]
    word_list = [field.vocab.itos[index] for index in indexs]
    return numbers, probabilities, word_list, indexs
def save_word_embedding(vocab, emb, file_name):
    """Write embeddings in word2vec text format.

    First line is "count dim"; each following line is "word v1 v2 ...".
    Open mode 'x' means the call fails if *file_name* already exists.
    """
    with open(file_name, 'x') as out:
        out.write(f"{emb.size(0)} {emb.size(1)}\n")
        for token, vector in zip(vocab, emb):
            values = " ".join(f"{v.item():5.4f}" for v in vector)
            out.write(token + " " + values + "\n")
def load_word_embedding(file_path):
    """Read a word2vec-style text file into a float tensor (words dropped).

    Blank lines are skipped; a first line of exactly two fields is treated
    as the "count dim" header and skipped as well.
    """
    vectors = []
    with open(file_path, 'r') as fh:
        for line_no, raw in enumerate(fh):
            text = raw.strip()
            if not text:
                continue
            parts = text.split()
            if line_no == 0 and len(parts) == 2:
                continue  # mikolov-style header line
            vectors.append([float(v) for v in parts[1:]])
    return torch.tensor(vectors)
def save_word_embedding_test():
    """Smoke test: write a random 3x5 embedding to vec.txt.

    NOTE(review): save_word_embedding opens with mode 'x', so a second run
    fails while vec.txt exists.
    """
    vocab = ["a", "b", "c"]
    emb = torch.rand(len(vocab), 5)
    save_word_embedding(vocab, emb, "vec.txt")
def load_word_embedding_test():
    """Smoke test: load and print vectors from a local tmp.vec file.

    NOTE(review): expects tmp.vec to exist in the working directory.
    """
    emb = load_word_embedding("tmp.vec")
    print(emb)
if __name__ == "__main__":
    # Unit test (manual: needs tmp.vec present; see load_word_embedding_test)
    # save_word_embedding_test()
    load_word_embedding_test()
|
from collections import defaultdict
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: the root
    @return: all the values with the highest frequency in any order
    """
    def findFrequentTreeSum(self, root):
        # Count every subtree sum via a post-order walk, then keep the
        # sums that occur with the maximum frequency.
        counts = defaultdict(int)

        def subtree_sum(node):
            if node is None:
                return 0
            total = node.val + subtree_sum(node.left) + subtree_sum(node.right)
            counts[total] += 1
            return total

        subtree_sum(root)
        if not counts:  # empty tree -> no sums at all
            return []
        top = max(counts.values())
        return [s for s, c in counts.items() if c == top]
import sys
#------------------------------------------------------------------------#
def load_log_file(filename, start_step):
    """Read thermo data from a simulation log into ~per-1000-step averages.

    Skips start_step-1 header lines, then reads 250001 thermo lines.  Each
    record appended to *data* is (T [K], P [Pa], V [m^3], E [J], PV [J]).
    NOTE(review): the column indices (2, 4, 8, 10) assume a fixed
    thermo_style; confirm against the log header.
    NOTE(review): each window divides by N == 1000 although only 999
    samples were accumulated (the 1000th sample seeds the next window) --
    an off-by-one of ~0.1% in every average; left unchanged here because
    downstream results may depend on it.
    """
    f = open(filename)
    # read lines in log file until the thermo data is reached
    line = 0
    for i in range(start_step-1):
        f.readline()
    # load in data (1000 steps is about 1 K)
    data = []
    N = 0
    temp = 0
    press = 0
    vol = 0
    energy = 0
    pressvol = 0
    for i in range(250001):
        fields = f.readline().strip().split()
        # average T, P, V, E every 1000 timesteps (~1 K)
        T = float(fields[2])
        P = float(fields[4])
        V = float(fields[8])
        Etot = float(fields[10])
        PV = P*V
        N += 1
        if N < 1000:
            temp += T
            press += P
            vol += V
            energy += Etot
            pressvol += PV
        else:
            # record the ensemble average
            # convert P from bar to Pa, Vol from A^3 to m^3, E from eV to J
            data.append((temp/N, press/N*100000, vol/N*(10**-30), energy/N*(1.60218*10**-19),pressvol/N*100000*(10**-30)))
            # reset the accumulators with the current sample
            temp = T
            press = P
            vol = V
            energy = Etot
            pressvol = PV
            N = 1
    f.close()
    return data
def compute_enthalpy(data):
    """Compute (delta_U, delta_PV, delta_H) in kJ/mol of atoms between the
    first (~50 K) and last (~300 K) ensemble-average records in *data*.

    Each record is (T [K], P [Pa], V [m^3], E [J], PV [J]); the system is
    fixed at 3000 Si + 5340 O + 3000 C atoms.
    """
    # system composition
    n_si, n_o, n_c = 3000, 5340, 3000
    n_atoms = n_si + n_o + n_c
    avogadro = 6.022e23  # Avogadro's number
    first = data[0]   # values at ~50 K
    last = data[-1]   # values at ~300 K
    # per-mole changes between the two end states, converted J -> kJ
    delta_u = (last[3] - first[3]) * avogadro / n_atoms / 1000   # kJ/mol
    delta_pv = (last[4] - first[4]) * avogadro / n_atoms / 1000  # kJ/mol
    delta_h = delta_u + delta_pv                                 # kJ/mol
    return (delta_u, delta_pv, delta_h)
def write_enthalpy(delta_H, load):
    """Append '{load, delta_H}, ' (Mathematica-style pair) to enthalpy.txt.

    BUG FIX: the file handle was never closed; a context manager guarantees
    the write is flushed and the handle released.
    """
    with open('enthalpy.txt', 'a') as f:
        f.write('{%s, %.8f}, ' %(load,delta_H))
def write_internal_energy(delta_U, load):
    """Append '{load, delta_U}, ' (Mathematica-style pair) to internal_energy.txt.

    BUG FIX: the file handle was never closed; a context manager guarantees
    the write is flushed and the handle released.
    """
    with open('internal_energy.txt', 'a') as f:
        f.write('{%s, %.8f}, ' %(load,delta_U))
def write_work_done(delta_PV, load):
    """Append '{load, delta_PV}, ' (Mathematica-style pair) to work_done.txt.

    BUG FIX: the file handle was never closed; a context manager guarantees
    the write is flushed and the handle released.
    """
    with open('work_done.txt', 'a') as f:
        f.write('{%s, %.8f}, ' %(load,delta_PV))
#------------------------------------------------------------------------#
# Command-line driver (Python 2 syntax: note the print statements).
# Usage: python <script> <log filename> <first thermo line> <applied load>
if len(sys.argv) < 4:
    print 'Usage:'
    print ' python %s <filename> <start step> <applied load>' %sys.argv[0]
    exit()
filename = sys.argv[1]
start_step = int(sys.argv[2])
load = sys.argv[3]
# compute the enthalpy from the averaged thermo records
data = load_log_file(filename, start_step)
delta_U, delta_PV, delta_H = compute_enthalpy(data)
# write out data (each appends one "{load, value}, " pair to its file)
write_enthalpy(delta_H, load)
write_internal_energy(delta_U, load)
write_work_done(delta_PV, load)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 24 18:04:25 2017
@author: Mehdi
"""
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
# Scrape the description ("caption") of each YouTube video listed in
# video_dates.csv and collect it, plus the first link it contains, into
# the all_video_captions DataFrame.
df1 = pd.read_csv('video_dates.csv')
videoids = df1['videId']
all_video_captions = pd.DataFrame(columns=['videoid', 'caption', 'link_in_caption'])
for videoid in videoids:
    # BUG FIX: the '=' separator was missing after 'v', so every request
    # hit an invalid watch URL.
    url_page = "https://www.youtube.com/watch?v=" + videoid
    page = requests.get(url_page)
    soup = BeautifulSoup(page.text, "lxml")
    video_caption = list(soup.find_all('div', attrs={'id': 'watch-description-text'}))
    video_caption_text = video_caption[0].text
    link_in_caption_text = str(video_caption[0].select('a')[0])
    # pull the href="..." attribute out of the first anchor tag
    no00 = re.findall('href=".*\/"', link_in_caption_text)[0]
    link_in_caption = re.sub('"|href=', '', no00)
    # BUG FIX: the row must be one record (list of lists); three bare
    # scalars against three column names raised a shape error.
    df_video = pd.DataFrame([[videoid, video_caption_text, link_in_caption]],
                            columns=['videoid', 'caption', 'link_in_caption'])
    # BUG FIX: DataFrame.append returns a new frame; the original discarded
    # it, so all_video_captions stayed empty.
    all_video_captions = all_video_captions.append(df_video, ignore_index=True)
|
import argparse
import datetime
import logging
import os
from multiprocessing import Pool
import pandas as pd
from git import GitCommandError, Repo
from termcolor import colored
def git_clone(number, name, url, output_dir):
    """Clone *url* into output_dir/<number>_<name>, reporting OK/FAIL.

    A failed clone prints FAIL in red and records the directory name via
    logging.error; success prints OK in green.
    """
    target_name = number + '_' + name
    target_path = os.path.join(output_dir, target_name)
    try:
        Repo.clone_from(url, target_path)
    except GitCommandError:
        print(target_name, colored('FAIL', 'red'))
        logging.error(target_name)
    else:
        print(target_name, colored('OK', 'green'))
def main():
    """Clone every student repository listed in an Excel sheet, in parallel.

    Expects columns StudentNumber, Name, GitRepo.  Failed clones are written
    to a timestamped log under <output>/logs.
    NOTE(review): apply_async results are never collected, so worker
    exceptions other than GitCommandError are silently dropped.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', help='input file')
    parser.add_argument('output', help='output dir')
    parser.add_argument('-n', '--num_processes', type=int,
                        help='number of processes')
    args = parser.parse_args()
    # Create dir for the failure log
    logdir = os.path.join(args.output, 'logs')
    os.makedirs(logdir, exist_ok=True)
    # Logger: one timestamped file per run
    logfile = datetime.datetime.now().isoformat() + '.log'
    fh = logging.FileHandler(os.path.join(logdir, logfile))
    logger = logging.getLogger()
    logger.addHandler(fh)
    # Process Pool (size defaults to CPU count when -n is omitted)
    pool = Pool(args.num_processes)
    df = pd.read_excel(args.input)
    for index, row in df.iterrows():
        number = row['StudentNumber']
        name = row['Name']
        url = row['GitRepo']
        number = str(number)  # StudentNumber may be read from Excel as an int
        pool.apply_async(git_clone, args=(number, name, url, args.output))
        # git_clone(number, name, url, args.output)
    pool.close()
    pool.join()


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.