index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
28,294
|
cmedanielle/ProgIIBST
|
refs/heads/master
|
/bst.py
|
class BST:
    """Binary search tree storing comparable keys (duplicate inserts are ignored)."""

    def __init__(self):
        # Start with an empty tree.
        self.root = None

    def isEmpty(self):
        """Return True when the tree has no nodes."""
        return self.root is None

    def insert(self, key):
        """Insert *key* into the tree; duplicate keys are silently ignored."""
        if self.isEmpty():
            # An empty tree: the new node becomes the root.
            self.root = TreeNode(key)
        else:
            # Otherwise search recursively for the correct insertion point.
            self._insert(key, self.root)

    def _insert(self, key, root):
        """Recursive helper: insert *key* under *root*, return the (new) subtree."""
        if root is None:
            return TreeNode(key)
        elif key < root.data:
            root.left = self._insert(key, root.left)
        elif key > root.data:
            root.right = self._insert(key, root.right)
        return root

    def maxValue(self, root):
        """Return the largest key in the subtree rooted at *root*, or None if empty."""
        if root is None:
            return None
        if root.right is not None:
            return self.maxValue(root.right)
        return root.data

    def minValue(self):
        """Return the smallest key in the whole tree, or None if empty."""
        return self._minValue(self.root)

    def _minValue(self, root):
        """Recursive helper: smallest key in the subtree rooted at *root*."""
        if root is None:
            return None
        if root.left is not None:
            return self._minValue(root.left)
        return root.data

    def remove(self, key, root):
        """Remove *key* from the subtree rooted at *root*; return the new subtree."""
        if root is None:
            # Key does not exist in the tree.
            return None
        elif key < root.data:
            root.left = self.remove(key, root.left)
            return root
        elif key > root.data:
            root.right = self.remove(key, root.right)
            return root
        else:
            # Case 1: leaf node — just drop it.
            if (root.right is None) and (root.left is None):
                return None
            # Case 2: a single child — splice the child in.
            elif root.left is None:
                return root.right
            elif root.right is None:
                return root.left
            # Case 3: two children — replace with the in-order successor
            # (minimum of the right subtree), then delete that successor.
            else:
                successor = self._minValue(root.right)
                root.data = successor
                root.right = self.remove(successor, root.right)
                return root

    def height(self, root):
        """Return the number of nodes on the longest root-to-leaf path (0 if empty)."""
        if root is None:
            return 0
        return 1 + max(self.height(root.left), self.height(root.right))

    def setRoot(self, root):
        """Attach *root* as the tree's root, but only when the tree is empty."""
        if self.isEmpty():
            self.root = root

    def preOrderTransversal(self):
        """Print all keys in pre-order (node, left, right)."""
        if not self.isEmpty():
            self._preOrderTransversal(self.root)

    def _preOrderTransversal(self, root):
        if root is not None:
            print(root.data, end=' ')
            # BUG FIX: the original recursed into the public no-argument
            # wrapper (self.preOrderTransversal(root.left)), which raised
            # TypeError on any non-trivial tree.
            self._preOrderTransversal(root.left)
            self._preOrderTransversal(root.right)

    def inOrderTransversal(self):
        """Print all keys in ascending (in-order) sequence."""
        if not self.isEmpty():
            self._inOrderTransversal(self.root)

    def _inOrderTransversal(self, root):
        if root is not None:
            self._inOrderTransversal(root.left)
            print(root.data, end=' ')
            self._inOrderTransversal(root.right)

    def postOrderTransversal(self):
        """Print all keys in post-order (left, right, node)."""
        if not self.isEmpty():
            self._postOrderTransversal(self.root)

    def _postOrderTransversal(self, root):
        if root is not None:
            self._postOrderTransversal(root.left)
            self._postOrderTransversal(root.right)
            print(root.data, end=' ')

    def sumTree(self, root):
        """Return the sum of all keys in the subtree rooted at *root* (0 if empty)."""
        if root is None:
            return 0
        return root.data + self.sumTree(root.left) + self.sumTree(root.right)

    def totalEvenKeys(self, root):
        """Count even keys stored in the subtree rooted at *root*."""
        if root is None:
            return 0
        here = 1 if root.data % 2 == 0 else 0
        return here + self.totalEvenKeys(root.left) + self.totalEvenKeys(root.right)

    def totalOddKeys(self, root):
        """Count odd keys stored in the subtree rooted at *root*."""
        if root is None:
            return 0
        here = 1 if root.data % 2 != 0 else 0
        return here + self.totalOddKeys(root.left) + self.totalOddKeys(root.right)

    def totalGreaterThan(self, key, root):
        """Count keys strictly greater than *key* in the subtree rooted at *root*."""
        if root is None:
            return 0
        here = 1 if root.data > key else 0
        return here + self.totalGreaterThan(key, root.left) + self.totalGreaterThan(key, root.right)

    def totalLessThan(self, key, root):
        """Count keys strictly less than *key* in the subtree rooted at *root*."""
        if root is None:
            return 0
        here = 1 if root.data < key else 0
        return here + self.totalLessThan(key, root.left) + self.totalLessThan(key, root.right)


class TreeNode:
    """Helper node for a binary tree: a key plus optional left/right children."""

    def __init__(self, data=None, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right
|
{"/bst_test.py": ["/bst.py"]}
|
28,296
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/mutate.py
|
import click
import os
import random
from Bio import SeqIO
import numpy as np
def random_base(reference, amino):
    """Return a random residue that differs from *reference*.

    Parameters:
        reference: the base/residue to exclude (compared case-insensitively).
        amino: if true, draw from the 20 amino-acid alphabet; otherwise
            from the 4 nucleotides A, C, T, G.

    Returns:
        A single-character string, never equal to ``reference.upper()``.
    """
    if amino:
        alphabet = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
                    'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
    else:
        alphabet = ['A', 'C', 'T', 'G']
    # Exclude the reference residue so the "mutation" always changes it;
    # unknown references (e.g. 'N') simply filter nothing, matching the
    # original's silent behavior.
    candidates = [base for base in alphabet if base != reference.upper()]
    return random.choice(candidates)
def num_to_base(reference, amino):
    """Map a 1-based integer code to its residue letter.

    Codes 1..20 map onto the amino-acid alphabet when *amino* is true,
    codes 1..4 onto the nucleotides A, C, T, G otherwise.  An unknown
    code raises KeyError.
    """
    if amino:
        table = dict(enumerate('ARNDCQEGHILKMFPSTWYV', start=1))
    else:
        table = dict(enumerate('ACTG', start=1))
    return table[reference]
def insert(sequence, positions, max_indel_size, amino):
    """Replace each listed position with a random insertion string.

    Each position in *positions* gets overwritten by a random string of
    1..max_indel_size residues (the original residue is consumed by the
    insertion).  Mutates *sequence* in place and returns it.

    BUG FIX: the original always drew codes with randint(1, 4), so for
    amino-acid mode only the first four residues (A, R, N, D) could ever
    be inserted; the full 20-letter alphabet is now used when *amino* is
    true.
    """
    alphabet = 'ARNDCQEGHILKMFPSTWYV' if amino else 'ACTG'
    for pos in positions:
        indel_len = random.randint(1, max_indel_size)
        sequence[pos] = "".join(random.choice(alphabet)
                                for _ in range(indel_len))
    return sequence
def delete(sequence, positions, max_indel_size, amino):
    """Delete a random-length stretch at each listed position.

    For each position, removes 1..max_indel_size consecutive elements
    starting there.  Mutates *sequence* in place and returns it.
    *amino* is accepted for signature symmetry with insert()/mismatch()
    but does not affect deletion.

    BUG FIX: the original shifted each position by its enumeration index
    (``pos - ix``), which is only correct when every earlier deletion
    removed exactly one element; deletions of 2+ elements corrupted later
    positions.  Deleting in descending position order needs no index
    adjustment at all (the dead try/except around ``del`` on a slice,
    which never raises, is gone too).
    """
    for pos in sorted(positions, reverse=True):
        num_indel = random.randint(1, max_indel_size)
        # Slice deletion is safely clamped at the end of the list.
        del sequence[pos:pos + num_indel]
    return sequence
def mismatch(sequence, positions, amino):
    """Substitute each listed position with a different random residue.

    Mutates *sequence* in place and returns it; the replacement at each
    position is drawn by random_base() and never equals the residue it
    replaces.
    """
    for position in positions:
        sequence[position] = random_base(sequence[position], amino)
    return sequence
@click.command()
@click.option('--input-file', default='', help='input fasta file to mutate fasta file')
@click.option('--mutations', default=10, help='percentage of mutations in the sequence (default: 5%)')
@click.option('--insertions', default=30, help='percentage of insertions out of the total mutations (default: 30%)')
@click.option('--deletions', default=30, help='percentage of deletions out of the total mutations (default: 30%)')
@click.option('--mismatches', default=40, help='percentage of mismatches out of the total mutations (default: 40%)')
@click.option('--max-indel-size', default=5, help='maximum indel size (default: 5)')
@click.option('--prefix', default='0', help='prefix added to output file')
@click.option('--prot', default=False, is_flag=True, help='aminoacid sequences')
def mutate(input_file, insertions, deletions, mismatches, mutations, max_indel_size, prefix, prot):
    '''
    Mutate a nucleotide sequence:
    This script takes a fasta file as input an mutates the sequence according to the insertion, deletion, and mismatches rates.
    Output is a fasta file with the modified entries.
    '''
    # NOTE(review): --mutations defaults to 10 but its help text says 5% —
    # confirm which value is intended.
    if not input_file:
        # No input given: show the subcommand's help text and stop.
        os.system('genetools mutate --help')
        exit()
    # Output file name encodes all mutation parameters for traceability.
    fo = open(input_file+'.M'+str(mutations)+'.m'+str(mismatches)+'.i' +
              str(insertions)+'.d'+str(deletions)+'.prefix-'+prefix+'.mut.fasta', 'w')
    # mutations are 2x
    mutations = int(mutations/2)
    # load fasta file
    fasta_file = SeqIO.parse(input_file, 'fasta')
    for record in fasta_file:
        # get the positions to mutate
        _id = record.id
        _sequence = list(record.seq)
        # Shuffle all positions, then take a prefix as the mutation set.
        positions = np.array(range(len(_sequence)))
        random.shuffle(positions)
        # print(positions)
        # Number of positions to mutate = (halved) percentage of the length.
        _mutation_rate = int(len(_sequence) * mutations / 100)
        positions_to_mutate = positions[:_mutation_rate]
        random.shuffle(positions_to_mutate)
        # print(positions_to_mutate)
        # Partition the mutated positions into mismatch / insertion / deletion
        # slices according to their respective percentages.
        _mismatches_rate = int(mismatches * _mutation_rate / 100)
        mismatches_positions = positions_to_mutate[:_mismatches_rate]
        # print(mismatches_positions)
        _insertions_rate = int(insertions * _mutation_rate / 100)
        insertions_positions = positions_to_mutate[_mismatches_rate:
                                                   _mismatches_rate + _insertions_rate]
        # print(insertions_positions)
        deletions_positions = positions_to_mutate[_mismatches_rate +
                                                  _insertions_rate:]
        # print(deletions_positions)
        # Apply mismatches first, then insertions, then deletions; deletions
        # go last because they change the sequence length.
        _sequence = mismatch(_sequence, mismatches_positions, prot)
        _sequence = insert(_sequence, insertions_positions,
                           max_indel_size, prot)
        _sequence = delete(_sequence, deletions_positions,
                           max_indel_size, prot)
        entry = "".join(['>mut-', _id, '\n', "".join(_sequence), '\n'])
        fo.write(entry)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,297
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/run_all_samples.py
|
import sys
import os
# Batch-process paired-end samples: for each row of the sample sheet,
# uncompress both FASTQ files, run chloroplast filtering (chfilter),
# subsample reads (randomfq.sh), then remove the temporary files.
# Usage: python run_all_samples.py <sample_sheet> <input_dir> <output_dir>
# Sample sheet: whitespace-separated columns <sample> <fastq1.gz> <fastq2.gz>;
# the first line is a header and is skipped by the [1:] slice below.
files = [i.split() for i in open(sys.argv[1])][1:]
outdir = sys.argv[3]
indir = sys.argv[2]
for sample,fastq1gz,fastq2gz in files:
    # Strip the trailing .gz extension to get the uncompressed file names.
    fastq1 = ".".join(fastq1gz.split('.')[:-1])
    fastq2 = ".".join(fastq2gz.split('.')[:-1])
    print('processing sample: '+sample)
    print('uncompressing files')
    # NOTE(review): file names are interpolated into shell commands unquoted —
    # names containing spaces or shell metacharacters will break these calls.
    os.system('gunzip -c '+indir+"/"+fastq1gz+" > "+outdir+"/"+fastq1 )
    os.system('gunzip -c '+indir+"/"+fastq2gz+" > "+outdir+"/"+fastq2 )
    print('running chfilter')
    os.system('chfilter remove --paired-1 '+outdir+'/'+fastq1+' --paired-2 '+outdir+'/'+fastq2+' --out-dir '+outdir )
    print('running randomfq')
    # Subsample 12,700,000 reads from the chloroplast-filtered FASTQ pair.
    os.system('sh randomfq.sh '+outdir+'/'+fastq1+'.no-chl.fastq '+outdir+'/'+fastq2+'.no-chl.fastq 12700000 0')
    print('removing temp files')
    os.system('rm '+outdir+"/"+fastq1+" "+outdir+"/"+fastq2+' '+outdir+"/bowtie*")
print(':) done!')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,298
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta2len.py
|
import sys
from Bio import SeqIO
import click
@click.command()
@click.option('--fasta-file', required=True, help='fasta input file')
@click.option('--separator', default="|", help='header separator [default: "|" ]')
@click.option('--label', default=0, help='label position [default: 0 ]')
def fasta2len(fasta_file, separator, label):
    '''
    Get the lengths of each sequence in a fasta file.
    Write to stdout.
    '''
    # Accumulate total length and record count per header label.
    # (Fixes: builtin `id` was shadowed, the input handle leaked, and a
    # broad `except Exception` stood in for a simple first-seen default.)
    data = {}
    with open(fasta_file) as handle:
        for record in SeqIO.parse(handle, "fasta"):
            seq_id = record.id.split(separator)[label]
            entry = data.setdefault(seq_id, {"len": 0, "count": 0})
            entry['len'] += len(record.seq)
            entry['count'] += 1
    # Emit one line per label: <label>\t<mean sequence length>.
    # NOTE(review): this prints the *average* length per label, not the
    # total — the docstring says "lengths"; confirm which is intended.
    for seq_id, entry in data.items():
        print(seq_id + '\t' + str(entry['len'] / entry['count']))
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,299
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/metastorm/network.py
|
import click
import json
import logging
import pandas as pd
def get_scaffolds(path='', database='', sample_name='', scaffolds=None, evalue=1e-5, identity=80, bitscore=100):
    """Parse a BLAST-style tabular file and collect scaffold -> subject hits.

    Reads the 12-column outfmt-6 table at *path*, keeps hits passing the
    evalue/identity/bitscore thresholds, and appends
    '<database>_.*._<subject>' to scaffolds['<sample_name>_.*._<n>'],
    where <n> is the second '_'-separated token of the query id.

    Fixes: *scaffolds* used a mutable default dict (shared across calls
    that rely on the default), and a bare except stood in for a
    first-seen list default.  The dict is still mutated in place for
    callers that pass one; it is also returned for convenience.
    """
    if scaffolds is None:
        scaffolds = {}
    columns = ['query', 'subject', 'identity', 'length', 'mismatch', 'gapopen',
               'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
    data = pd.read_csv(path, sep="\t", names=columns)
    # Keep only hits passing all three quality thresholds.
    data = data[(data['evalue'] <= evalue) &
                (data['identity'] >= identity) &
                (data['bitscore'] >= bitscore)]
    print('{} Hits {}'.format(database, len(data)))
    for _, hit in data.iterrows():
        # '_.*._' is the project-wide separator between tag and identifier.
        scaffold_id = sample_name + "_.*._" + hit['query'].split('_')[1]
        subject = database + "_.*._" + str(hit['subject'])
        scaffolds.setdefault(scaffold_id, []).append(subject)
    return scaffolds
@click.command()
@click.option('--metadata', default='', help='directory where all the genomes have been downloaded (e.g, /genomes/)')
@click.option('--output-file', default='', help='File where to store the fasta format with the requested genes')
@click.option('--tsv', default=False, is_flag=True, help='metadata is a tab separated file [default comma separated file]')
def network(metadata, output_file, tsv):
    '''
    Retrieve genes based on origin
    This script subtract genes from the *.PATRIC.ffn file or *.PATRIC.faa files. By looking at the speciallity genes:
    Antibiotic Resistance, Drug Target, Essential Gene, Human Homolog, Transporter, Virulence Factor.
    '''
    # NOTE(review): the docstring (and the --metadata help text) appears to be
    # copied from subtract_genes.py — this command actually builds a gene
    # co-occurrence network from per-database alignment tables.
    logging.basicConfig(
        filename=output_file + '.log',
        level=logging.DEBUG,
        filemode="w",
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    sep = ','
    if tsv:
        sep = '\t'
    # Metadata table: one row per alignment file with per-file thresholds
    # (columns used below: path, database, sample_name, evalue, identity,
    # bitscore, is_target).
    _metadata = pd.read_csv(metadata, sep=sep)
    # database -> is_target flag; used to keep only edges touching a target db.
    target_db = {i['database']: i['is_target']
                 for ix, i in _metadata.iterrows()}
    scaffolds = {}
    # Accumulate scaffold -> [database-tagged subjects] across all files;
    # get_scaffolds mutates the shared `scaffolds` dict in place.
    for ix, i in _metadata.iterrows():
        get_scaffolds(
            path=i['path'],
            database=i['database'],
            scaffolds=scaffolds,
            sample_name=i['sample_name'],
            evalue=i['evalue'],
            identity=i['identity'],
            bitscore=i['bitscore']
        )
    nodes = {}
    edges = {}
    # Node weight = number of scaffold hits for that subject; edge weight =
    # number of scaffolds on which the (ordered) pair of subjects co-occurs.
    for i in scaffolds.values():
        for ik, k in enumerate(i):
            try:
                nodes[k] += 1
            except:
                nodes[k] = 1
            for il in range(ik+1, len(i)):
                try:
                    edges[(k, i[il])] += 1
                except:
                    edges[(k, i[il])] = 1
    # Write the node table: keys are '<database>_.*._<gene>' strings built
    # by get_scaffolds, split back into their two parts here.
    fo = open('{}.nodes.csv'.format(output_file), 'w')
    fo.write('Node,Database,Weight\n')
    for i in nodes:
        database, gene = i.split('_.*._')
        fo.write(",".join([gene, database, str(nodes[i])])+'\n')
    fo.close()
    fo = open('{}.edges.csv'.format(output_file), 'w')
    fo.write('Source,Target,source_database,target_database,Weight\n')
    for i in edges:
        s_database, s_gene = i[0].split('_.*._')
        t_database, t_gene = i[1].split('_.*._')
        # Keep an edge only when at least one endpoint is in a target database.
        if target_db[s_database] or target_db[t_database]:
            counts = str(edges[i])
            fo.write(
                ",".join([s_gene, t_gene, s_database, t_database, counts])+'\n')
    fo.close()
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,300
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fq2fa.py
|
import sys
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def retrieve(fi='', listf={}):
    """Convert a FASTQ file to FASTA.

    Writes '<input-without-fastq/fq-extension>fasta' next to the input,
    one '>id\\nsequence' record per read (quality lines are dropped).

    Parameters:
        fi: path to the input FASTQ file.
        listf: unused; kept only for backward compatibility with existing
            callers.

    Fixes: the output handle was never closed (buffered data could be
    lost), and a dead local (`header = _id.split(' ')[0]`) was computed
    but never used.
    """
    out_name = fi.replace("fastq", "").replace("fq", "") + "fasta"
    with open(out_name, "w") as fo:
        for _id, seq, qual in FastqGeneralIterator(open(fi)):
            # NOTE(review): the original computed the space-trimmed header
            # but wrote the full id; the full id is kept to preserve output.
            fo.write('>%s\n%s\n' % (_id, seq))


if __name__ == '__main__':
    retrieve(fi=sys.argv[1])
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,301
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/nanoarg/nanoarg.py
|
import click
from GeneTools.nanoarg.mapping_table import mapping_table
from GeneTools.nanoarg.network import network
@click.group()
def nanoarg():
    '''
    Tools for processing the JSON file from nanoARG.
    '''
    pass


# Register the nanoARG subcommands on the group.
for _command in (mapping_table, network):
    nanoarg.add_command(_command)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,302
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/nanoarg/network.py
|
import click
import json
import logging
import pandas as pd
from tqdm import tqdm
import sys
import networkx as nx
# Numeric origin codes -> human-readable gene group names.
origins = {
    1: 'ARGs',
    2: 'MGEs',
    4: 'MRGs',
    3: 'Functional Genes'
}
# NCBI taxonomy ids of clinically relevant pathogens.
# Fix: the original literal repeated keys 1352 and 1280; duplicate dict keys
# silently overwrite, so removing them leaves the dict value-for-value
# identical while making the intent explicit.
pathogens = {
    1352: 'Enterococcus faecium',
    1280: 'Staphylococcus aureus',
    573: 'Klebsiella pneumonia',  # NOTE(review): likely 'pneumoniae'; string kept as-is
    470: 'Acinetobacter baumannii',
    287: 'Pseudomonas aeruginosa',
    42895: 'Enterobacter spp.',
    543: 'Enterobacteriaceae',
    210: 'Helicobacter pylori',
    205: 'Campylobacter sp',
    590: 'Salmonellae',
    485: 'Neisseria gonorrhoeae',
    1313: 'Streptococcus pneumoniae',
    727: 'Haemophilus influenzae',
    625: 'Shigella sp'
}


def format_gene(gene):
    """Normalize a raw nanoARG gene hit in place and return it.

    Copies fields out of the 'metadata' list into named keys and resolves
    the numeric 'origin' code to a group name via `origins` (unknown
    codes raise KeyError, as before).
    """
    gene['gene_id'] = gene['metadata'][0]
    gene['category'] = gene['metadata'][3]
    gene['gene_name'] = gene['metadata'][4]
    gene['read'] = gene['block_id']
    gene['group'] = origins[gene['origin']]
    # MRG and functional-gene hits carry their informative label in the
    # category field, so use it as the display name.
    if gene['group'] in ('MRGs', 'Functional Genes'):
        gene['gene_name'] = gene['category']
    return gene
def get_node_edges(genes):
    """Yield an edge dict for every unordered pair of genes on one read.

    Each gene is normalized through format_gene() (which mutates it in
    place) before its name and group are read; a single-gene read yields
    nothing.
    """
    if len(genes) < 2:
        return
    for index in range(len(genes) - 1):
        source = format_gene(genes[index])
        for raw_target in genes[index + 1:]:
            target = format_gene(raw_target)
            yield {
                'source': source['gene_name'],
                'target': target['gene_name'],
                'source_group': source['group'],
                'target_group': target['group'],
            }
def get_taxa(read):
    """Extract taxonomy info from the first alignment of a nanopore read.

    Returns a dict with the NCBI taxa id, centrifuge score, species name,
    and an 'is_pathogen' flag ('Yes'/'No') based on the module-level
    `pathogens` table.

    Fix: the original used ``assert(pathogens[...])`` plus a bare except
    as a membership test — under ``python -O`` asserts are stripped, so
    every read would have been flagged 'Yes'.  A plain membership check
    has no such failure mode; non-numeric or missing taxa ids still
    count as 'No', as before.
    """
    gene = {
        'NCBI_taxa_id': read['read'][0]['taxa_id'],
        'taxa_centrifuge_score': read['read'][0]['taxa_score'],
        'species': read['read'][0]['taxa_species'],
    }
    try:
        gene['is_pathogen'] = 'Yes' if int(gene['NCBI_taxa_id']) in pathogens else 'No'
    except (TypeError, ValueError):
        gene['is_pathogen'] = 'No'
    return gene
def build_network(data):
    """Build a weighted co-occurrence network from nanoARG read records.

    For every read, connects each pair of genes found on the read, and
    connects each of those genes to the read's taxonomy assignment.
    Weights count how many times each connection is seen.  Returns the
    dict-values view of edge records (source, target, groups, weight,
    is_pathogen).
    """
    network = {}
    for read in tqdm(data):
        taxa = get_taxa(read)
        for edge in get_node_edges(read['data']):
            edge['is_pathogen'] = taxa['is_pathogen']
            # Gene-gene edge: bump the weight if seen before, else store the
            # edge dict itself (the same object yielded by get_node_edges).
            # NOTE(review): when the key already exists, only 'weight' is
            # updated — a later read's is_pathogen flag is not merged in.
            try:
                network['{}_{}'.format(
                    edge['source'], edge['target'])]['weight'] += 1
            except:
                network['{}_{}'.format(edge['source'], edge['target'])] = edge
                network['{}_{}'.format(
                    edge['source'], edge['target'])]['weight'] = 1
            # Source-gene -> taxonomy edge.
            # NOTE(review): assumes taxa_id is a string (it is concatenated
            # with '|' below) — confirm the upstream JSON stores it as str.
            try:
                network['{}_{}'.format(
                    edge['source'], taxa['NCBI_taxa_id']
                )]['weight'] += 1
            except:
                network['{}_{}'.format(
                    edge['source'], taxa['NCBI_taxa_id']
                )] = {
                    'source': edge['source'],
                    'source_group': edge['source_group'],
                    'target': taxa['NCBI_taxa_id']+'|'+taxa['species'],
                    'target_group': 'Taxonomy',
                    'weight': 1,
                    'is_pathogen': edge['is_pathogen']
                }
            # Target-gene -> taxonomy edge (same pattern as above).
            try:
                network['{}_{}'.format(
                    edge['target'], taxa['NCBI_taxa_id']
                )]['weight'] += 1
            except:
                network['{}_{}'.format(
                    edge['target'], taxa['NCBI_taxa_id']
                )] = {
                    'source': edge['target'],
                    'source_group': edge['target_group'],
                    'target': taxa['NCBI_taxa_id']+'|'+taxa['species'],
                    'target_group': 'Taxonomy',
                    'weight': 1,
                    'is_pathogen': edge['is_pathogen']
                }
    return network.values()
@click.command()
@click.option('--input-file', default='', help='JSON fil downloaded from NanoARG')
@click.option('--output-file', default='', help='file with the mapping table as shown in the genes mapped to nanopore reads')
def network(input_file, output_file):
    '''
    Generate table of genes mapped to nanopore reads
    This tool will generate the full table named "genes
    mapped to nanopore reads" under the NanoARG website.
    https://bench.cs.vt.edu/nanoarg/
    '''
    # Log progress to stdout rather than a file.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('loading input file ' + input_file)
    data = json.load(open(input_file))
    log.info('traversing file ' + input_file)
    # data[0]: assumes the JSON root is a list whose first element holds the
    # read records — TODO confirm against the NanoARG export format.
    dataset = pd.DataFrame(build_network(data[0]))
    log.info('Storing table to ' + output_file)
    dataset.to_csv(output_file, index=False)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,303
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
setup(
name='GeneTools',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
'BioPython',
'ete3',
'h5py',
'tqdm',
'pandas',
'networkx'
],
entry_points='''
[console_scripts]
genetools=GeneTools.entry:cli
''',
)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,304
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/patric/subtract_genes.py
|
import click
from Bio import SeqIO
import json
import logging
def overlap(intervals):
    """Merge overlapping or touching intervals.

    Each interval is a mutable ``[start, end, payload_list]``; intervals
    whose ranges intersect (or share an endpoint) are merged, with their
    payload lists concatenated.  Merging mutates the surviving interval
    lists in place; the merged result is returned sorted by start.
    """
    ordered = sorted(intervals, key=lambda interval: interval[0])
    merged = []
    for current in ordered:
        # After sorting, merged[-1][0] <= current[0] always holds, so a
        # single comparison decides whether the ranges intersect.
        if merged and current[0] <= merged[-1][1]:
            survivor = merged[-1]
            survivor[1] = max(survivor[1], current[1])
            survivor[2] += current[2]
        else:
            merged.append(current)
    return merged
@click.command()
@click.option('--input-directory', default='', help='directory where all the genomes have been downloaded (e.g, /genomes/)')
@click.option('--genome-id', default='', help='genome identifier to process (e.g., 83332.12)')
@click.option('--output-file', default='', help='File where to store the fasta format with the requested genes')
@click.option('--property', default='Essential Gene', help='Select genes under this property (e.g., Essential Gene)')
@click.option('--extend', default=0, help='Add at the end of each gene # of nucleoties (default: 0)')
@click.option('--faa', default=False, is_flag=True, help='retrieve protein sequences')
def subtract_genes(input_directory, genome_id, output_file, property, extend, faa):
    '''
    Retrieve genes based on origin
    This script subtract genes from the *.PATRIC.ffn file or *.PATRIC.faa files. By looking at the speciallity genes:
    Antibiotic Resistance, Drug Target, Essential Gene, Human Homolog, Transporter, Virulence Factor.
    '''
    # Log to <output-file>.log, overwriting any previous run.
    logging.basicConfig(
        filename=output_file + '.log',
        level=logging.DEBUG,
        filemode="w",
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    # Speciality-gene table: build patric_id -> row for genes matching --property.
    metadata_file = open(input_directory + '/' + genome_id +
                         '/' + genome_id + '.PATRIC.spgene.tab')
    metadata = {}
    log.info(('metdata file', input_directory + '/' + genome_id +
              '/' + genome_id + '.PATRIC.spgene.tab'))
    for ix, i in enumerate(metadata_file):
        # identify keys
        if ix == 0:
            keys = i.strip().split('\t')
            continue
        entry = i.strip().split('\t')
        item = {keys[hx]: h for hx, h in enumerate(entry)}
        # Substring match: e.g. --property 'Essential' also matches 'Essential Gene'.
        if property in item['property']:
            metadata.update({item['patric_id']: item})
    log.debug(metadata)
    log.info(('loading features file', input_directory + '/' +
              genome_id + '/' + genome_id + '.PATRIC.features.tab'))
    features = []
    for ix, i in enumerate(open(input_directory + '/' + genome_id + '/' + genome_id + '.PATRIC.features.tab')):
        # identify keys
        if ix == 0:
            keys = i.strip().split('\t')
            continue
        # now get only filtered genes
        entry = i.strip().split('\t')
        item = {keys[hx]: h for hx, h in enumerate(entry)}
        try:
            # Keep only features whose patric_id was selected above; the
            # assert raises KeyError/AssertionError for the rest.
            assert (metadata[item['patric_id']])
            features.append(item)
        except Exception as e:
            pass
    log.debug(('fetures: ', features))
    # Group the selected features by accession (contig/chromosome id).
    genomes = {}
    for i in features:
        try:
            genomes[i['accession']].append(i)
        except Exception as e:
            genomes[i['accession']] = [i]
    log.debug(('identified genomes', genomes))
    if not faa:
        # Nucleotide mode: cut gene intervals out of the .fna genome sequence.
        fofasta = open(output_file+'.fasta', 'w')
        # Now: traverse the selected genes and retrieve the sequences
        for record in SeqIO.parse(open(input_directory + '/' + genome_id + '/' + genome_id + '.fna'), "fasta"):
            # NOTE(review): this rebinds the genome_id CLI parameter to the
            # record id; later iterations still work because the file handle
            # was opened before the loop, but the shadowing is fragile.
            genome_id = record.id
            log.debug(('processing genome', genome_id))
            genome_data = genomes[genome_id]
            sequence = record.seq
            intervals = []
            for genome in genome_data:
                log.debug(('Procesing entry:', genome))
                # start/end may be reversed for minus-strand genes; normalize.
                start = min(int(genome['start']), int(genome['end']))
                end = max(int(genome['start']), int(genome['end']))
                if start - extend > 0:
                    start = start - extend
                # NOTE(review): this condition tests the sequence length, not
                # whether end + extend stays within bounds — confirm intent.
                if len(sequence) - extend > 0:
                    end = end + extend
                header = genome['patric_id']+'|' + \
                    genome['start']+'|'+genome['end']
                intervals.append([start, end, [header]])
            # Merge overlapping gene intervals so each region is emitted once.
            intervals = overlap(intervals)
            for interval in intervals:
                header = "|".join(
                    [">"+genome['genome_id'], genome['accession'], 'start:'+str(interval[0]), 'end:'+str(interval[1]), "["+",".join(interval[2]) + "]"])
                _sequence = sequence[int(interval[0]):int(interval[1])]
                fofasta.write(header+'\n'+str(_sequence)+'\n')
            # print(overlap(intervals))
        json.dump([genomes, metadata], open(output_file + '.json', 'w'))
    else:
        # Protein mode: pull matching entries straight from the .faa file.
        fofasta = open(output_file + '.fasta', 'w')
        genome_data = genomes
        for record in SeqIO.parse(open(input_directory + '/' + genome_id + '/' + genome_id + '.PATRIC.faa'), "fasta"):
            # check in metadata, the metadata dict has the keys as the patric ids
            protein_id = "|".join(record.id.split("|")[:2])
            try:
                assert (metadata[protein_id])
                log.debug(record)
                log.debug(metadata[protein_id])
                # fortmat of output fasta file
                # id|category|gene_name|gene_group
                _id = protein_id.replace('|', ":")
                _category = metadata[protein_id]['property']
                _gene_name = metadata[protein_id]['gene']
                _gene_group = metadata[protein_id]['gene']
                if not _gene_name:
                    _gene_name = 'other'
                    _gene_group = 'other'
                header = "|".join(
                    [_id, _category, _gene_name, _gene_group]).replace(" ", "_")
                sequence = str(record.seq)
                # Skip short proteins (< 200 aa).
                if len(sequence) < 200:
                    continue
                fofasta.write(">"+header+'\n'+sequence+'\n')
            except:
                pass
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,305
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/filterTaxa.py
|
import click
import os
import random
from Bio import SeqIO
import numpy as np
from ete3 import NCBITaxa
import logging
@click.command()
@click.option('--input-file', required=True, help='input file with read_id and taxa_id')
@click.option('--taxa', required=True, help='Filter all sequences that contain this taxa id (look at all levels)')
@click.option('--update-taxa-db', is_flag=True, help='Update ncbi taxonomy database')
@click.option('--read-pos', default=0, help='column index of read id (default: 0)')
@click.option('--taxa-pos', default=2, help='column index of the taxonomy id (default: 2)')
@click.option('--sep', default="\t", help='separator of file fields (default: tab "\\t")')
def filter_taxa(input_file, taxa, update_taxa_db, read_pos, taxa_pos, sep):
    '''
    Get a list of taxa ID and filter the reads under certain taxa
    For instance: retrieve all sequences that are under the family Enterobacteriaceae (546 taxa id).
    The default values are used for centrifuge output, if you use a different type of file, please specify the column number where the read id is and taxa id.
    You can also use a different separator, by default it uses "\t".
    '''
    logging.basicConfig(
        filename=input_file + '.log',
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('Load NCBI taxa database')
    ncbi = NCBITaxa()
    if update_taxa_db:
        log.info('Updating NCBI taxa database')
        ncbi.update_taxonomy_database()
    # Write matches to <input>.selected.<taxa>.tsv; `with` closes it even
    # if a later line raises (the original left it open on error).
    with open(input_file + '.selected.' + taxa + '.tsv', 'w') as fo:
        for ix, line in enumerate(open(input_file)):
            if ix == 0:
                continue  # skip the header row
            fields = line.strip().split(sep)
            read_id = fields[read_pos]
            taxa_id = fields[taxa_pos]
            try:
                # Fix: get_lineage raises for unknown/invalid taxa ids;
                # previously it sat outside the try block, so one bad row
                # crashed the whole run instead of being logged.
                lineage = ncbi.get_lineage(int(taxa_id))
                # Keep the read if the requested taxa appears anywhere in
                # its lineage (i.e. the read is classified under it).
                if int(taxa) in lineage:
                    fo.write("\t".join([read_id, taxa_id]) + "\n")
            except Exception as inst:
                log.error(str(inst) + ' ' + read_id)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,306
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta2rand.py
|
import click
import os
import random
from Bio import SeqIO
import numpy as np
@click.command()
@click.option('--input-file', default='', help='input fasta file')
@click.option('--fr', default=0.2, help='fraction of sequences to get randomly (default: 0.2)')
def fasta2rand(input_file, fr):
    '''
    Retrieve random sequences from a fasta file.
    '''
    # First pass: count the records so we know the sampling universe.
    total_reads = sum(1 for _ in SeqIO.parse(open(input_file), "fasta"))
    reads_to_subtract = int(total_reads * fr)
    # random.sample draws distinct indices directly (the original shuffled a
    # full index list and sliced it); a set gives O(1) membership tests and
    # replaces the try/assert/except control-flow anti-pattern.
    selected = set(random.sample(range(total_reads), reads_to_subtract))
    out_name = input_file + '.sel_' + str(reads_to_subtract) + '.fasta'
    # Second pass: emit only the sampled records. The context manager closes
    # the output file (the original leaked the handle).
    with open(out_name, 'w') as fo:
        for index, record in enumerate(SeqIO.parse(open(input_file), "fasta")):
            if index in selected:
                # keep the full description line, not just the id
                fo.write('>' + record.description + '\n' + str(record.seq) + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,307
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta2reads.py
|
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
import random
@click.command()
@click.option('--fasta', required=True, help='fasta input file')
@click.option('--read-length', default=100, help='length of the reads to generate')
@click.option('--min-sread', default=1, help='minimum reads per sequence')
@click.option('--max-sread', default=10, help='maximum reads per sequence')
def fasta2reads(fasta, read_length, min_sread, max_sread):
    '''
    From a fasta file build a set of random reads
    This script will take an input a fasta file and for each
    sequence will take between the min-sread number of reads to
    the max-sreads maximum number of reads.
    reads are picked up at random positions in the sequence
    '''
    out_name = (fasta.replace('.fasta', '').replace('.fa', '')
                + '.reads.' + str(read_length) + '.fasta')
    # Context manager closes the output file (the original leaked the handle).
    with open(out_name, 'w') as fo:
        for record in SeqIO.parse(open(fasta), "fasta"):
            # discard sequences that are smaller than read length.
            if len(record.seq) <= read_length:
                continue
            # randint is inclusive on both ends: between min and max reads.
            n_reads = random.randint(min_sread, max_sread)
            for iread in range(n_reads):
                start = random.randint(0, len(record.seq) - read_length - 1)
                read_sequence = record.seq[start:start + read_length]
                # header encodes source id plus the read's ordinal within it
                fo.write(">" + record.id + '|' + str(iread) + '\n'
                         + str(read_sequence) + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,308
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta2trainKmers.py
|
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
import re
import numpy as np
import os
def genearte_one_genome(genome='ATCGATATACCA', k=3):
    """Return the single (unshifted) k-mer sentence of *genome*."""
    # Thin wrapper: one "document" is just the genome split into k-mers.
    return split_genome(genome=genome, k=k)
def split_genome(genome="ATCGATATACCA", k=3):
    """Chop *genome* into consecutive non-overlapping k-mers.

    A trailing fragment shorter than *k* is dropped, matching the
    behaviour of a fixed-width regex findall.
    """
    kmer_pattern = re.compile('.' * k)
    return kmer_pattern.findall(genome)
def genearte_genomes(genome='ATCGATATACCA', k=3, words=50):
    """Build the k phase-shifted k-mer sentences of *genome*.

    Row i holds the k-mers of genome[i:], so every reading frame is
    represented once. Shifted frames can come up one k-mer short, in
    which case a single empty string pads the row to a uniform length.

    NOTE(review): *words* is accepted but never used; kept for
    backward compatibility with existing callers.
    """
    target_len = int(len(genome) / k)
    sentences = []
    for offset in range(k):
        sentence = split_genome(genome=genome[offset:], k=k)
        # pad short frames so all rows align
        if target_len - len(sentence) > 0:
            sentence.append('')
        sentences.append(np.array(sentence, dtype="U"))
    return np.array(sentences)
def genome_to_doc(input_file="", kmer=16, label="", f5=""):
    ''' This function transforms a sequence genome to a document of kmers '''
    # NOTE(review): *f5* is accepted but never used; kept for compatibility.
    docs = []
    for record in SeqIO.parse(input_file, 'fasta'):
        sequence = str(record.seq).upper()
        docs.append({
            # all k phase-shifted k-mer sentences for this sequence
            'sentences': genearte_genomes(genome=sequence, k=kmer),
            'id': record.id,
            '_kmer_count': int(len(sequence) / kmer),
            'label': label,
        })
    return docs
@click.command()
@click.option('--fasta-file', required=True, help='fasta input file')
@click.option('--kmer', default=11, help='kmer length')
@click.option('--out-file', required=True, help='output file with embeddings')
def fasta2trainKmers(fasta_file, kmer, out_file):
    '''
    Convert a fasta file into a word/sentence file.
    This file contains all consecutive kmers from positions
    i, i+1, i+2, ...., i+n where n is the lenght of the kmers.
    In other words it produces consecutieve kmers versions of
    the input sequence.
    '''
    records = genome_to_doc(input_file=fasta_file, kmer=kmer)
    # Context manager flushes and closes the output (original leaked the
    # handle, risking a truncated file on interpreter exit).
    with open(out_file, 'w') as fo:
        for record in records:
            # one line per phase-shifted k-mer sentence
            for sentence in record['sentences']:
                fo.write(" ".join(sentence) + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,309
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/nanoarg/mapping_table.py
|
import click
import json
import logging
import pandas as pd
from tqdm import tqdm
import sys
# Map NanoARG's numeric "origin" code to a human-readable gene group.
origins = {
    1: 'ARGs',
    2: 'MGEs',
    4: 'MRGs',
    3: 'Functional Genes'
}
# NCBI taxonomy ids of priority pathogens flagged in the output table.
# The original literal repeated keys 1352 and 1280 with identical values;
# Python silently collapses duplicate dict keys, so removing them leaves
# the resulting dict unchanged.
pathogens = {
    1352: 'Enterococcus faecium',
    1280: 'Staphylococcus aureus',
    573: 'Klebsiella pneumonia',
    470: 'Acinetobacter baumannii',
    287: 'Pseudomonas aeruginosa',
    42895: 'Enterobacter spp.',
    543: 'Enterobacteriaceae',
    210: 'Helicobacter pylori',
    205: 'Campylobacter sp',
    590: 'Salmonellae',
    485: 'Neisseria gonorrhoeae',
    1313: 'Streptococcus pneumoniae',
    727: 'Haemophilus influenzae',
    625: 'Shigella sp'
}
def traverse_data(data):
    """Yield one flattened gene record per gene per nanopore read.

    Mutates each gene dict in place: copies selected metadata fields into
    named keys, attaches the read-level taxonomy assignment, flags known
    priority pathogens, and strips the presentation-only keys.
    """
    for read in tqdm(data):
        # read-level centrifuge taxonomy assignment (shared by all genes)
        read_info = read['read'][0]
        for gene in read['data']:
            gene['gene_id'] = gene['metadata'][0]
            gene['category'] = gene['metadata'][3]
            gene['gene_name'] = gene['metadata'][4]
            gene['read'] = gene['block_id']
            gene['group'] = origins[gene['origin']]
            # MRGs and functional genes carry no useful gene name in the
            # metadata; fall back to the category.
            if gene['group'] in ('MRGs', 'Functional Genes'):
                gene['gene_name'] = gene['category']
            gene['NCBI_taxa_id'] = read_info['taxa_id']
            gene['taxa_centrifuge_score'] = read_info['taxa_score']
            gene['species'] = read_info['taxa_species']
            # Flag priority pathogens; a non-numeric taxa id is simply not a
            # match. The original used assert + bare except, which hid real
            # errors and breaks under `python -O`.
            try:
                gene['is_pathogen'] = 1 if int(gene['NCBI_taxa_id']) in pathogens else 0
            except (TypeError, ValueError):
                gene['is_pathogen'] = 0
            # drop presentation-only keys from the flattened record
            for obsolete in ('metadata', 'block_id', 'color', 'origin',
                             'stroke_width', 'total_reads', 'value',
                             'score', 'position'):
                del gene[obsolete]
            yield gene
@click.command()
@click.option('--input-file', default='', help='JSON fil downloaded from NanoARG')
@click.option('--output-file', default='', help='file with the mapping table as shown in the genes mapped to nanopore reads')
def mapping_table(input_file, output_file):
    '''
    Generate table of genes mapped to nanopore reads
    This tool will generate the full table named "genes
    mapped to nanopore reads" under the NanoARG website.
    https://bench.cs.vt.edu/nanoarg/
    '''
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('loading input file ' + input_file)
    # Context manager closes the JSON file (the original leaked the handle).
    with open(input_file) as fin:
        data = json.load(fin)
    log.info('traversing file ' + input_file)
    # data[0] holds the per-read records; traverse_data flattens them.
    reads = pd.DataFrame(traverse_data(data[0]))
    # Fixed column order of the NanoARG "genes mapped to nanopore reads" table.
    columns = [
        'read',
        'gene_id',
        'gene_name',
        'group',
        'category',
        'start',
        'end',
        'strand',
        'identity',
        'bitscore',
        'evalue',
        'NCBI_taxa_id',
        'taxa_centrifuge_score',
        'species',
        'coverage',
        'is_pathogen',
    ]
    dataset = reads[columns]
    log.info('Storing table to ' + output_file)
    dataset.to_csv(output_file, index=False)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,310
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/filter_fastq.py
|
import click
from Bio import SeqIO
import logging
import gzip
@click.command()
@click.option('--qfile', required=True, help='fastq file')
@click.option('--qfilter', required=True, help='input tabular file with sequence ids')
@click.option('--outfile', required=True, help='Save fastq file to this filename')
@click.option('--qcolumn', default=0, help='Column where the sequences ids (default: 1)')
def filter_fastq(qfilter, qcolumn, qfile, outfile):
    '''
    Subtract fastq reads from a list of entries.
    This scrip picks up sequences in qfilter (fastq file) from qfile (tabular file where first column corresponds to read id).
    '''
    logging.basicConfig(
        filename=qfile + '.log',
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('Index sequences to subtract from fastq file')
    # Set of wanted read ids for O(1) membership tests.
    wanted = {line.split()[qcolumn] for line in open(qfilter)}
    log.info('Traverse fastq file to filter sequences of interest')
    # Both handles are context-managed: the original never closed the gzip
    # output on an exception, risking a truncated archive.
    with gzip.open(outfile, 'wb') as fo, gzip.open(qfile, 'rt') as handle:
        for record in SeqIO.parse(handle, "fastq"):
            # plain membership test replaces the try/assert anti-pattern
            if record.id in wanted:
                fo.write(record.format("fastq").encode())
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,311
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/FilterFastaLength.py
|
import click
import os
import random
from Bio import SeqIO
import numpy as np
@click.command()
@click.option('--input-file', default='', help='input fasta file')
@click.option('--minl', default=1000, help='Minimum length of sequences to keep (default: 1000)')
def FilterFastaLength(input_file, minl):
    '''
    Remove sequences under some length.
    This script removes sequences that are under a specified sequence length. Works for any input fasta,
    ideally for use when removing short contigs from assembled libraries.
    '''
    # Context manager closes the output (the original leaked the handle).
    with open(input_file + '.minL_' + str(minl) + '.fasta', 'w') as fo:
        for record in SeqIO.parse(open(input_file), "fasta"):
            seq = str(record.seq)
            if len(seq) < minl:
                continue  # drop sequences shorter than the cutoff
            # keep the full description line, not just the id
            fo.write('>' + record.description + '\n' + seq + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,312
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta_subset.py
|
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
@click.command()
@click.option('--fasta', required=True, help='fasta input file')
@click.option('--entries', required=True, help='tabular file with entries')
def fasta_subset(fasta, entries):
    '''
    Search and retrieve sequences from fasta file
    This script hashes the --entries and traverses the --fasta file until all entries are found.
    The running time depends on the length of the file
    '''
    # Hash the requested ids once for O(1) membership tests.
    wanted = {line.strip() for line in open(entries)}
    for record in SeqIO.parse(open(fasta), "fasta"):
        # Mirror the original early exit: an empty --entries file stops the
        # scan immediately (the set never shrinks during iteration).
        if not wanted:
            exit()
        # plain membership test replaces the try/assert/except anti-pattern,
        # which also broke under `python -O` (asserts are stripped)
        if record.id in wanted:
            print(">" + record.id + "\n" + str(record.seq))
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,313
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/taxa_file_to_table.py
|
import click
from Bio import SeqIO
import logging
import gzip
from ete3 import NCBITaxa
import pandas as pd
@click.command()
@click.option('--taxa-file', required=True, help='taxonomy files separated by comma')
@click.option('--sample-names', required=True, help='sample names separated by comma')
@click.option('--output-file', required=True, help='output table')
@click.option('--taxa-column', default=1, help='column in the taxa file that contains the taxonomy IDs (starts from 0)')
def taxa_file_to_table(taxa_file, sample_names, taxa_column, output_file):
    '''
    Convert files with taxonomy NCBI id to a matrix of counts.
    Tested on centrifuge output
    '''
    logging.basicConfig(
        filename=output_file + '.log',
        filemode="w",
        level=logging.INFO,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    # original message was copy-pasted from filter_fastq; corrected
    log.info('Load NCBI taxa database')
    ncbi = NCBITaxa()
    files = taxa_file.split(",")
    samples = sample_names.split(",")
    log.debug("Input files: %s" % files)
    log.debug("Input samples: %s" % sample_names)
    metadata = [(fname, samples[ix]) for ix, fname in enumerate(files)]
    taxa_dict = {}
    for fname, sample_name in metadata:
        for item in open(fname):
            taxa_id = item.strip().split('\t')[taxa_column]
            # Header rows carry a non-numeric taxa column: skip them. The
            # original used `assert(int(taxa_id))`, which is stripped under
            # `python -O` and also wrongly treated taxa id "0" as a header.
            try:
                int(taxa_id)
            except ValueError:
                log.info('File has header: ' + item.strip())
                continue
            key = "TaxaID_" + str(taxa_id)
            if key not in taxa_dict:
                # First sighting of this taxon: initialize per-sample counts
                # and resolve its lineage string once.
                taxa_dict[key] = {s: 0 for s in samples}
                lineage = ncbi.get_lineage(int(taxa_id))
                names = ncbi.get_taxid_translator(lineage)
                ranks = ncbi.get_rank(lineage)
                # rank initial + name, e.g. "g__Escherichia", major ranks only
                taxa_info = [
                    ranks[taxid][0] + '__' + names[taxid]
                    for taxid in lineage
                    if ranks[taxid] in ['genus', 'family', 'phylum', 'class', 'order']
                ]
                taxa_dict[key]['lineage'] = "r__Root;" + ";".join(taxa_info)
            taxa_dict[key][sample_name] += 1
    _table = pd.DataFrame.from_dict(taxa_dict).transpose()
    _table.index.name = 'otu_id'
    log.debug(_table)
    _table.to_csv(output_file, sep="\t")
    metadata = pd.DataFrame.from_dict({s: {'name': s} for s in samples}).transpose()
    metadata.index.name = 'sample_id'
    metadata.to_csv(output_file + '.metadata.tsv', sep="\t")
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,314
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/split_fasta.py
|
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
import time
@click.command()
@click.option('--fasta', required=True, help='fasta input file')
@click.option('--outdir', required=True, help='output directory')
def split_fasta(fasta, outdir):
    '''
    Split a fasta file into one file per sequence.
    Each sequence of --fasta is written to its own file under --outdir,
    and a <fasta>.list index maps each output file to its key.
    '''
    last_key = 0
    # The index file is context-managed (the original leaked the handle).
    with open(fasta + '.list', 'w') as listing:
        for record in SeqIO.parse(open(fasta), "fasta"):
            # Timestamp-derived key, as before, but bumped by one whenever
            # two records fall into the same 10-microsecond tick: the
            # original could generate identical keys and silently overwrite
            # previously written files.
            key_val = int(100000 * time.time())
            if key_val <= last_key:
                key_val = last_key + 1
            last_key = key_val
            key = str(key_val)
            ofile = outdir + '/' + key + '.fasta'
            with open(ofile, 'w') as fo:
                fo.write(">" + record.description + "\n" + str(record.seq) + '\n')
            listing.write(ofile + "\t" + key + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,315
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/deeparg_table.py
|
import click
from Bio import SeqIO
import logging
import gzip
from ete3 import NCBITaxa
import pandas as pd
@click.command()
@click.option('--deeparg-files', required=True, help='deeparg files separated by comma')
@click.option('--sample-names', required=True, help='sample names separated by comma')
@click.option('--output-file', required=True, help='output table')
@click.option('--counts', is_flag=True, default=False, help="report table with counts instead of 16s normalized [default False]")
@click.option('--header', is_flag=True, default=True, help="First line of the file is the file header [default True]")
def deeparg_table(deeparg_files, sample_names, output_file, counts, header):
    '''
    From the deepARG results build a table for analysis.
    '''
    logging.basicConfig(
        filename=output_file + '.log',
        filemode="w",
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info("Starting")
    files = deeparg_files.split(",")
    samples = sample_names.split(",")
    log.debug("Input files: %s" % files)
    log.debug("Input samples: %s" % sample_names)
    metadata = [(fname, samples[ix]) for ix, fname in enumerate(files)]
    # column 2 holds the 16S-normalized abundance, column 1 the raw counts
    index_abn = 1 if counts else 2
    abundance_dict = {}
    for deeparg_file, sample_name in metadata:
        for ix, item in enumerate(open(deeparg_file)):
            if ix == 0 and header:
                continue  # skip the per-file header line
            fields = item.split()
            arg_id = fields[0]
            # plain membership test replaces the try/assert anti-pattern
            # (asserts are stripped under `python -O`)
            if arg_id not in abundance_dict:
                abundance_dict[arg_id] = {s: 0 for s in samples}
            abundance_dict[arg_id][sample_name] += float(fields[index_abn])
    _table = pd.DataFrame.from_dict(abundance_dict).transpose()
    _table.index.name = 'category'
    log.debug(_table)
    _table.to_csv(output_file, sep="\t")
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,316
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/patric/patric.py
|
import click
from GeneTools.patric.subtract_genes import subtract_genes
@click.group()
def patric():
    '''
    Tools for processing data from the PATRIC (https://www.patricbrc.org/) database.
    Several scripts to postprocess the data from the PATRIC database.
    '''
    # Group body is empty on purpose: click dispatches to the subcommands.
    pass
# Register the subcommands available under `patric`.
patric.add_command(subtract_genes)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,317
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/entry.py
|
import click
from GeneTools.mutate import mutate
from GeneTools.patric.patric import patric
from GeneTools.metastorm.metastorm import metastorm
from GeneTools.fasta2rand import fasta2rand
from GeneTools.FilterFastaLength import FilterFastaLength
from GeneTools.filterTaxa import filter_taxa
from GeneTools.filter_fastq import filter_fastq
from GeneTools.taxa_file_to_table import taxa_file_to_table
from GeneTools.deeparg_table import deeparg_table
from GeneTools.deeparg_abundance import deeparg_abundance
from GeneTools.fasta_subset import fasta_subset
from GeneTools.split_fasta import split_fasta
from GeneTools.fasta2kmers import fasta2kmers
from GeneTools.fasta2trainKmers import fasta2trainKmers
from GeneTools.fasta_filter_seq import fasta_filter_seq
from GeneTools.fasta2len import fasta2len
from GeneTools.fasta2reads import fasta2reads
from GeneTools.nanoarg.nanoarg import nanoarg
@click.group()
def cli():
    '''
    Gene Tools: Is a suite of scripts useful for the manipulation of NGS and genomics data.
    Author(s): Gustavo Arango (gustavo1@vt.edu)
    Usage: genetools --help
    '''
    # Group body is empty on purpose: click dispatches to the subcommands.
    pass
# Register every subcommand and subgroup on the top-level CLI.
cli.add_command(mutate)
cli.add_command(patric)
cli.add_command(fasta2rand)
cli.add_command(FilterFastaLength)
cli.add_command(filter_taxa)
cli.add_command(filter_fastq)
cli.add_command(taxa_file_to_table)
cli.add_command(deeparg_table)
cli.add_command(deeparg_abundance)
cli.add_command(fasta_subset)
cli.add_command(split_fasta)
cli.add_command(fasta2kmers)
cli.add_command(fasta_filter_seq)
cli.add_command(fasta2trainKmers)
cli.add_command(fasta2len)
cli.add_command(metastorm)
cli.add_command(nanoarg)
cli.add_command(fasta2reads)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,318
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fasta2kmers.py
|
import sys
import click
from Bio import SeqIO
import logging
import gzip
import json
import re
import numpy as np
def split_genome(genome="ATCGATATACCA", k=3):
return re.findall('.'*k, genome)
def genearte_one_genome(genome='ATCGATATACCA', k=3):
_genome = genome
_sentence = split_genome(genome=_genome, k=k)
return _sentence
@click.command()
@click.option('--fasta-file', required=True, help='fasta input file')
@click.option('--kmer', default=11, help='kmer length')
@click.option('--out-file', required=True, help='output file with embeddings')
def fasta2kmers(fasta_file, kmer, out_file):
'''
Convert a fasta file into a word/sentence file
'''
# traverse the fasta file
fo = open(out_file + '.sentences', 'w')
fo2 = open(out_file + '.headers', 'w')
for record in SeqIO.parse(fasta_file, 'fasta'):
_genome = str(record.seq).upper()
sentences = genearte_one_genome(genome=_genome, k=kmer)
fo.write(" ".join(sentences) + '\n')
fo2.write(record.description + "\t" + str(len(sentences)) + '\n')
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,319
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/metastorm/metastorm.py
|
import click
from GeneTools.metastorm.network import network
@click.group()
def metastorm():
    '''
    Tools for postprocessing MetaStorm results.
    (The original docstring was copy-pasted from the patric group and
    described the PATRIC database instead.)
    '''
    # Group body is empty on purpose: click dispatches to the subcommands.
    pass
# Register the subcommands available under `metastorm`.
metastorm.add_command(network)
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,320
|
gaarangoa/genomic-scripts
|
refs/heads/master
|
/GeneTools/fqsplit.py
|
import sys
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def retrieve(fi='', listf={}):
    """Split an interleaved fastq file into separate R1/R2 mate files.

    The mate number is read from the first character of the second
    whitespace-separated token of each read header (Illumina-style
    "@id 1:..." / "@id 2:...").

    NOTE(review): *listf* is accepted but never used; kept for backward
    compatibility (its mutable default is never touched, so it is safe).
    """
    base = fi.replace("fastq", "").replace("fq", "")
    # All three handles are context-managed (the original leaked them,
    # risking truncated output on interpreter exit).
    with open(base + "R1.fq", "w") as fo1, \
         open(base + "R2.fq", "w") as fo2, \
         open(fi) as handle:
        for _id, seq, qual in FastqGeneralIterator(handle):
            mate = _id.split(' ')[1]
            read = '@%s\n%s\n+\n%s' % (_id, seq, qual)
            if mate[0] == '1':
                fo1.write(read + "\n")
            if mate[0] == '2':
                fo2.write(read + "\n")


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer runs the split.
    retrieve(fi=sys.argv[1])
|
{"/GeneTools/nanoarg/nanoarg.py": ["/GeneTools/nanoarg/mapping_table.py", "/GeneTools/nanoarg/network.py"], "/GeneTools/patric/patric.py": ["/GeneTools/patric/subtract_genes.py"], "/GeneTools/entry.py": ["/GeneTools/mutate.py", "/GeneTools/patric/patric.py", "/GeneTools/metastorm/metastorm.py", "/GeneTools/fasta2rand.py", "/GeneTools/FilterFastaLength.py", "/GeneTools/filterTaxa.py", "/GeneTools/filter_fastq.py", "/GeneTools/taxa_file_to_table.py", "/GeneTools/deeparg_table.py", "/GeneTools/fasta_subset.py", "/GeneTools/split_fasta.py", "/GeneTools/fasta2kmers.py", "/GeneTools/fasta2trainKmers.py", "/GeneTools/fasta2len.py", "/GeneTools/fasta2reads.py", "/GeneTools/nanoarg/nanoarg.py"], "/GeneTools/metastorm/metastorm.py": ["/GeneTools/metastorm/network.py"]}
|
28,323
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/main.py
|
from fastapi import FastAPI
from app.routers import similarity
# Application instance; routers are attached at import time.
app = FastAPI()
# Mount the similarity router's endpoints on the app.
app.include_router(similarity.router)
@app.get("/")
async def root():
    """Landing endpoint that points clients at the POST similarity route."""
    return {"message": "hit this with POST at /text-similarity"}
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,324
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/models/string_pair.py
|
from pydantic import BaseModel
class StringPair(BaseModel):
    """Request body: the pair of documents to compare."""
    doc_1: str  # first document text
    doc_2: str  # second document text
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,325
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/routers/similarity.py
|
from fastapi import APIRouter
from ..models.similarity_score import SimilarityScore
from ..models.string_pair import StringPair
from ..services.levenshtein import LevenshteinService
router = APIRouter(prefix="/api/v1", tags=["levenshtein"])


@router.post("/text-similarity", response_model=SimilarityScore)
async def get_similarity_score(pair: StringPair):
    """POST /api/v1/text-similarity: score the similarity of two documents.

    pair: the two documents (doc_1, doc_2) to compare.
    Returns a SimilarityScore computed by the Levenshtein service.
    """
    score = LevenshteinService(pair).get_similarity_score()
    return score
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,326
|
ryanjclark/text_similarity
|
refs/heads/master
|
/tests/test_levenshtein_service.py
|
import os
import pytest
from app.models.similarity_score import SimilarityScore
from app.models.string_pair import StringPair
from app.services.levenshtein import LevenshteinService
samples = []
# Load every fixture document from the samples/ directory once at import
# time; the parametrized cases below index into this list.
# NOTE(review): os.listdir order is not guaranteed by the OS; the expected
# distances below assume a fixed ordering — confirm, or sort the names.
files = [os.path.join("samples", f) for f in os.listdir("samples")]
for file in files:
    # Context manager closes each handle (the original leaked every one).
    with open(file) as handle:
        samples.append(handle.read())
@pytest.mark.parametrize(
    "doc_1,doc_2,expected",
    [
        ("hey", "hi", 2),
        (samples[0], samples[1], 291),
        (samples[0], samples[2], 298),
        (samples[1], samples[2], 66),
    ],
)
def test_levenshtein(doc_1, doc_2, expected):
    """The raw Levenshtein edit distance matches the known reference values."""
    pair = StringPair(doc_1=doc_1, doc_2=doc_2)
    service = LevenshteinService(pair)
    distance = service._levenshtein()
    assert distance == expected
@pytest.mark.parametrize(
    "doc_1,doc_2,expected",
    [
        ("hey", "hi", 0.3333),
        (samples[0], samples[1], 0.2988),
        (samples[0], samples[2], 0.2819),
        (samples[1], samples[2], 0.8136),
    ],
)
def test_normalize(doc_1, doc_2, expected):
    """Normalization maps the distance to a 4-decimal similarity ratio."""
    pair = StringPair(doc_1=doc_1, doc_2=doc_2)
    service = LevenshteinService(pair)
    distance = service._levenshtein()
    score = service._normalize(distance)
    assert score == expected
@pytest.mark.parametrize(
    "doc_1,doc_2,expected",
    [
        ("hey", "hi", SimilarityScore(score=0.3333)),
        (samples[0], samples[1], SimilarityScore(score=0.2988)),
        (samples[0], samples[2], SimilarityScore(score=0.2819)),
        (samples[1], samples[2], SimilarityScore(score=0.8136)),
    ],
)
def test_get_similarity(doc_1, doc_2, expected):
    """End-to-end: the service wraps the score in a SimilarityScore model."""
    pair = StringPair(doc_1=doc_1, doc_2=doc_2)
    service = LevenshteinService(pair)
    similarity_score = service.get_similarity_score()
    assert similarity_score == expected
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,327
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/utils/matrix.py
|
from typing import List
def create_matrix(rows: int, cols: int) -> List[List[float]]:
    """Return a ``rows`` x ``cols`` matrix (list of lists) filled with 0.0.

    Args:
        rows: number of rows (non-negative).
        cols: number of columns (non-negative).

    Returns:
        A list of ``rows`` independent lists, each containing ``cols`` 0.0s.
    """
    # A nested comprehension replaces the original loops, which shadowed the
    # loop variable (`i` was reused for both dimensions). Each row is a
    # distinct list object (unlike the `[[0.0] * cols] * rows` pitfall).
    return [[0.0 for _ in range(cols)] for _ in range(rows)]
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,328
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/services/levenshtein.py
|
from ..models.similarity_score import SimilarityScore
from ..models.string_pair import StringPair
from ..services.similarity import SimilarityService
from ..utils.matrix import create_matrix
class LevenshteinService(SimilarityService):
    """Similarity service based on the Levenshtein edit distance."""

    def __init__(self, pair: StringPair):
        # The pair of documents (doc_1, doc_2) to compare.
        self.pair = pair

    def _levenshtein(self) -> int:
        """Apply Levenshtein's distance algorithm to the string pair.

        Returns an int counting how many insertions, substitutions and
        deletions one string needs to become the other.
        """
        # (len1+1) x (len2+1) dynamic-programming table
        matrix = create_matrix(len(self.pair.doc_1) + 1, len(self.pair.doc_2) + 1)
        # First row/column: distance from the empty string is the prefix length.
        for i in range(len(self.pair.doc_1) + 1):
            matrix[i][0] = i
        for j in range(len(self.pair.doc_2) + 1):
            matrix[0][j] = j
        for i in range(1, len(self.pair.doc_1) + 1):
            for j in range(1, len(self.pair.doc_2) + 1):
                if self.pair.doc_2[j - 1] == self.pair.doc_1[i - 1]:
                    # Matching characters cost nothing: carry the diagonal.
                    matrix[i][j] = matrix[i - 1][j - 1]
                else:
                    # Otherwise 1 + cheapest of substitute / insert / delete.
                    matrix[i][j] = (
                        min(matrix[i - 1][j - 1], matrix[i][j - 1], matrix[i - 1][j])
                        + 1
                    )
        # The bottom-right cell is the full-string distance. The table is
        # float-valued (create_matrix fills 0.0), so cast to honour the
        # declared int contract.
        return int(matrix[-1][-1])

    def _normalize(self, distance: int) -> float:
        """Normalize the distance by the longer document's length and return
        the complement of that ratio, rounded to four decimals."""
        max_distance = max(len(self.pair.doc_1), len(self.pair.doc_2))
        if max_distance == 0:
            # Two empty documents are identical; the original raised
            # ZeroDivisionError in this case.
            return 1.0
        return round(1 - (distance / max_distance), 4)

    def get_similarity_score(self) -> SimilarityScore:
        """Compute the normalized score and wrap it in the response model."""
        distance = self._levenshtein()
        score = self._normalize(distance)
        return SimilarityScore(score=score)
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,329
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/services/similarity.py
|
from ..models.similarity_score import SimilarityScore
class SimilarityService:
    """Abstract base for similarity services: subclasses implement
    get_similarity_score (see the Levenshtein service)."""

    def __init__(self):
        pass

    def get_similarity_score(self) -> SimilarityScore:
        # Interface method; subclasses return a populated SimilarityScore.
        pass
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,330
|
ryanjclark/text_similarity
|
refs/heads/master
|
/app/models/similarity_score.py
|
from pydantic import BaseModel
class SimilarityScore(BaseModel):
    """Response body: the normalized similarity of the two documents."""

    # Normalized similarity ratio (1.0 means identical documents).
    score: float
|
{"/app/routers/similarity.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/tests/test_levenshtein_service.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/levenshtein.py"], "/app/services/levenshtein.py": ["/app/models/similarity_score.py", "/app/models/string_pair.py", "/app/services/similarity.py", "/app/utils/matrix.py"], "/app/services/similarity.py": ["/app/models/similarity_score.py"]}
|
28,347
|
sofianelounici/EasyTraffic
|
refs/heads/master
|
/Car.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 15:34:44 2018
@author: Sofiascope
Car contains the Car and CarPool class
"""
import time
import utils
import pandas as pd
import numpy as np
class Car:
    """ Car class, one vehicle in the simulation.
    x : Segment number where the car is currently in
    y : Position in segment where the car is currently in
    lane : Current lane
    img : Image to print
    DISPSURF : Surface to print on
    m,n : size of the image to print
    wantPassingStart : Boolean to state if a car want to start passing
    isPassing : Boolean to state if car is in a passing process
    goBack : Boolean to state if a car is in a going back process
    timeUp : Timer to enable a car to go back"""
    def __init__(self, x, y, lane, img,DISPSURF):
        # Logical position: segment index and offset within the segment.
        self.x=x
        self.y=y
        self.lane=lane
        # Rendering state: sprite, target surface and sprite dimensions.
        self.img = img
        self.DISPSURF=DISPSURF
        m,n = img.get_rect().size
        self.m=m
        self.n=n
        # Passing / lane-change state machine flags.
        self.wantPassingStart=False
        self.isPassing=False
        self.goBack=False
        self.timeUp=time.time()
        self.changingLane=False
        self.countPassing=0
        # Lap ("leap") counters and score bookkeeping.
        self.leap=0
        self.leapStart=time.time()
        self.numberPoints=500
        self.accident=0
        # Probability-like weight in [0, 1] used when deciding to pass.
        self.confidencePassing=1
    def get_confidencePassing(self):
        return self.confidencePassing
    def set_confidencePassing(self, value):
        self.confidencePassing = value
    def get_accident(self):
        return self.accident
    def update_accident(self):
        # Increment the accident counter for the current leap.
        self.accident+=1
    def reset_accident(self):
        self.accident=0
    def get_Points(self):
        return self.numberPoints
    def get_countPassing(self):
        return self.countPassing
    def get_leap(self):
        return self.leap
    def get_leapStart(self):
        return self.leapStart
    def set_leap(self):
        # Despite the name, this *increments* the leap counter.
        self.leap+=1
    def set_leapStart(self):
        self.leapStart=time.time()
    def updateCountPassing(self):
        self.countPassing+=1
    def resetCountPassing(self):
        self.countPassing=0
    def set_changingLane(self, value):
        self.changingLane=value
    def get_changingLane(self):
        return self.changingLane
    def set_goBack(self, value):
        self.goBack=value
    def get_goBack(self):
        return self.goBack
    def set_timeUp(self):
        self.timeUp=time.time()
    def get_timeUp(self):
        return self.timeUp
    def get_isPassing(self):
        return self.isPassing
    def set_isPassing(self, value):
        self.isPassing=value
    def get_lane(self):
        return self.lane
    def carPrint(self):
        # Blit the sprite centred on its current graphic position.
        self.DISPSURF.blit(self.img, (self.imgx-self.m/2\
                           ,self.imgy-self.n/2))
    def path(self, pathX, pathY):
        # Assign the per-segment coordinate arrays the car will follow.
        self.pathX=pathX
        self.pathY=pathY
    def get_curr_position(self):
        return (self.imgx,self.imgy)
    def update_pos(self):
        """ Update position of car according to the path"""
        # Clamp both indices so a car at the end of its path stays on the
        # last available point instead of raising IndexError.
        self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\
        [min(self.y,len(self.pathX[self.x])-1)]
        self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\
        [min(self.y,len(self.pathY[self.x])-1)]
    def get_imgx(self):
        return self.imgx
    def get_imgy(self):
        return self.imgy
    def set_isBlocked(self, value):
        self.isBlocked=value
    def get_isBlocked(self):
        return self.isBlocked
    def wPassingStart(self):
        return self.wantPassingStart
    def setwPassingStart(self, value):
        self.wantPassingStart=value
class CarPool():
    """ Car Pool class, list of cars
    listCars : List of cars in the map
    listSpeed : List of current speeds (can be modified)
    wantedSpeed : List of initial speed (cannot be modified)"""
    def __init__(self, listCars, listSpeed):
        self.listCars=listCars
        self.listSpeed=listSpeed
        # Copy so later mutations of listSpeed do not alter the baseline.
        self.wantedSpeed=listSpeed.copy()
        # Accumulated per-leap statistics (one row appended per finished leap).
        self.df_stats=pd.DataFrame()
    def get_listCars(self):
        return self.listCars
    def update_cars(self, newCars):
        self.listCars=newCars
    def get_listSpeed(self):
        return self.listSpeed
    def get_listWantSpeed(self):
        return self.wantedSpeed
    def updateDfStats(self, c, listLanes):
        """Append one stats row for car c at the end of a leap, then reset
        its per-leap counters."""
        i = self.listCars.index(c)
        lp=c.get_leap()
        points=c.numberPoints
        accidents=c.get_accident()
        confidence = c.get_confidencePassing()
        if(lp==0):
            # First leap: no elapsed-time data yet, record zeros.
            testArray=pd.DataFrame(np.array([i, c.get_countPassing(), 0, 0, points,0,confidence])).T
        else:
            # Average speed from the lane circumference and the leap duration.
            t = time.time()-c.get_leapStart()
            lane=listLanes.get_lane(c.get_lane()).get_radius()
            v=2*np.pi*lane*t/1000
            testArray=pd.DataFrame(np.array([i, c.get_countPassing(), lp, round(v,2), points,accidents,confidence])).T
        if(len(self.df_stats)==0):
            self.df_stats=testArray
            self.df_stats.columns=["CarId","NumberPassing","Leap","AverageSpeed","NumberPoints","Accidents","Confidence"]
        else:
            testArray.columns=["CarId","NumberPassing","Leap","AverageSpeed","NumberPoints","Accidents","Confidence"]
            self.df_stats=pd.concat([self.df_stats, testArray])
        c.reset_accident()
        c.set_leapStart()
        c.set_leap()
        self.listCars[i]=c
    def get_Stats(self):
        return self.df_stats
    def record_Start(self):
        # Dump the accumulated stats to stats.xlsx.
        # NOTE(review): ExcelWriter.save() is deprecated in recent pandas
        # (use close()); left as-is to preserve behaviour.
        writer = pd.ExcelWriter('stats.xlsx')
        self.df_stats.to_excel(writer,'Sheet1')
        writer.save()
|
{"/Car.py": ["/utils.py"], "/utils.py": ["/Car.py", "/Map.py"], "/Map.py": ["/utils.py"], "/main.py": ["/utils.py"]}
|
28,348
|
sofianelounici/EasyTraffic
|
refs/heads/master
|
/utils.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 15:34:44 2018
@author: Sofiascope
Utils contains a list of useful function
The most important one is updatePosition, where the position of cars is
updated
"""
import numpy as np
import time
import operator
import random
import pygame
from Car import Car, CarPool
from Map import Map, Lane, Passing
def mapStruct(radius, numberVertices):
    """Build one circular lane.

    radius : radius of the circle (pixels), integer
    numberVertices : number of vertices approximating the circle, integer

    Returns (listLines, listPositions): consecutive vertex segments and the
    start point of each segment.
    """
    theta = np.linspace(0, 360, numberVertices)
    # Vertices of the circle, centred on (450, 400).
    x = [radius * np.sin(t * 2 * np.pi / 360) + 450 for t in theta]
    y = [radius * np.cos(t * 2 * np.pi / 360) + 400 for t in theta]
    # Pair up consecutive vertices into line segments.
    listLines = [([x[k], y[k]], [x[k + 1], y[k + 1]]) for k in range(len(x) - 1)]
    listPositions = [segment[0] for segment in listLines]
    # Return the list of vertices and their position
    return listLines, listPositions
def path(listLines):
    """Return the pixel-level path along a list of line segments.

    listLines : list of ((x0, y0), (x1, y1)) segments (vertices).

    Each segment is interpolated into as many points as its integer length.
    Returns (xLines, yLines): one coordinate array/list per segment.
    """
    xLines = []
    yLines = []
    for startM, endM in listLines:
        # Number of interpolated points == integer segment length.
        numPoints = int(distance(startM, endM))
        x = np.linspace(startM[0], endM[0], numPoints)
        dx = endM[0] - startM[0]
        if dx == 0:
            # Vertical segment: the slope formula below would divide by
            # zero (the original crashed here); interpolate y directly at
            # constant x instead.
            y = list(np.linspace(startM[1], endM[1], numPoints))
        else:
            # Compute the slope/intercept and evaluate y along x.
            slope = (endM[1] - startM[1]) / dx
            b = startM[1] - slope * startM[0]
            y = [slope * x[i] + b for i in range(len(x))]
        xLines.append(x)
        yLines.append(y)
    return xLines, yLines
def closest(listLane, point, numberVertices):
    """ Return the point of the map needed to be changed
    listLane : List of lane (1,2,3)
    point : Coordinates of the click point
    numberVertices : integer"""
    distances=[]
    sizes=[]
    for lane in listLane:
        # Skip raw lists; only Lane-like objects expose get_positions().
        if isinstance(lane,(list,))==False:
            sizes.append(len(lane.get_positions()))
            for l in lane.get_positions():
                # Euclidean distance from each lane vertex to the click.
                distances.append(((l[0]-point[0])**2+(l[1]-point[1])**2)**0.5)
    # Index of the globally closest vertex across all lanes.
    minimum= np.argmin(distances)
    # Compute the line and the corner from the line
    whichLane=minimum//numberVertices
    corner=minimum%numberVertices
    # NOTE(review): the corner/lane adjustments below assume each lane
    # contributes exactly numberVertices positions — confirm against
    # mapStruct, which yields numberVertices-1 segments.
    if(whichLane in [1,2]):
        corner=corner+whichLane
    if(corner==numberVertices-1):
        corner=0
        whichLane+=1
    return whichLane, corner
def closestPassing(passing, point, whichLane, isEnd):
""" Return the passing line of the map needed to be change
listLane : List of lane (1,2,3)
whichLane : Lane where the passing lines belong.
isEnd : Boolean, True ==> Consider the end of the passing line"""
segment={}
for p in passing:
if(isEnd):
toTest=p.get_end()
else:
toTest=p.get_start()
p1= ((toTest[0]-point[0])**2+(toTest[1]-point[1])**2)**0.5
segment[p]=p1
closeSegment = min(segment, key=lambda key: segment[key])
return passing.index(closeSegment)
def distance(a, b):
    """Euclidean distance between two points a and b."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx * dx + dy * dy) ** 0.5
def toflatten(pathX, pathY):
    """Flatten parallel lists-of-lists of coordinates into one
    [[x, y], ...] list, in row-major order."""
    return [
        [pathX[row][col], pathY[row][col]]
        for row in range(len(pathX))
        for col in range(len(pathX[row]))
    ]
def okay(flatten, xImg, yImg, listCars, car, distSecur, lane):
    """ Check if there is another car in front of the current one
    flatten : Flatten list
    xImg, yImg : Graphic position of the car
    listCars : List of cars
    car : Index of the current car in listCars
    distSecur : The security distance (in path points)
    lane : The lane where the car is driving

    Returns (toReturn, findC, accident): whether the road ahead is clear,
    the blocking car (or 0), and whether the gap counts as an accident."""
    # Consider all the other cars (pop by index removes the current one).
    listCarToCheck=listCars.copy()
    listCarToCheck.pop(car)
    # Boolean value toReturn
    toReturn=True
    findC=0
    accident=False
    for c in listCarToCheck:
        if c.get_lane()==lane: #Check only car in same lane
            if([xImg, yImg] in flatten):
                posX= flatten.index([xImg, yImg])
                # Check if there is a car in the interval between
                # the current position and the security distance
                # Be careful because car are looping
                if(posX+distSecur>len(flatten)-1):
                    # Wrap-around: split the look-ahead window at the end
                    # of the path and continue from its beginning.
                    toCheckP1 = posX+distSecur-len(flatten)+1
                    toCheckP2 = distSecur-toCheckP1
                    positionToCheck1=\
                    [i for i in range(0,toCheckP1)]
                    positiontoCheck2=\
                    [i for i in range(len(flatten)-toCheckP2, len(flatten))]
                    positionToCheck=positiontoCheck2+positionToCheck1
                else:
                    toCheck=posX+distSecur
                    positionToCheck=[i for i in range(toCheck-distSecur,toCheck)]
                # NOTE(review): `find` is initialised but never set True,
                # so the guard below is always satisfied.
                find=False
                # We modify the boolean value
                for t in positionToCheck:
                    verifX=abs(flatten[t][0]-c.get_imgx())
                    verifY=abs(flatten[t][1]-c.get_imgy())
                    if(verifX<1 and verifY<1 and find==False):
                        # Very close within 90% of the window: accident.
                        if(abs(t-posX)<int(distSecur*0.9)):
                            accident=True
                        toReturn=False
                        findC=c
    return toReturn, findC, accident
def checkPassing(listCars, passingPoints, listLanes, l, wEnd, k,toTransfer):
    """ Start the passing phase when needed
    listCars : List of cars
    passingPoints : (start, end) points of the kth passing line
    listLanes : List of lanes
    l : Index of the current car
    wEnd : End of the passing path
    k : kth passing line
    toTransfer : Indicates changes between lines"""
    close=listCars.get_listCars()[l].get_curr_position()
    closeX=abs(passingPoints[0][0]-close[0])
    closeY=abs(passingPoints[0][1]-close[1])
    # NOTE(review): testReturn is assigned but never used here.
    testReturn=False
    # Check if the car want to pass, to start the process
    if(listCars.get_listCars()[l].wPassingStart()==True and \
       (closeX<1 and closeY<1)):
        # Switch the car onto the passing path (virtual lane 3+toTransfer).
        pathX, pathY = listLanes.get_path_passing(toTransfer, k)
        listCars.get_listCars()[l].path(pathX, pathY)
        listCars.get_listCars()[l].x=0
        listCars.get_listCars()[l].y=0
        listCars.get_listCars()[l].lane=3+toTransfer
        listCars.get_listCars()[l].setwPassingStart(False)
        listCars.get_listCars()[l].set_isPassing(True)
        # Restore the car's original wanted speed while passing.
        listCars.get_listSpeed()[l]=listCars.get_listWantSpeed()[l]
        listCars.get_listCars()[l].update_pos()
    closeX=abs(passingPoints[1][0]-close[0])
    closeY=abs(passingPoints[1][1]-close[1])
    # Once the car choose the passing line, change path
    if(listCars.get_listCars()[l].wPassingStart()==False and \
       (closeX<1 and closeY<1) and \
       listCars.get_listCars()[l].get_isPassing()==True):
        # Reached the end of the passing line: merge onto the target lane.
        pathX, pathY = listLanes.get_path_lane(1+toTransfer)
        listCars.get_listCars()[l].path(pathX, pathY)
        listCars.get_listCars()[l].x=wEnd
        listCars.get_listCars()[l].y=0
        listCars.get_listCars()[l].lane=1+toTransfer
        listCars.get_listCars()[l].set_isPassing(False)
        listCars.get_listCars()[l].setwPassingStart(True)
        listCars.get_listCars()[l].update_pos()
        listCars.get_listCars()[l].set_goBack(True)
        listCars.get_listCars()[l].set_timeUp()
    return listCars, listLanes
def goBack(listCars, passingPoints, listLanes, l, wEnd, k,toTransfer):
    """ Start the going back phase when needed
    listCars : List of cars
    passingPoints : (start, end) points of the kth passing line
    listLanes : List of lanes
    l : Index of the current car
    wEnd : End of the passing path
    k : kth passing line
    toTransfer : Indicates changes between lines

    Returns (listCars, listLanes, testReturn) where testReturn is True
    once the car has fully merged back onto a regular lane."""
    close=listCars.get_listCars()[l].get_curr_position()
    closeX=abs(passingPoints[0][0]-close[0])
    closeY=abs(passingPoints[0][1]-close[1])
    testReturn=False
    # A car can only go back after 500ms
    listCars.get_listSpeed()[l]=listCars.get_listWantSpeed()[l]
    if(time.time()-listCars.get_listCars()[l].get_timeUp()>0.5):
        # Check if the car want to pass, to start the process
        if(listCars.get_listCars()[l].wPassingStart()==True and \
           (closeX<1 and closeY<1)):
            pathX, pathY = listLanes.get_path_passing(toTransfer, k)
            listCars.get_listCars()[l].path(pathX, pathY)
            listCars.get_listCars()[l].x=0
            listCars.get_listCars()[l].y=0
            listCars.get_listCars()[l].lane=3+toTransfer
            listCars.get_listCars()[l].setwPassingStart(False)
            listCars.get_listCars()[l].set_isPassing(True)
            listCars.get_listCars()[l].update_pos()
        closeX=abs(passingPoints[1][0]-close[0])
        closeY=abs(passingPoints[1][1]-close[1])
        # Once the car choose the passing line, change path
        if(listCars.get_listCars()[l].wPassingStart()==False and \
           (closeX<1 and closeY<1)):
            # Map the go-back transfer index to its destination lane.
            # NOTE(review): `lane` is only bound when toTransfer is 2 or 3;
            # any other value would raise UnboundLocalError below — confirm
            # callers only pass 2 or 3.
            if(toTransfer==2):
                lane=0
            if(toTransfer==3):
                lane=1
            pathX, pathY = listLanes.get_path_lane(lane)
            listCars.get_listCars()[l].path(pathX, pathY)
            listCars.get_listCars()[l].x=wEnd
            listCars.get_listCars()[l].y=0
            listCars.get_listCars()[l].lane=lane
            listCars.get_listCars()[l].setwPassingStart(True)
            listCars.get_listCars()[l].update_pos()
            testReturn=True
            listCars.get_listCars()[l].set_goBack(True)
    return listCars, listLanes, testReturn
def toBlit(listCars):
    """Build the (rendered label, position) tuples for the high-score
    sidebar, cars ordered by points, highest first."""
    listToBlit=[]
    myfont = pygame.font.SysFont("monospace", 15)
    # POINTS
    values = rankingCarPoint(listCars)
    # Sort ascending by points, then reverse for highest-first display.
    sortDict = sorted(values.items(), key=lambda x: x[1])
    sortDict=sortDict[::-1]
    label = myfont.render("HIGH SCORE", 30, (0,0,0))
    listToBlit.append((label, (1100, 80)))
    x=0
    for i in range(len(sortDict)):
        text="CAR "+str(sortDict[i][0])+ " : "+str(sortDict[i][1])
        label = myfont.render(text, 30, (0,0,0))
        # One row per car, 20px apart.
        listToBlit.append((label, (1050, 160+x)))
        x+=20
    return listToBlit
def decideChangeLane(c):
    """ Decide whether or not a car should change line
    c : car

    Uses a random draw against p=0.5: cars that already passed are
    increasingly likely to start passing again; others may go back."""
    nbPassing = c.get_countPassing()
    p=0.5
    if(nbPassing>0):
        # The more passes already made, the lower the threshold p/nbPassing,
        # so re-passing becomes more likely.
        if(random.random()>p/nbPassing):
            c.setwPassingStart(True)
            c.set_changingLane(1)
    else:
        if(random.random()<p):
            c.set_goBack(True)
            c.setwPassingStart(True)
            c.set_changingLane(2)
    c.resetCountPassing()
    return c
def rankingCar(df):
    """Map each CarId to the AverageSpeed recorded at its latest Leap.

    df : stats DataFrame with CarId, Leap and AverageSpeed columns.
    """
    speeds = {}
    for car_id in df.CarId.unique():
        car_rows = df[df.CarId == car_id]
        latest = np.max(car_rows.Leap)
        # First row at the latest leap holds the most recent average speed.
        speeds[car_id] = car_rows[car_rows.Leap == latest].AverageSpeed.values[0]
    return speeds
def carAccidents(df):
    """Map each CarId to the Accidents count recorded at its latest Leap.

    df : stats DataFrame with CarId, Leap and Accidents columns.
    """
    accidents = {}
    for car_id in df.CarId.unique():
        car_rows = df[df.CarId == car_id]
        latest = np.max(car_rows.Leap)
        # First row at the latest leap holds that leap's accident count.
        accidents[car_id] = car_rows[car_rows.Leap == latest].Accidents.values[0]
    return accidents
def rankingCarPoint(listCars):
    """Map each car's index in the pool to its current points total."""
    cars = listCars.get_listCars()
    return {idx: car.numberPoints for idx, car in enumerate(cars)}
def checkSpeedPoint(listCars, df):
    """Reward or punish each car's points based on its latest average speed.

    listCars : CarPool to update in place
    df : stats DataFrame consumed by rankingCar"""
    values = rankingCar(df)
    for key in values:
        speed = values[key]
        # NOTE(review): 281.25 acts as the speeding threshold — the origin
        # of this constant is not documented here; confirm before changing.
        if(speed>281.25):
            print("Car ",int(key), " has been punished !")
            listCars.get_listCars()[int(key)].numberPoints-=350
        else:
            listCars.get_listCars()[int(key)].numberPoints+=50
    return listCars
def checkAccidentPoint(listCars, df):
    """Apply random (log-normal) penalties to cars that had accidents in
    their latest leap, and adjust each car's passing confidence.

    listCars : CarPool to update in place
    df : stats DataFrame consumed by carAccidents"""
    values = carAccidents(df)
    for key in values:
        speed = values[key]
        confidencePassing = listCars.get_listCars()[int(key)].get_confidencePassing()
        if(speed>0):
            print("Car ",int(key), " has been punished !")
            # Log-normal penalty centred around 250 points (then /3).
            sigma=0.9
            mu=np.log(250)
            penalty=int(np.random.lognormal(mu, sigma)/3)
            listCars.get_listCars()[int(key)].numberPoints-=penalty
            # Shrink confidence multiplicatively, floored at 0.2.
            newConfidence=1-np.random.lognormal(mu, sigma)/1000
            newValue=max(confidencePassing*newConfidence, 0.2)
            listCars.get_listCars()[int(key)].set_confidencePassing(newValue)
        else:
            # No accident: slowly recover confidence, capped at 1.
            newValue=min(confidencePassing*1.1, 1)
            listCars.get_listCars()[int(key)].set_confidencePassing(newValue)
    return listCars
def rewardPoint(listCars):
    """Boost the wanted speed of the first-ranked car and throttle cars
    with negative points.

    NOTE(review): the sort is ascending, so rankCar[0] is the car with the
    *lowest* points despite the "fastest" message — confirm the intent."""
    values = rankingCarPoint(listCars)
    sortDict = sorted(values.items(), key=lambda x: x[1])
    rankCar = [sortDict[i][0] for i in range(len(sortDict))]
    print("Car ",int(rankCar[0])," is the fastest !")
    listCars.wantedSpeed[int(rankCar[0])]+=75
    for i in range(len(sortDict)):
        if(sortDict[i][1]<0):
            # Negative points: reduce the wanted speed, but never below 0
            # (keep the current value if the reduction would go negative).
            curr=listCars.wantedSpeed[int(rankCar[i])]
            newValue=curr-80
            if(newValue<0):
                newValue=curr
            listCars.wantedSpeed[int(rankCar[i])]=newValue
    return listCars
def rewardSpeed(listCars, df):
    """Grant a +75 wanted-speed bonus to the car ranked first by rankingCar.

    NOTE(review): the sort is ascending, so index 0 is the car with the
    *lowest* average speed — mirrors rewardPoint; confirm the intent.
    """
    values = rankingCar(df)
    ranked = [car_id for car_id, _ in sorted(values.items(), key=lambda item: item[1])]
    listCars.wantedSpeed[int(ranked[0])] += 75
    return listCars
def updatePosition(l, listStart, listCars, listLanes, distSecur, flatten, \
                   listPassingLine, wEnd):
    """ Update the position of the cars :
    l : Current car
    listStart : List of time markers, keeping track of refresh
    listLanes : List of lanes
    distSecur : Security distance
    flatten : Flatten list of paths
    listPassingLine : List of passing/Go back Lines
    wEnd : End of the passing path"""
    # Check time markers to see if we can update position
    # (higher speed => shorter refresh interval 1/speed).
    if(time.time()-listStart[l]>(1/(listCars.get_listSpeed()[l]))):
        listCars.get_listCars()[l].update_pos()
        # Test to check if there is a car in front of the current car
        toReturn, findC, accident =okay(flatten[listCars.get_listCars()[l].get_lane()], \
                    listCars.get_listCars()[l].get_imgx(), \
                    listCars.get_listCars()[l].get_imgy(), \
                    listCars.get_listCars(), l, distSecur, \
                    listCars.get_listCars()[l].get_lane())
        if(accident):
            listCars.get_listCars()[l].update_accident()
        if(toReturn):
            # Road ahead is clear: advance one point, rolling over to the
            # next segment at the end of the current one.
            listCars.get_listCars()[l].y=listCars.get_listCars()[l].y+1
            if(listCars.get_listCars()[l].y>=\
               len(listLanes.get_path(listCars.get_listCars()[l].get_lane())[0]\
               [listCars.get_listCars()[l].x])):
                listCars.get_listCars()[l].x=listCars.get_listCars()[l].x+1
                listCars.get_listCars()[l].y=0
        else:
            # If there is one, we update the speed of the car to match the
            # blocking car, then maybe decide to pass (confidence-weighted).
            m = listCars.get_listCars().index(findC)
            changeSpeed=listCars.get_listSpeed()[m]
            listCars.get_listSpeed()[l]=changeSpeed
            confidencePassing=listCars.get_listCars()[l].get_confidencePassing()
            if(random.random()<confidencePassing):
                listCars.get_listCars()[l].setwPassingStart(True)
        # Check if the car is in a Going Back phase
        if(listCars.get_listCars()[l].get_goBack()==False):
            # Map the current lane to its passing-line group index.
            toTransfer = listCars.get_listCars()[l].get_lane()
            if(toTransfer in [3]):
                toTransfer=0
            if(toTransfer in [2,4]):
                toTransfer=1
            for k in range(len(listPassingLine[toTransfer])):
                passingPoints = (listPassingLine[toTransfer][k].get_start(),\
                                 listPassingLine[toTransfer][k].get_end())
                listCars, listLanes, = checkPassing(listCars, passingPoints, \
                                                    listLanes, l, wEnd[k], k, toTransfer)
        #Otherwise, we can check if there is a need for passing
        else:
            if(listCars.get_listCars()[l].get_changingLane()%2==0):
                # Map the current lane to its go-back line group index.
                toTransfer = listCars.get_listCars()[l].get_lane()
                if(toTransfer in [6,2]):
                    toTransfer=3
                if(toTransfer in [5,1]):
                    toTransfer=2
                for k in range(len(listPassingLine[toTransfer])):
                    passingPoints = (listPassingLine[toTransfer][k].get_start(),\
                                     listPassingLine[toTransfer][k].get_end())
                    listCars, listLanes, toReturn = goBack(listCars, passingPoints, \
                                                           listLanes, l, wEnd[k], k, toTransfer)
                    if(toReturn):
                        # Merge complete: reset the passing state machine.
                        if(listCars.get_listCars()[l].get_changingLane()==2):
                            listCars.get_listCars()[l].set_changingLane(0)
                        else:
                            listCars.get_listCars()[l].updateCountPassing()
                        listCars.get_listCars()[l].set_goBack(False)
                        listCars.get_listCars()[l].setwPassingStart(False)
                        listCars.get_listCars()[l].set_isPassing(False)
                        listCars.get_listCars()[l].set_timeUp()
            else:
                # Odd changingLane value: abort the lane change entirely.
                listCars.get_listCars()[l].set_changingLane(0)
                listCars.get_listCars()[l].set_goBack(False)
                listCars.get_listCars()[l].setwPassingStart(False)
                listCars.get_listCars()[l].set_isPassing(False)
                listCars.get_listCars()[l].set_timeUp()
        listStart[l]=time.time()
    return listStart, listCars, listLanes, flatten
def computeCars(numberCars, nbLanes,numberVertices,DISPSURF):
    """Create a CarPool of numberCars cars placed at random, non-overlapping
    segment positions over the first three lanes, with random speeds.

    numberCars : number of cars to create
    nbLanes : number of lanes tracked for occupancy
    numberVertices : number of vertices per lane
    DISPSURF : surface each car will draw on"""
    occupied=[]
    speeds=[]
    cars=[]
    for i in range(nbLanes):
        occupied.append([])
    for i in range(numberCars):
        carImg = pygame.image.load('spacestation.png').convert_alpha()
        # NOTE(review): lane is drawn from 0-2 regardless of nbLanes —
        # confirm nbLanes is always 3.
        lane=random.randint(0,2)
        # Re-draw until the segment on this lane is free.
        x=random.randint(0,numberVertices)
        while(x in occupied[lane]):
            x=random.randint(0,numberVertices)
        occupied[lane].append(x)
        car=Car(x,0,lane,carImg,DISPSURF)
        cars.append(car)
        # Initial wanted speed between 100 and 250.
        speeds.append(random.randint(100, 250))
    listCars=CarPool(cars, speeds)
    return listCars
def prepareMap(numberVertices, DISPSURF, distSecur):
    """ Build the map
    numberVertices : Number of vertices
    DISPSURF : The surface to print on
    distSecur : Security Distance

    Returns the car pool, the Map, per-car time markers, the passing
    start/end pairs, the flattened paths, the four passing-line groups and
    the segment indices where each passing line ends."""
    # Load cars
    listCars = computeCars(24, 3,numberVertices, DISPSURF)
    listLanes=[]
    # Load lanes (three concentric circles, radii 350/335/330)
    lane0 = Lane(0, 350, numberVertices)
    lane0.path()
    lane1 = Lane(0, 335, numberVertices)
    lane1.path()
    lane2 = Lane(0, 330, numberVertices)
    lane2.path()
    # Load the passing lanes (outward) and the go-back lanes (inward)
    listStartEnd, listPassingLine1, listPassingLinePoint1, whereEnd =\
    computePassingLane(lane0, lane1, numberVertices)
    _, listPassingLine2, listPassingLinePoint2, _ =\
    computePassingLane(lane1, lane2, numberVertices)
    _, listPassingLine3, listPassingLinePoint3, _ =\
    computeGetBack(lane1, lane0, numberVertices)
    _, listPassingLine4, listPassingLinePoint4, _ =\
    computeGetBack(lane2, lane1, numberVertices)
    # Build the map
    listLanes = Map(lane0, lane1, lane2, \
    listPassingLine1, listPassingLine2,listPassingLine3,listPassingLine4,\
    listPassingLinePoint1,listPassingLinePoint2,listPassingLinePoint3,\
    listPassingLinePoint4)
    # Compute the initial paths of the cars
    for c in listCars.get_listCars():
        for l in range(len(listLanes.get_listLanes())):
            if (c.get_lane()==l):
                X, Y = listLanes.get_listLanes()[l].get_path_tuple()
                c.path(X,Y)
                c.update_pos()
    # Compute the flatten array (one flat [x, y] list per lane, then per
    # passing line, indexed consistently with car lane numbers)
    flatten=[toflatten(lane0.get_pathX(),lane0.get_pathY()),\
    toflatten(lane1.get_pathX(),lane1.get_pathY()),\
    toflatten(lane2.get_pathX(),lane2.get_pathY())]
    for i in range(len(listPassingLine1)):
        flatten.append(toflatten(\
        listPassingLine1[i].get_pathX(),\
        listPassingLine1[i].get_pathY()))
    for i in range(len(listPassingLine2)):
        flatten.append(toflatten(\
        listPassingLine2[i].get_pathX(),\
        listPassingLine2[i].get_pathY()))
    for i in range(len(listPassingLine3)):
        flatten.append(toflatten(\
        listPassingLine3[i].get_pathX(),\
        listPassingLine3[i].get_pathY()))
    for i in range(len(listPassingLine4)):
        flatten.append(toflatten(\
        listPassingLine4[i].get_pathX(),\
        listPassingLine4[i].get_pathY()))
    # Init the time markers
    listStart=[time.time() for c in listCars.get_listCars()]
    return listCars, listLanes, listStart, listStartEnd, flatten,\
    listPassingLine1 , listPassingLine2, listPassingLine3,\
    listPassingLine4, whereEnd
def computePassingLane(laneA, laneB, numberVertices):
    """ Build the passing lanes from laneA to laneB
    laneA : The starting lane
    laneB : The ending lane
    numberVertices : The number of vertices

    Returns (listStartEnd, listPassingLine, listPassingLinePoint, whereEnd)."""
    listStartEnd=[]
    listPassingLine=[]
    listPassingLinePoint=[]
    whereEnd=[]
    # Compute list of starting/ending points: every other vertex i connects
    # to vertex i+2 on the target lane.
    i=0
    while(i<numberVertices-2):
        listStartEnd.append((i, i+2))
        i=i+2
    # Build the passing line objects
    for i in range(len(listStartEnd)):
        a,b=listStartEnd[i]
        start=laneA.get_positions()[a]
        end=laneB.get_positions()[b]
        p=Passing(start, end, 0, a, b)
        p.path()
        listPassingLine.append(p)
        listPassingLinePoint.append((p.get_start(),\
        p.get_end()))
        whereEnd.append(b)
    return listStartEnd, listPassingLine, listPassingLinePoint, whereEnd
def computeGetBack(laneA, laneB, numberVertices):
    """ Build the passing lanes from laneA to laneB (Going Back)
    laneA : The starting lane
    laneB : The ending lane
    numberVertices : The number of vertices

    The construction is identical to computePassingLane (same start/end
    vertex pairing, same Passing objects), so delegate to it instead of
    keeping a line-for-line duplicate of that code in sync.

    Returns (listStartEnd, listPassingLine, listPassingLinePoint, whereEnd).
    """
    return computePassingLane(laneA, laneB, numberVertices)
|
{"/Car.py": ["/utils.py"], "/utils.py": ["/Car.py", "/Map.py"], "/Map.py": ["/utils.py"], "/main.py": ["/utils.py"]}
|
28,349
|
sofianelounici/EasyTraffic
|
refs/heads/master
|
/Map.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 15:34:44 2018
@author: Sofiascope
Map contains the Lane, Passing and Map classes
Map mostly manages the modification of the map
"""
import utils
class Lane:
    """ Lane class, describing a lane
    nb : Number of the lane
    radius, numberVertices : geometric parameters of the lane"""
    def __init__(self, nb, radius, numberVertices):
        self.radius=radius
        # Vertex segments and their start positions, built by utils.mapStruct.
        self.lines, self.positions = utils.mapStruct(radius, numberVertices)
        self.nb=nb
    def get_radius(self):
        return self.radius
    def path(self):
        # Interpolated per-segment pixel coordinates along the lane.
        self.pathX, self.pathY = utils.path(self.lines)
    def get_path_tuple(self):
        return (self.pathX, self.pathY)
    def get_pathX(self):
        return self.pathX
    def get_pathY(self):
        return self.pathY
    def get_lines(self):
        return self.lines
    def get_positions(self):
        return self.positions
    def updatePath(self, pathX, pathY):
        # Replace the lane's path (used when the map is edited).
        self.pathX=pathX
        self.pathY=pathY
    def get_nb(self):
        return self.nb
class Passing():
    """ Passing class, describing a passing line
    x = Starting point of the passing line,
    as a position in the list of vertices
    y = Ending point of the passing line,
    as a position in the list of vertices
    startPoint : Starting point, in cartesian coordinates
    endPoint : Ending point, in cartesian coordinates """
    def __init__(self, startPoint, endPoint, startLane, x, y):
        self.startPoint=startPoint
        self.endPoint=endPoint
        self.startLane=startLane
        self.x=x
        self.y=y
    def get_start(self):
        return self.startPoint
    def get_end(self):
        return self.endPoint
    def path(self):
        # Interpolate the single start->end segment via utils.path.
        self.pathX, self.pathY = utils.path([(self.startPoint, \
        self.endPoint)])
    def get_path_tuple(self):
        return (self.pathX, self.pathY)
    def get_pathX(self):
        return self.pathX
    def get_pathY(self):
        return self.pathY
    def get_positions(self):
        return [self.startPoint, self.endPoint]
    def update_points(self, start, end):
        # Move the line's endpoints and recompute its path.
        self.startPoint=start
        self.endPoint=end
        self.path()
class Map():
    """ Map class, list of Passing Lane and Regular Lane
    lane0, lane1, lane2 : Regular lanes
    passing01, passing12, passing10,passing21 : Passing Lines
    passingPoint01, passingPoint12, passingPoint10, passingPoint21 :
    passing lines position points"""
    def __init__(self, lane0, lane1, lane2, \
                 passing01, passing12, passing10,passing21,\
                 passingPoint01, passingPoint12, passingPoint10, passingPoint21):
        self.lane0=lane0
        self.lane1=lane1
        self.lane2=lane2
        # The 0->1 passing lines are stored under the generic name `passing`.
        self.passing=passing01
        self.passing12=passing12
        self.passing10=passing10
        self.passing21=passing21
        self.passing01Point=passingPoint01
        self.passing12Point=passingPoint12
        self.passing10Point=passingPoint10
        self.passing21Point=passingPoint21
        # NOTE(review): passing12Point appears twice, passing01Point is
        # missing, and the last entry is the Passing list itself rather than
        # its points -- looks like a copy/paste slip; confirm before use.
        self.passingPoint=[self.passing10Point,self.passing12Point,\
                           self.passing12Point, self.passing21]
        # Index layout relied on by the getters and update() below:
        # 0-2: regular lanes; 3: 0->1, 4: 1->2, 5: 1->0, 6: 2->1 passings.
        self.listLanes=[self.lane0, self.lane1, self.lane2, self.passing, \
                        self.passing12, self.passing10,self.passing21]
    def get_listLanes(self):
        # Full list: three regular lanes followed by four passing-line lists.
        return self.listLanes
    def get_lane(self,nb):
        return self.listLanes[nb]
    def get_passing(self, toTransfer, nb):
        # NOTE(review): `nb` is unused here -- confirm intended signature.
        return self.listLanes[3+toTransfer]
    def get_path_lane(self, nb):
        return self.listLanes[nb].get_path_tuple()
    def get_path_passing(self, toTransfer, nb):
        return self.listLanes[3+toTransfer][nb].get_path_tuple()
    def get_path(self, nb):
        # Regular lanes expose their path directly; for passing-line lists
        # the first line's path is returned.
        if(nb<3):
            return self.listLanes[nb].get_path_tuple()
        else:
            return self.listLanes[nb][0].get_path_tuple()
    def update(self, x0,x1, flatten, whichLane, corner, listCars, \
               listStartEnd):
        """ Update the map, trigger by the user click
        x0,x1 : Click coordinates
        flatten : Flatten array
        whichLane : Lane to modify
        corner : Vertex of the lane to modify
        listCars : List of cars
        listStartEnd : list of passing line points"""
        # Move the dragged vertex to the click position.
        self.listLanes[whichLane].get_positions()[corner]=[x0,x1]
        # Update the lanes
        for k in range(len(self.listLanes[whichLane].get_positions())-1):
            self.listLanes[whichLane].get_lines()[k]=\
                (self.listLanes[whichLane].get_positions()[k],\
                 self.listLanes[whichLane].get_positions()[k+1])
        # Close the polygon: last segment joins the last vertex to the first.
        self.listLanes[whichLane].get_lines()[-1]=\
            (self.listLanes[whichLane].get_positions()[-1],\
             self.listLanes[whichLane].get_positions()[0])
        # For each lane, we locate the closest passing line / going back lanes
        # to modify them (graphically and modify their path)
        # NOTE(review): the flatten indices differ per branch
        # (3+whichLane / 5+whichLane / 6+whichLane) -- verify the intended
        # flat-array layout against utils.prepareMap.
        if(whichLane==0):
            #0 to 1
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane, 0)
            start=self.listLanes[0].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[1].get_positions()[listStartEnd[idx][1]]
            self.listLanes[3][idx].update_points(start, end)
            self.passing = self.listLanes[3]
            pathX, pathY=self.listLanes[3][idx].get_path_tuple()
            flatten[3+whichLane]=utils.toflatten(pathX, pathY)
            #1 to 0
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane, 1)
            start=self.listLanes[1].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[0].get_positions()[listStartEnd[idx][1]]
            self.listLanes[5][idx].update_points(start, end)
            self.passing = self.listLanes[5]
            pathX, pathY=self.listLanes[5][idx].get_path_tuple()
            flatten[5+whichLane]=utils.toflatten(pathX, pathY)
        if(whichLane==2):
            #1 to 2
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane, 1)
            start=self.listLanes[1].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[2].get_positions()[listStartEnd[idx][1]]
            self.listLanes[4][idx].update_points(start, end)
            pathX, pathY=self.listLanes[4][idx].get_path_tuple()
            flatten[3+whichLane]=utils.toflatten(pathX, pathY)
            #2 to 1
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane, 0)
            start=self.listLanes[2].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[1].get_positions()[listStartEnd[idx][1]]
            self.listLanes[6][idx].update_points(start, end)
            pathX, pathY=self.listLanes[6][idx].get_path_tuple()
            flatten[6+whichLane]=utils.toflatten(pathX, pathY)
        if(whichLane==1):
            # Lane 1 touches all four passing directions.
            #0 to 1
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane,1)
            start=self.listLanes[0].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[1].get_positions()[listStartEnd[idx][1]]
            self.listLanes[3][idx].update_points(start, end)
            pathX, pathY=self.listLanes[3][idx].get_path_tuple()
            flatten[3+whichLane]=utils.toflatten(pathX, pathY)
            #1 to 0
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane,0)
            start=self.listLanes[1].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[0].get_positions()[listStartEnd[idx][1]]
            self.listLanes[5][idx].update_points(start, end)
            pathX, pathY=self.listLanes[5][idx].get_path_tuple()
            flatten[5+whichLane]=utils.toflatten(pathX, pathY)
            #1 to 2
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane,0)
            start=self.listLanes[1].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[2].get_positions()[listStartEnd[idx][1]]
            self.listLanes[4][idx].update_points(start, end)
            pathX, pathY=self.listLanes[4][idx].get_path_tuple()
            flatten[3+whichLane]=utils.toflatten(pathX, pathY)
            #2 to 1
            idx = utils.closestPassing(self.passing, (x0, x1), whichLane, 1)
            start=self.listLanes[2].get_positions()[listStartEnd[idx][0]]
            end=self.listLanes[1].get_positions()[listStartEnd[idx][1]]
            self.listLanes[6][idx].update_points(start, end)
            pathX, pathY=self.listLanes[6][idx].get_path_tuple()
            flatten[6+whichLane]=utils.toflatten(pathX, pathY)
        # Update the position of the car
        if(whichLane in [0,1,2]):
            pathXl, pathYl= utils.path(self.listLanes[whichLane].get_lines())
            self.listLanes[whichLane].updatePath(pathXl, pathYl)
            flatten[whichLane]=utils.toflatten(pathXl, pathYl)
            for c in listCars:
                # Re-path only cars currently driving on the modified lane.
                if(c.get_lane()==whichLane):
                    c.path(pathXl,pathYl)
                    c.update_pos()
        return listCars, flatten
|
{"/Car.py": ["/utils.py"], "/utils.py": ["/Car.py", "/Map.py"], "/Map.py": ["/utils.py"], "/main.py": ["/utils.py"]}
|
28,350
|
sofianelounici/EasyTraffic
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 15:34:44 2018
@author: Sofiascope
"""
import pygame
import sys
from pygame.locals import QUIT
import numpy as np
import time
import utils
import random
pygame.init()

# Window geometry and palette.
display_width = 1500
display_height = 800
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (227, 27, 27)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
RCB = (27, 27, 27)

crashed = False
DISPSURF = pygame.display.set_mode((display_width, display_height), 0, 32)
pygame.display.set_caption("Easy Traffic")

# Simulation parameters: vertices per lane, safety distance between cars.
numberVertices=500
distSecur=40

# Build the map, the cars and all passing lines.
listCars, listLanes, listStart, listStartEnd, flatten, \
    listPassingLine, listPassingLine2, listPassingLine3, \
    listPassingLine4, whereEnd = \
    utils.prepareMap(numberVertices, DISPSURF, distSecur)
print(len(listPassingLine))

# Timers driving the periodic reward / radar events.
changeSpeedTime=time.time()
radar=time.time()
listToBlit=utils.toBlit(listCars)

# Main loop: advance cars, fire timed events, handle input, draw a frame.
while not crashed:
    # Cars that completed a lap are re-scored and may change lane.
    for c in listCars.get_listCars():
        for l in [0,1,2]:
            if(c.x==len(listLanes.get_path_lane(l)[0])):
                listCars.updateDfStats(c, listLanes)
                c=utils.decideChangeLane(c)
                c.x=0
                c.j=0
    # Every 20s: reward cars according to the accumulated stats.
    if(time.time()-changeSpeedTime>20):
        df = listCars.get_Stats()
        listCars=utils.rewardSpeed(listCars, df)
        changeSpeedTime=time.time()
    # Every 30-40s: a "radar" pass checks speed and accidents.
    # NOTE(review): `df` is only bound inside the 20s branch above; if this
    # branch ever fires first a NameError follows -- confirm the timing
    # guarantees or bind `df` before the loop.
    if(time.time()-radar>30+np.random.randint(10)):
        listCars=utils.checkSpeedPoint(listCars, df)
        listCars=utils.rewardPoint(listCars)
        listCars=utils.checkAccidentPoint(listCars, df)
        listToBlit=utils.toBlit(listCars)
        radar=time.time()
    # User input: quit, or drag a lane vertex (press picks, release applies).
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
            listCars.record_Start()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            x0, x1 = pygame.mouse.get_pos()
            whichLane, corner = utils.closest(listLanes.get_listLanes(),\
                                              [x0,x1], numberVertices)
        elif event.type == pygame.MOUSEBUTTONUP:
            DISPSURF.fill(WHITE)
            x0, x1 = pygame.mouse.get_pos()
            newListCar, flatten = listLanes.update(x0,x1, flatten, whichLane, \
                                                   corner, listCars.get_listCars(),\
                                                   listStartEnd)
            listCars.update_cars(newListCar)
    # Advance every car along its (possibly updated) path.
    for l in range(len(listCars.get_listCars())):
        listStart, listCars, listLanes, flatten =\
            utils.updatePosition(l, listStart, listCars, \
                                 listLanes, distSecur, \
                                 flatten, [listPassingLine, listPassingLine2,\
                                           listPassingLine3, listPassingLine4],\
                                 whereEnd)
    # Redraw the frame from the pre-rendered blit list.
    DISPSURF.fill(WHITE)
    for v in listToBlit:
        DISPSURF.blit(v[0], v[1])
    #pygame.draw.polygon(DISPSURF, RED, listLanes.get_lane(0)\
    #.get_positions(), 2)
    #pygame.draw.polygon(DISPSURF, RED, listLanes.get_lane(1)\
    #.get_positions(), 2)
    #pygame.draw.polygon(DISPSURF, RED, listLanes.get_lane(2)\
    #.get_positions(), 2)
    #for c in listCars.get_listCars():
    #c.carPrint()
    pygame.display.update()

pygame.quit()
quit()
|
{"/Car.py": ["/utils.py"], "/utils.py": ["/Car.py", "/Map.py"], "/Map.py": ["/utils.py"], "/main.py": ["/utils.py"]}
|
28,377
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/models.py
|
from django.db import models
from django.conf import settings
import uuid
class TransactionBase(models.Model):
    """Abstract base model holding the fields shared by pay.ir transactions."""
    # Locally generated primary key.
    # NOTE(review): Django's UUIDField forces max_length to 32 internally,
    # so the max_length=16 passed here has no effect -- confirm and drop.
    uuid_id = models.UUIDField(
        primary_key=True, default=uuid.uuid4, editable=False, max_length=16)
    # The paying user; nullable so anonymous payments can be recorded.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
    # Gateway transaction id; unique per transaction.
    trans_id = models.CharField(unique=True, max_length=31)
    factor_number = models.CharField(max_length=31, blank=True)
    card_number = models.CharField(max_length=16)
    # auto_now: updated on every save, not only at creation.
    timestamp = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
class UserTransaction(TransactionBase):
    """Concrete transaction table; inherits every field from TransactionBase."""
    pass
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,378
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/urls.py
|
from django.conf.urls import include, url
from . import views
# URL namespace, e.g. reverse('pay:callback').
app_name = 'pay'
urlpatterns = [
    # Receives the PayForm POST and redirects the user to the gateway.
    url(r'^form/processor/$',views.FormProcessorView.as_view() ,name = 'form_processor'),
    # Return URL the gateway redirects the user back to after payment.
    url(r'^callback/$',views.CallBackView.as_view() , name = 'callback'),
]
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,379
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/forms.py
|
from django import forms
from .settings import PAY_DESCRIPTION_MAX_LENGTH, PAY_FORM_PROCESSOR, METHOD_FIELD_PREFIX
import copy
# A MethodField does nothing by itself: it only carries a prefixed signal
# method name so FormProcessorView can identify and resolve it later.
class MethodField(forms.CharField):
    """Hidden char field whose initial value names a signal method.

    The value is METHOD_FIELD_PREFIX + signal_method_name ("methodfield||__"
    by default, configurable in settings); the prefix is what marks the
    field for later resolution.
    """

    def __init__(self, signal_method_name, *args, **kwargs):
        assert isinstance(signal_method_name,
                          str), 'signal_method_name must be a str'
        prefixed_name = METHOD_FIELD_PREFIX + signal_method_name
        super().__init__(
            initial=prefixed_name,
            widget=forms.HiddenInput,
            *args, **kwargs
        )
class PayForm(forms.Form):
    """Hidden form whose fields are POSTed to the pay.ir form processor.

    Fields listed in `methodable_fileds` may receive a MethodField instead
    of a literal initial value; FormProcessorView resolves those via signal
    methods. Fields in `after_callback_handel` are deferred until the
    gateway callback.
    """
    # this use for form action
    pay_form_processor = PAY_FORM_PROCESSOR
    form_name = forms.CharField(widget=forms.HiddenInput, required=False)
    # extraData won't be sent to the pay.ir api;
    # it only lets the programmer pass through extra data of his/her own
    extraData = forms.CharField(widget=forms.HiddenInput, required=False)
    amount = forms.IntegerField(widget=forms.HiddenInput)
    mobile = forms.IntegerField(widget=forms.HiddenInput, required=False)
    factorNumber = forms.IntegerField(
        widget=forms.HiddenInput, required=False)
    description = forms.CharField(
        max_length=PAY_DESCRIPTION_MAX_LENGTH, widget=forms.HiddenInput, required=False)
    return_url = forms.CharField(widget=forms.HiddenInput)
    cancel_url = forms.CharField(widget=forms.HiddenInput)
    # Fields whose initial value may be a MethodField placeholder.
    methodable_fileds = ['extraData','amount', 'factorNumber','mobile',
                         'description', 'return_url', 'cancel_url' ]
    # Fields handled only after the gateway callback.
    after_callback_handel = [
        'return_url', 'cancel_url'
    ]

    def __init__(self, initial=None, change_methodable_fileds=True, *args, **kwargs):
        """Split MethodField initials out of `initial`.

        MethodField instances found among `methodable_fileds` are removed
        from the initial data and installed as real form fields instead;
        FormProcessorView resolves them later.

        Fix: the original iterated `initial` unconditionally, raising
        TypeError whenever the form was built without initial data.
        """
        # Assume all initial data is non-methodable to start with.
        non_methodfiled_data = copy.deepcopy(initial)
        methodfiled_data = {}
        # Then pop initial entries that are instances of MethodField and
        # re-attach them as form fields after the base-class init.
        if change_methodable_fileds and initial:
            for key in initial:
                if key in self.methodable_fileds and isinstance(initial[key], MethodField):
                    methodfiled_data[key] = non_methodfiled_data.pop(key)
        super().__init__(initial=non_methodfiled_data, *args, **kwargs)
        for key, value in methodfiled_data.items():
            self.fields[key] = value
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,380
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/migrations/0001_initial.py
|
# Generated by Django 2.2.1 on 2019-05-28 19:54
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial schema for the pay app (UserTransaction table).

    NOTE: generated by Django -- make schema changes via new migrations
    rather than editing this file.
    NOTE(review): the `user` FK present in models.py is absent here and
    `dependencies` is empty; presumably a later migration adds it -- verify.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='UserTransaction',
            fields=[
                ('uuid_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('trans_id', models.CharField(max_length=31, unique=True)),
                ('factor_number', models.CharField(blank=True, max_length=31)),
                ('card_number', models.CharField(max_length=16)),
                ('timestamp', models.DateTimeField(auto_now=True)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,381
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/admin.py
|
from django.contrib import admin
from . import models
class UTAdmin(admin.ModelAdmin):
    """Admin changelist for UserTransaction showing every stored field."""
    list_display = ('uuid_id','user','trans_id','factor_number' , 'card_number' , 'timestamp')

admin.site.register(models.UserTransaction , UTAdmin)
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,382
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/views.py
|
from django.shortcuts import render, redirect, Http404
from django.views import generic
from django.utils.translation import gettext_lazy as _
from .forms import PayForm
from . import settings
from .settings import cache
from .models import UserTransaction
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
import requests
import importlib
def get_error_render(request, errorMessage, errorCode, state):
    """Render the pay service error page for a failed gateway exchange.

    `state` labels which phase failed (e.g. 'send' or 'varify').
    """
    context = {
        'errorMessage': errorMessage,
        'errorCode': errorCode,
        'state': state,
    }
    return render(request, 'pay/service_error.html', context)
def get_signal_method(method_name):
    """Resolve a signal method by name from settings.PAY_SIGNAL_METHODS.

    A string entry is treated as a dotted path 'module.attr' and imported
    lazily; anything else (a callable, or None when missing) is returned
    unchanged.
    """
    method = settings.PAY_SIGNAL_METHODS.get(method_name, None)
    if not isinstance(method, str):
        return method
    mod_name, func_name = method.rsplit('.', 1)
    module = importlib.import_module(mod_name)
    return getattr(module, func_name)
def get_signal_from_value(value , kwargs):
    """Extract the method name from a method-field value and invoke it.

    Raises ImproperlyConfigured when the named method is not registered
    in PAY_SIGNAL_METHODS.
    """
    method_name = value.split(settings.METHOD_FIELD_PREFIX)[1]
    method = get_signal_method(method_name)
    if method:
        return method(**kwargs)
    raise ImproperlyConfigured(
        'method with name "%s" does not exist in PAY_SIGNAL_METHODS' % (
            method_name)
    )
def data_adapter(request, form_name, data: dict):
    """Resolve method-field placeholders in submitted form data in place.

    Values prefixed with METHOD_FIELD_PREFIX are replaced by the result of
    the signal method they name. Fields listed in
    PayForm.after_callback_handel are deliberately left untouched so they
    can be handled after the gateway callback.
    """
    signal_kwargs = {'request': request, 'form_name': form_name}
    for key, value in data.items():
        if key in PayForm.after_callback_handel:
            # Kept as-is until the callback phase.
            data[key] = value
            continue
        if value.startswith(settings.METHOD_FIELD_PREFIX):
            # Method-field value: fire the corresponding signal method.
            data[key] = get_signal_from_value(value, kwargs=signal_kwargs)
    return data
def call_or_redirect(method,kwargs):
    """Invoke a signal method when `method` is a method-field token,
    otherwise treat it as a URL and redirect."""
    is_signal = method.startswith(settings.METHOD_FIELD_PREFIX)
    return get_signal_from_value(method, kwargs) if is_signal else redirect(method)
class FormProcessorView(generic.View):
    """Receives a PayForm POST, requests a payment token from pay.ir and
    redirects the user to the gateway."""
    def post(self, request):
        request_data = self.get_form_data()
        # -1 marks an anonymous sender; CallBackView checks it again later.
        request_data['user'] = -1
        if request.user.is_authenticated:
            request_data['user'] = request.user.id
        service_data = { # it is better not to cache this data
            'api': settings.PAY_API_KEY,
            'redirect': settings.PAY_CALLBACK_URL,
        }
        service_data.update(request_data)
        # Ask pay.ir for a one-time payment token.
        pay_request = requests.post(
            settings.PAY_REQUEST_TOKEN_URL, json=service_data)
        data = pay_request.json()
        if str(data['status']) in settings.OK_STATUS:
            # Stash the request data for the callback, keyed by the token.
            cache[data['token']] = request_data
            return redirect(settings.PAY_REDIRECT_USER_URL+data['token'])
        else:
            return get_error_render(request, data['errorMessage'], data['errorCode'], 'send')
    def get_form_data(self):
        """Validate the POSTed PayForm (after method-field resolution) and
        return its cleaned data; raise ImproperlyConfigured when invalid."""
        data = data_adapter(
            request=self.request,
            form_name=self.request.POST.get('form_name', None),
            data=self.request.POST.copy()
        )
        form = PayForm(data=data, change_methodable_fileds=False)
        if form.is_valid():
            return form.cleaned_data
        else:
            raise ImproperlyConfigured('invalid PayForm , check inputs \n '+str(form.errors.as_text()))
class CallBackView(generic.View):
    """Handles the gateway's redirect back after a payment attempt."""
    def get(self, request):
        status = request.GET.get('status', None)
        token = request.GET.get('token', None)
        if not status or not token:
            raise Http404
        # pop(): each token is consumable exactly once.
        data = cache.pop(token, None)
        if not data: # no data means the cache entry expired or the token is wrong
            raise Http404
        cached_user = data.pop('user')
        cancel_url = data.pop('cancel_url')
        return_url = data.pop('return_url')
        data['request'] = request
        # Gateway reported failure: hand off to the cancel handler/URL.
        if not (status in settings.OK_STATUS):
            return call_or_redirect(cancel_url , kwargs = data)
        # if one of data sender or giver is authenticated check their id
        # if both are anonymous ignore check
        if not (cached_user == -1 and not request.user.is_authenticated):
            if not request.user.id == cached_user:
                return render(request,'pay/user_auth_error.html')
        # NOTE(review): on verification failure verify_callback returns an
        # HttpResponse, and dict.update() on it raises TypeError -- confirm
        # the intended failure path.
        data.update(self.verify_callback(token))
        # Guard against replayed callbacks for an already-saved transaction.
        if UserTransaction.objects.filter(trans_id = data['transId']).exists():
            return render(request,'pay/duplicate_trans_id.html')
        self.save_to_db(data)
        return call_or_redirect(return_url,data)
    def verify_callback(self, token):
        ''' Verify the callback with pay.ir; return the gateway payload on
        success, otherwise render the service-error page. '''
        pay_request = requests.post(settings.PAY_VARIFY_URL, json={
            'api': settings.PAY_API_KEY,
            'token': token,
        })
        data = pay_request.json()
        if str(data['status']) in settings.OK_STATUS:
            return data
        return get_error_render(self.request, data['errorMessage'], data['errorCode'], 'varify')
    def save_to_db(self, data):
        """Persist the verified transaction; `user` is None when anonymous."""
        user = self.request.user
        if not self.request.user.is_authenticated:
            user = None
        ut = UserTransaction(
            user=user,
            trans_id=data['transId'],
            factor_number=data['factorNumber'] or '',
            card_number=data['cardNumber'] or '',
        )
        ut.save()
        return ut
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,383
|
amirhoseinbidar/django-payir
|
refs/heads/master
|
/pay/settings.py
|
# Django library.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
# Extra library
from cachetools import TTLCache
# CONTENT ----------------------------------------------------------
# https://pay.ir/docs/gateway/
# Maximum length pay.ir accepts for the payment description field.
PAY_DESCRIPTION_MAX_LENGTH = 255
PAY_REQUEST_TOKEN_URL = 'https://pay.ir/pg/send'
PAY_REDIRECT_USER_URL = 'https://pay.ir/pg/'
PAY_VARIFY_URL = 'https://pay.ir/pg/verify'
# Gateway status values treated as success ('1' on callbacks, 'OK' in JSON).
OK_STATUS = ('1' , 'OK',)
ERROR_STATUS = '0'
# ------------------------------------------------------------------
def _pay(var, default):
    """Look up 'PAY_<var>' in Django settings, falling back to *default*.

    :param var: Variable to be retrieved (without the 'PAY_' prefix).
    :type var: str
    :param default: Default value if the variable is not defined.
    :return: Value corresponding to 'var'.
    """
    key = 'PAY_' + var
    try:
        return getattr(settings, key, default)
    except ImproperlyConfigured:
        # Settings may be unconfigured while auto-generating documentation.
        return default
# NOTE(review): hard-coded development domain; override for production.
PAY_USED_DOMAIN = 'http://localhost:8000/'
PAY_API_KEY = _pay('API_KEY', 'test')
PAY_CALLBACK_URL = _pay('CALLBACK_URL', PAY_USED_DOMAIN+'pay/callback/')
PAY_FORM_PROCESSOR = _pay('FORM_PROCESSOR','/pay/form/processor/' )
PAY_SIGNAL_METHODS = _pay('SIGNAL_METHODS' , {})
# EXTRA --------------------------------------------------------------
# Marker prepended to form values that name a signal method (see MethodField).
METHOD_FIELD_PREFIX = "methodfield||__"
# In-memory TTL cache holding per-token request data until the callback.
cache = TTLCache(2**20,1800) #1800s = 30min
|
{"/pay/forms.py": ["/pay/settings.py"], "/pay/views.py": ["/pay/forms.py", "/pay/settings.py", "/pay/models.py"]}
|
28,423
|
Lxhouse/CSS-Platform
|
refs/heads/master
|
/QuestionServer.py
|
# -*- encoding:utf-8 -*-
import hashlib
import json
from flask import Flask
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask_cors import CORS
from flask import Flask, jsonify, request, render_template, send_from_directory, session
from time import time
from flask_pymongo import PyMongo, DESCENDING, ASCENDING
import sys
import pandas as pd
import jieba.analyse
import sys
import db
# import wechatsogou
import os
from hashlib import md5
import requests
from sklearn.decomposition import PCA as sklearnPCA
from matplotlib import pyplot as plt
import csv
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.manifold import TSNE as sklearnTSNE
from sklearn.cluster import KMeans
from textblob import TextBlob
import re, collections
from flask import send_file, send_from_directory
from numpy import array, zeros, argmin, inf, equal, ndim
# from scipy.spatial.distance import cdist
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer
from sklearn import metrics
from sklearn.naive_bayes import BernoulliNB
import random
from scipy import stats
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from os import path
from PIL import Image
import numpy as np
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from docx import Document
from scipy.interpolate import lagrange#拉格朗日函数
import warnings
warnings.filterwarnings("ignore")
from minepy import MINE
# import numpy as np
import pymongo
from pymongo import MongoClient
import hashlib
import sys
# user = "root"
# password = "ls269031126"
charset = "utf8"
app = Flask(__name__)
app.secret_key = 'zju'
app.config['MONGO_DBNAME'] = 'question'
app.config['MONGO_URI'] = 'mongodb://qs:double@10.72.100.5:8027/question'
app.url_map.strict_slashes = False
mongo = PyMongo(app)
# CORS(app)
# 跨域支持
# Enable CORS on every response. The decorator registers the hook once;
# the original also called app.after_request(after_request) explicitly,
# which registered the same hook a second time -- that redundant
# registration has been dropped (the header assignment is idempotent,
# so behavior is unchanged).
@app.after_request
def after_request(resp):
    """Attach a permissive CORS header to every outgoing response."""
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
@app.route('/')
def index():
    """Serve the single-page front end."""
    return render_template("index.html")
# Module-level state shared across request handlers.
# Fix: `global` statements at module scope are no-ops and were removed;
# plain assignment is sufficient here.
file_name = []   # uploaded file names (handlers currently use db instead)
loginName = ""   # NOTE(review): upload() rebinds a local of the same name,
                 # so this module-level value is never actually updated.
# File upload
@app.route("/upload/<name>", methods=['post', 'get'])
def upload(name):
    """Save an uploaded file under the user's directory and record it in db.

    NOTE(review): `name` (from the URL) and f.filename (client supplied)
    are joined into a filesystem path unsanitized -- path-traversal risk;
    confirm and sanitize (e.g. werkzeug.utils.secure_filename).
    """
    print(name)
    # Rebinds a local only; the module-level loginName is not updated.
    loginName = name
    f = request.files['file']
    print(f.filename)
    # file_name.append(f.filename)
    f.save(name + '/'+ f.filename)
    state = db.uploadFile(name, f.filename)
    print(state)
    print(loginName)
    # return 'http://0.0.0.0/'
    return 'https://mo.zju.edu.cn/css/'
# PCA dimensionality reduction
@app.route('/pca/<name>', methods=['post', 'get'])
def pca(name):
    """Run PCA on the user's latest uploaded CSV and write the projection.

    Expects JSON {"pca": <n_components>}; writes the projected rows to
    PCA_<file> and returns its route as JSON, or {"route": "nofile"} when
    the user has no upload.
    """
    pca_dim = request.get_json()  # requested target dimension (JSON body)
    data = []
    traffic_feature = []
    traffic_target = []
    fileN = db.searchFile(name)
    fileN = fileN[0]['filename']
    if fileN == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    # Fix: use a context manager so the file is closed even when a row
    # fails to parse (the original open()/close() pair leaked on
    # exceptions); this also matches the TSNE handler's style.
    with open(name + '/' + fileN) as csvFile:
        csv_file = csv.reader(csvFile)
        for content in csv_file:
            content = list(map(float, content))
            if len(content) != 0:
                data.append(content)
                traffic_feature.append(content[0:-2])  # all but last two columns
                traffic_target.append(content[-1])     # last column is the target
    # Scale features to [0, 1] before projecting.
    min_max_scaler = preprocessing.MinMaxScaler()
    traffic_feature = min_max_scaler.fit_transform(traffic_feature)
    dim = int(pca_dim["pca"])
    sklearn_pca = sklearnPCA(n_components=dim)
    sklearn_transf = sklearn_pca.fit_transform(traffic_feature)
    with open(name + '/' + 'PCA_' + fileN, 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        for line in sklearn_transf:
            csv_writer.writerow(line)
    ret = {"route": 'PCA_' + fileN}
    return json.dumps(ret)
# t-SNE dimensionality reduction
@app.route('/TSNE/<name>', methods=['post', 'get'])
def TSNE(name):
    """Run t-SNE on the user's latest uploaded CSV and write the embedding.

    Expects JSON {"TSNE": <n_components>}; returns the result file route,
    or {"route": "nofile"} when the user has no upload.
    """
    TSNE_dim = request.get_json() #bytes
    data=[]
    traffic_feature=[]
    traffic_target=[]
    # fileN = file_name.pop()
    # print(name)
    fileN = db.searchFile(name)
    # print(fileN)
    fileN = fileN[0]['filename']
    # print(fileN)
    if fileN == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    with open(name + '/'+ fileN, 'r', newline='') as csvFile:
        csv_file = csv.reader(csvFile)
        for content in csv_file:
            content=list(map(float,content))
            if len(content)!=0:
                data.append(content)
                traffic_feature.append(content[0:-2])  # all but last two columns
                traffic_target.append(content[-1])     # last column is the target
    # Scale features to [0, 1] before embedding.
    min_max_scaler = preprocessing.MinMaxScaler()
    traffic_feature = min_max_scaler.fit_transform(traffic_feature)
    # print('data=',data)
    # print('traffic_feature=',traffic_feature)
    # print('traffic_target=',traffic_target)
    dim = int(TSNE_dim["TSNE"])
    sklearn_TSNE = sklearnTSNE(n_components=dim)
    sklearn_transf = sklearn_TSNE.fit_transform(traffic_feature)
    with open(name + '/' + 'TSNE_'+fileN, 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        for line in sklearn_transf:
            # print(line)
            csv_writer.writerow(line)
    ret = {"route": 'TSNE_'+fileN}
    return json.dumps(ret)
# k-means clustering
@app.route('/kmeans/<name>', methods=['post', 'get'])
def kmeans(name):
    """Cluster the uploaded CSV with k-means.

    Expects JSON {"kmeans": <n_clusters>}; writes the per-row labels
    followed by the cluster centers to kmeans_<file> and returns its route.
    """
    cluster_dim = request.get_json() #bytes
    data=[]
    cluster = int(cluster_dim["kmeans"])
    # print(name)
    fileN = db.searchFile(name)
    # print(fileN)
    fileN = fileN[0]['filename']
    # print(fileN)
    if fileN == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    with open(name + '/'+ fileN, 'r', newline='') as csvFile:
        csv_file = csv.reader(csvFile)
        for content in csv_file:
            content=list(map(float,content))
            if len(content)!=0:
                data.append(content)
    estimator = KMeans(n_clusters=cluster)  # build the clustering estimator
    estimator.fit(data)  # run the clustering
    label_pred = estimator.labels_  # per-sample cluster labels
    centroids = estimator.cluster_centers_  # cluster centers
    with open(name + '/' + 'kmeans_'+fileN, 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        csv_writer.writerow(["label"])
        for line in label_pred:
            # print(line)
            temp = str(line)
            # print(temp)
            csv_writer.writerow([temp])
        csv_writer.writerow(["clusterCenter"])
        for line in centroids:
            # print(line)
            csv_writer.writerow(line)
    ret = {"route": 'kmeans_'+fileN}
    return json.dumps(ret)
# One-sample T test
@app.route('/Ttest/<name>', methods=['post', 'get'])
def Ttest(name):
    """One-sample t-test of the uploaded CSV's columns against Tnum.

    Expects JSON {"Tnum": <population mean>}; writes the p-value(s) and a
    significance verdict to Ttest_<file> and returns its route as JSON.
    """
    cluster_dim = request.get_json() #bytes
    data=[]
    Tnum = float(cluster_dim["Tnum"])
    # print(name)
    fileN = db.searchFile(name)
    # print(fileN)
    fileN = fileN[0]['filename']
    # print(fileN)
    if fileN == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    with open(name + '/'+ fileN, 'r', newline='') as csvFile:
        csv_file = csv.reader(csvFile)
        for content in csv_file:
            content=list(map(float,content))
            if len(content)!=0:
                data.append(content)
    # ttest_1samp tests each column; result[1] is the p-value array.
    result = stats.ttest_1samp(data, Tnum)
    print("result\n")
    print(result)
    with open(name + '/' + 'Ttest_'+fileN, 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        csv_writer.writerow(result[1])
        # NOTE(review): result[1] is an array -- this comparison raises
        # "truth value is ambiguous" for files with more than one column;
        # confirm inputs are single-column or reduce with .all()/.any().
        if result[1] < 0.05:
            csv_writer.writerow(["The difference is significant"])
        else:
            csv_writer.writerow(["The difference is not significant"])
    ret = {"route": 'Ttest_'+fileN}
    return json.dumps(ret)
# Time-series DTW analysis
@app.route('/dtw/<name>', methods=['post', 'get'])
def dtw(name):
    """Compute the dynamic-time-warping distance between the user's two
    uploaded single-column CSV series; write the cost matrix and the final
    distance to DTW_result.csv and return its route as JSON."""
    fileN = db.searchFile(name)
    isfile = fileN[0]['filename']
    # print(fileN)
    if isfile == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    # DTW needs two series; the user-facing message is kept verbatim.
    if len(fileN) < 2:
        ret = {"route": '需要上传两个文件以进行时序列dtw分析'}
        return json.dumps(ret)
    data1=[]
    data2=[]
    file1 = fileN[0]['filename']
    file2 = fileN[1]['filename']
    # Only the first column of each file is used as the series values.
    csvFile1 = open(name + '/'+ file1)
    csv_file1 = csv.reader(csvFile1)
    for content in csv_file1:
        content = list(map(float,content))
        if len(content)!=0:
            data1.append(float(content[0]))
    csvFile1.close()
    print('data1=',data1)
    csvFile2 = open(name + '/'+ file2)
    csv_file2 = csv.reader(csvFile2)
    for content in csv_file2:
        content = list(map(float,content))
        if len(content)!=0:
            data2.append(float(content[0]))
    csvFile2.close()
    print('data2=',data2)
    r, c = len(data1), len(data2)
    # D0 adds an infinite border row/column so the DP below never walks off
    # the matrix edge; D1 is a view into its interior.
    D0 = zeros((r+1,c+1))
    D0[0,1:] = inf
    D0[1:,0] = inf
    D1 = D0[1:,1:]
    # shallow copy -- D1 is a numpy view: writing D1 writes into D0
    # print D1
    # NOTE(review): euclidean_distances expects 2D arrays; feeding it the
    # scalars data1[i]/data2[j] raises on current scikit-learn -- confirm
    # the pinned version, or use abs(data1[i]-data2[j]) instead.
    for i in range(r):
        for j in range(c):
            D1[i,j] = euclidean_distances(data1[i],data2[j])
    # build the raw pairwise distance matrix (kept for debugging output)
    M = D1.copy()
    for i in range(r):
        for j in range(c):
            D1[i,j] += min(D0[i,j],D0[i,j+1],D0[i+1,j])
    # core of the algorithm: dynamic-programming accumulation of the
    # minimal warping cost
    i,j = array(D0.shape) - 2
    # backtrack the optimal warping path from the bottom-right corner
    # print i,j
    p,q = [i],[j]
    while(i>0 or j>0):
        tb = argmin((D0[i,j],D0[i,j+1],D0[i+1,j]))
        if tb==0 :
            i-=1
            j-=1
        elif tb==1 :
            i-=1
        else:
            j-=1
        p.insert(0,i)
        q.insert(0,j)
    print(M)
    # raw distance matrix
    print(zip(p,q))
    # matched warping path (NOTE: a zip object prints unevaluated in Py3)
    print(D1)
    # cost matrix (accumulated distance matrix)
    print(D1[-1,-1])
    dis = []
    dis.append(D1[-1,-1])
    print(dis)
    # sequence distance
    with open(name + '/'+ 'DTW_result.csv', 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        csv_writer.writerow(["Cost Matrix"])
        for line in D1:
            # print(line)
            csv_writer.writerow(line)
        csv_writer.writerow(["Sequence distance"])
        csv_writer.writerow(dis)
    ret = {"route": 'DTW_result.csv'}
    return json.dumps(ret)
# Load the training-set CSV.
def get_train_dataset(name, filename):
    """Read `name/filename` as CSV and return its non-empty rows as float
    lists, shuffled in place."""
    with open(name + '/' + filename, 'r', newline='') as src:
        rows = [list(map(float, row)) for row in csv.reader(src) if row]
    random.shuffle(rows)
    return rows
# Load the test-set CSV (order preserved, no shuffling).
def get_test_dataset(name, filename):
    """Read `name/filename` as CSV and return its non-empty rows as float
    lists, in file order."""
    with open(name + '/' + filename, 'r', newline='') as src:
        return [list(map(float, row)) for row in csv.reader(src) if row]
# content = list(map(float,content))
# if len(content)!=0:
# data1.append(float(content[0]))
def get_nb_train_dataset(name, filename):
    """Read the naive-Bayes training CSV: raw (text, label) string rows,
    shuffled in place."""
    with open(name + '/' + filename, 'r', newline='') as src:
        rows = [row for row in csv.reader(src) if row]
    random.shuffle(rows)
    return rows
# Load the naive-Bayes text-classification test set.
def get_nb_test_dataset(name, filename):
    """Read `name/filename` as CSV and return the first cell of every
    non-empty row (the text to classify), in file order."""
    with open(name + '/' + filename, 'r', newline='') as src:
        return [row[0] for row in csv.reader(src) if row]
# Split the naive-Bayes labelled rows into training and validation parts.
def nb_train_and_valid_data(data_):
    """Split (text, label) rows 90/10 and return
    (train_texts, train_labels, valid_texts, valid_labels)."""
    cut = int(0.9 * len(data_))
    head, tail = data_[:cut], data_[cut:]
    train_data_ = [row[0] for row in head]
    train_target_ = [row[1] for row in head]
    test_data_ = [row[0] for row in tail]
    test_target_ = [row[1] for row in tail]
    return train_data_, train_target_, test_data_, test_target_
def train_valid_data(data_):
    """Split numeric rows 90/10 into (features, target) train/valid parts;
    the last column of each row is the target."""
    cut = int(0.9 * len(data_))
    head, tail = data_[:cut], data_[cut:]
    train_data_ = [row[:-1] for row in head]
    train_target_ = [row[-1] for row in head]
    test_data_ = [row[:-1] for row in tail]
    test_target_ = [row[-1] for row in tail]
    return train_data_, train_target_, test_data_, test_target_
# Naive Bayes for text classification
@app.route('/nb/<name>', methods=['post', 'get'])
def nb(name):
    """Train a TF-IDF + multinomial naive-Bayes pipeline on the first
    uploaded CSV ((text, label) rows), evaluate on a 10% split, predict
    the second upload and write the predictions to NaiveBayes.csv."""
    print("first")
    fileN = db.searchFile(name)
    print("second")
    print(fileN)
    isfile = fileN[0]['filename']
    # print(fileN)
    if isfile == "dont_have_file":
        ret = {"route": "nofile"}
        return json.dumps(ret)
    # Needs both a training and a test upload; message kept verbatim.
    if len(fileN) < 2:
        ret = {"route": '需要上传训练集和测试集以进行模型训练与测试'}
        return json.dumps(ret)
    train=[]
    test=[]
    train_file = fileN[0]['filename']
    test_file = fileN[1]['filename']
    print(train_file)
    print(test_file)
    train = get_nb_train_dataset(name, train_file)
    test = get_nb_test_dataset(name, test_file)
    print('train=',train)
    print('test=',test)
    train_data, train_target, valid_data, valid_target = nb_train_and_valid_data(train)
    nbc_6 = Pipeline([
        ('vect', TfidfVectorizer(
        )),
        ('clf', MultinomialNB(alpha=1.0)),
    ])
    nbc_6.fit(train_data, train_target)  # train the multinomial NB pipeline
    valid = nbc_6.predict(valid_data)  # predict on the held-out split
    print(valid)
    count = 0  # number of correct validation predictions
    for left , right in zip(valid, valid_target):
        if left == right:
            count += 1
    print(count/len(valid_target))  # validation accuracy
    predict = nbc_6.predict(test)
    print(predict)
    # Wrap each prediction in a one-element list so csv.writerow emits
    # one label per line.
    p_list = []
    for p in predict:
        list_tem = []
        list_tem.append(p)
        p_list.append(list_tem)
    print(p_list)
    with open(name + '/'+ 'NaiveBayes.csv', 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        for line in p_list:
            # print(line)
            csv_writer.writerow(list(line))
    ret = {"route": 'NaiveBayes.csv'}
    return json.dumps(ret)
# Decision-tree classification.
@app.route('/decisionTree/<name>', methods=['post', 'get'])
def decisionTree(name):
    """Train a Gini decision tree on the uploaded train CSV, predict the test
    CSV, and write <name>/DecisionTree.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    model = tree.DecisionTreeClassifier(criterion='gini')
    model.fit(train_data, train_target)
    # Scores logged for operator feedback only.
    print("train score:", model.score(train_data, train_target))
    print("valid score:", model.score(valid_data, valid_target))
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in model.predict(test)]
    with open(name + '/' + 'DecisionTree.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'DecisionTree.csv'})
# Random-forest classification.
@app.route('/randomForest/<name>', methods=['post', 'get'])
def randomForest(name):
    """Train a 100-tree random forest on the uploaded train CSV, predict the
    test CSV, and write <name>/RandomForest.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    model = RandomForestClassifier(n_estimators=100,
                                   bootstrap=True,
                                   max_features='sqrt')
    model.fit(train_data, train_target)
    print("train score:", model.score(train_data, train_target))
    print("valid score:", model.score(valid_data, valid_target))
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in model.predict(test)]
    with open(name + '/' + 'RandomForest.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'RandomForest.csv'})
# Support-vector-machine classification.
@app.route('/svm/<name>', methods=['post', 'get'])
def svm(name):
    """Train a degree-8 polynomial SVC on the uploaded train CSV, predict the
    test CSV, and write <name>/svm.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    svclassifier = SVC(kernel='poly', degree=8)
    svclassifier.fit(train_data, train_target)
    print("train score:", svclassifier.score(train_data, train_target))
    print("valid score:", svclassifier.score(valid_data, valid_target))
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in svclassifier.predict(test)]
    with open(name + '/' + 'svm.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'svm.csv'})
# Support-vector regression.
@app.route('/svr/<name>', methods=['post', 'get'])
def svr(name):
    """Train an RBF SVR on the uploaded train CSV, predict the test CSV, and
    write <name>/svr.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    # Fixed: the regularisation parameter of sklearn's SVR is capital ``C``;
    # the original ``c=20`` raised TypeError on construction.
    svrlassifier = SVR(kernel='rbf', C=20)
    svrlassifier.fit(train_data, train_target)
    print("train score:", svrlassifier.score(train_data, train_target))
    print("valid score:", svrlassifier.score(valid_data, valid_target))
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in svrlassifier.predict(test)]
    with open(name + '/' + 'svr.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'svr.csv'})
# Multivariate linear regression.
@app.route('/linearRegression/<name>', methods=['post', 'get'])
def linearRegression(name):
    """Fit a linear regression on the uploaded train CSV, predict the test
    CSV, and write <name>/linearRegression.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    linreg = LinearRegression()
    model = linreg.fit(train_data, train_target)
    print(model)
    print(linreg.intercept_)  # fitted intercept
    print(linreg.coef_)       # fitted coefficients, one per feature
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in linreg.predict(test)]
    with open(name + '/' + 'linearRegression.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'linearRegression.csv'})
# Logistic-regression classification.
@app.route('/logisticRegression/<name>', methods=['post', 'get'])
def logisticRegression(name):
    """Fit a logistic regression on the uploaded train CSV, predict the test
    CSV, and write <name>/logisticRegression.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传训练集和测试集以进行模型训练与测试'})
    train = get_train_dataset(name, fileN[0]['filename'])
    test = get_test_dataset(name, fileN[1]['filename'])
    train_data, train_target, valid_data, valid_target = train_valid_data(train)
    logreg = LogisticRegression()
    model = logreg.fit(train_data, train_target)
    print(model)
    print("train score:", model.score(train_data, train_target))
    print("valid score:", model.score(valid_data, valid_target))
    # One prediction per output row (single-column CSV).
    p_list = [[p] for p in model.predict(test)]
    with open(name + '/' + 'logisticRegression.csv', 'w', newline='') as new_file:
        csv.writer(new_file).writerows(p_list)
    return json.dumps({"route": 'logisticRegression.csv'})
# Word-cloud generation from an uploaded text file.
@app.route('/wordCloud/<name>', methods=['post', 'get'])
def workCloud(name):
    """Segment the uploaded text with jieba and render <name>/workcloud.png.

    The cloud is shaped by the mask image in wordcloud/background.jpg.
    """
    fileN = db.searchFile(name)
    txtfile = fileN[0]['filename']
    if txtfile == "dont_have_file":
        return json.dumps({"route": "nofile"})
    print(txtfile)
    # Context manager: the original left the handle open if read() raised.
    with open(name + '/' + txtfile, encoding='gb18030') as f:
        text = f.read()
    print(text)
    # jieba full-mode segmentation, joined by spaces for WordCloud.
    wordlist = jieba.cut(text, cut_all=True)
    wl = " ".join(wordlist)
    print(wl)
    coloring = np.array(Image.open("wordcloud/background.jpg"))
    stopwords = set(STOPWORDS)
    stopwords.add("said")
    # The mask gives the cloud the background image's shape.
    wc = WordCloud(background_color="white", max_words=2000, mask=coloring,
                   max_font_size=50, random_state=42, font_path='Hiragino Sans GB.ttc')
    wc.generate(wl)
    wc.to_file(name + '/' + "workcloud.png")
    return json.dumps({"route": 'workcloud.png'})
# Extract the text of an uploaded PDF into a .docx document.
@app.route('/pdf2word/<name>', methods=['post', 'get'])
def pdf2word(name):
    """Extract all text from the uploaded PDF into <name>/<stem>.docx.

    Returns JSON {"route": "<stem>.docx"} or {"route": "nofile"}.
    """
    fileN = db.searchFile(name)
    pdfname = fileN[0]['filename']
    if pdfname == "dont_have_file":
        return json.dumps({"route": "nofile"})
    document = Document()
    # A single open() replaces the original os.open()+open(fd) pair, and the
    # context manager closes the handle (the original never closed it).
    with open(name + '/' + pdfname, 'rb') as fn:
        parser = PDFParser(fn)
        doc = PDFDocument()
        parser.set_document(doc)
        doc.set_parser(parser)
        resource = PDFResourceManager()
        laparams = LAParams()
        device = PDFPageAggregator(resource, laparams=laparams)
        interpreter = PDFPageInterpreter(resource, device)
        for page in doc.get_pages():
            interpreter.process_page(page)
            layout = device.get_result()
            for out in layout:
                # Only layout objects that carry text are exported.
                if hasattr(out, "get_text"):
                    content = out.get_text().replace(u'\n', u'')
                    document.add_paragraph(
                        content, style='ListBullet'
                    )
    document.save(name + '/' + pdfname[0:-4] + '.docx')
    print('处理完成')
    return json.dumps({"route": pdfname[0:-4] + '.docx'})
# Column-wise Lagrange interpolation helper.
def ploy(s, n, k=3):
    """Lagrange-interpolate the value at position *n* of series *s*.

    Uses up to *k* neighbours on each side of n (excluding n itself),
    dropping any that are null.
    """
    neighbour_idx = list(range(n - k, n)) + list(range(n + 1, n + 1 + k))
    neighbours = s[neighbour_idx]
    neighbours = neighbours[neighbours.notnull()]
    return lagrange(neighbours.index, list(neighbours))(n)
# Fill missing values in an uploaded CSV by Lagrange interpolation.
@app.route('/polydata/<name>', methods=['post', 'get'])
def polydata(name):
    """Interpolate every NaN cell of the uploaded CSV and rewrite the file.

    Returns JSON {"route": "<stem>.csv"} or {"route": "nofile"}.
    """
    fileN = db.searchFile(name)
    csv_file = fileN[0]['filename']
    if csv_file == "dont_have_file":
        return json.dumps({"route": "nofile"})
    csv_data = pd.read_csv(name + '/' + csv_file, low_memory=False)  # avoid dtype warning
    data = pd.DataFrame(csv_data)
    # .at replaces the original chained data[i][j] = ... assignment, which can
    # write to a temporary copy and silently leave the frame unchanged.
    for col in data.columns:
        for j in range(len(data)):
            if np.isnan(data.at[j, col]):
                data.at[j, col] = ploy(data[col], j)
    # NOTE(review): for a ".csv" upload the output name equals the input name,
    # so the source file is overwritten with header dropped -- confirm intended.
    data.to_csv(name + '/' + csv_file[0:-4] + '.csv', index=False, header=False)
    print('处理完成')
    return json.dumps({"route": csv_file[0:-4] + '.csv'})
# Maximal information coefficient (MIC) between two single-column CSV files.
@app.route('/MIC/<name>', methods=['post', 'get'])
def MIC(name):
    """Compute the MIC between the first columns of the two uploaded CSVs and
    write it to <name>/MIC_result.csv. Returns the result route as JSON.
    """
    fileN = db.searchFile(name)
    if fileN[0]['filename'] == "dont_have_file":
        return json.dumps({"route": "nofile"})
    if len(fileN) < 2:
        return json.dumps({"route": '需要上传两个文件以进行MIC计算'})

    def read_column(filename):
        # First value of every non-empty row, as floats; `with` guarantees the
        # handle is closed even when float() raises (the original leaked it).
        values = []
        with open(name + '/' + filename, encoding='utf-8-sig') as src:
            for row in csv.reader(src):
                row = list(map(float, row))
                if len(row) != 0:
                    values.append(float(row[0]))
        return values

    x = read_column(fileN[0]['filename'])
    y = read_column(fileN[1]['filename'])
    print('x=', x)
    print('y=', y)
    mine = MINE(alpha=0.6, c=15)
    mine.compute_score(x, y)
    print("MIC", mine.mic())
    # Persist the score for download.
    with open(name + '/' + 'MIC_result.csv', 'w', newline='') as new_file:
        csv_writer = csv.writer(new_file)
        csv_writer.writerow(["MIC result"])
        csv_writer.writerow([str(mine.mic())])
    return json.dumps({"route": 'MIC_result.csv'})
# Download a processed result file.
@app.route('/download/<name>/<filename>', methods=['post', 'get'])
def download(name, filename):
    """Serve <name>/<filename> (relative to the working directory) as an attachment.

    send_from_directory safe-joins against the base directory, which guards the
    user-supplied path segments against traversal.
    """
    print(name)
    print(filename)
    base_dir = os.getcwd()
    print(base_dir)
    rel_path = name + '/' + filename
    print(rel_path)
    return send_from_directory(base_dir, rel_path, as_attachment=True)
# Remove a file the user uploaded earlier.
@app.route('/removeFile/<name>', methods=["post"])
def removeFile(name):
    """Delete an uploaded file and its database record for user *name*."""
    payload = request.get_json()
    target = payload['name']
    path = name + '/' + target
    if os.path.exists(path):
        db.deleteFile(name, target)   # drop the DB record first
        os.remove(path)               # then the file itself
        result = {"status": "success"}
    else:
        print('no such file:%s' % target)
        result = {"status": "no such file"}
    return json.dumps(result)
# Questionnaire list for the overview page.
@app.route('/list/', methods=['post', 'get'])
def showList():
    """Return every questionnaire matching the posted query as a JSON list."""
    query = request.get_json()
    print(query)
    surveys = db.get_list_info(query)
    fields = ("user", "num", "title", "time", "state",
              "stateTitle", "checked", "question")
    payload = [{f: doc[f] for f in fields} for doc in surveys]
    return json.dumps(payload)
# One user's questionnaires, filtered to a single questionnaire number.
@app.route('/fillList/', methods=['post', 'get'])
def fillList():
    """Return the questionnaire(s) of the posted user whose num matches."""
    print("come")
    query = request.get_json()
    docs = db.fill_list(query['user'])
    fields = ("user", "num", "title", "time", "state",
              "stateTitle", "checked", "question")
    matches = [{f: doc[f] for f in fields}
               for doc in docs if doc['num'] == int(query["num"])]
    print("ret")
    print(matches)
    return json.dumps(matches)
# Create or update a questionnaire.
@app.route('/editList/', methods=['post', 'get'])
def addList():
    """Persist the posted questionnaire JSON via db.addList."""
    payload = request.get_json()
    print("data_receive\n")
    print(payload)
    db.addList(payload)
    return json.dumps({"status": "success"})
# Delete a questionnaire.
@app.route('/deleteList/', methods=['post', 'get'])
def deleteList():
    """Delete the questionnaire described by the posted JSON body."""
    payload = request.get_json()
    db.deleteList(payload)
    return json.dumps({"status": "success"})
# Submit a filled-in questionnaire.
@app.route('/addChoose/', methods=['post', 'get'])
def addChoose():
    """Store the posted questionnaire answers via db.addChoose."""
    payload = request.get_json()
    db.addChoose(payload)
    return json.dumps({"status": "success"})
# Aggregate questionnaire answers for the statistics view.
@app.route('/showData/', methods=['post', 'get'])
def showData():
    """Aggregate all answers of one questionnaire.

    For choice questions (radio/checkbox/rate) returns per-option counts;
    for textarea questions returns the list of free-text answers.
    Response shape: [{question_num: {option: count} | [text, ...]}, ...].
    """
    data = request.get_json()
    print(data)
    choose_result = db.get_choose_info(data['user'])
    choose = []
    # Keep only answers belonging to the requested questionnaire number.
    for i in choose_result:
        if i['num'] == int(data["num"]):
            choose.append({
                "num": i['num'],
                "question": i['question']
            })
    print("choose")
    print(choose)
    result = db.fill_list(data['user'])
    ques = []
    # Locate the questionnaire definition itself (assumed unique per num).
    for i in result:
        if i['num'] == int(data["num"]):
            ques.append({
                "num": i['num'],
                "question": i['question']
            })
    ques = ques[0]
    print("ques")
    print(ques)
    res = []
    for q in ques['question']:
        if q['type'] == 'radio' or q['type'] == 'checkbox' or q['type'] == 'rate':
            # e.g. q = {'num': 'Q1', 'title': ..., 'type': 'radio', 'options': [...]}
            q_dict = dict()
            chos = q['options']
            for cho in chos:
                q_dict[cho] = 0  # start every option at zero
            count_dict = {
                q['num'] : q_dict
            }
            q_num = q['num']
            for c in choose:
                # radio/rate answers are a single string; checkbox answers a list
                if(type(c['question'][q_num]) == str):
                    ans = c['question'][q_num]
                    count_dict[q_num][ans] = count_dict[q_num][ans] + 1
                else:
                    for ans in c['question'][q_num]:
                        count_dict[q_num][ans] = count_dict[q_num][ans] + 1
            res.append(count_dict)
        elif q['type'] == 'textarea':
            # Free text: collect every respondent's answer verbatim.
            q_list = []
            count_dict = {
                q['num'] : q_list
            }
            for c in choose:
                q_list.append(c['question'][q['num']])
            res.append(count_dict)
        else:
            pass
    print("res")
    print(res)
    return json.dumps(res)
@app.route('/keywordExtraction/', methods=["POST"])
def keywordExtraction():
    """Extract TextRank keywords from the posted text, store and return them."""
    payload = request.get_json()
    text = payload["data"]
    jieba.analyse.set_stop_words("data/stopWord.txt")  # custom stop-word list
    keywords = jieba.analyse.textrank(
        text, topK=6,
        allowPOS=('n','nz','v','vd','vn','l','a','d'))
    joined = " ".join(keywords)
    print(joined)
    db.addkeywordExtraction({text: joined})
    return json.dumps(joined)
@app.route('/getWordFrequency/', methods=["POST"])
def getWordFrequency():
    """Count word frequencies in the posted text and return them as JSON."""
    payload = request.get_json()
    data = payload["data"]
    print(data)
    data = data.lower()
    # Replace each punctuation character with a space. The original alternation
    # began with r'\|\~|' -- matching only the two-character sequence "|~" --
    # so lone '|' and '~' slipped through; a character class fixes that.
    data = re.sub(r'[|~`!$%^&*()\-_+=\\\[\]{};:"\',<.>/?]', " ", data)
    print(data)
    wordsBox = data.strip().split()
    print(wordsBox)
    # Keep only tokens that start with an alphanumeric character.
    wordsBoxofWord = [w for w in wordsBox if re.match(r'[a-zA-Z0-9]', w)]
    c = collections.Counter(wordsBoxofWord)
    # Counter is a dict subclass, so it serialises directly.
    return json.dumps(c)
@app.route('/freAnlysisPuc/', methods=["POST"])
def freAnlysisPuc():
    """Return punctuation/character frequencies of the posted text, most common first."""
    payload = request.get_json()
    data = payload["data"]
    print(data)
    text = data.replace('\n', '').lower()
    # Strip letters, digits and spaces so only punctuation/symbols remain.
    for ch in 'abcdefghijklmnopqrstuvwxyz1234567890 ':
        text = text.replace(ch, '')
    mycount = collections.Counter(list(text))
    # most_common() already yields (char, count) pairs sorted by count; the
    # original additionally called sorted() but discarded its return value.
    results = list(mycount.most_common())
    print(results)
    return json.dumps(results)
@app.route('/getSentiment/', methods=["POST"])
def getSentiment():
    """Return TextBlob polarity/subjectivity (rounded to 3 decimals) for the posted text."""
    payload = request.get_json()
    data = payload["data"]
    print(data)
    blob = TextBlob(data)
    polarity = blob.polarity
    subjectivity = blob.subjectivity
    print(polarity)
    print(subjectivity)
    results = {
        'polarity': round(polarity, 3),
        'subjectivity': round(subjectivity, 3),
    }
    return json.dumps(results)
# @app.route('/searchArticle/', methods=["POST"])
# def searchArticle():
# data = request.get_json() #bytes
# article = data["data"];
# print(article)
# ws_api = wechatsogou.WechatSogouAPI(captcha_break_time=3)
# data_receive = ws_api.search_article(article, identify_image_callback=identify_image_callback_ruokuai_sogou)
# results = []
# for i in data_receive:
# print(i)
# results.append(i)
# return json.dumps(results)
# Login endpoint; unknown names are registered on the spot.
@app.route('/login/', methods=["POST"])
def login():
    """Log a user in, auto-registering unknown names and creating their folder."""
    data = request.get_json()
    print("data_receive")
    print(data)
    state = db.login(data)
    print("state")
    print(state["state"])
    if state["state"] == "register_success":
        # exist_ok: a leftover folder from an earlier run must not crash registration
        os.makedirs(data["name"], exist_ok=True)
    ret = {"status": state["state"]}
    return json.dumps(ret)
@app.route('/get_form_completion/<user>/<num>/<qid>', methods=['GET'])
def get_form_completion(user, num, qid):
    """Return every respondent's answer to question *qid* of questionnaire *num*."""
    answers = [doc['question'][qid] for doc in db.user_result(user, num)]
    print(answers)
    return json.dumps(answers)
@app.route('/get_mic_data', methods=['GET','POST'])
def MICvalue():
    """Compute the MIC between two user-chosen MongoDB fields.

    Request body: [[db, collection, field], [db, collection, field]].
    Returns the MIC as JSON, or a Chinese error string when a field is
    non-numeric or the selection yields no paired data.
    """
    choice_user = request.get_json()  # the two (db, collection, field) picks
    flag = True  # stays True only if every paired value parses as a float
    choice0 = {}
    choice1 = {}
    choice0['db'] = choice_user[0][0]
    choice0['col'] = choice_user[0][1]
    choice0['field'] = choice_user[0][2]
    choice1['db'] = choice_user[1][0]
    choice1['col'] = choice_user[1][1]
    choice1['field'] = choice_user[1][2]
    print("choice0", choice0)
    print("choice1", choice1)
    # Fetch the two columns, ordered by _id so rows stay aligned.
    client = MongoClient("10.72.100.5",8027)
    db = client.admin  # NOTE: shadows the module-level `db` helper inside this function
    db.authenticate("double", "double")
    conn = MongoClient(host='mongodb://10.72.100.5:8027/'+'admin',username='double',password='double')
    database = conn[choice0['db']]
    collection0 = database[choice0['col']]
    results0 = collection0.find({},{choice0['field']:1,"_id":0}).sort("_id",pymongo.ASCENDING) # sorted by _id
    collection1 = database[choice1['col']]
    results1 = collection1.find({},{choice1['field']:1,"_id":0}).sort("_id",pymongo.ASCENDING) # sorted by _id
    # Projection: 1 keeps the field, 0 hides it; _id is hidden explicitly.
    rawdata0 = []
    rawdata1 = []
    for result in results0:
        rawdata0.append(result[choice0['field']])
    for result in results1:
        rawdata1.append(result[choice1['field']])
    # Clean in reverse so deletions do not shift unvisited indices.
    for i in range(len(rawdata0)-1,-1,-1): # assumes rawdata0 and rawdata1 have equal length -- TODO confirm
        if rawdata0[i] and rawdata1[i]:
            try: # coerce numeric-looking strings to floats
                rawdata0[i] = float(rawdata0[i])
                rawdata1[i] = float(rawdata1[i])
            except ValueError:
                flag = False # a non-numeric field was selected
        else:
            # Drop the pair when either side is empty/falsy.
            del rawdata0[i]
            del rawdata1[i]
    print("rawdata0", rawdata0)
    print("rawdata1", rawdata1)
    # Compute the MIC.
    m = MINE()
    if rawdata0: # both columns non-empty after cleaning
        if flag:
            # Min-max scale both columns into [0, 1] before scoring.
            min_max_scaler = MinMaxScaler()
            data1_std = min_max_scaler.fit_transform(np.array(rawdata0).reshape(-1, 1))
            data2_std = min_max_scaler.fit_transform(np.array(rawdata1).reshape(-1, 1))
            data1 = data1_std.reshape(1,-1)[0]
            data2 = data2_std.reshape(1,-1)[0]
            m.compute_score(data1,data2)
            return json.dumps(m.mic())
        else:
            return "请选取数值字段"
    else:
        return "您所选取的两个字段无对应数据"
# Build the cascader options for the front end.
@app.route('/getOptions', methods=['post', 'get'])
def getOptions():
    """Build cascader options: database -> collection -> field (system DBs excluded)."""
    client = MongoClient("10.72.100.5", 8027)
    admin_db = client.admin
    admin_db.authenticate("double", "double")
    dblist = client.list_database_names()
    print('所有的数据库:', dblist)
    # Drop the MongoDB system databases.
    dblist = [d for d in dblist if d not in ('admin', 'config', 'local')]
    print(dblist)
    options_list = []
    for db_name in dblist:
        database = client[db_name]
        children = []
        for coll_name in database.list_collection_names():
            sample = database[coll_name].find_one()
            field_names = list(sample.keys())[1:]  # every field except "_id"
            grandchildren = [{'value': f, 'label': f} for f in field_names]
            children.append({'value': coll_name, 'label': coll_name,
                             'children': grandchildren})
        options_list.append({'value': db_name, 'label': db_name,
                             'children': children})
    return json.dumps(options_list)
#博文信息
# @app.route('/API/getStu/<id>')
# def getStu(id=id):
# result = db.get_stu_info(id)
# #id,title,type,pubTime
# ret = []
# for i in result:
# ret.append({
# "id": i[0],
# "name": i[1],
# "age": i[2],
# "sex": i[3],
# "birthtime": i[4].strftime("%Y-%m-%d"),
# "class": i[5],
# "address": i[6],
# "tel":i[7]
# })
# print "ret"+json.dumps(ret)
# return json.dumps(ret)
# @app.route('/favicon.ico')
# def favicon(id=id):
# return app.send_static_file("./static/favicon.ico")
# # token <=> password id
# # verifyToken return isUser userId
# #验证账户
# def verifyToken(token):
# SQLresult = db.verifyToken()
# if token == None:
# return False, "null"
# for i in SQLresult:
# print i
# if token == genCookie(i[0]):
# return True, i[1]
# return False, "null"
# #加密
# def genCookie(passMd5):
# today = datetime.date.today()
# Md5 = hashlib.md5()
# Md5.update(passMd5 + today.strftime("%Y/%m/%d"))
# Md5hex = Md5.hexdigest()
# return Md5hex
if __name__ == '__main__':
    # Development entry point; production serves the app through gunicorn,
    # which binds the same 127.0.0.1:8025 address.
    app.run(host="127.0.0.1", debug=False, port=8025)
|
{"/QuestionServer.py": ["/db.py"]}
|
28,424
|
Lxhouse/CSS-Platform
|
refs/heads/master
|
/db.py
|
# encoding:utf-8 -*-
#!/usr/bin/python3
from pymongo import MongoClient
import hashlib
import sys
import datetime
host = "localhost"
# user = "root"
# password = "ls269031126"
database = "question"
con_list = "list"
charset = "utf8"
# Connect to the database.
def open():
    """Create and return a MongoClient for the question database.

    NOTE(review): this deliberately shadows the builtin ``open``; every helper
    in this module calls it by this name, so it is kept as-is.
    """
    # conn = MongoClient()
    conn = MongoClient(host='mongodb://10.72.100.5:8027/question',username='qs',password='double')
    return conn
# Disconnect from the database.
def close(conn):
    """Release the given MongoDB client connection."""
    conn.close()
# Questionnaire list for the home page.
def get_list_info(user):
    """Return a cursor over questionnaires matching the *user* query dict.

    NOTE(review): the connection is closed before the cursor is consumed;
    confirm callers only rely on results the driver has already buffered.
    """
    conn = open()
    cursor = conn[database][con_list].find(user)
    close(conn)
    return cursor
# All questionnaires belonging to one user.
def fill_list(user):
    """Return every questionnaire document owned by *user*, as a list."""
    conn = open()
    docs = list(conn[database][con_list].find({"user": user}))
    close(conn)
    return docs
# Insert a new questionnaire or update an existing one.
def addList(data):
    """Persist questionnaire *data*.

    num == 1 marks a fresh questionnaire: when the collection already holds
    documents for this user it is renumbered to (current global max num) + 1
    -- numbering spans all users, presumably to keep nums unique; otherwise it
    is inserted as-is. Any other num updates the matching document in place.
    """
    conn = open()
    mydb = conn[database]
    mycol = mydb[con_list]
    result_list = list(mycol.find({"user": data['user']}))
    # Identity check (`is not None`) instead of `!= None`; note data was
    # already dereferenced above, so this guard only documents intent.
    if data is not None:
        if data["num"] == 1 and len(result_list) != 0:
            max_num = list(mycol.find().sort("num", -1).limit(1))[0]['num']
            data["num"] = max_num + 1
            mycol.insert_one(data)
        elif data["num"] == 1 and len(result_list) == 0:
            mycol.insert_one(data)
        else:
            mycol.update_one({"num": data["num"]}, {"$set": data})
    close(conn)
    return
# Delete a questionnaire together with all of its submitted answers.
def deleteList(data):
    """Remove the questionnaire matching *data* and its answers in "choose"."""
    conn = open()
    mydb = conn[database]
    mydb[con_list].delete_one(data)
    mydb["choose"].delete_many(data)
    close(conn)
    return
# Store one filled-in questionnaire.
def addChoose(data):
    """Insert a completed-questionnaire document into the "choose" collection."""
    conn = open()
    mycol = conn[database]["choose"]
    if data is not None:  # identity check instead of `!= None`
        mycol.insert_one(data)
    close(conn)
    return
def get_choose_info(user):
    """Return every answer document submitted to questionnaires of *user*."""
    print(user)
    conn = open()
    query = {"user": user}
    print(query)
    docs = list(conn[database]["choose"].find(query))
    print(docs)
    close(conn)
    return docs
# Persist a keyword-extraction result.
def addkeywordExtraction(data):
    """Insert a {text: keywords} document into the "keyword" collection."""
    conn = open()
    mycol = conn[database]["keyword"]
    if data is not None:  # identity check instead of `!= None`
        mycol.insert_one(data)
    close(conn)
    return
def login(data):
    """Authenticate data['name']/data['password']; unknown names are registered.

    Returns {"state": "register_success" | "login_success" | "login_fail"}.
    """
    conn = open()
    mycol = conn[database]["user"]
    # find_one replaces find()+Cursor.count(): Cursor.count() was removed in
    # pymongo 4, and only the first matching document was ever used anyway.
    existing = mycol.find_one({"name": data['name']})
    if existing is None:
        mycol.insert_one(data)
        state = {"state": "register_success"}
    elif existing["password"] == data["password"]:
        state = {"state": "login_success"}
    else:
        state = {"state": "login_fail"}
    close(conn)  # the other helpers in this module close the connection; do the same
    return state
def uploadFile(name, filename):
    """Record an uploaded file for user *name*; returns an insert-success state."""
    conn = open()
    mycol = conn[database]["uploadFile"]
    mycol.insert_one({"name": name, "filename": filename})
    close(conn)  # consistent with the other helpers (the original never closed)
    return {"state": "insert_success"}
def searchFile(name):
    """Pop all uploaded-file records for user *name*.

    Returns the list of matching documents (each with a 'filename' key) and
    deletes them, or a one-element sentinel list when none exist.
    """
    conn = open()
    mycol = conn[database]["uploadFile"]
    myquery = {"name": name}
    result = list(mycol.find(myquery))
    print(result)
    if len(result) == 0:
        close(conn)
        # Sentinel spelled "dont_have_file" to match every caller's check in
        # QuestionServer.py; the original "donot_have_file" never matched, so
        # the no-file branch could not fire.
        return [{"filename": "dont_have_file"}]
    # Consume-on-read: records are removed once handed to the caller.
    mycol.delete_many(myquery)
    close(conn)
    return result
def deleteFile(name, fileName):
    """Delete every uploadFile record matching *name* and *fileName*."""
    conn = open()
    mycol = conn[database]["uploadFile"]
    myquery = {"name": name, "filename": fileName}
    print(myquery)
    mycol.delete_many(myquery)
    close(conn)  # consistent with the other helpers (the original never closed)
    return
def user_result(user, num):
    """Return all answer documents for questionnaire *num*.

    NOTE(review): *user* is accepted but not used in the query -- presumably
    answers are looked up per questionnaire only; confirm against callers.
    """
    conn = open()
    mycol = conn[database]["choose"]
    result = list(mycol.find({"num": int(num)}))
    close(conn)  # consistent with the other helpers (the original never closed)
    return result
# def college_result(college_num):
# conn = open()
# mydb = conn[database]
# mycol = mydb["choose"]
# myquery = {"num":college_num}
# result = mycol.find(myquery)
# result = list(result[:])
# print(result)
# return
#获取帐号密码
# def APIlogin(result):
# conn, cursor = open()
# result[0] = MySQLdb.escape_string(result[0])
# result[1] = MySQLdb.escape_string(result[1])
# cursor.execute("select user_id,password from login where user_id = '%s'" % result[0])
# result = cursor.fetchall()
# close(conn, cursor)
# return result
# #添加用户,密码两次 md5 加密
# def insertUser(username, password):
# conn, cursor = open()
# today = datetime.date.today()
# Md5 = hashlib.md5()
# Md5.update(password)
# Md5hex = Md5.hexdigest()
# Md52 = hashlib.md5()
# Md52.update(Md5hex)
# password_twice = Md52.hexdigest()
# cursor.execute("insert into user values('1','%s','1','%s','%s')" % (
# username, today.strftime("%Y-%m-%d"), password_twice))
# conn.commit()
# close(conn, cursor)
# return
|
{"/QuestionServer.py": ["/db.py"]}
|
28,425
|
Lxhouse/CSS-Platform
|
refs/heads/master
|
/test.py
|
import pandas as pd
import os

base_path = 'datasets/xia4ngji1dui4jia1ojia4ozhu3nshu4ju4ji2-5d7154881afd9440006d2a54'
# Load the three CSVs of the dataset, then dump their raw values in order.
frames = {stem: pd.read_csv(os.path.join(base_path, stem + '.csv'))
          for stem in ('movies', 'tags', 'ratings')}
print(frames['movies'].values)
print(frames['tags'].values)
print(frames['ratings'].values)
|
{"/QuestionServer.py": ["/db.py"]}
|
28,426
|
Lxhouse/CSS-Platform
|
refs/heads/master
|
/gunicorn_config.py
|
import gevent.monkey
gevent.monkey.patch_all()
import multiprocessing
import os
# Make sure the log directory exists before gunicorn starts writing to it.
if not os.path.exists('log'):
    os.mkdir('log')
debug = True
loglevel = 'debug'
bind = '127.0.0.1:8025'
timeout = 300
# pidfile = 'log/gunicorn.pid'
# errorlog = 'log/gunicorn_error.log'
# accesslog = 'log/gunicorn_access.log'
# Number of worker processes.
# workers = 4
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gunicorn.workers.ggevent.GeventWorker'
x_forwarded_for_header = 'X-FORWARDED-FOR'
|
{"/QuestionServer.py": ["/db.py"]}
|
28,427
|
Lxhouse/CSS-Platform
|
refs/heads/master
|
/temp.py
|
@app.route('/get_mic_data', methods=['GET','POST'])
def MICvalue():
    """Compute the Maximal Information Coefficient between two Mongo fields.

    Expects a JSON body of two [db, collection, field] triples selected by
    the front end.  Returns the MIC as JSON, or a Chinese error string when
    the fields are non-numeric or share no data.
    """
    choice_user = request.get_json()  # the two [db, col, field] picks
    flag = True  # stays True only while every kept value parses as a number
    choice0 = {'db': choice_user[0][0], 'col': choice_user[0][1], 'field': choice_user[0][2]}
    choice1 = {'db': choice_user[1][0], 'col': choice_user[1][1], 'field': choice_user[1][2]}
    print("choice0", choice0)
    print("choice1", choice1)
    # One authenticated connection.  The old code opened a second client only
    # to call authenticate() and never closed it (connection leak).
    # NOTE(review): credentials are hard-coded; move them to configuration.
    conn = MongoClient(host='mongodb://10.72.100.5:8027/'+'admin',username='double',password='double')
    database = conn[choice0['db']]
    # Sort on _id so the two series stay aligned row-for-row.
    results0 = database[choice0['col']].find({},{choice0['field']:1,"_id":0}).sort("_id",pymongo.ASCENDING)
    results1 = database[choice1['col']].find({},{choice1['field']:1,"_id":0}).sort("_id",pymongo.ASCENDING)
    rawdata0 = [doc[choice0['field']] for doc in results0]
    rawdata1 = [doc[choice1['field']] for doc in results1]
    # Clean: drop pairs with a missing value, convert the rest to float.
    # Iterate backwards so deletions do not shift pending indices.
    # assumes rawdata0 and rawdata1 have the same length -- TODO confirm
    for i in range(len(rawdata0)-1,-1,-1):
        if rawdata0[i] and rawdata1[i]:
            try:
                rawdata0[i] = float(rawdata0[i])
                rawdata1[i] = float(rawdata1[i])
            except ValueError:
                # A non-numeric field makes the whole computation moot; the
                # old code kept converting the remaining rows for nothing.
                flag = False
                break
        else:
            del rawdata0[i]
            del rawdata1[i]
    print("rawdata0", rawdata0)
    print("rawdata1", rawdata1)
    # Compute the MIC on min-max scaled series.
    m = MINE()
    if rawdata0:  # both series non-empty after cleaning
        if flag:
            # Map both series into [0, 1] before scoring.
            min_max_scaler = MinMaxScaler()
            data1_std = min_max_scaler.fit_transform(np.array(rawdata0).reshape(-1, 1))
            data2_std = min_max_scaler.fit_transform(np.array(rawdata1).reshape(-1, 1))
            data1 = data1_std.reshape(1,-1)[0]
            data2 = data2_std.reshape(1,-1)[0]
            m.compute_score(data1,data2)
            return json.dumps(m.mic())
        else:
            return "请选取数值字段"
    else:
        return "您所选取的两个字段无对应数据"
# 获取options
@app.route('/getOptions', methods=['post', 'get'])
def getOptions():
    """Return a cascading options tree (database -> collection -> field)
    for every user database on the Mongo server, serialized as JSON."""
    # NOTE(review): hard-coded credentials; move them to configuration.
    client = MongoClient("10.72.100.5",8027)
    db = client.admin
    db.authenticate("double", "double")
    dblist = client.list_database_names()
    print('所有的数据库:', dblist)
    # Drop Mongo's system databases.  The original used a fragile
    # index-juggling pop loop; a comprehension says the same thing safely.
    dblist = [name for name in dblist if name not in ('admin', 'config', 'local')]
    print(dblist)
    options_list = []
    for db_name in dblist:
        database = client[db_name]
        child_list0 = []
        for coll_name in database.list_collection_names():
            document = database[coll_name].find_one()
            # Guard empty collections: find_one() returns None there and the
            # old code crashed calling .keys() on it.
            field_list = list(document.keys())[1:] if document else []  # skip "_id"
            child_list1 = [{'value': field, 'label': field} for field in field_list]
            child_list0.append({'value': coll_name, 'label': coll_name, 'children': child_list1})
        options_list.append({'value': db_name, 'label': db_name, 'children': child_list0})
    return json.dumps(options_list)
|
{"/QuestionServer.py": ["/db.py"]}
|
28,446
|
lquresh52/student_data
|
refs/heads/master
|
/filter_student_data/apps.py
|
from django.apps import AppConfig
class FilterStudentDataConfig(AppConfig):
    """Django application configuration for the filter_student_data app."""
    name = 'filter_student_data'
|
{"/filter_student_data/views.py": ["/filter_student_data/models.py"]}
|
28,447
|
lquresh52/student_data
|
refs/heads/master
|
/filter_student_data/models.py
|
from django.db import models
# Create your models here.
class StudetData(models.Model):
    """A student record with grade, roll number and contact numbers.

    NOTE(review): the class name has a typo ('Studet'); renaming would need a
    migration and caller updates, so it is only flagged here.
    """
    name = models.CharField(max_length=300)
    standard = models.IntegerField()  # class/grade the student is in
    roll_no = models.IntegerField()
    age = models.IntegerField()
    mobile_no_p1 = models.BigIntegerField()  # primary contact number
    mobile_no_p2 = models.BigIntegerField(blank=True,null=True)  # optional second number
    def __str__(self):
        return self.name
|
{"/filter_student_data/views.py": ["/filter_student_data/models.py"]}
|
28,448
|
lquresh52/student_data
|
refs/heads/master
|
/filter_student_data/views.py
|
from django.shortcuts import render,redirect
from .models import StudetData
# Create your views here.
# Module-level holder for the last filtered queryset: written by index(),
# read by data().  NOTE(review): shared across requests -- not request-safe.
info = None
def index(request):
    """Show the filter form; on POST, filter students by standard range and
    redirect to the data view with the list of columns to hide."""
    if request.method == 'POST':
        from_std = request.POST.get('from_std')
        to_std = request.POST.get('to_std')
        name = request.POST.get('name')
        age = request.POST.get('age')
        roll_no = request.POST.get('roll_no')
        std = request.POST.get('std')
        mobile_num = request.POST.get('mble_no')
        print(to_std,from_std,name,age,roll_no)
        # Columns the user did NOT tick are excluded in the data template.
        lst = []
        if name != 'on':
            lst.append('name')
        if age != 'on':
            lst.append('age')
        if roll_no != 'on':
            lst.append('roll_no')
        if std != 'on':
            lst.append('standard')
        if mobile_num != 'on':
            lst.append('mobile_no_p1')
            lst.append('mobile_no_p2')
        print(lst)
        # BUG FIX: POST values are strings, so '9' > '10' compared
        # lexicographically and reversed ranges were mis-detected.
        # Compare and filter as integers instead.
        from_std = int(from_std)
        to_std = int(to_std)
        if from_std > to_std:
            from_std, to_std = to_std, from_std
        fetch_data = StudetData.objects.filter(standard__range=[from_std, to_std])
        print(fetch_data)
        # NOTE(review): passing results through a module global is not
        # request-safe under concurrency; kept for behavioral compatibility.
        global info
        info = fetch_data
        return redirect(data,lst)
    else:
        return render(request,'index.html')
def data(request, lst):
    """Render the rows stored by index(), hiding the columns named in *lst*."""
    global info
    print(lst)
    context = {'data': info, 'exclude': lst}
    return render(request, 'data.html', context)
|
{"/filter_student_data/views.py": ["/filter_student_data/models.py"]}
|
28,495
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/Config.py
|
class Config():
    """Configuration bundle for FaceNet training and inference."""
    # embedding_size : dimensionality of the output feature vector.
    # num_classes    : number of identity classes.
    # margin         : margin used by the triplet loss.
    embedding_size = 128
    num_classes = 54
    margin = 0.5
    # batch_size  : training batch size.
    # num_workers : dataloader worker processes.
    # start_epoch : epoch to resume from (non-zero continues a run).
    # num_epochs  : total number of training epochs.
    batch_size = 64
    num_workers = 1
    start_epoch = 0
    num_epochs = 100
    # base_learning_rate  : regular learning rate; with warm-up enabled it
    #                       takes over after warmup_epoch.
    # start_learning_rate : warm-up starting learning rate (only used when
    #                       use_warmup is True).
    # warmup_epoch        : number of warm-up epochs (only with use_warmup).
    # use_warmup          : whether to apply learning-rate warm-up.
    base_learning_rate = 0.01
    start_learning_rate = 1e-5
    warmup_epoch = 5
    use_warmup = False
    # image_size      : side length of the square input image.
    # del_classifier  : drop classifier layers when the class set changes.
    # triplet_lambuda : weight multiplying the triplet loss.
    image_size = 56
    del_classifier = False
    triplet_lambuda = 5
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,496
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/data2txt.py
|
import os
import numpy as np
def data2txt(DataPath,SavePath,split = 0.9):
    """Index every image under DataPath and write shuffled train/val path and
    label lists as text files into SavePath.

    Expects a layout DataPath/<person>/<session>/<image>; the label is parsed
    from the path string, so results depend on DataPath's exact form.
    """
    dirs = os.listdir(DataPath)
    data = []
    for dir in dirs:
        file_dirs = os.listdir(os.path.join(DataPath, dir))
        for file_dir in file_dirs:
            pics = os.listdir(os.path.join(DataPath, dir, file_dir))
            pics_dir = [os.path.join(DataPath, dir, file_dir, x) for x in pics]
            data += pics_dir
    # Label = third '/'-separated path component.  NOTE(review): this only
    # lines up for a relative DataPath like './head_data' and breaks with
    # Windows separators -- confirm against the caller.
    labels = [int(x.split('/')[2]) for x in data]
    # Re-seeding with the same value before each shuffle produces the same
    # permutation twice, so path/label pairing survives the shuffle.
    np.random.seed(50)
    np.random.shuffle(data)
    np.random.seed(50)
    np.random.shuffle(labels)
    # First `split` fraction is training, the remainder validation.
    train_data = data[:int(len(data)*split)]
    train_label = labels[:int(len(data)*split)]
    val_data = data[int(len(data)*split):]
    val_label = labels[int(len(data)*split):]
    write_txt(SavePath,'train_data.txt',train_data)
    write_txt(SavePath, 'train_label.txt', train_label)
    write_txt(SavePath, 'val_data.txt', val_data)
    write_txt(SavePath, 'val_label.txt', val_label)
def write_txt(SavePath,Filename,SaveList):
    """Write each element of SaveList to SavePath/Filename, one per line.

    Elements are stringified with str(), so mixed path/label lists work.
    """
    # Context manager guarantees the handle is closed even if a write fails;
    # the original leaked the file object on error.
    with open(os.path.join(SavePath, Filename), 'w') as out:
        out.writelines(str(item) + '\n' for item in SaveList)
# Script entry point: index ./head_data and emit the split lists.
if __name__ == '__main__':
    data2txt('./head_data','./data_txt')
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,497
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/ONNX_model.py
|
import numpy as np
import torch
import torch.nn as nn
from torchvision.models import resnet18
from torch.nn.modules.distance import PairwiseDistance
class FaceNetModel(nn.Module):
    """ResNet-18 backbone producing an L2-normalized face embedding.

    The final fc expects 2048 inputs, i.e. layer4's 512x2x2 output for a
    56x56 input image -- other resolutions will not fit this head.
    """
    def __init__(self, embedding_size, pretrained=False):
        super(FaceNetModel, self).__init__()
        self.model = resnet18(pretrained)
        self.embedding_size = embedding_size
        # Replace the classifier head with an embedding projection.
        self.model.fc = nn.Linear(2048, self.embedding_size)
    def l2_norm(self, input):
        """Scale every row of *input* to unit L2 norm, reshaped to (-1, 128)."""
        row_norm = torch.sqrt(torch.sum(torch.pow(input, 2), 1, keepdim=True))
        normalized = torch.div(input, row_norm)
        return normalized.view(-1, 128)
    def forward(self, x):
        backbone = self.model
        # Run the ResNet stages by hand so avgpool is skipped.
        for stage in (backbone.conv1, backbone.bn1, backbone.relu,
                      backbone.maxpool, backbone.layer1, backbone.layer2,
                      backbone.layer3, backbone.layer4):
            x = stage(x)
        x = backbone.fc(x.view(-1, 2048))
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        self.features = self.l2_norm(x) * 10
        return self.features
if __name__ == '__main__':
    # Smoke-test construction.  The original passed (128, 22): the second
    # positional argument is `pretrained`, so the truthy 22 wrongly
    # triggered a pretrained-weight download.
    model = FaceNetModel(128)
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,498
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/engine_trt.py
|
import google.protobuf
from PIL import Image
import numpy as np
import time
from onnx_tensorrt.tensorrt_engine import Engine
import tensorrt as trt
import torchvision.transforms as transforms
from Config import Config
import os
import cv2
import torch
import matplotlib.pyplot as plt
def pil_loader(path):
    """Open the file at *path* with PIL and return it converted to RGB."""
    with open(path, 'rb') as handle:
        with Image.open(handle) as picture:
            return picture.convert('RGB')
def process_image(img_path, H=56, W=56):
    """Load an image file and return it as a (1, C, H, W) float tensor in [0, 1]."""
    # image = pil_loader(img_path) ##(H,W,C)
    # normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                 std=[0.229, 0.224, 0.225])
    test_transforms = transforms.Compose([
        # transforms.Resize(size=(int(H), int(W)), interpolation=3),
        transforms.ToTensor(),
    ])
    image = Image.open(img_path)
    image = np.array(image)
    # NOTE(review): cv2.resize takes (width, height); passing (H, W) only
    # works here because both default to 56 -- confirm for non-square sizes.
    image = cv2.resize(image, (H, W))
    # image = (image / 255.0).astype(np.float32)
    image = Image.fromarray(image)
    image = test_transforms(image)  # HWC uint8 -> CHW float in [0, 1]
    image = image.unsqueeze(0)  # add batch dim -> (1, C, H, W)
    return image
# Module-level TensorRT engine shared by get_feature() below.
cfg = Config()
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
trt_engine = trt.utils.load_engine(G_LOGGER, 'engine/test_engine.engine')
CROWD_ENGINE = Engine(trt_engine)
# img_path = 'head_data/001/1/0_3.jpg'
#
# ims = process_image(img_path)
# np_ims = np.asarray(ims.data.cpu())
#
# start = time.time()
# result = CROWD_ENGINE.run([np_ims])
# print(result)
# print(time.time()-start)
def get_feature(img_path):  # bbox shape must be []
    """Run the global TensorRT engine on one image file; return its embedding."""
    tensor = process_image(img_path)
    batch = np.asarray(tensor.data.cpu())
    outputs = CROWD_ENGINE.run([batch])
    return outputs[0]
def person_read(dir):
    """Map each subdirectory name found under *dir* (at any depth) to the
    list of entry paths it contains.  Same-named subdirectories at
    different depths overwrite each other."""
    catalogue = {}
    for base, subdirs, _ in os.walk(dir):
        for sub in subdirs:
            entries = os.listdir(os.path.join(base, sub))
            catalogue[sub] = [os.path.join(base, sub, name) for name in entries]
    return catalogue
def cosine_distance(a, b, data_is_normalized=False):
    """Pairwise cosine distance matrix between the row vectors of a and b.

    Returns 1 - cos(a_i, b_j); rows are L2-normalized first unless
    data_is_normalized is set.
    """
    if not data_is_normalized:
        a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
        b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
    return 1. - np.dot(a, b.T)
    # a = np.array(a)
    # b = np.array(b)
    # dist = np.sqrt(np.sum(np.square(a - b),axis=1))
    # return dist
    # NOTE(review): everything below the return above appears unreachable --
    # a leftover squared-Euclidean pdist.  Confirm indentation in the
    # original file and remove if truly dead.
    a, b = np.asarray(a), np.asarray(b)
    if len(a) == 0 or len(b) == 0:
        return np.zeros((len(a), len(b)))
    a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
    r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
    r2 = np.clip(r2, 0., float(np.inf))
    return r2
def nn_cosine_distance(x, y):
    """Smallest cosine distance from each column of *y* to the gallery *x*,
    scaled by 10 to match the embedding's alpha scaling."""
    return cosine_distance(x, y).min(axis=0) * 10
# --- Smoke test: plot embedding distances for two identities ---------------
features = []
man_dict = person_read('/data/Project/Detect/feature_test/data/001/')
woman_dict = person_read('/data/Project/Detect/feature_test/data/002/')
# Build a gallery of embeddings from one person's folder '3'.
for file in man_dict['3']:
    # img = Image.open(file)
    # img = np.array(img)
    # img = cv2.resize(img,(cfg.image_size,cfg.image_size))
    feature = get_feature(file)
    features.append(np.squeeze(feature))
# Distances from the gallery to folder '15' (expected: same person, small).
man_result = []
for file in man_dict['15']:
    # img = Image.open(file)
    # img = np.array(img)
    # img = cv2.resize(img, (cfg.image_size, cfg.image_size))
    feature = get_feature(file)
    # features.append(np.squeeze(feature))
    man_result.append(np.squeeze(nn_cosine_distance(features,[np.squeeze(feature)])))
# Distances to a different person's folder '20' (expected: larger).
woman_result = []
for file in woman_dict['20']:
    # img = Image.open(file)
    # img = np.array(img)
    # img = cv2.resize(img, (cfg.image_size, cfg.image_size))
    feature = get_feature(file)
    # features.append(np.squeeze(feature))
    woman_result.append(np.squeeze(nn_cosine_distance(features,[np.squeeze(feature)])))
# Blue: same-person distances; red: different-person distances.
plt.plot(range(0,len(man_result)),man_result,'b-')
plt.plot(range(len(man_result),len(man_result)+len(woman_result)),woman_result,'r-')
plt.show()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,499
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/test.py
|
import torch
from Config import Config
from ONNX_model import FaceNetModel
# from face_model import FaceNetModel
from torchvision import transforms as T
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
cfg = Config()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model = FaceNetModel(embedding_size = cfg.embedding_size).to(device)
model = FaceNetModel(embedding_size = cfg.embedding_size).to(device)
# model_train = torch.load('./test_model/checkpoint_epoch220.pth', map_location='cuda:0')
# model.load_state_dict(model_train['state_dict'])
# Load only the checkpoint weights whose names exist in the current model
# (tolerates checkpoints saved with extra classifier layers).
model_dict = model.state_dict()
checkpoint = torch.load('./test_model/checkpoint_epoch220.pth',map_location='cuda:0')
checkpoint['state_dict'] = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
model_dict.update(checkpoint['state_dict'])
model.load_state_dict(model_dict)
model.eval()
# transform = T.Compose([
#     T.Resize(cfg.image_size),
#     T.CenterCrop(cfg.image_size),
#     T.ToTensor(),
#     # normalize
# ])
def detect(img):
    """Embed one HxWx3 uint8 image with the module-level FaceNet model and
    return the embedding as a numpy array."""
    scaled = (img / 255.0).astype(np.float32)
    tensor = torch.from_numpy(scaled.transpose((2, 0, 1)))
    batch = tensor.unsqueeze(0).to(device)
    pred = model.forward(batch).to(device)
    return pred.cpu().detach().numpy()
def cosine_distance(a, b, data_is_normalized=False):
    """Pairwise cosine distance matrix between the row vectors of a and b.

    Returns 1 - cos(a_i, b_j); rows are L2-normalized first unless
    data_is_normalized is set.
    """
    if not data_is_normalized:
        a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
        b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
    return 1. - np.dot(a, b.T)
    # a = np.array(a)
    # b = np.array(b)
    # dist = np.sqrt(np.sum(np.square(a - b),axis=1))
    # return dist
    # NOTE(review): everything below the return above appears unreachable --
    # a leftover squared-Euclidean pdist.  Confirm indentation in the
    # original file and remove if truly dead.
    a, b = np.asarray(a), np.asarray(b)
    if len(a) == 0 or len(b) == 0:
        return np.zeros((len(a), len(b)))
    a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
    r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
    r2 = np.clip(r2, 0., float(np.inf))
    return r2
def nn_cosine_distance(x, y):
    """Smallest cosine distance from each column of *y* to the gallery *x*,
    scaled by 10 to match the embedding's alpha scaling."""
    return cosine_distance(x, y).min(axis=0) * 10
# result_man1 = detect(Image.open('/data/Project/Detect/feature_test/data/001/3/0_4.jpg')).cpu().detach().numpy()
# result_man2 = detect(Image.open('/data/Project/Detect/feature_test/data/001/3/0_4.jpg')).cpu().detach().numpy()
# result_woman1 = detect(Image.open('/data/Project/Detect/feature_test/data/002/1/0_8.jpg')).cpu().detach().numpy()
def get_feature(image):  # bbox shape must be []
    """Thin wrapper around detect() kept for call-site symmetry with the
    TensorRT variant of this script."""
    return detect(image)
def person_read(dir):
    """Map each subdirectory name found under *dir* (at any depth) to the
    list of entry paths it contains.  Same-named subdirectories at
    different depths overwrite each other."""
    catalogue = {}
    for base, subdirs, _ in os.walk(dir):
        for sub in subdirs:
            entries = os.listdir(os.path.join(base, sub))
            catalogue[sub] = [os.path.join(base, sub, name) for name in entries]
    return catalogue
# --- Smoke test: plot embedding distances for two identities ---------------
features = []
man_dict = person_read('/data/Project/Detect/feature_test/data/001/')
woman_dict = person_read('/data/Project/Detect/feature_test/data/002/')
# Build a gallery of embeddings from one person's folder '3'.
for file in man_dict['3']:
    img = Image.open(file)
    img = np.array(img)
    img = cv2.resize(img,(cfg.image_size,cfg.image_size))
    feature = get_feature(img)
    features.append(np.squeeze(feature))
# Distances from the gallery to folder '15' (expected: same person, small).
man_result = []
for file in man_dict['15']:
    img = Image.open(file)
    img = np.array(img)
    img = cv2.resize(img, (cfg.image_size, cfg.image_size))
    feature = get_feature(img)
    # features.append(np.squeeze(feature))
    man_result.append(np.squeeze(nn_cosine_distance(features,[np.squeeze(feature)])))
# Distances to a different person's folder '20' (expected: larger).
woman_result = []
for file in woman_dict['20']:
    img = Image.open(file)
    img = np.array(img)
    img = cv2.resize(img, (cfg.image_size, cfg.image_size))
    feature = get_feature(img)
    # features.append(np.squeeze(feature))
    woman_result.append(np.squeeze(nn_cosine_distance(features,[np.squeeze(feature)])))
# Blue: same-person distances; red: different-person distances.
plt.plot(range(0,len(man_result)),man_result,'b-')
plt.plot(range(len(man_result),len(man_result)+len(woman_result)),woman_result,'r-')
plt.show()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,500
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/engine_test.py
|
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import os
from random import randint
import numpy as np
import cv2
try:
from PIL import Image
except ImportError as err:
raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have Pillow installed.
For installation instructions, see:
http://pillow.readthedocs.io/en/stable/installation.html""".format(err))
try:
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import argparse
except ImportError as err:
raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have pycuda and the example dependencies installed.
https://wiki.tiker.net/PyCuda/Installation/Linux
pip(3) install tensorrt[examples]""".format(err))
try:
import tensorrt as trt
from tensorrt.parsers import uffparser
except ImportError as err:
raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have the TensorRT Library installed
and accessible in your LD_LIBRARY_PATH""".format(err))
# Shared TensorRT logger used when loading engines below.
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
# MAX_WORKSPACE = 1 << 30
# MAX_BATCHSIZE = 1
# DATA='/mnist/'
# MODEL='resnet50.engine'
# API CHANGE: Try to generalize into a utils function
#Run inference on device
def infer(engine, input_img, batch_size):
    """Allocate buffers, run one inference on *engine*, return the host
    float32 output array."""
    #load engine
    context = engine.create_execution_context()
    assert(engine.get_nb_bindings() == 2)
    #create output array to receive data
    dims = engine.get_binding_dimensions(1).to_DimsCHW()
    elt_count = dims.C() * dims.H() * dims.W() * batch_size
    #Allocate pagelocked memory
    output = cuda.pagelocked_empty(elt_count, dtype = np.float32)
    #alocate device memory
    d_input = cuda.mem_alloc(batch_size * input_img.size * input_img.dtype.itemsize)
    d_output = cuda.mem_alloc(batch_size * output.size * output.dtype.itemsize)
    bindings = [int(d_input), int(d_output)]
    stream = cuda.Stream()
    #transfer input data to device
    cuda.memcpy_htod_async(d_input, input_img, stream)
    #execute model
    context.enqueue(batch_size, bindings, stream.handle, None)
    #transfer predictions back
    cuda.memcpy_dtoh_async(output, d_output, stream)
    # NOTE(review): no stream.synchronize() before returning -- the async
    # copy may not be finished; confirm against the TensorRT samples.
    #return predictions
    return output
def infer_new(d_input,stream,context,bindings,output,d_output, input_img, batch_size):
    """Like infer(), but reuses pre-allocated device buffers, stream and
    execution context so repeated calls avoid reallocation."""
    #transfer input data to device
    cuda.memcpy_htod_async(d_input, input_img, stream)
    #execute model
    context.enqueue(batch_size, bindings, stream.handle, None)
    #transfer predictions back
    cuda.memcpy_dtoh_async(output, d_output, stream)
    # NOTE(review): as in infer(), the stream is never synchronized here.
    #return predictions
    return output
def get_testcase(path):
    """Load the image at *path* and return its pixel values flattened to 1-D."""
    im = Image.open(path)
    assert(im)
    pixels = np.array(im)
    #make array 1D
    return pixels.ravel()
#Also prints case to console
def normalize(data):
    """Scale 8-bit pixel values into inverted [0, 1] floats in pagelocked
    memory, printing an ASCII-art preview of the image as it goes."""
    #allocate pagelocked memory
    norm_data = cuda.pagelocked_empty(data.shape, np.float32)
    print("\n\n\n---------------------------", "\n")
    for i in range(len(data)):
        # Map each byte to one of 10 ASCII "gray levels"; the line break
        # every 28 pixels assumes a 28-wide (MNIST) image -- TODO confirm.
        print(" .:-=+*#%@"[data[i] // 26] + ("\n" if ((i + 1) % 28 == 0) else ""), end="");
        norm_data[i] = 1.0 - data[i] / 255.0
    print("\n")
    return norm_data
# def transfer(image):
class Engine_Config():
    """Fixed I/O geometry of the serialized face-embedding engine."""
    batch_size = 1
    input_size = 3*56*56*4 # channel * width * height * sizeof(float32)
    width = 56
    height = 56
    output_size = 128*4 # features * sizeof(float32)
    engine_path = "./engine/face_engine.engine"
def main():
    """Load the serialized engine, run one image through it and print the
    resulting embedding."""
    # Get the engine configuration
    cfg = Engine_Config()
    # Load Engine
    engine = trt.utils.load_engine(G_LOGGER,cfg.engine_path)
    assert(engine),"No Engine loaded!"
    context = engine.create_execution_context()
    assert (engine.get_nb_bindings() == 2)
    # Create output array to receive data
    dims = engine.get_binding_dimensions(1).to_DimsCHW()
    elt_count = dims.C() * dims.H() * dims.W() * 1
    # Allocate pagelocked memory
    output = cuda.pagelocked_empty(elt_count, dtype=np.float32)
    # Allocate device memory
    d_input = cuda.mem_alloc(cfg.batch_size * cfg.input_size)
    d_output = cuda.mem_alloc(cfg.batch_size * cfg.output_size)
    bindings = [int(d_input), int(d_output)]
    stream = cuda.Stream()
    # Interface a picture
    image = cv2.imread('head_data/001/1/0_3.jpg')
    # BGR -> RGB, resize and scale to [0, 1] to match training preprocessing.
    image = cv2.cvtColor(cv2.resize(image,(cfg.width,cfg.height)),cv2.COLOR_BGR2RGB) / 255.0
    # NOTE(review): np.reshape (not a transpose) to (3,56,56) scrambles the
    # HWC pixel layout -- confirm the engine really expects this ordering.
    image = np.reshape(image,(3,56,56))
    pre = infer_new(d_input, stream, context, bindings, output, d_output, image, cfg.batch_size)
    print(pre)
# Script entry point.
if __name__ == "__main__":
    main()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,501
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/pytorch_to_onnx.py
|
# Checkpoint to convert; the export writes '<model_path>.onnx' next to it.
# model_path = "/data/github_test/binary_classifier/torch_logs_2/net_params000.pkl"
model_path = '/data/Project/Detect/Facenet_pytorch/test_model/checkpoint_epoch220.pth'
import torch
import cv2
from torch import nn
from ONNX_model import FaceNetModel
from torch.autograd import Variable
def torch_to_onnx(model, model_file):
    """Export *model* to ONNX as '<model_file>.onnx'.

    The model is traced with a dummy (2, 3, 56, 56) float batch, so the
    input resolution is baked into the exported graph.
    """
    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4; a plain tensor is the modern equivalent.
    input_data = torch.randn(2, 3, 56, 56)
    torch.onnx.export(model, input_data, "{}.onnx".format(model_file), verbose=True)
# Build the model and load matching checkpoint weights; keys absent from the
# current architecture (e.g. old classifier layers) are silently dropped.
model = FaceNetModel(embedding_size = 128)
model_dict = model.state_dict()
checkpoint = torch.load('./test_model/checkpoint_epoch220.pth',map_location='cuda:0')
checkpoint['state_dict'] = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
model_dict.update(checkpoint['state_dict'])
model.load_state_dict(model_dict)
model.eval()
# Script entry point: export the loaded FaceNet model to ONNX.
if __name__ == "__main__":
    torch_to_onnx(model, model_path)
# input_data = Variable(torch.randn(2, 3, 64, 112, 112))
# output = model(input_data)
# import numpy as np
# np.expand_dims()
#
# import cv2
# import os
# cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# os.path.exists()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,502
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/lr_scheduler.py
|
# -*- coding: utf-8 -*-
# @Time : 19-4-18 下午4:35
# @Author : Altair
# @FileName: w.py
# @Software: PyCharm
# @email : 641084377@qq.com
from torch.optim.lr_scheduler import _LRScheduler
import numpy as np
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """
    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier <= 1.:
            raise ValueError('multiplier should be greater than 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Set once the hand-off to after_scheduler has happened.
        self.finished = False
        # NOTE: _LRScheduler.__init__ invokes step() once, so get_lr() runs
        # here -- all attributes above must already be set.
        super().__init__(optimizer)
    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    # Hand off: rebase the follow-up scheduler onto the
                    # fully warmed-up learning rate.
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        # Linear ramp from base_lr to base_lr * multiplier over total_epoch.
        return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
    def step(self, epoch=None):
        if self.finished and self.after_scheduler:
            # Delegate entirely to the follow-up scheduler after warm-up.
            return self.after_scheduler.step(epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)
class ReducePlateauScheduler(_LRScheduler):
    """Cosine-annealing schedule: the learning rate decays from base_lr to 0
    over *total_epoch* epochs following half a cosine period."""
    def __init__(self, optimizer, total_epoch):
        # Number of epochs over which the cosine decay completes.
        self.total_epoch = total_epoch
        super().__init__(optimizer)
    def get_lr(self):
        phase = self.last_epoch / self.total_epoch * np.pi
        return [0.5 * base_lr * (np.cos(phase) + 1) for base_lr in self.base_lrs]
    def step(self, epoch=None):
        return super(ReducePlateauScheduler, self).step(epoch)
def WarmAndReduce_LR(optimizer,base_learning_rate,max_epoch,
                     use_warmup=True,
                     start_learning_rate=1e-5,
                     warmup_epoch=5):
    """ Create an Reduce Learning Rate with or without warm up.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        base_learning_rate: The basic learning rate ,the same as the regular learning rate.
        max_epoch: The max epoch of training
        use_warmup: Use warm up or not
        start_learning_rate: is active when the use_warmup is True .If active ,it must be the same as the optimizer learning rate.
        warmup_epoch: The epoch to use warm up.
    """
    reduce_scheduler = ReducePlateauScheduler(optimizer, max_epoch)
    if not use_warmup:
        return reduce_scheduler
    # Warm up linearly from start_learning_rate up to base_learning_rate,
    # then hand control over to the cosine decay.
    multiplier = int(base_learning_rate / start_learning_rate)
    return GradualWarmupScheduler(optimizer, multiplier=multiplier,
                                  total_epoch=warmup_epoch,
                                  after_scheduler=reduce_scheduler)
# Demo: plot the learning-rate curve produced by WarmAndReduce_LR.
if __name__ == '__main__':
    import torch
    import matplotlib.pyplot as plt
    lr = []
    base_learning_rate = 0.01
    start_learning_rate = 0.0001
    max_epoch = 100
    warmup_epoch = 1000
    # A dummy parameter is enough to drive the optimizer/scheduler pair.
    optimizer = torch.optim.SGD([torch.zeros(10)],lr=base_learning_rate)
    WarmUp = WarmAndReduce_LR(optimizer,base_learning_rate,max_epoch,use_warmup=False)
    # ReduceScheduler = ReducePlateauScheduler(optimizer,max_epoch)
    # WarmUp = GradualWarmupScheduler(optimizer,multiplier=int(base_learning_rate/start_learning_rate),total_epoch=1000,after_scheduler=ReduceScheduler)
    for epoch in range(1,100):
        WarmUp.step(epoch)
        lr.append(optimizer.param_groups[0]['lr'])
    plt.plot(np.arange(len(lr)),lr)
    plt.show()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,503
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/train.py
|
# -*- coding: utf-8 -*-
# @Time : 19-4-18 下午4:35
# @Author : Altair
# @FileName: w.py
# @Software: PyCharm
# @email : 641084377@qq.com
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.nn.modules.distance import PairwiseDistance
import torchvision
from torchvision import transforms
from eval_metrics import evaluate, plot_roc
from face_model import FaceNetModel,TripletLoss
from dataset import get_dataloader,test_data
from Config import Config
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from lr_scheduler import WarmAndReduce_LR
######################### Set the configration #############################
# The device for train : change the cuda device for your train
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
l2_dist = PairwiseDistance(2)  # Euclidean distance used for triplet mining
# The Configration for the model. Change the Config.py for your own set.
cfg = Config()
# The data path of your data.
data_path = './data_txt'
# The logger path for your log dir.
log_dir = "./logs/face_log001"
writer = SummaryWriter(log_dir=log_dir)
# The test data path of your data. The classifier results will be write in the logger.
TestData = test_data('./data_txt',img_size=cfg.image_size)
############################################################################
########################## The Loss Functions ##############################
TL_loss = TripletLoss(cfg.margin) # The triplet loss of model
CE_loss = nn.CrossEntropyLoss() # The cross entropy loss of model
############################################################################
def train_valid(model, optimizer, scheduler, epoch, dataloaders, data_size):
    """Run one 'train' pass and one 'valid' pass for a single epoch.

    Args:
        model: FaceNet model exposing forward() (embeddings) and
            forward_classifier() (class logits).
        optimizer: stepped during the 'train' phase only.
        scheduler: LR scheduler; stepped once at the start of the train phase.
        epoch: current epoch index, used for logging and checkpoint names.
        dataloaders: dict {'train': DataLoader, 'valid': DataLoader} yielding
            triplet batches (anc/pos/neg images plus pos/neg class labels).
        data_size: dict {'train': int, 'valid': int} dataset sizes used to
            average the accumulated losses.

    Side effects: writes scalars/images to the module-level TensorBoard
    `writer`, appends a per-epoch text log under ./log/, saves a checkpoint
    after the train phase and a ROC plot after the valid phase.
    """
    for phase in ['train', 'valid']:
        # One step for train or valid
        labels, distances = [], []
        # *_sum accumulate over the whole phase; *_sigma are reset every
        # 10 batches and only feed the console progress print.
        triplet_loss_sum = 0.0
        crossentropy_loss_sum = 0.0
        accuracy_sum = 0.0
        triplet_loss_sigma = 0.0
        crossentropy_loss_sigma = 0.0
        accuracy_sigma = 0.0
        if phase == 'train':
            scheduler.step()
            model.train()
        else:
            model.eval()
        for batch_idx, batch_sample in enumerate(dataloaders[phase]):
            anc_img = batch_sample['anc_img'].to(device)
            pos_img = batch_sample['pos_img'].to(device)
            neg_img = batch_sample['neg_img'].to(device)
            # Skip ragged (last) batches so the triplet tensors stay aligned.
            if(anc_img.shape[0]!=cfg.batch_size or
                pos_img.shape[0]!=cfg.batch_size or
                neg_img.shape[0]!=cfg.batch_size):
                print("Batch Size Not Equal")
                continue
            pos_cls = batch_sample['pos_class'].to(device)
            neg_cls = batch_sample['neg_class'].to(device)
            # Gradients are tracked only during the train phase.
            with torch.set_grad_enabled(phase == 'train'):
                try:
                    # anc_embed, pos_embed and neg_embed are encoding(embedding) of image
                    anc_embed, pos_embed, neg_embed = model(anc_img), model(pos_img), model(neg_img)
                    # choose the hard negatives only for "training"
                    pos_dist = l2_dist.forward(anc_embed, pos_embed)
                    neg_dist = l2_dist.forward(anc_embed, neg_embed)
                    all = (neg_dist - pos_dist < cfg.margin).cpu().numpy().flatten()
                    if phase == 'train':
                        hard_triplets = np.where(all == 1)
                        if len(hard_triplets[0]) == 0:
                            continue
                    else:
                        # validation keeps every triplet (>= 0 is always true)
                        hard_triplets = np.where(all >= 0)
                        if len(hard_triplets[0]) == 0:
                            continue
                    anc_hard_embed = anc_embed[hard_triplets].to(device)
                    pos_hard_embed = pos_embed[hard_triplets].to(device)
                    neg_hard_embed = neg_embed[hard_triplets].to(device)
                    anc_hard_img = anc_img[hard_triplets].to(device)
                    pos_hard_img = pos_img[hard_triplets].to(device)
                    neg_hard_img = neg_img[hard_triplets].to(device)
                    pos_hard_cls = pos_cls[hard_triplets].to(device)
                    neg_hard_cls = neg_cls[hard_triplets].to(device)
                    anc_img_pred = model.forward_classifier(anc_hard_img).to(device)
                    pos_img_pred = model.forward_classifier(pos_hard_img).to(device)
                    neg_img_pred = model.forward_classifier(neg_hard_img).to(device)
                    triplet_loss = TL_loss.forward(anc_hard_embed, pos_hard_embed, neg_hard_embed).to(device)
                    # Scale the triplet term relative to the cross-entropy term.
                    triplet_loss *= cfg.triplet_lambuda
                    # Anchor and positive share the same class label.
                    predicted_labels = torch.cat([anc_img_pred, pos_img_pred, neg_img_pred])
                    true_labels = torch.cat([pos_hard_cls, pos_hard_cls,neg_hard_cls]).squeeze()
                    crossentropy_loss = CE_loss(predicted_labels,true_labels).to(device)
                    loss = triplet_loss + crossentropy_loss
                    if phase == 'train':
                        optimizer.zero_grad()
                        # triplet_loss.backward()
                        loss.backward()
                        optimizer.step()
                    if phase == 'valid':
                        # Log 100 random validation images tagged with the
                        # classifier's prediction so they can be eyeballed.
                        pic_array,_ = TestData.get_data()
                        for i,pic in enumerate(pic_array):
                            pred = model.forward_classifier(pic.unsqueeze(0).to(device)).to(device)
                            pred = torch.argmax(pred, 1).cpu().numpy()
                            # print(pred)
                            writer.add_image("Person {}/{}".format(pred[0],i),pic,epoch)
                    _, predicted = torch.max(predicted_labels, 1)
                    # 3 predictions per triplet (anchor, positive, negative).
                    correct = (predicted == true_labels).cpu().squeeze().sum().numpy()/(len(hard_triplets[0])*3)
                    # Collect distances/labels for the ROC evaluation below.
                    dists = l2_dist.forward(anc_embed, pos_embed)
                    distances.append(dists.data.cpu().numpy())
                    labels.append(np.ones(dists.size(0)))
                    dists = l2_dist.forward(anc_embed, neg_embed)
                    distances.append(dists.data.cpu().numpy())
                    labels.append(np.zeros(dists.size(0)))
                    triplet_loss_sum += triplet_loss.item()
                    crossentropy_loss_sum += crossentropy_loss.item()
                    accuracy_sum += correct
                    triplet_loss_sigma += triplet_loss.item()
                    crossentropy_loss_sigma += crossentropy_loss.item()
                    accuracy_sigma += correct
                    if batch_idx % 10 == 0 and batch_idx!=0:
                        print('{} Inter {:4d}/{:4d} - Triplet Loss = {:.5f} - CrossEntropy Loss = {:.5f} - All Loss = {:.5f} - Accuaracy = {:.5f} len:{}'
                            .format(phase,batch_idx,len(dataloaders[phase]),
                                    triplet_loss_sigma/10,crossentropy_loss_sigma/10,
                                    (triplet_loss_sigma+crossentropy_loss_sigma)/10,
                                    accuracy_sigma/10,len(hard_triplets[0])))
                        triplet_loss_sigma = 0
                        crossentropy_loss_sigma = 0
                        accuracy_sigma = 0
                except Exception as e:
                    # NOTE(review): broad catch keeps training alive on any batch
                    # error, but silently masks real bugs — consider narrowing.
                    print(e)
                    pass
        avg_triplet_loss = triplet_loss_sum / int(data_size[phase]/cfg.batch_size)
        avg_crossentropy_loss = crossentropy_loss_sum / int(data_size[phase]/cfg.batch_size)
        # Flatten the per-batch arrays into single label/distance vectors.
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array([subdist for dist in distances for subdist in dist])
        tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
        print('  {} set - Triplet Loss       = {:.8f}'.format(phase, avg_triplet_loss))
        print('  {} set - CrossEntropy Loss  = {:.8f}'.format(phase, avg_crossentropy_loss))
        print('  {} set - All Loss           = {:.8f}'.format(phase, avg_triplet_loss+avg_crossentropy_loss))
        print('  {} set - Accuracy           = {:.8f}'.format(phase, np.mean(accuracy)))
        # Log the epoch-level losses to TensorBoard.
        # NOTE(review): '...'.format(phase) is a no-op on these group names —
        # they contain no placeholder.
        writer.add_scalars('Loss/Triplet Loss Group'.format(phase), {'{} triplet loss'.format(phase): avg_triplet_loss}, epoch)
        writer.add_scalars('Loss/Crossentropy Loss Group'.format(phase), {'{} crossentropy loss'.format(phase): avg_crossentropy_loss}, epoch)
        writer.add_scalars('Loss/All Loss Group'.format(phase), {'{} loss'.format(phase): avg_triplet_loss + avg_crossentropy_loss}, epoch)
        writer.add_scalars('Accuracy_group'.format(phase), {'{} accuracy'.format(phase): np.mean(accuracy)}, epoch)
        # Log the current learning rate.
        writer.add_scalar('learning rate', scheduler.get_lr()[0], epoch)
        with open('./log/{}_log_epoch{}.txt'.format(phase, epoch), 'w') as f:
            f.write(str(epoch)  + '\t' +
                    str(np.mean(accuracy)) + '\t' +
                    str(avg_triplet_loss)+ '\t' +
                    str(avg_crossentropy_loss)+ '\t' +
                    str(avg_triplet_loss+avg_crossentropy_loss))
        if phase == 'train':
            torch.save({'epoch': epoch,
                        'state_dict': model.state_dict()},
                       './log/checkpoint_epoch{}.pth'.format(epoch))
        else:
            plot_roc(fpr, tpr, figure_name='./log/roc_valid_epoch_{}.png'.format(epoch))
def main():
    """Build model/optimizer/scheduler, optionally resume a checkpoint, train.

    Uses the module-level `cfg`; when cfg.start_epoch != 0 the checkpoint of
    the previous epoch is loaded from ./log/.
    """
    model = FaceNetModel(embedding_size = cfg.embedding_size, num_classes = cfg.num_classes).to(device)
    if cfg.use_warmup:
        # Warm-up: start from a small LR and ramp up to the base LR.
        optimizer = optim.Adam(model.parameters(), lr = cfg.start_learning_rate)
        # scheduler = lr_scheduler.StepLR(optimizer, step_size = 50, gamma = 0.1)
        scheduler = WarmAndReduce_LR(optimizer,cfg.base_learning_rate,cfg.num_epochs,
                                    use_warmup=cfg.use_warmup,
                                    start_learning_rate=cfg.start_learning_rate,
                                    warmup_epoch=cfg.warmup_epoch)
    else:
        optimizer = optim.Adam(model.parameters(), lr = cfg.base_learning_rate)
        # scheduler = lr_scheduler.StepLR(optimizer, step_size = 50, gamma = 0.1)
        scheduler = WarmAndReduce_LR(optimizer,cfg.base_learning_rate,cfg.num_epochs,
                                    use_warmup=cfg.use_warmup)
    if cfg.start_epoch != 0:
        # Resume from the checkpoint of the epoch preceding start_epoch.
        checkpoint = torch.load('./log/checkpoint_epoch{}.pth'.format(cfg.start_epoch-1),map_location='cuda:0')
        print("Load weights from {}".format('./log/checkpoint_epoch{}.pth'.format(cfg.start_epoch-1)))
        if cfg.del_classifier:
            # Drop checkpoint keys absent from the current model (e.g. when
            # the classifier head changed size), then load the rest.
            model_dict = model.state_dict()
            checkpoint['state_dict'] = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
            model_dict.update(checkpoint['state_dict'])
            model.load_state_dict(model_dict)
        else:
            model.load_state_dict(checkpoint['state_dict'])
    for epoch in range(cfg.start_epoch, cfg.num_epochs + cfg.start_epoch):
        # scheduler.step()
        print(80 * '=')
        print('Epoch [{}/{}] Learning Rate:{:8f}'.format(epoch, cfg.num_epochs + cfg.start_epoch - 1,scheduler.get_lr()[0]))
        # Dataloaders are rebuilt every epoch so triplets are re-sampled.
        data_loaders, data_size = get_dataloader(data_path,cfg.batch_size,cfg.num_workers,cfg.image_size)
        train_valid(model, optimizer, scheduler, epoch, data_loaders, data_size)
        print(80 * '=')
if __name__ == '__main__':
    main()
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,504
|
huaifeng1993/Facenet_pytorch
|
refs/heads/master
|
/dataset.py
|
from torch.utils.data import Dataset
import os
from torchvision import transforms as T
from PIL import Image
from skimage import io
import numpy as np
import torch
class FaceDataset(Dataset):
    """Triplet dataset for FaceNet-style training.

    Reads <DataPath>/{train,val}_data.txt (one image path per line) and the
    matching *_label.txt (1-based integer class ids), groups paths by class,
    and pre-samples one (anchor, positive, negative) triplet per image.
    """
    def __init__(self,DataPath,DataType="train",img_size=30,transform = None):
        assert DataType in ("train","val"),"Data type must be train or val"
        self.image_size = (img_size,img_size)
        # NOTE(review): these open() handles are never closed explicitly —
        # consider `with open(...)`.
        if(DataType == "train"):
            data = open(os.path.join(DataPath,'train_data.txt')).readlines()
            data = [x.strip() for x in data]
            label = open(os.path.join(DataPath,'train_label.txt')).readlines()
            label = [int(x) for x in label]
        elif(DataType == "val"):
            data = open(os.path.join(DataPath,'val_data.txt')).readlines()
            data = [x.strip() for x in data]
            label = open(os.path.join(DataPath,'val_label.txt')).readlines()
            label = [int(x) for x in label]
        # Labels are assumed 1..max; classes is the number of classes.
        self.classes = max(label)
        data_dict = {}
        # Re-key from 1-based labels to 0-based class indices.
        for i in range(1,self.classes+1):
            data_list = [x for j,x in enumerate(data) if label[j]==i]
            data_dict[i-1] = data_list
        # One triplet per input image.
        self.data = self.generate_triplets(data_dict,self.classes,len(data))
        if transform is None:
            # NOTE(review): `normalize` is built but commented out of both
            # Compose pipelines below — confirm whether that is intentional.
            normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
            if(DataType == "train"):
                self.transform = T.Compose([
                    T.Resize(img_size),
                    T.RandomHorizontalFlip(),
                    T.CenterCrop(img_size),
                    T.ToTensor(),
                    # normalize
                    ])
            elif(DataType == "val"):
                self.transform = T.Compose([
                    T.Resize(img_size),
                    T.CenterCrop(img_size),
                    T.ToTensor(),
                    # normalize
                    ])
        else:
            self.transform = transform
    @staticmethod
    def generate_triplets(data,classes,triplets_num):
        """Randomly sample `triplets_num` (anchor, positive, negative) triplets.

        data: dict class_index -> list of image paths (0-based keys).
        The positive class is re-drawn until it has at least two images, and
        the negative class is re-drawn until it differs from the positive one.
        Returns a list of [anc_path, pos_path, neg_path, pos_class, neg_class].
        """
        classes = list(range(classes))
        triplets = []
        for _ in range(triplets_num):
            pos_class = np.random.choice(classes)
            neg_class = np.random.choice(classes)
            while len(data[pos_class]) < 2:
                pos_class = np.random.choice(classes)
            while pos_class == neg_class:
                neg_class = np.random.choice(classes)
            pos_list = data[pos_class]
            neg_list = data[neg_class]
            if len(data[pos_class]) == 2:
                # Exactly two images: use them as anchor and positive.
                ianc, ipos = np.random.choice(2, size=2, replace=False)
            else:
                ianc = np.random.randint(0, len(data[pos_class]))
                ipos = np.random.randint(0, len(data[pos_class]))
                while ianc == ipos:
                    ipos = np.random.randint(0, len(data[pos_class]))
            ineg = np.random.randint(0, len(data[neg_class]))
            triplets.append([pos_list[ianc], pos_list[ipos], neg_list[ineg],
                             pos_class, neg_class])
        return triplets
    def __getitem__(self, index):
        """Return one transformed triplet plus its class labels as a dict."""
        anc_path, pos_path, neg_path, pos_class, neg_class = self.data[index]
        anc_img = Image.open(anc_path)
        pos_img = Image.open(pos_path)
        neg_img = Image.open(neg_path)
        # NOTE(review): the 'long' dtype string is a legacy numpy alias —
        # verify it still resolves on the numpy version in use.
        pos_class = torch.from_numpy(np.array([pos_class]).astype('long'))
        neg_class = torch.from_numpy(np.array([neg_class]).astype('long'))
        sample = {'anc_img': anc_img, 'pos_img': pos_img, 'neg_img': neg_img, 'pos_class': pos_class, 'neg_class': neg_class}
        if self.transform:
            sample['anc_img'] = self.transform(sample['anc_img'])
            sample['pos_img'] = self.transform(sample['pos_img'])
            sample['neg_img'] = self.transform(sample['neg_img'])
        return sample
    def __len__(self):
        # Number of pre-sampled triplets (== number of input images).
        return len(self.data)
def get_dataloader(root_dir,batch_size, num_workers,image_size):
    """Build the train/valid FaceDataset pair and wrap each in a DataLoader.

    Returns:
        (dataloaders, data_size): two dicts keyed by 'train'/'valid' holding
        the DataLoader and the dataset length respectively.
    """
    datasets = {
        'train': FaceDataset(DataPath=root_dir,
                             DataType="train",
                             img_size=image_size),
        'valid': FaceDataset(DataPath=root_dir,
                             DataType="val",
                             img_size=image_size)}
    loaders = {}
    sizes = {}
    for phase in ('train', 'valid'):
        # Note: shuffling is off; triplets were already randomly sampled.
        loaders[phase] = torch.utils.data.DataLoader(
            datasets[phase], batch_size=batch_size, shuffle=False, num_workers=num_workers)
        sizes[phase] = len(datasets[phase])
    return loaders, sizes
class test_data():
    """Loader for random validation samples used for TensorBoard spot checks.

    Reads <DataPath>/val_data.txt (image paths) and val_label.txt (integer
    labels) once at construction.
    Fix: the original left both file handles unclosed; they are now opened
    with context managers.
    """
    def __init__(self,DataPath,img_size=30,transform = None):
        # NOTE(review): the `transform` parameter is accepted but ignored —
        # the fixed Compose below is always used; confirm intent.
        self.image_size = (img_size,img_size)
        with open(os.path.join(DataPath,'val_data.txt')) as f:
            self.data = [x.strip() for x in f.readlines()]
        with open(os.path.join(DataPath,'val_label.txt')) as f:
            self.label = [int(x) for x in f.readlines()]
        self.transform = T.Compose([
            T.Resize(img_size),
            T.CenterCrop(img_size),
            T.ToTensor(),
            # normalize
            ])
    def get_data(self):
        """Return 100 randomly chosen (image_tensor, label) samples.

        Sampling is with replacement via np.random.choice.
        """
        data_index = np.random.choice(np.arange(len(self.data)),size=100)
        pic_array = []
        label_array = []
        for index in data_index:
            pic = Image.open(self.data[index])
            pic = self.transform(pic)
            pic_array.append(pic)
            label_array.append(self.label[index])
        # pic_array = torch.
        return pic_array,label_array
if __name__ == '__main__':
    # Manual smoke test: write two rounds of 100 random validation images to
    # TensorBoard so they can be inspected in the browser.
    # data = FaceDataset('./data_txt')
    # data.__getitem__(0)
    from tensorboardX import SummaryWriter
    data_path = './data_txt'
    log_dir = "./logs/face_try"
    writer = SummaryWriter(log_dir=log_dir)
    Data = test_data('./data_txt')
    pics,labels = Data.get_data()
    for i,pic in enumerate(pics):
        writer.add_image("Person {}/{}".format(labels[i],i),pic,0)
    for i,pic in enumerate(pics):
        writer.add_image("Person {}/{}".format(labels[i],i),pic,1)
|
{"/engine_trt.py": ["/Config.py"], "/test.py": ["/Config.py", "/ONNX_model.py"], "/pytorch_to_onnx.py": ["/ONNX_model.py"], "/train.py": ["/dataset.py", "/Config.py", "/lr_scheduler.py"]}
|
28,505
|
aleMartin99/Deer_Professor
|
refs/heads/master
|
/utils.py
|
def transform_orders(orders):
    """Return *orders* unchanged.

    Identity placeholder: kept as the single hook where real order
    post-processing can be plugged in later without touching callers.
    """
    return orders
|
{"/deer_proffesor.py": ["/utils.py"]}
|
28,506
|
aleMartin99/Deer_Professor
|
refs/heads/master
|
/deer_proffesor.py
|
"""
This is a echo bot.
It echoes any incoming text messages.
"""
import aiogram.utils.markdown as md
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types import ParseMode
from aiogram.utils import executor
import logging
import asyncio
import threading
import time
from datetime import datetime, timedelta, timezone
from typing import List
import json
import os
from utils import transform_orders
import pytz
# Configure logging
logging.basicConfig(level=logging.INFO)
# Bot credentials live in info.json under the "token" key.
configs = json.load(open('info.json'))
API_TOKEN = configs['token']
loop = asyncio.get_event_loop()
bot = Bot(token=API_TOKEN, loop=loop)
# In-memory FSM storage: all conversation state is lost on restart.
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
# Castle name -> emoji map; appears unused by the handlers in this file.
castles = {
    "deer": "🦌",
    "shark": "🦈",
    "dragon": "🐉",
    "moon": "🌑",
    "wolf": "🐺",
    "potato": "🥔",
    "eagle": "🦅"
}
class Form(StatesGroup):
    """Finite-state-machine states for the admission conversation."""
    me = State()  # stored as 'Form:me' — awaiting the forwarded /me message
    pledge = State()  # stored as 'Form:pledge' — awaiting the forwarded pledge
    regular_behavior = State()  # stored as 'Form:regular_behavior' — admission finished
@dp.message_handler(commands=['start'], commands_prefix='/' )
async def send_welcome(message: types.Message):
    """
    Handle the `/start` command: enter the Form.me state and ask the user
    to forward their /me message from @chtwrsbot.
    """
    await Form.me.set()
    await bot.send_message(message.chat.id, "Welcome, young fawn. I'm the Deer Professor, manager of the Acadeermy. \n Please go to @chtwrsbot and type /me and forward it to me so we can continue with the admission process...")
@dp.message_handler(state=Form.me)
async def process_me(message: types.Message, state: FSMContext):
    """
    Process the forwarded /me message (Form.me state).

    Accepts only messages forwarded from the game bot and containing
    'of deerhorn castle'; stores the text and advances to Form.pledge.
    """
    # 408101137 — presumably @chtwrsbot's Telegram user id; verify.
    # Fixed idioms: `is None` instead of `== None`, `!=` instead of
    # the precedence-confusing `not x == y`.
    if message.forward_from is None or message.forward_from['id'] != 408101137:
        await bot.send_message(message.chat.id, "It looks like you just copied your /me and didn't forward it from @chtwrsbot. \nHow do I know that you didn't make that up? ")
    elif 'of deerhorn castle' in message.text.lower():
        async with state.proxy() as data:
            data['me'] = message.text
        await Form.next()
        await bot.send_message(message.chat.id, "Great, now send your pledge")
    else:
        await bot.send_message(message.chat.id, "It looks like you're not a warrior from Deerhorn Castle, buh bye")
# Pledge validation: must be forwarded from the game bot and mention Deerhorn Castle.
@dp.message_handler(state=Form.pledge)
async def process_pledge(message: types.Message):
    """
    Process the forwarded pledge (Form.pledge state).

    Deerhorn invitees are admitted directly; invitees of other castles are
    referred to the human teachers.
    """
    print(message.text)
    # Fixed debug print: the original `'...' and '...' in s` only evaluated the
    # second membership test (a non-empty string literal is always truthy).
    print('you were invited by the knight of the' in message.text.lower() and 'deerhorn castle' in message.text.lower())
    if message.forward_from is None or message.forward_from['id'] != 408101137:
        print('copied or not forwarded')
        await bot.send_message(message.chat.id, "It looks like you just copied your pledge and didn't forward it from @chtwrsbot. \nHow do I know that you didn't make that up? Please forward it now.")
    elif 'you were invited by the knight of the' in message.text.lower():
        if 'deerhorn castle' not in message.text.lower():
            # Invited by another castle: hand over to the human teachers.
            await bot.send_message(message.chat.id, "Hmmm, it looks like you were invited by a knight from another castle \nI'll put you in contact with our human teachers, feel free to PM (Private Message) them and they'll finish processing your admission.")
            return await bot.send_message(message.chat.id, 'This is the list of available teachers: \n@larrygf \n@hiancd \n@scarlettV \n@cptspooks')
        else:
            print('good')
            await Form.regular_behavior.set()
            await bot.send_message(message.chat.id, "Fabulous, you were invited by a fellow deer, you're good to go")
            return await bot.send_message(message.chat.id, "You can join the Acadeermy using this link t.me/commandbottest")
    else:
        await bot.send_message(message.chat.id, "It looks like you didn't forward a valid pledge, try again")
@dp.message_handler(state=Form.regular_behavior)
async def process_me(message: types.Message, state: FSMContext):
    """
    Normal bot behavior after admission: reject any further commands.

    NOTE(review): reuses the module-level name `process_me` from the Form.me
    handler above — registration via the decorator still works, but a
    distinct name would aid readability.
    """
    await bot.send_message(message.chat.id, "Additional commands are not supported right now")
# @dp.message_handler(state=Form.badpledge)
# async def bad_pledge(message: types.Message):
# # Update state and data
# await Form.next()
# # await state.update_data(age=int(message.text))
# # Configure ReplyKeyboardMarkup
# markup = types.ReplyKeyboardMarkup(resize_keyboard=True, selective=True)
# markup.add("Male", "Female")
# markup.add("Other")
# await message.reply("What is your gender?", reply_markup=markup)
# @dp.message_handler(state=Form.gender)
# async def process_gender(message: types.Message, state: FSMContext):
# async with state.proxy() as data:
# data['gender'] = message.text
# # Remove keyboard
# markup = types.ReplyKeyboardRemove()
# # And send message
# await bot.send_message(message.chat.id, md.text(
# md.text('Hi! Nice to meet you,', md.bold(data['name'])),
# md.text('Age:', data['age']),
# md.text('Gender:', data['gender']),
# sep='\n'), reply_markup=markup, parse_mode=ParseMode.MARKDOWN)
# # Finish conversation
# data.state = None
if __name__ == '__main__':
    # Long-polling entry point; skip_updates drops messages received while offline.
    executor.start_polling(dp, loop=loop, skip_updates=True)
|
{"/deer_proffesor.py": ["/utils.py"]}
|
28,508
|
Mike-Polley/Live-Forex-Tracker
|
refs/heads/master
|
/gui.py
|
import matplotlib.animation as animation
from matplotlib import style
import reader as rd
import scraper as sc
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
import matplotlib.pyplot as plt
# ggplot styling for the live chart.
style.use('ggplot')
f = Figure (figsize=(5,4),dpi=100)
a = f.add_subplot(111)
# Single shared reader accumulates the rolling rate history across frames.
reader = rd.reader()
def animate(i):
    """FuncAnimation callback: scrape the latest rates, persist, and redraw.

    Args:
        i: frame counter supplied by matplotlib (unused).

    Side effects: fetches the OANDA page, rewrites data.json, and replots
    the shared axes `a` with the accumulated GBP/USD history.
    """
    scraper = sc.scraper()
    site = scraper.scrapeWebsite('https://www1.oanda.com/currency/live-exchange-rates/')
    parsed = scraper.parserHTML(site)
    scraper.writer(parsed,'data.json')
    df = reader.readFile('data.json')
    newDF = reader.createData(df)
    a.clear()
    # Removed the unused `max`/`min` locals that shadowed the builtins.
    a.set_title('Current $ to £ Exchange Rates')
    # NOTE(review): y-limits are hard-coded for a narrow GBP/USD band — adjust
    # if the rate drifts outside [1.30, 1.32].
    newDF.plot(kind='line',x='TIME',y='GBP_USD',ax=a,ylim=[1.30,1.32])
# Embed the matplotlib figure in a Tk window and refresh every 10 seconds.
root = tk.Tk()
canvas = FigureCanvasTkAgg(f,root)
canvas._tkcanvas.pack(side=tk.TOP,fill=tk.BOTH,expand=True)
ani = animation.FuncAnimation(f,animate, interval=10000)
root.mainloop()
|
{"/gui.py": ["/reader.py", "/scraper.py"]}
|
28,509
|
Mike-Polley/Live-Forex-Tracker
|
refs/heads/master
|
/scraper.py
|
import re
import requests
from bs4 import BeautifulSoup
import urllib
import json
class scraper:
    """Minimal scraper for OANDA's live-exchange-rates page.

    Fetches the page, extracts the embedded `lrrr_data` JSON blob from a
    script tag, and writes it to a file for the reader to consume.
    Fix: `writer` now uses a context manager so the file handle is closed
    even if a write raises (the original leaked the handle on error).
    """
    def __init__(self):
        # One Session so TCP connections are reused across requests.
        self.session = requests.Session()
    def scrapeWebsite(self,url):
        """Return the raw HTML of *url* as text."""
        response = self.session.get(url)
        response_text = response.text
        return response_text
    def parserHTML(self,text):
        """Extract the rate JSON blob(s) from the page's script tags.

        NOTE(review): hard-coded script index 12 and the duplicate-append
        loop below are fragile but load-bearing — the downstream regexes
        operate on the stringified list, so the shape is kept as-is.
        """
        para = []
        soup = BeautifulSoup(text,'html.parser')
        script = soup.findAll('script')
        tag = script[12]
        for x in tag:
            para.append(str(tag))
        para = str(para)
        para = re.findall(r'lrrr_data = {.*}',para)
        para = str(para)
        para = re.findall(r'{.*}',para)
        return para
    def writer(self,text,file):
        """Write every string in *text* to *file*, truncating existing content."""
        with open(file,'w+') as f:
            for l in text:
                f.write(l)
|
{"/gui.py": ["/reader.py", "/scraper.py"]}
|
28,510
|
Mike-Polley/Live-Forex-Tracker
|
refs/heads/master
|
/reader.py
|
import pandas as pd
import matplotlib.pyplot as plt
import time
from datetime import datetime, timedelta
class reader:
    """Accumulates a rolling window (at most 100 samples) of FX rates.

    Bug fix: the original `createData` only popped once a series reached 100
    entries and never appended the new sample, so after the window filled it
    dropped every new reading instead of sliding. It now evicts the oldest
    sample and always appends, keeping the three series the same length.
    """
    WINDOW = 100  # maximum number of retained samples per series
    def __init__(self):
        self.GBP_USD = []
        self.USD_GBP = []
        self.TIME = []
    def readFile(self,json):
        """Read the scraped rates file and return a one-row frame.

        Args:
            json: path to the JSON file written by the scraper (the
                parameter name shadows the stdlib module; kept for
                interface compatibility).
        """
        df = pd.read_json(json)
        df = df[['GBP_USD','USD_GBP']]
        df = df.iloc[0:1]
        return df
    def createData(self,df):
        """Append the newest sample from *df* and return the window as a frame.

        Uses scalar `.iloc[0, col]` access instead of float(Series), which is
        deprecated in recent pandas.
        """
        if len(self.GBP_USD) >= self.WINDOW:
            self.GBP_USD.pop(0)
        self.GBP_USD.append(float(df.iloc[0, 0]))
        if len(self.USD_GBP) >= self.WINDOW:
            self.USD_GBP.pop(0)
        self.USD_GBP.append(float(df.iloc[0, 1]))
        if len(self.TIME) >= self.WINDOW:
            self.TIME.pop(0)
        # Timestamp each sample in UTC (HH:MM:SS).
        self.TIME.append(time.strftime('%H:%M:%S', time.gmtime()))
        data = {'GBP_USD':self.GBP_USD, 'USD_GBP':self.USD_GBP,'TIME':self.TIME}
        newDF = pd.DataFrame(data)
        return newDF
|
{"/gui.py": ["/reader.py", "/scraper.py"]}
|
28,511
|
Mike-Polley/Live-Forex-Tracker
|
refs/heads/master
|
/test.py
|
# Ad-hoc smoke check: confirm data.json parses and contains a GBP_USD column.
import pandas as pd
df = pd.read_json('data.json')
print(df[['GBP_USD']])
|
{"/gui.py": ["/reader.py", "/scraper.py"]}
|
28,516
|
JoNowakowska/Tkinter-chat-app
|
refs/heads/master
|
/chat_app.py
|
import tkinter as tk
from tkinter import ttk
from frames import Chat
class Messenger(tk.Tk):
    """Top-level application window hosting the single chat frame."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixed starting size with a small lower bound on resizing.
        self.geometry("1200x800")
        self.minsize(100, 100)
        # Let the single grid cell absorb all resize space.
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        chat = Chat(self)
        chat.grid(padx=0, pady=10, sticky='nwes')
        self.chat_frame = chat
# Build the UI and enter the Tk event loop (blocks until the window closes).
messenger = Messenger()
messenger.mainloop()
|
{"/chat_app.py": ["/frames/__init__.py"], "/frames/__init__.py": ["/frames/message_bubble.py"]}
|
28,517
|
JoNowakowska/Tkinter-chat-app
|
refs/heads/master
|
/frames/__init__.py
|
from frames.chat import Chat
from frames.message_bubble import MessageBubble
|
{"/chat_app.py": ["/frames/__init__.py"], "/frames/__init__.py": ["/frames/message_bubble.py"]}
|
28,518
|
JoNowakowska/Tkinter-chat-app
|
refs/heads/master
|
/frames/message_bubble.py
|
import tkinter as tk
from tkinter import ttk
import requests
import datetime
from PIL import Image, ImageTk
MAX_MESSAGE_WIDTH = 800
class MessageBubble(tk.Canvas):
    """Scrollable canvas that renders chat messages as date/text/avatar rows.

    The canvas hosts a single inner frame (`message_frame`) via a canvas
    window; a vertical scrollbar and mouse-wheel binding provide scrolling.
    """
    def __init__(self, container, *args, **kwargs):
        super().__init__(container, *args, **kwargs, highlightthickness = 0)
        self.message_frame = ttk.Frame(self)
        self.message_frame.columnconfigure(0, weight = 1)
        # Embed the frame in the canvas so it can be scrolled.
        self.scrollable_window = self.create_window((0,0), window = self.message_frame, anchor = 'nw')
        def configure_scroll_region(event):
            # Grow the scroll region to cover everything drawn so far.
            self.configure(scrollregion = self.bbox('all'))
        def configure_window_size(event):
            # Keep the inner frame as wide as the canvas itself.
            self.itemconfig(self.scrollable_window, width = self.winfo_width())
        self.bind("<Configure>", configure_window_size)
        self.message_frame.bind("<Configure>", configure_scroll_region)
        # NOTE(review): bind_all is application-wide; event.delta/120 assumes
        # Windows wheel semantics — confirm behavior on X11/macOS.
        self.bind_all("<MouseWheel>", self._on_mousewheel)
        scrollbar = ttk.Scrollbar(container, orient='vertical', command = self.yview)
        scrollbar.grid(row=0, column = 1, sticky= 'ns')
        self.configure(yscrollcommand = scrollbar.set)
        # Start scrolled to the bottom (newest messages).
        self.yview_moveto(1.0)
    def _on_mousewheel(self, event):
        # Scroll one unit per wheel notch; sign flipped for natural direction.
        self.yview_scroll(-int(event.delta/120), 'units')
    def show_messages(self, messages, message_labels):
        """Append widgets for any message in *messages* not yet displayed.

        Args:
            messages: iterable of dicts with 'date' (unix timestamp) and
                'message' (text) keys.
            message_labels: mutable list of (date_label, message_label)
                pairs; used both for de-duplication and for re-wrapping on
                resize. New pairs are appended in place.
        """
        existing_messages = [(date['text'], message['text']) for date, message in message_labels]
        for message in messages:
            msg_date = datetime.datetime.fromtimestamp(message['date']).strftime('%d-%m-%Y %H:%M:%S')
            if (msg_date, message['message']) not in existing_messages:
                container = ttk.Frame(self.message_frame)
                container.grid(padx = (10,100), pady=10, sticky = 'ew')
                container.columnconfigure(1, weight = 1)
                def reconfigure_message_labels(event):
                    # Re-wrap every message when the container is resized.
                    for _, label in message_labels:
                        label.configure(wraplength = min(container.winfo_width() - 230, MAX_MESSAGE_WIDTH))
                container.bind("<Configure>", reconfigure_message_labels)
                date_label = ttk.Label(
                    container,
                    text = msg_date
                )
                date_label.grid(row = 0, column=1, sticky = 'new')
                message_label = ttk.Label(
                    container,
                    text = message['message'],
                    justify = 'left',
                    anchor = 'w'
                )
                message_label.grid(row = 1, column=1, sticky = 'ewsn')
                # NOTE(review): the avatar image is re-read from disk for every
                # message — could be loaded once and reused.
                avatar = Image.open('./JKN on the beach.png')
                avatar_tk = ImageTk.PhotoImage(avatar)
                avatar_label = ttk.Label(
                    container,
                    image = avatar_tk
                )
                # Keep a reference so Tk does not garbage-collect the image.
                avatar_label.image = avatar_tk
                avatar_label.grid(row = 0 , rowspan = 2, column = 0)
                message_labels.append((date_label, message_label))
|
{"/chat_app.py": ["/frames/__init__.py"], "/frames/__init__.py": ["/frames/message_bubble.py"]}
|
28,519
|
superyang713/harvest_automation
|
refs/heads/main
|
/harvest/harvest.py
|
"""
Note:
1. Resume timer needs some improvement. When there are multiple
items, it does not know which task's time to start.
2. Login should use harvest build-in login method, not google SSO.
"""
import time
import logging
from datetime import date, datetime
from selenium import webdriver
from selenium.common.exceptions import InvalidSessionIdException
from selenium.common.exceptions import NoSuchElementException
class Harvest:
    """Selenium-driven automation for the Harvest timesheet web UI.

    Logs in via headless Chrome at construction; timesheet entries are
    filled by assigning the date/project/task/note/duration properties in
    that order (the `date` setter opens the new-entry form the other
    setters write into), then calling submit() or start_timer().
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Holds the live, signed-in webdriver session.
        self.driver = self._login()
    def _login(self):
        """Sign in to Harvest and return the authenticated webdriver.

        Raises:
            ValueError: if an alert element appears after submitting,
                indicating a wrong username or password.
        """
        logging.debug("Fireing up webdriver")
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        driver = webdriver.Chrome(options=options)
        logging.debug("Navigating to sign in page.")
        driver.get("https://id.getharvest.com/harvest/sign_in")
        logging.debug("Filling out username and password")
        driver.find_element_by_xpath('//*[@id="email"]')\
            .send_keys(self.username)
        driver.find_element_by_xpath('//*[@id="password"]')\
            .send_keys(self.password)
        logging.debug("Signing in...")
        driver.find_element_by_xpath('//*[@id="log-in"]').click()
        try:
            # An alert element is only present when the login failed.
            driver.find_element_by_class_name("alert")
        except NoSuchElementException:
            logging.info("Successfully signed in.")
            return driver
        else:
            message = "Wrong username or password"
            logging.error(message)
            raise ValueError(message)
    @property
    def date(self):
        """Date currently shown, parsed from the /time/day/Y/M/D/... URL.

        Falls back to today when the URL is not a day view.
        """
        if "day" not in self.driver.current_url:
            return date.today()
        info = self.driver.current_url.split("/")
        year, month, day = int(info[-4]), int(info[-3]), int(info[-2])
        return date(year, month, day)
    @date.setter
    def date(self, value: date):
        """Navigate to *value*'s day view and open the new-entry form."""
        logging.info("Populate the date %s", str(value))
        # NOTE(review): the trailing path segment (3790451) looks like a
        # hard-coded user/account id — confirm before reuse.
        url = (
            "https://mhsfdc.harvestapp.com/time/day/"
            f"{value.year}/{value.month}/{value.day}/3790451"
        )
        self.driver.get(url)
        logging.debug("Start fillin the new entry form.")
        selector = 'button[data-analytics-element-id="timesheet-new-entry"]'
        self.driver \
            .find_element_by_css_selector(selector) \
            .click()
    @property
    def project(self):
        """Text of the currently selected project in the entry form."""
        xpath = (
            '//*[@id="calendar-recurring-event-popover-wrapper"]'
            '/div[2]/div/a/span'
        )
        return self.driver.find_element_by_xpath(xpath).text
    @project.setter
    def project(self, value):
        """Open the project dropdown and pick the entry titled *value*."""
        logging.info("Select the project %s.", value)
        xpath = (
            '//*[@id="calendar-recurring-event-popover-wrapper"]'
            '/div[2]/div/a'
        )
        self.driver.find_element_by_xpath(xpath).click()
        self.driver \
            .find_element_by_css_selector(f'li[title="{value}"]') \
            .click()
    @property
    def task(self):
        """Text of the currently selected task in the entry form."""
        xpath = '/html/body/div[3]/div[1]/div[1]/form/div[2]/div/a/span'
        return self.driver.find_element_by_xpath(xpath).text
    @task.setter
    def task(self, value):
        """Open the task dropdown and pick the entry titled *value*."""
        logging.info("Select the task %s.", value)
        xpath = '/html/body/div[3]/div[1]/div[1]/form/div[2]/div/a'
        self.driver.find_element_by_xpath(xpath).click()
        self.driver \
            .find_element_by_css_selector(f'li[title="{value}"]') \
            .click()
    @property
    def note(self):
        # Write-only property: reading the note is not implemented.
        return
    @note.setter
    def note(self, text):
        """Type *text* into the notes field of the entry form."""
        logging.info("Populate the note field %s", text)
        self.driver.find_element_by_name("notes").send_keys(text)
    @property
    def duration(self):
        """Current value of the hours field, as the raw string."""
        return self.driver.find_element_by_name("hours").get_attribute("value")
    @duration.setter
    def duration(self, value: str):
        """
        format: X:X or X.X
        """
        logging.info("Populate the duration field %s", value)
        self.driver.find_element_by_name("hours").send_keys(value)
    def submit(self):
        """Save the entry form; requires a duration to have been set.

        Raises:
            ValueError: if the button still reads "Start Timer" (no duration).
        """
        logging.info("Submit the entry form.")
        xpath = "/html/body/div[3]/div[1]/div[1]/form/div[4]/button[1]"
        elem = self.driver.find_element_by_xpath(xpath)
        if elem.text != "Save Entry":
            raise ValueError("Please specify duration.")
        elem.click()
    def start_timer(self):
        """Start a running timer for the open entry (no duration allowed).

        Closes the browser afterwards; the timer keeps running server-side.
        """
        xpath = "/html/body/div[3]/div[1]/div[1]/form/div[4]/button[1]"
        elem = self.driver.find_element_by_xpath(xpath)
        if elem.text != "Start Timer":
            raise ValueError("Please do not specify duration.")
        elem.click()
        self.driver.close()
    def stop_timer(self):
        """Re-login with a fresh session and stop the running timer.

        Raises:
            ValueError: if no stop-timer button is present.
        """
        try:
            self.driver.close()
        except InvalidSessionIdException:
            # Previous session was already gone — nothing to close.
            pass
        self.driver = self._login()
        time.sleep(2)
        selector = 'button[data-analytics-element-id="timesheet-stop-timer"]'
        try:
            self.driver \
                .find_element_by_css_selector(selector) \
                .click()
        except NoSuchElementException:
            raise ValueError("No timer to stop.")
    def resume_timer(self):
        """Re-login with a fresh session and resume the most recent timer.

        NOTE(review): with multiple entries on the page it is unclear which
        entry's start button is matched first (see module docstring).

        Raises:
            ValueError: if no start-timer button is present.
        """
        try:
            self.driver.close()
        except InvalidSessionIdException:
            pass
        self.driver = self._login()
        time.sleep(2)
        selector = 'button[data-analytics-element-id="timesheet-start-timer"]'
        try:
            self.driver \
                .find_element_by_css_selector(selector) \
                .click()
        except NoSuchElementException:
            raise ValueError("No timer to resume.")
|
{"/harvest/__init__.py": ["/harvest/harvest.py"], "/example/submit_one_day_entries.py": ["/harvest/__init__.py"]}
|
28,520
|
superyang713/harvest_automation
|
refs/heads/main
|
/harvest/__init__.py
|
from .harvest import Harvest
__all__ = [
"Harvest",
]
|
{"/harvest/__init__.py": ["/harvest/harvest.py"], "/example/submit_one_day_entries.py": ["/harvest/__init__.py"]}
|
28,521
|
superyang713/harvest_automation
|
refs/heads/main
|
/example/submit_one_day_entries.py
|
import time
from datetime import date
from harvest import Harvest
class Project:
    """Harvest project names; must match the dropdown labels exactly."""
    INTERNAL = "Internal -- Project"
class Task:
    """Harvest task names; must match the dropdown labels exactly."""
    CERTIFICATION = "2. Internal -- Certifications"
    MEETING = "2. Internal -- Meeting"
    RND = "2. Internal -- R&D"
    TRAINING = "2. Internal -- Internal Training"
def main():
    """Entry point: sign in to Harvest and submit today's entries.

    NOTE(review): username/password are blank placeholders — fill them in
    (or load them from the environment) before running.
    """
    username = ""
    password = ""
    harvest = Harvest(username, password)
    _date = date.today()
    update_one_day(harvest, _date)
def update_one_day(harvest: Harvest, date):
    """Submit a hard-coded list of timesheet entries for one day.

    Args:
        harvest: an authenticated Harvest session.
        date: the day to file the entries under (parameter name shadows the
            imported `datetime.date`; kept for interface compatibility).
    """
    entries = [
        {
            "project": Project.INTERNAL,
            "task": Task.RND,
            "note": "Create a project for automating harvest.",
            "duration": "3:30"
        },
        {
            "project": Project.INTERNAL,
            "task": Task.MEETING,
            "note": "Kickoff meeting.",
            "duration": "0:30"
        },
        {
            "project": Project.INTERNAL,
            "task": Task.RND,
            "note": "Code review for automation package",
            "duration": "1:00"
        },
        {
            "project": Project.INTERNAL,
            "task": Task.TRAINING,
            "note": "Practice bigquery SQL",
            "duration": "1:30"
        },
        {
            "project": Project.INTERNAL,
            "task": Task.CERTIFICATION,
            "note": "Learn GCP and AWS for professional certificate exams",
            "duration": "1:30"
        },
    ]
    for entry in entries:
        # Every entry is filed under the same day.
        entry["date"] = date
        update_one_entry(harvest, **entry)
def update_one_entry(
        harvest: Harvest,
        date,
        project,
        task,
        note,
        duration):
    """Fill and submit a single timesheet entry.

    Assignment order matters: setting `date` navigates to the day view and
    opens the new-entry form that the later setters fill in.
    """
    harvest.date = date
    harvest.project = project
    harvest.task = task
    harvest.note = note
    harvest.duration = duration
    harvest.submit()
    # Give the web UI time to persist the entry before starting the next one.
    time.sleep(5)
if __name__ == "__main__":
    main()
|
{"/harvest/__init__.py": ["/harvest/harvest.py"], "/example/submit_one_day_entries.py": ["/harvest/__init__.py"]}
|
28,524
|
menly-ctrl/shop
|
refs/heads/master
|
/news/views.py
|
from django.shortcuts import render
from .models import Item
def index(request):
    """Render the main page with every shop item.

    Each item is passed to the template as a 4-element list:
    [image, name, description, price].
    """
    # Comprehension replaces the original append-loop (same output order).
    items_list = [
        [el.image, el.item_name, el.item_description, el.item_price]
        for el in Item.objects.all()
    ]
    data = {
        'items' : items_list,
    }
    return render(request, 'main.html', context=data)
def contacts(request):
    """Render the static contacts page."""
    return render(request, 'contacts.html')
def delivery(request):
    """Render the static delivery-information page."""
    return render(request, 'delivery.html')
def login(request):
    """Render the login page (template only; no authentication here)."""
    return render(request, 'login.html')
def gadget(request):
    """Render the static gadget page."""
    return render(request, 'gadget.html')
|
{"/news/views.py": ["/news/models.py"], "/news/admin.py": ["/news/models.py"]}
|
28,525
|
menly-ctrl/shop
|
refs/heads/master
|
/news/models.py
|
from django.db import models
class Item(models.Model):
    """A shop item displayed on the main page."""
    # Uploaded under MEDIA_ROOT/items/; verbose_name is Russian for "Image".
    image = models.ImageField(upload_to="items/", verbose_name="Изображение")
    item_name = models.CharField(max_length=255)
    item_description = models.CharField(max_length=255)
    # Whole-number price — units/currency not specified here; TODO confirm.
    item_price = models.IntegerField()
|
{"/news/views.py": ["/news/models.py"], "/news/admin.py": ["/news/models.py"]}
|
28,526
|
menly-ctrl/shop
|
refs/heads/master
|
/news/admin.py
|
from django.contrib import admin
from .models import Item
@admin.register(Item)
class ItemsAdmin(admin.ModelAdmin):
    """Default admin interface for Item — no customization."""
    pass
|
{"/news/views.py": ["/news/models.py"], "/news/admin.py": ["/news/models.py"]}
|
28,527
|
hugosteixeira/microservices
|
refs/heads/master
|
/main.py
|
import uvicorn
from fastapi import FastAPI
from app.router import api_router
# Interactive API docs are disabled (docs_url/redoc_url both None).
app = FastAPI(docs_url=None,redoc_url=None)
# All endpoints live under the /v1 version prefix.
app.include_router(api_router, prefix="/v1")
if __name__ == "__main__":
    # Listen on all interfaces (uvicorn default port 8000).
    uvicorn.run(app, host='0.0.0.0')
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,528
|
hugosteixeira/microservices
|
refs/heads/master
|
/manager.py
|
import os
import sys
class ModuleManager:
    """Scaffolds a new application module: a folder under app/ containing
    the standard set of empty source files."""

    # Files created inside every new module folder.
    MODULE_FILES = ('__init__.py', 'controller.py', 'routes.py',
                    'services.py', 'models.py')

    def createFolder(self, path):
        """Create the module folder; print a message and re-raise on failure."""
        try:
            os.mkdir(path)
        except OSError as err:
            # NOTE(review): any OSError (not only EEXIST) prints this message.
            print('Folder already exists')
            raise err

    def createFiles(self, path):
        """Create the standard empty module files inside *path*.

        Fix: the original opened each file without `with`, leaking handles
        on error; the loop also removes the copy-pasted open/close blocks.
        """
        try:
            for name in self.MODULE_FILES:
                # 'with' guarantees the handle is closed even on error.
                with open(f'{path}/{name}', 'w'):
                    pass
        except Exception as err:
            print('Failed to create the files')
            raise err

    def generateModule(self, name):
        """Create app/<name> with the standard files, reporting the result."""
        path = f'{os.getcwd()}/app/{name}'
        try:
            self.createFolder(path)
            self.createFiles(path)
        except OSError:
            print("Creation of the module %s failed" % name)
        else:
            print("Successfully created the module %s " % name)
def module():
    # CLI action: scaffold the module named by the second CLI argument.
    module_manager = ModuleManager()
    module_manager.generateModule(sys.argv[2])
# Maps the first CLI argument to the action to run.
# NOTE(review): missing/unknown arguments raise IndexError/KeyError.
manager = {
    "module": module
}
manager[sys.argv[1]]()
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,529
|
hugosteixeira/microservices
|
refs/heads/master
|
/app/geolocation/controller.py
|
from .services import Service
class Controller:
    # Shared Service instance (class attribute: one per process).
    service = Service()
    def get_geolocation(self, request):
        # Resolve the caller's IP from the connection and look it up.
        ip = request.client.host
        return self.service.get_geolocation(ip)
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,530
|
hugosteixeira/microservices
|
refs/heads/master
|
/app/geolocation/routes.py
|
from fastapi import APIRouter
from .controller import Controller
from starlette.requests import Request
# Router and controller are module-level singletons.
router = APIRouter()
controller = Controller()
@router.get("/")
def get(request: Request):
    # GET /: geolocation of the requesting client's IP.
    return controller.get_geolocation(request)
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,531
|
hugosteixeira/microservices
|
refs/heads/master
|
/app/configs.py
|
# SECURITY(review): secret API token committed to source control —
# move it to an environment variable and rotate the key.
API_TOKEN = "e6753d5fc9bac4"
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,532
|
hugosteixeira/microservices
|
refs/heads/master
|
/app/router.py
|
from fastapi import APIRouter
from .geolocation import routes as geolocation
# Root API router; mounts each feature module under its own prefix.
api_router = APIRouter()
api_router.include_router(geolocation.router, tags=["geolocation"], prefix="/geolocation")
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,533
|
hugosteixeira/microservices
|
refs/heads/master
|
/app/geolocation/services.py
|
from .models import GeolocationModel
import requests
from ..configs import API_TOKEN
class Service:
    """Looks up latitude/longitude for an IP via the ipinfo.io API."""

    def get_geolocation(self, ip):
        """Return a populated GeolocationModel, or an error dict when the
        lookup or response parsing fails (best-effort, never raises)."""
        try:
            address = f"https://ipinfo.io/{ip}/json?token={API_TOKEN}"
            # Timeout keeps a slow upstream from hanging the request forever.
            r = requests.get(address, timeout=10)
            geolocation = GeolocationModel()
            geolocation.lat, geolocation.long = r.json()["loc"].split(',')
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows KeyboardInterrupt/SystemExit.
            geolocation = {"error":True, "message": "Ip not Found"}
        return geolocation
|
{"/main.py": ["/app/router.py"], "/app/geolocation/controller.py": ["/app/geolocation/services.py"], "/app/geolocation/routes.py": ["/app/geolocation/controller.py"], "/app/geolocation/services.py": ["/app/configs.py"]}
|
28,550
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/database/schemas.py
|
from typing import List
from pydantic import BaseModel
import datetime
class AddressTransactionCreate(BaseModel):
    # Request body for creating a transaction from a known address.
    address_id: str
class TransactionCreate(BaseModel):
    # Input schema for creating a transaction (date is set by the DB layer).
    id: str
    value: float
    addr_to_id: str
    addr_from_id: str
class Transaction(BaseModel):
    # Read schema for a persisted transaction.
    id: str
    value: float
    date: datetime.datetime
    addr_to_id: str
    addr_from_id: str
    class Config:
        # Allow construction directly from SQLAlchemy ORM rows.
        orm_mode = True
class AddressCreate(BaseModel):
    # Address creation takes no input; the id is generated server-side.
    pass
class AddressTransactions(BaseModel):
    # Slim transaction view embedded in an Address response.
    # NOTE(review): value is int here but float elsewhere — confirm intended.
    id: str
    value: int
    class Config:
        orm_mode = True
class Address(BaseModel):
    # Read schema for an address with its incoming/outgoing transactions.
    id: str
    transactions_to: List[AddressTransactions] = []
    transactions_from: List[AddressTransactions] = []
    class Config:
        orm_mode = True
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,551
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/utils.py
|
import random
import secrets
from typing import List
def generate_hex(n_bytes: int):
    """Return a cryptographically random hex string of 2*n_bytes characters."""
    token = secrets.token_hex(n_bytes)
    return token
def generate_float(min_value: float = 0.01, max_value: float = 10000.0):
    """Return a uniformly random float between min_value and max_value."""
    value = random.uniform(min_value, max_value)
    return value
def pick_random_elements(ids: List[int], elements: int = 2):
    """Return *elements* distinct items chosen at random from *ids*."""
    chosen = random.sample(ids, elements)
    return chosen
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,552
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/database/repository.py
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_address(db: Session, address_id: str):
    # Single address by primary key, or None.
    return db.query(models.Address).filter(models.Address.id == address_id).first()
def get_addresses(db: Session, skip: int = 0, limit: int = 100):
    # Paginated list of addresses.
    return db.query(models.Address).offset(skip).limit(limit).all()
def get_addresses_ids(db: Session):
    # All address ids, as 1-tuple row objects.
    return db.query(models.Address.id).all()
def create_address(db: Session, address: schemas.AddressCreate, generated_id: str):
    # NOTE(review): the 'address' payload is currently unused; only the
    # server-generated id is stored.
    db_user = models.Address(id=generated_id)
    db.add(db_user)
    db.commit()
    db.refresh(db_user)
    return db_user
def get_transaction(db: Session, transaction_id: str):
    # Single transaction by primary key, or None.
    return db.query(models.Transaction).filter(models.Transaction.id == transaction_id).first()
def get_transactions(db: Session, skip: int = 0, limit: int = 100):
    # Paginated list of transactions.
    return db.query(models.Transaction).offset(skip).limit(limit).all()
def get_transactions_by_address(db: Session, address_id: str):
    """All transactions where the address is either sender or receiver."""
    involves_address = (
        (models.Transaction.addr_to_id == address_id)
        | (models.Transaction.addr_from_id == address_id)
    )
    return db.query(models.Transaction).filter(involves_address).all()
def create_transaction(db: Session, item: schemas.TransactionCreate):
    """Persist a new transaction and return the refreshed ORM row."""
    row = models.Transaction(
        id=item.id,
        value=item.value,
        addr_to_id=item.addr_to_id,
        addr_from_id=item.addr_from_id,
    )
    db.add(row)
    db.commit()
    db.refresh(row)
    return row
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,553
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/config.py
|
from pydantic import BaseSettings
# Settings of project
class Settings(BaseSettings):
    # Database connection settings, populated from the environment.
    db_username: str
    db_password: str
    db_host: str
    db_name: str
    class Config:
        # pydantic falls back to this dotenv file for missing values.
        env_file = ".env"
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,554
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/database/db.py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from config import Settings
settings = Settings()

# Assemble the Postgres connection URL from the environment-driven settings.
# NOTE(review): credentials are interpolated raw; a password containing
# special characters (@ : /) would need URL-escaping.
SQLALCHEMY_DATABASE_URL = (
    f'postgresql://{settings.db_username}:{settings.db_password}'
    f'@{settings.db_host}/{settings.db_name}'
)

engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,555
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/database/models.py
|
import datetime
from sqlalchemy import Column, ForeignKey, String, DateTime, Float
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# Transaction model
class Transaction(Base):
    """A value transfer between two addresses."""
    __tablename__ = "transaction"
    id = Column(String, primary_key=True)
    # BUG FIX: pass the callable, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the process
    # start time instead of the actual insert time.
    date = Column(DateTime, default=datetime.datetime.now)
    value = Column(Float)
    addr_to_id = Column(String, ForeignKey("address.id"))
    addr_from_id = Column(String, ForeignKey("address.id"))
    addr_to = relationship("Address", foreign_keys=[addr_to_id], back_populates='transactions_to')
    addr_from = relationship("Address", foreign_keys=[addr_from_id], back_populates='transactions_from')
# Address model
class Address(Base):
    # A wallet-style address; transactions reference it on either side.
    __tablename__ = "address"
    id = Column(String, primary_key=True)
    transactions_to = relationship("Transaction", foreign_keys='Transaction.addr_to_id', back_populates="addr_to")
    transactions_from = relationship("Transaction", foreign_keys='Transaction.addr_from_id', back_populates="addr_from")
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,556
|
VladislavGordeyko/Qenetex.API
|
refs/heads/main
|
/main.py
|
from typing import List
from fastapi import FastAPI, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from database import repository, models, schemas
from database.db import SessionLocal, engine
from utils import generate_hex, generate_float, pick_random_elements
# Create tables on startup (no-op if they already exist).
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
# add here address of frontend server to prevent cors error
origins = [
    "http://localhost:3000",
]
# Cors settings
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Dependency
def get_db():
    # Yields a request-scoped DB session, always closed afterwards.
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# Add address
@app.post("/address/", response_model=schemas.Address)
def create_address(address: schemas.AddressCreate, db: Session = Depends(get_db)):
    # Creates an address with a random 16-character hex id.
    return repository.create_address(db=db, address=address, generated_id=generate_hex(8))
# Get address list
@app.get("/address/", response_model=List[schemas.Address])
def read_addresses(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    # Paginated address listing.
    addresses = repository.get_addresses(db, skip=skip, limit=limit)
    return addresses
# Get address by id
@app.get("/address/{address_id}", response_model=schemas.Address)
def read_address(address_id: str, db: Session = Depends(get_db)):
    # 404 when the address does not exist.
    db_addr = repository.get_address(db, address_id=address_id)
    if db_addr is None:
        raise HTTPException(status_code=404, detail="Address not found")
    return db_addr
# Get transaction by id
@app.get("/transaction/{transaction_id}", response_model=schemas.Transaction)
def read_transaction(transaction_id: str, db: Session = Depends(get_db)):
    """Return one transaction by id; 404 when it does not exist.

    Fixes two defects: response_model was schemas.Address (the wrong schema
    for a transaction payload), and the function name duplicated the
    read_address defined above, shadowing it at module level.
    """
    db_transaction = repository.get_transaction(db, transaction_id=transaction_id)
    if db_transaction is None:
        raise HTTPException(status_code=404, detail="Transaction not found")
    return db_transaction
# Get Transactions list
@app.get("/transaction/", response_model=List[schemas.Transaction])
def read_transactions(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    # Paginated transaction listing.
    items = repository.get_transactions(db, skip=skip, limit=limit)
    return items
# Get Transaction by address id
@app.get("/transaction/address/{address_id}", response_model=List[schemas.Transaction])
def read_transactions_by_address(address_id: str, db: Session = Depends(get_db)):
    # Transactions where the address is sender or receiver.
    items = repository.get_transactions_by_address(db, address_id)
    return items
# Add random transaction
@app.post("/transaction/")
def create_random_transaction(db: Session = Depends(get_db)):
    # Moves a random value between two distinct random addresses.
    # NOTE(review): raises ValueError when fewer than two addresses exist.
    address_ids = [value for value, in repository.get_addresses_ids(db)]
    addresses = pick_random_elements(address_ids)
    new_transaction = schemas.TransactionCreate(id=generate_hex(16), value=generate_float(),
                                                addr_to_id=addresses[0], addr_from_id=addresses[1])
    return repository.create_transaction(db=db, item=new_transaction)
# Add transaction with address
@app.post("/address_transaction/", response_model=schemas.Transaction)
def create_transaction(address: schemas.AddressTransactionCreate, db: Session = Depends(get_db)):
    # Sends a random value from the given address to one random address.
    address_ids = [value for value, in repository.get_addresses_ids(db)]
    addresses = pick_random_elements(address_ids, 1)
    new_transaction = schemas.TransactionCreate(id=generate_hex(16), value=generate_float(),
                                                addr_to_id=addresses[0], addr_from_id=address.address_id)
    return repository.create_transaction(db=db, item=new_transaction)
|
{"/database/db.py": ["/config.py"], "/main.py": ["/database/db.py", "/utils.py"]}
|
28,569
|
artursapek/tackboard.me
|
refs/heads/master
|
/urls.py
|
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
# Legacy Django URLconf (pre-1.8 'patterns' style with string view paths).
urlpatterns = patterns('',
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve',{'document_root':settings.STATIC_ROOT}),
    url(r'^ajax/(?P<func>\w+)', 'main.views.ajax'),
    url(r'^stats/?', 'main.views.stats'),
    # NOTE(review): unanchored pattern — matches any path containing a run of
    # word characters, so it must stay below the more specific routes.
    url(r'(?P<board_name>[\w\-]+)/?', 'main.views.board'),
    url(r'^$', 'main.views.index'),
)
|
{"/main/views.py": ["/main/models.py"]}
|
28,570
|
artursapek/tackboard.me
|
refs/heads/master
|
/main/views.py
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response as render
from django.conf import settings
from django.template import RequestContext
from django.utils import simplejson
from main.models import *
import time
import datetime
import re
def index(request):
    # IE is unsupported; serve a dedicated notice page.
    if 'MSIE' in request.META['HTTP_USER_AGENT']:
        return render('ie.html')
    else:
        return render('splash.html', {}, context_instance=RequestContext(request))
def stats(request):
    # Simple hit-counter dashboard.
    counters = Reference_Counter.objects.all()
    return render('stats.html', {'counters': counters})
def board(request, board_name):
    # Render a named tackboard with its categories; IE gets the notice page.
    if 'MSIE' in request.META['HTTP_USER_AGENT']:
        return render('ie.html')
    else:
        b = Tackboard.objects.get(short_name=board_name)
        return render('board.html', {'board': b, 'default_category': Category.objects.get(tackboard=b, default=True).name, 'categories': b.categories.all()}, context_instance=RequestContext(request))
def ajax(request, func):
    # SECURITY(review): dispatches to ANY module-level callable named in the
    # URL — restrict this to an explicit whitelist of view functions.
    return globals()[func](request)
def active_posters(request):
    # All posters for a board/category: full data for active posters, but
    # only position/tack colour for recently removed ones (<= 14 days old),
    # so torn-down poster bodies are not leaked.
    # NOTE(review): Python 2 idioms (list-returning filter, dict.items()
    # list concatenation) — this function is not Python 3 compatible.
    board = Tackboard.objects.get(id=request.GET['board_id'])
    category = filter(lambda x: x.name == request.GET['category'], board.categories.all())[0]
    active = category.posters.filter(active=True)
    tacks = category.posters.filter(active=False, date_created__gte = (datetime.date.today() - datetime.timedelta(14)).isoformat())
    # Security fix: combines different dicts so user can't get the body of torn-down posters but only their tack color and position
    return JSON([dict({p.id: {'id': p.id, 'active': p.active, 'x': p.position_x, 'y': p.position_y, 'tack_color': p.tack_color,
        'body': p.body, 'age': ago(p.date_created)} for p in active}.items() + {p.id: {'id': p.id, 'active': p.active,
        'x': p.position_x, 'y': p.position_y, 'tack_color': p.tack_color} for p in tacks}.items())])
def ago(date):
    """Human-readable age of *date* relative to now.

    Returns 'today', 'yesterday', 'N days ago', 'N week(s) ago', or
    'over 2 weeks ago' for anything in a different month/year.
    """
    now = datetime.datetime.now()
    # Any other month or year is treated as ancient history.
    if (date.year, date.month) != (now.year, now.month):
        return 'over 2 weeks ago'
    day_diff = now.day - date.day
    if day_diff == 0:
        return 'today'
    if day_diff == 1:
        return 'yesterday'
    weeks = int(day_diff / 7)
    if not weeks:
        return '%s days ago' % day_diff
    if weeks > 2:
        return 'over 2 weeks ago'
    return '%s week%s ago' % (weeks, '' if weeks == 1 else 's')
def add_poster(request):
    # Validate and create a new poster, attaching it to the posted category.
    post = request.POST
    error = validate_poster(post) # Validating everything
    if error:
        return JSON({403: error})
    p = Poster(active = True, position_x = post['x'], position_y = post['y'], tack_color = post['tack_color'], body = post['body'], from_user_agent = request.META['HTTP_USER_AGENT'], from_ip = request.META['REMOTE_ADDR'])
    p.save()
    t = Tackboard.objects.get(id = post['board_id'])
    c = Category.objects.get(tackboard = t, name = request.POST['category'])
    c.posters.add(p)
    t.save()
    return JSON([200, {'id': p.id, 'active': p.active}])
def remove_poster(request):
    # Soft-delete: mark inactive and timestamp the removal.
    post = request.POST
    poster = Poster.objects.get(id=post['poster_id'])
    poster.active = False
    poster.date_removed = datetime.datetime.now()
    poster.save()
    return HttpResponse(status=200)
def retrieve_poster(request):
    # Full poster data; 403 for torn-down posters (body must not leak).
    p = Poster.objects.get(id=request.GET['poster_id'])
    if not p.active:
        return HttpResponse(status=403)
    return JSON([{'id': p.id, 'active': p.active, 'x': p.position_x, 'y': p.position_y, 'tack_color': p.tack_color,
        'body': p.body, 'age': ago(p.date_created)}])
def update(request):
    # Lightweight poll: map of poster id -> active flag for a board/category.
    board = Tackboard.objects.get(id=request.GET['board_id'])
    category = filter(lambda x: x.name == request.GET['category'], board.categories.all())[0]
    active = category.posters.filter(active=True)
    tacks = category.posters.filter(active=False, date_created__gte = (datetime.date.today() - datetime.timedelta(14)).isoformat())
    data = { }
    for poster in active | tacks:
        data[poster.id] = poster.active
    return JSON([ data ])
def reference_counter(request):
    # Increment the named hit counter.
    counter = Reference_Counter.objects.get(name=request.POST['action'])
    counter.count = counter.count + 1
    counter.save()
    return HttpResponse(status=200)
def validate_poster(data):
    """Validate a poster POST payload.

    Returns an error string on failure, or False when the payload is valid.

    Fixes: the original position check `0 > data['x'] > 850` is a chained
    comparison that is always False (x cannot be both < 0 and > 850), so
    bounds were never enforced; and `forbidden_strings` listed two markers
    but only the first was ever checked.
    """
    if data['body'] == '':
        return 'EMPTY'
    forbidden_strings = ['"javascript:', 'background:']
    for forbidden in forbidden_strings:
        if forbidden in data['body']:
            return '[Script injection]'
    # POST values arrive as strings; coerce before the bounds check.
    try:
        x, y = float(data['x']), float(data['y'])
    except (TypeError, ValueError):
        return 'Invalid position'
    if not (0 <= x <= 850) or not (0 <= y <= 335):
        return 'Invalid position'
    tags = re.findall(r'\<(?P<tagname>[^\/][\w]+)[^/]+\/[\w]+>', data['body'])
    if tags:
        for tag in tags:
            if tag not in ['span', 'div', 'a', 'b', 'u', 'i', 'br']: # safe tags
                return '[HTML injection] %s ' % tag
    return False
def JSON(json):
    # Serialize to compact JSON. Python 2-era Django: 'simplejson' and the
    # 'mimetype' kwarg (modern Django uses json / content_type).
    return HttpResponse(simplejson.dumps(json, separators=(',',':')), mimetype='application/javascript')
|
{"/main/views.py": ["/main/models.py"]}
|
28,571
|
artursapek/tackboard.me
|
refs/heads/master
|
/main/models.py
|
from django.db import models
class Tackboard(models.Model):
    # A named board containing categories of posters.
    def __unicode__(self):
        return '%s tackboard with %s categories' % (self.name, len(self.categories.all()))
    def set_default(self, new_default):
        # Make the category named new_default the only default one.
        for c in self.categories.all():
            if (c.name == new_default):
                c.default = True
            else:
                c.default = False
            c.save()
    def list_categories(self):
        # [[name, active poster count], ...] for every category.
        categories_list = []
        for c in self.categories.all():
            categories_list.append([c.name, c.active_posters()])
        return categories_list
    name = models.CharField(max_length=50)
    short_name = models.CharField(max_length=20)
    categories = models.ManyToManyField('Category')
    date_created = models.DateTimeField(auto_now_add=True)
    refresh_rate = models.IntegerField()
class Category(models.Model):
    # A group of posters within one tackboard.
    def __unicode__(self):
        try:
            board = Tackboard.objects.get(categories=self).name
        except:
            # NOTE(review): bare except — also hides MultipleObjectsReturned.
            board = "[unset]"
        return '%s category on %s board with %s posters' % (self.name, board, len(self.posters.all()))
    def active_posters(self):
        # Python 2 idiom: filter() returns a list here.
        return len(filter(lambda x: x.active == True, self.posters.all() ) )
    name = models.CharField(max_length=40)
    posters = models.ManyToManyField('Poster')
    default = models.BooleanField(default=False)
class Poster(models.Model):
    # A single note tacked to a board.
    def __unicode__(self):
        return '%s: %s' % (self.id, self.body[:20])
    def get_category(self):
        # The category this poster belongs to (assumes exactly one).
        return Category.objects.get(posters=self)
    date_created = models.DateTimeField(auto_now_add=True)
    active = models.BooleanField()
    position_x = models.IntegerField()
    position_y = models.IntegerField()
    tack_color = models.CharField(max_length=1)
    body = models.TextField()
    # NOTE(review): auto_now_add stamps creation time, yet the views assign
    # this on removal — likely should be null=True without auto_now_add.
    date_removed = models.DateTimeField(auto_now_add=True)
    from_user_agent = models.CharField(max_length=200)
    from_ip = models.CharField(max_length=20)
class Reference_Counter(models.Model):
    # Simple named hit counter used by the stats page.
    def __unicode__(self):
        return '%s : %s' % (self.name, self.count)
    count = models.IntegerField()
    name = models.CharField(max_length=30)
|
{"/main/views.py": ["/main/models.py"]}
|
28,572
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/get_access_token.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import time
import logging
import requests
import redis
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from socrates.set import REDIS
GRANT_TYPE = 'client_credential'
# SECURITY(review): WeChat app credentials and the redis password are
# committed to source control — move to environment/config and rotate.
APPID = 'wx16b6a22daa10d5c1'
SECRET = '37e8ff0ffbe0cfa1589787cb42ab3b54'
URL = 'https://api.weixin.qq.com/cgi-bin/token'
r5 = redis.StrictRedis(host=REDIS['HOST'],
                       port=REDIS['PORT'],
                       db=5, password='srjdZ5weyil',
                       )
def get_access_token():
    """Request a fresh WeChat access token and cache it in redis under
    'access_token'; log an error on a non-200 response."""
    query = {
        'grant_type': GRANT_TYPE,
        'appid': APPID,
        'secret': SECRET,
    }
    response = requests.get(URL, params=query)
    if response.status_code != 200:
        logging.error(response.status_code)
        return
    token = response.json()['access_token']
    r5.set('access_token', token)
    logging.info('access_token:' + token)
if __name__ == '__main__':
    # WeChat tokens expire after ~2 hours; refresh hourly forever.
    while 1:
        get_access_token()
        time.sleep(3600)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,573
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/photo_send.py
|
# coding: utf-8
import pickle
import logging
import requests
from socrates import hanzi
from socrates.set import log
from scripts.mongo_operate import update_user
from scripts.session_get import get_session
def get_photo_stream(pic_url, msgid):
    """Download the picture at *pic_url*, save it as <msgid>.jpg, and
    return an open binary read handle on the saved file.

    Fixes: the JPEG bytes were written through a text-mode 'w' handle
    (corrupts binary data under Python 3 / Windows newline translation),
    and the handle was not closed via a context manager.
    """
    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:22.0) Gecko/20100101 Firefox/22.0'
    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    headers['Accept-Language'] = 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3'
    headers['Accept-Encoding'] = 'gzip, deflate'
    headers['Connection'] = 'keep-alive'
    r = requests.get(pic_url, headers=headers)
    filename = '/home/nightwish/elfin/picture/' + msgid + '.jpg'
    # 'wb': r.content is raw bytes; text mode would mangle the JPEG.
    with open(filename, 'wb') as f:
        f.write(r.content)
    return open(filename, 'rb')
def upload_photo(user, pic_url, pic_id):
    # Download the WeChat picture and upload it to weilairiji as a photo
    # post; skips the upload when pic_id matches the last uploaded hash.
    # NOTE(review): Python 2 code (dict.has_key, byte-string .decode).
    photo = get_photo_stream(pic_url, pic_id)
    logging.info(pic_id)
    logging.info(user['hash'])
    if pic_id == user['hash']:
        return hanzi.REPEAT
    upload_photo_url = 'http://m.weilairiji.com/index.php?op=sendphoto&tsid='
    data = {}
    data['phototitle'] = (hanzi.WECHATPIC.decode('utf-8') + pic_id).encode('GB18030')
    data['action'] = 'upload'
    files = {}
    files['photo'] = ('1.jpg', photo, 'image/jpeg')
    if user.has_key('session'):
        # Reuse the pickled requests session when present.
        session = pickle.loads(user.get('session'))
    else:
        session = get_session(user.get('xiezhua_id'))
    r = session.post(upload_photo_url, data=data, files=files)
    if r.status_code==200:
        # Remember the hash so the same picture is not uploaded twice.
        update_user({'id':user['id']}, hash=pic_id)
        return hanzi.SEND_OK
    else:
        return hanzi.SEND_FAIL
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,574
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/timeline_get.py
|
# coding: utf-8
import pickle
import logging
import requests
from scripts.timeline_operate import timeline
from scripts.session_get import get_session
from socrates.set import open_line_url, time_line_url
def open_line():
    # Public (not-logged-in) timeline.
    open_line_ret = requests.get(url=open_line_url)
    return timeline(open_line_ret.content)
def time_line(key, user):
    # Personal timeline page 1-3, selected by key 'tml1'..'tml3'.
    if user.get('session',''):
        # Reuse the user's pickled requests session when available.
        session = user['session']
        session = pickle.loads(session)
    else:
        session = get_session(user['xiezhua_id'])
    tml_dict = {'tml1':1, 'tml2':2, 'tml3':3,}
    time_line_ret = session.get(url=time_line_url.format(tml_dict[key]))
    return timeline(time_line_ret.content)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,575
|
zhao-ji/elfin
|
refs/heads/master
|
/handlers/userset.py
|
#!/usr/bin/env python
# coding: utf-8
import time
import logging
import tornado.web
from socrates import hanzi
from socrates.set import log
from scripts.talk_send import transmit
from scripts.mongo_operate import get_user_value, del_user, save_user
class userset(tornado.web.RequestHandler):
    # Settings page for one wechat user: change the signature tail or the
    # auto-reply ('ret') text.
    def get(self, wechat_id):
        # Render the settings form, posting back to this same URL.
        action = '/elfin/userset/' + wechat_id
        self.render('userset.html', info=hanzi.USERSET , time=time.ctime(), action=action)
    def post(self, wechat_id):
        # 'tail' and 'ret' are mutually exclusive form fields.
        tail = self.get_arguments('tail')
        ret = self.get_arguments('ret')
        user = get_user_value(wechat_id=wechat_id)
        if tail:
            user['tail'] = tail[0]
            try:
                # Announce the new tail to the user over wechat.
                transmit(user, hanzi.CHANGE_TAIL.decode('utf-8').format(tail[0]))
            except:
                # NOTE(review): bare except — any transmit failure (or bug)
                # is reported as TAIL_ERR and the change is discarded.
                self.write(hanzi.TAIL_ERR)
            else:
                # Persist by delete-then-save (poor man's upsert).
                del_user(wechat_id=wechat_id)
                save_user(user)
                self.render('return.html', info=hanzi.TAIL_OK, time=time.ctime())
            finally:
                logging.info(str(user['id']) + ':' + user['tail'])
        elif ret:
            if ret[0]=='0':
                # '0' disables the auto-reply.
                user['ret'] = ''
                del_user(wechat_id=wechat_id)
                save_user(user)
            elif ret[0]=='1':
                # '1' sets a custom auto-reply text.
                custom = self.get_argument('custom')
                user['ret'] = custom
                del_user(wechat_id=wechat_id)
                save_user(user)
            self.render('return.html', info=hanzi.RET_OK, time=time.ctime())
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,576
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/simi.py
|
#!/usr/bin/env python
# coding:utf-8
import json
import requests
SIMI_URL = 'http://api.simsimi.com/request.p'
def simi(talk):
    """Send *talk* to the SimSimi chat API and return its reply text."""
    payload = {
        'key': 'b031132b-28ab-4769-b560-96ebddf70c1e',
        'lc': 'ch',
        'text': talk,
    }
    reply = requests.get(url=SIMI_URL, params=payload)
    parsed = json.loads(reply.text)
    return parsed['response']
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.