seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
31040777442 | import numpy as np
import dgl.backend as F
from functools import partial
from dgl import graph, heterograph, batch
from ..utils.mol_to_graph import k_nearest_neighbors, mol_to_bigraph
from ..utils.featurizers import BaseAtomFeaturizer, BaseBondFeaturizer, ConcatFeaturizer, atom_type_one_hot, atom_total_degree_one_hot, atom_formal_charge_one_hot, atom_is_aromatic, atom_implicit_valence_one_hot, atom_explicit_valence_one_hot, bond_type_one_hot, bond_is_in_ring
__all__ = ['ACNN_graph_construction_and_featurization',
'PN_graph_construction_and_featurization']
def filter_out_hydrogens(mol):
    """Get indices for non-hydrogen atoms.
    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    Returns
    -------
    indices_left : list of int
        Indices of non-hydrogen atoms.
    """
    # Hydrogen atoms have an atomic number of 1; keep everything else.
    return [idx for idx, atom in enumerate(mol.GetAtoms())
            if atom.GetAtomicNum() != 1]
def get_atomic_numbers(mol, indices):
    """Get the atomic numbers for the specified atoms.
    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    indices : list of int
        Specifying atoms.
    Returns
    -------
    list of int
        Atomic numbers computed.
    """
    # Look each atom up by index and collect its atomic number.
    return [mol.GetAtomWithIdx(idx).GetAtomicNum() for idx in indices]
def int_2_one_hot(a, bins):
    """Convert integer encodings on a vector to a matrix of one-hot encoding"""
    # Row i of the identity matrix is the one-hot vector for class i, so
    # fancy-indexing the identity with ``a`` yields the whole encoding at once.
    return np.eye(len(bins))[np.asarray(a, dtype=int)]
def PN_graph_construction_and_featurization(ligand_mol,
                                            protein_mol,
                                            ligand_coordinates,
                                            protein_coordinates,
                                            max_num_ligand_atoms=None,
                                            max_num_protein_atoms=None,
                                            max_num_neighbors=4,
                                            distance_bins=None,
                                            strip_hydrogens=False):
    """Graph construction and featurization for `PotentialNet for Molecular Property Prediction
    <https://pubs.acs.org/doi/10.1021/acscentsci.8b00507>`__.
    Parameters
    ----------
    ligand_mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    protein_mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    ligand_coordinates : Float Tensor of shape (V1, 3)
        Atom coordinates in a ligand.
    protein_coordinates : Float Tensor of shape (V2, 3)
        Atom coordinates in a protein.
    max_num_ligand_atoms : int or None
        Maximum number of atoms in ligands for zero padding, which should be no smaller than
        ligand_mol.GetNumAtoms() if not None. If None, no zero padding will be performed.
        Default to None.
    max_num_protein_atoms : int or None
        Maximum number of atoms in proteins for zero padding, which should be no smaller than
        protein_mol.GetNumAtoms() if not None. If None, no zero padding will be performed.
        Default to None.
    max_num_neighbors : int
        Maximum number of neighbors allowed for each atom when constructing KNN graph. Default to 4.
    distance_bins : list of float or None
        Distance bins to determine the edge types.
        Edges of the first edge type are added between pairs of atoms whose distances are less than `distance_bins[0]`.
        The length matches the number of edge types to be constructed.
        If None, defaults to `[1.5, 2.5, 3.5, 4.5]`.
    strip_hydrogens : bool
        Whether to exclude hydrogen atoms. Default to False.
    Returns
    -------
    complex_bigraph : DGLGraph
        Bigraph with the ligand and the protein (pocket) combined and canonical features extracted.
        The atom features are stored as DGLGraph.ndata['h'].
        The edge types are stored as DGLGraph.edata['e'].
        The bigraphs of the ligand and the protein are batched together as one complex graph.
    complex_knn_graph : DGLGraph
        K-nearest-neighbor graph with the ligand and the protein (pocket) combined and edge features extracted based on distances.
        The edge types are stored as DGLGraph.edata['e'].
        The knn graphs of the ligand and the protein are batched together as one complex graph.
    """
    # Resolve the default here rather than using a mutable default argument.
    if distance_bins is None:
        distance_bins = [1.5, 2.5, 3.5, 4.5]
    assert ligand_coordinates is not None, 'Expect ligand_coordinates to be provided.'
    assert protein_coordinates is not None, 'Expect protein_coordinates to be provided.'
    # NOTE(review): max_num_ligand_atoms/max_num_protein_atoms are only validated
    # below; no zero padding is actually performed in this function despite the
    # docstring -- confirm whether padding was intended.
    if max_num_ligand_atoms is not None:
        assert max_num_ligand_atoms >= ligand_mol.GetNumAtoms(), \
            'Expect max_num_ligand_atoms to be no smaller than ligand_mol.GetNumAtoms(), ' \
            'got {:d} and {:d}'.format(max_num_ligand_atoms, ligand_mol.GetNumAtoms())
    if max_num_protein_atoms is not None:
        assert max_num_protein_atoms >= protein_mol.GetNumAtoms(), \
            'Expect max_num_protein_atoms to be no smaller than protein_mol.GetNumAtoms(), ' \
            'got {:d} and {:d}'.format(max_num_protein_atoms, protein_mol.GetNumAtoms())
    if strip_hydrogens:
        # Remove hydrogen atoms and their corresponding coordinates
        ligand_atom_indices_left = filter_out_hydrogens(ligand_mol)
        protein_atom_indices_left = filter_out_hydrogens(protein_mol)
        ligand_coordinates = ligand_coordinates.take(ligand_atom_indices_left, axis=0)
        protein_coordinates = protein_coordinates.take(protein_atom_indices_left, axis=0)
    else:
        ligand_atom_indices_left = list(range(ligand_mol.GetNumAtoms()))
        protein_atom_indices_left = list(range(protein_mol.GetNumAtoms()))
    # Node featurizer for stage 1: one-hot chemistry descriptors concatenated
    # into a single feature vector per atom, stored under key 'h'.
    atoms = ['H','N','O','C','P','S','F','Br','Cl','I','Fe','Zn','Mg','Na','Mn','Ca','Co','Ni','Se','Cu','Cd','Hg','K']
    atom_total_degrees = list(range(5))
    atom_formal_charges = [-1, 0, 1]
    atom_implicit_valence = list(range(4))
    atom_explicit_valence = list(range(8))
    atom_concat_featurizer = ConcatFeaturizer([partial(atom_type_one_hot, allowable_set=atoms),
                                               partial(atom_total_degree_one_hot, allowable_set=atom_total_degrees),
                                               partial(atom_formal_charge_one_hot, allowable_set=atom_formal_charges),
                                               atom_is_aromatic,
                                               partial(atom_implicit_valence_one_hot, allowable_set=atom_implicit_valence),
                                               partial(atom_explicit_valence_one_hot, allowable_set=atom_explicit_valence)])
    PN_atom_featurizer = BaseAtomFeaturizer({'h': atom_concat_featurizer})
    # Bond featurizer for stage 1: bond type + ring membership, stored under 'e'.
    bond_concat_featurizer = ConcatFeaturizer([bond_type_one_hot, bond_is_in_ring])
    PN_bond_featurizer = BaseBondFeaturizer({'e': bond_concat_featurizer})
    # Construct covalent-bond graphs for stage 1; keep the original atomic order
    # so node ids line up with the coordinate arrays.
    ligand_bigraph = mol_to_bigraph(ligand_mol, add_self_loop=False,
                                    node_featurizer=PN_atom_featurizer,
                                    edge_featurizer=PN_bond_featurizer,
                                    canonical_atom_order=False)
    protein_bigraph = mol_to_bigraph(protein_mol, add_self_loop=False,
                                     node_featurizer=PN_atom_featurizer,
                                     edge_featurizer=PN_bond_featurizer,
                                     canonical_atom_order=False)
    complex_bigraph = batch([ligand_bigraph, protein_bigraph])
    # Construct knn graphs for stage 2, cut off at the largest distance bin.
    complex_coordinates = np.concatenate([ligand_coordinates, protein_coordinates])
    complex_srcs, complex_dsts, complex_dists = k_nearest_neighbors(
        complex_coordinates, distance_bins[-1], max_num_neighbors)
    complex_srcs = np.array(complex_srcs)
    complex_dsts = np.array(complex_dsts)
    complex_dists = np.array(complex_dists)
    complex_knn_graph = graph((complex_srcs, complex_dsts), num_nodes=len(complex_coordinates))
    # Bucket each knn edge's distance into an edge-type index, then one-hot it.
    d_features = np.digitize(complex_dists, bins=distance_bins, right=True)
    d_one_hot = int_2_one_hot(d_features, distance_bins)
    # Add bond types and bonds (from the covalent bigraph) to the stage-2 graph.
    u, v = complex_bigraph.edges()
    complex_knn_graph.add_edges(u.to(F.int64), v.to(F.int64))
    # Block-diagonal edge features: knn edges get the distance one-hot columns,
    # covalent edges get the bond-feature columns; the rest is zero padding.
    n_d, f_d = d_one_hot.shape
    n_e, f_e = complex_bigraph.edata['e'].shape
    complex_knn_graph.edata['e'] = F.zerocopy_from_numpy(
        np.block([
            [d_one_hot, np.zeros((n_d, f_e))],
            [np.zeros((n_e, f_d)), np.array(complex_bigraph.edata['e'])]
        ]).astype(np.int64)
    )
    return complex_bigraph, complex_knn_graph
# pylint: disable=C0326
def ACNN_graph_construction_and_featurization(ligand_mol,
                                              protein_mol,
                                              ligand_coordinates,
                                              protein_coordinates,
                                              max_num_ligand_atoms=None,
                                              max_num_protein_atoms=None,
                                              neighbor_cutoff=12.,
                                              max_num_neighbors=12,
                                              strip_hydrogens=False):
    """Graph construction and featurization for `Atomic Convolutional Networks for
    Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
    Parameters
    ----------
    ligand_mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    protein_mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    ligand_coordinates : Float Tensor of shape (V1, 3)
        Atom coordinates in a ligand.
    protein_coordinates : Float Tensor of shape (V2, 3)
        Atom coordinates in a protein.
    max_num_ligand_atoms : int or None
        Maximum number of atoms in ligands for zero padding, which should be no smaller than
        ligand_mol.GetNumAtoms() if not None. If None, no zero padding will be performed.
        Default to None.
    max_num_protein_atoms : int or None
        Maximum number of atoms in proteins for zero padding, which should be no smaller than
        protein_mol.GetNumAtoms() if not None. If None, no zero padding will be performed.
        Default to None.
    neighbor_cutoff : float
        Distance cutoff to define 'neighboring'. Default to 12.
    max_num_neighbors : int
        Maximum number of neighbors allowed for each atom. Default to 12.
    strip_hydrogens : bool
        Whether to exclude hydrogen atoms. Default to False.

    Returns
    -------
    g : DGLHeteroGraph
        Heterograph with node types 'ligand_atom' and 'protein_atom' and six edge
        types: intra-ligand ('ligand'), intra-protein ('protein') and four
        'complex' relations covering every source/destination combination of the
        two node types. Each edge type stores its pairwise distances under
        ``data['distance']`` (shape (E, 1), float32). Nodes carry
        ``data['atomic_number']`` and a 0/1 ``data['mask']`` marking real
        (non-padding) atoms.
    """
    assert ligand_coordinates is not None, 'Expect ligand_coordinates to be provided.'
    assert protein_coordinates is not None, 'Expect protein_coordinates to be provided.'
    if max_num_ligand_atoms is not None:
        assert max_num_ligand_atoms >= ligand_mol.GetNumAtoms(), \
            'Expect max_num_ligand_atoms to be no smaller than ligand_mol.GetNumAtoms(), ' \
            'got {:d} and {:d}'.format(max_num_ligand_atoms, ligand_mol.GetNumAtoms())
    if max_num_protein_atoms is not None:
        assert max_num_protein_atoms >= protein_mol.GetNumAtoms(), \
            'Expect max_num_protein_atoms to be no smaller than protein_mol.GetNumAtoms(), ' \
            'got {:d} and {:d}'.format(max_num_protein_atoms, protein_mol.GetNumAtoms())
    if strip_hydrogens:
        # Remove hydrogen atoms and their corresponding coordinates
        ligand_atom_indices_left = filter_out_hydrogens(ligand_mol)
        protein_atom_indices_left = filter_out_hydrogens(protein_mol)
        ligand_coordinates = ligand_coordinates.take(ligand_atom_indices_left, axis=0)
        protein_coordinates = protein_coordinates.take(protein_atom_indices_left, axis=0)
    else:
        ligand_atom_indices_left = list(range(ligand_mol.GetNumAtoms()))
        protein_atom_indices_left = list(range(protein_mol.GetNumAtoms()))
    # Compute number of nodes for each type (padded size if a maximum is given)
    if max_num_ligand_atoms is None:
        num_ligand_atoms = len(ligand_atom_indices_left)
    else:
        num_ligand_atoms = max_num_ligand_atoms
    if max_num_protein_atoms is None:
        num_protein_atoms = len(protein_atom_indices_left)
    else:
        num_protein_atoms = max_num_protein_atoms
    data_dict = dict()
    num_nodes_dict = dict()
    # graph data for atoms in the ligand
    ligand_srcs, ligand_dsts, ligand_dists = k_nearest_neighbors(
        ligand_coordinates, neighbor_cutoff, max_num_neighbors)
    data_dict[('ligand_atom', 'ligand', 'ligand_atom')] = (ligand_srcs, ligand_dsts)
    num_nodes_dict['ligand_atom'] = num_ligand_atoms
    # graph data for atoms in the protein
    protein_srcs, protein_dsts, protein_dists = k_nearest_neighbors(
        protein_coordinates, neighbor_cutoff, max_num_neighbors)
    data_dict[('protein_atom', 'protein', 'protein_atom')] = (protein_srcs, protein_dsts)
    num_nodes_dict['protein_atom'] = num_protein_atoms
    # 4 graphs for complex representation, including the connection within
    # protein atoms, the connection within ligand atoms and the connection between
    # protein and ligand atoms.
    # The knn graph is built on the concatenated coordinates, so node ids
    # < num_ligand_atoms are ligand atoms and the rest are protein atoms.
    complex_srcs, complex_dsts, complex_dists = k_nearest_neighbors(
        np.concatenate([ligand_coordinates, protein_coordinates]),
        neighbor_cutoff, max_num_neighbors)
    complex_srcs = np.array(complex_srcs)
    complex_dsts = np.array(complex_dsts)
    complex_dists = np.array(complex_dists)
    offset = num_ligand_atoms
    # ('ligand_atom', 'complex', 'ligand_atom')
    # intersect1d selects edges whose BOTH endpoints are ligand atoms.
    inter_ligand_indices = np.intersect1d(
        (complex_srcs < offset).nonzero()[0],
        (complex_dsts < offset).nonzero()[0],
        assume_unique=True)
    data_dict[('ligand_atom', 'complex', 'ligand_atom')] = \
        (complex_srcs[inter_ligand_indices].tolist(),
         complex_dsts[inter_ligand_indices].tolist())
    # ('protein_atom', 'complex', 'protein_atom')
    # Protein node ids are shifted back by ``offset`` to be type-local.
    inter_protein_indices = np.intersect1d(
        (complex_srcs >= offset).nonzero()[0],
        (complex_dsts >= offset).nonzero()[0],
        assume_unique=True)
    data_dict[('protein_atom', 'complex', 'protein_atom')] = \
        ((complex_srcs[inter_protein_indices] - offset).tolist(),
         (complex_dsts[inter_protein_indices] - offset).tolist())
    # ('ligand_atom', 'complex', 'protein_atom')
    ligand_protein_indices = np.intersect1d(
        (complex_srcs < offset).nonzero()[0],
        (complex_dsts >= offset).nonzero()[0],
        assume_unique=True)
    data_dict[('ligand_atom', 'complex', 'protein_atom')] = \
        (complex_srcs[ligand_protein_indices].tolist(),
         (complex_dsts[ligand_protein_indices] - offset).tolist())
    # ('protein_atom', 'complex', 'ligand_atom')
    protein_ligand_indices = np.intersect1d(
        (complex_srcs >= offset).nonzero()[0],
        (complex_dsts < offset).nonzero()[0],
        assume_unique=True)
    data_dict[('protein_atom', 'complex', 'ligand_atom')] = \
        ((complex_srcs[protein_ligand_indices] - offset).tolist(),
         complex_dsts[protein_ligand_indices].tolist())
    g = heterograph(data_dict, num_nodes_dict=num_nodes_dict)
    # Attach per-edge distances, one (E, 1) float32 tensor per edge type.
    g.edges['ligand'].data['distance'] = F.reshape(F.zerocopy_from_numpy(
        np.array(ligand_dists).astype(np.float32)), (-1, 1))
    g.edges['protein'].data['distance'] = F.reshape(F.zerocopy_from_numpy(
        np.array(protein_dists).astype(np.float32)), (-1, 1))
    g.edges[('ligand_atom', 'complex', 'ligand_atom')].data['distance'] = \
        F.reshape(F.zerocopy_from_numpy(
            complex_dists[inter_ligand_indices].astype(np.float32)), (-1, 1))
    g.edges[('protein_atom', 'complex', 'protein_atom')].data['distance'] = \
        F.reshape(F.zerocopy_from_numpy(
            complex_dists[inter_protein_indices].astype(np.float32)), (-1, 1))
    g.edges[('ligand_atom', 'complex', 'protein_atom')].data['distance'] = \
        F.reshape(F.zerocopy_from_numpy(
            complex_dists[ligand_protein_indices].astype(np.float32)), (-1, 1))
    g.edges[('protein_atom', 'complex', 'ligand_atom')].data['distance'] = \
        F.reshape(F.zerocopy_from_numpy(
            complex_dists[protein_ligand_indices].astype(np.float32)), (-1, 1))
    # Get atomic numbers for all atoms left and set node features
    ligand_atomic_numbers = np.array(get_atomic_numbers(ligand_mol, ligand_atom_indices_left))
    # zero padding
    ligand_atomic_numbers = np.concatenate([
        ligand_atomic_numbers, np.zeros(num_ligand_atoms - len(ligand_atom_indices_left))])
    protein_atomic_numbers = np.array(get_atomic_numbers(protein_mol, protein_atom_indices_left))
    # zero padding
    protein_atomic_numbers = np.concatenate([
        protein_atomic_numbers, np.zeros(num_protein_atoms - len(protein_atom_indices_left))])
    g.nodes['ligand_atom'].data['atomic_number'] = F.reshape(F.zerocopy_from_numpy(
        ligand_atomic_numbers.astype(np.float32)), (-1, 1))
    g.nodes['protein_atom'].data['atomic_number'] = F.reshape(F.zerocopy_from_numpy(
        protein_atomic_numbers.astype(np.float32)), (-1, 1))
    # Prepare mask indicating the existence of nodes (1 = real atom, 0 = padding)
    ligand_masks = np.zeros((num_ligand_atoms, 1))
    ligand_masks[:len(ligand_atom_indices_left), :] = 1
    g.nodes['ligand_atom'].data['mask'] = F.zerocopy_from_numpy(
        ligand_masks.astype(np.float32))
    protein_masks = np.zeros((num_protein_atoms, 1))
    protein_masks[:len(protein_atom_indices_left), :] = 1
    g.nodes['protein_atom'].data['mask'] = F.zerocopy_from_numpy(
        protein_masks.astype(np.float32))
    return g
| awslabs/dgl-lifesci | python/dgllife/utils/complex_to_graph.py | complex_to_graph.py | py | 17,948 | python | en | code | 641 | github-code | 50 |
35490634658 | import requests
import re
from lxml import etree
import time
import random
import pymongo
from datetime import datetime
class FengHuangSpider:
    """Scraper for ifeng.com search results.

    Fetches a search listing page (the query string in ``star_url`` is
    URL-encoded Chinese), then follows each result link to collect the
    article title, timestamp, body text and image URLs.
    """
    def __init__(self):
        # Search results entry page; the query parameter ``q`` is URL-encoded.
        self.star_url = "https://search.ifeng.com/sofeng/search.action?q=%E6%B2%B3%E5%8D%97%E8%BF%9D%E6%B3%95&c=1&chel=&p=1"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        }
        # self.client = pymongo.MongoClient("39.165.96.15", 27017)
    def parse_url(self, url):
        '''Send an HTTP GET request and return the decoded page body.'''
        time.sleep(random.random())  # sleep 0-1s between requests to avoid being blocked
        # print(url)
        resp = requests.get(url=url, headers=self.headers)
        # Ignore undecodable bytes so one bad character does not abort the crawl.
        return resp.content.decode('utf-8','ignore')
    def save_mongo(self,tiebas,dataitem):
        '''Persist scraped items to MongoDB (not implemented yet).'''
        pass
    def run(self):
        '''Main entry point: fetch the search page and parse each result item.'''
        content = self.parse_url(self.star_url)
        html = etree.HTML(content)
        divs = html.xpath('//div[@class="searchResults"]')
        for div in divs:
            item = {}
            # \xa0 is a non-breaking space left over from the HTML; strip it.
            item['title'] = "".join(div.xpath('.//a//text()')).replace('\xa0','')
            item['href'] = "".join(div.xpath('.//a/@href'))
            post_time = "".join(div.xpath('.//p/font//text()'))
            print(post_time)
            # Drop the leading token before the first space, keep the timestamp.
            post_time2 = "".join(re.findall(r'.*? (.+)',post_time))
            item['post_time'] = datetime.strptime(post_time2,'%Y-%m-%d %H:%M:%S')
            # Follow the result link and extract the article body and images.
            content2 = self.parse_url(item['href'])
            html2 = etree.HTML(content2)
            item['detail'] = "".join(html2.xpath('//div[@class="text-3zQ3cZD4"]//text()'))
            item['detail_img'] = html2.xpath('//div[@class="text-3zQ3cZD4"]//img/@src')
            # NOTE(review): items are only printed, never saved -- presumably
            # save_mongo was meant to be called here; confirm intent.
            print(item)
if __name__ == '__main__':
    # Run the spider only when executed as a script.
    spider = FengHuangSpider()
    spider.run()
| 13419879072/myspider | fenghuang/fenghuangspider.py | fenghuangspider.py | py | 1,897 | python | en | code | 1 | github-code | 50 |
25216158104 | import xml.etree.ElementTree as ET
from collections import defaultdict
from os import listdir
from sqlite3 import connect
def xml_to_germanet (pathPrefix):
    """Parse the GermaNet XML files found under ``pathPrefix``.

    Parameters
    ----------
    pathPrefix : str
        Directory containing the GermaNet XML files (trailing separator
        expected, since paths are built by plain concatenation below).

    Returns
    -------
    synsets : dict
        Maps lexical-unit id to its synset id.
    words : dict
        Maps synset id to a set of tuples of (POS, lowercased orthographic
        form, sense number) entries, one tuple per lexical unit.
    polysemous : set
        (form, POS) pairs whose first orthographic form occurs in more than
        one lexical unit.
    """
    typeDict = {'adj':'ADJ', 'nomen':'NOUN', 'verben':'VERB'}
    synsets = {}
    words = defaultdict(set)
    polysemous = defaultdict(lambda: 0)
    # Only the noun/verb/adjective category files are relevant; skip the
    # wiktionary extras.
    for path in [path for path in listdir(pathPrefix)]:
        if ('nomen' in path or 'verben' in path or 'adj' in path) and not 'wiktionary' in path:
            tree = ET.parse(pathPrefix+path).getroot()
            for synset in tree:
                for word in synset:
                    if word.tag == 'lexUnit':
                        synsets[word.attrib['id']] = synset.attrib['id']
                        orthos = []
                        # Collect every spelling variant of this lexical unit.
                        for orthform in word:
                            if orthform.tag in ['orthForm','orthVar','oldOrthForm','oldOrthVar']:
                                orthos.append((typeDict[synset.attrib['category']], orthform.text.lower(), word.attrib['sense']))
                        words[synset.attrib['id']].add(tuple(orthos))
                        # Only the first orthographic form counts towards polysemy.
                        polysemous[orthos[0][1],orthos[0][0]] += 1
    # Keep only (form, POS) pairs that occurred more than once.
    polysemous = {key for key in polysemous if polysemous[key] > 1}
    return synsets,dict(words), polysemous
def db_to_wordnet(path):
    """Read WordNet synsets from the sqlite database at ``path``.

    Parameters
    ----------
    path : str
        Path to a WordNet sqlite database with ``words``, ``senses`` and
        ``synsets`` tables.

    Returns
    -------
    synsets : dict
        Maps synset id to a set of (POS, lemma, sense number) tuples.
    polysemous : set
        (lemma, POS) pairs that occur in more than one sense.
    """
    pos_tags = {'s': 'ADJ', 'a': 'ADJ', 'n': 'NOUN', 'v': 'VERB'}
    synsets = defaultdict(set)
    sense_counts = defaultdict(int)
    connection = connect(path)
    cursor = connection.cursor()
    cursor.execute('SELECT synsetid, sensenum, lemma, pos FROM words NATURAL JOIN senses NATURAL JOIN synsets')
    for synset_id, sense_num, lemma, pos in cursor.fetchall():
        # Only nouns, verbs and (satellite) adjectives are kept.
        if pos in 'nvas':
            synsets[synset_id].add((pos_tags[pos], lemma, sense_num))
            sense_counts[(lemma, pos_tags[pos])] += 1
    polysemous = {pair for pair in sense_counts if sense_counts[pair] > 1}
    return dict(synsets), polysemous
def create_mapping (iliPath, wordnet_path, germanet_path, lang='en'):
    """Build an English/German word-sense mapping from Inter-Lingual-Index records.

    Parameters
    ----------
    iliPath : str
        Path to the ILI XML file linking GermaNet lexical units to WordNet ids.
    wordnet_path : str
        Path to the WordNet sqlite database (see ``db_to_wordnet``).
    germanet_path : str
        Directory of GermaNet XML files (see ``xml_to_germanet``).
    lang : str
        'en' to keep words polysemous in English, 'de' for German.

    Returns
    -------
    tagDict : dict
        Maps word type ('NOUN'/'VERB'/'ADJ') to a dict keyed by
        (english word, german form) with the disambiguating sense as value.
    polysemous_e, polysemous_g : set
        Polysemous (word, POS) pairs for English and German respectively.
    """
    def isPolysemous(english,german,wordtype):
        # polysemous_e / polysemous_g are closed over from the enclosing
        # scope; they are assigned below, before any call happens.
        if lang == 'en':
            if (english,wordtype) in polysemous_e:
                return True
        elif lang == 'de':
            if (german,wordtype) in polysemous_g:
                return True
        return False
    wordnet, polysemous_e = db_to_wordnet(wordnet_path)
    g_syns, g_words, polysemous_g = xml_to_germanet(germanet_path)
    def clean (string):
        # Turn a pwn30Id into the integer synset id used in the sqlite DB:
        # POS digit (from the id's last character) + the embedded offset.
        # Assumes the id layout has the offset at [6:-2] -- TODO confirm
        # against the ILI file format.
        dictio = {'n':'1','v':'2','a':'3','s':'3'}
        return int(dictio[string[-1]]+string[6:-2])
    tagDict = defaultdict(lambda: defaultdict(set))
    tree = ET.parse(iliPath).getroot()
    for entry in tree:
        if entry.tag == 'iliRecord':
            # Skip placeholder records and adverb ('-r') links.
            if all([stop not in entry.attrib['pwn30Id'] for stop in ['00000000', '0000null', '-r']]):
                try:
                    # Cross every English lemma of the WordNet synset with every
                    # German orthographic-form tuple of the linked GermaNet synset.
                    for english, german in ((english,german) for english in wordnet[clean(entry.attrib['pwn30Id'])] for german in g_words[g_syns[entry.attrib['lexUnitId']]]):
                        if lang == 'en':
                            tagDict[english[0]][(english[1],tuple(word[1] for word in german))].add(english[2])
                        elif lang == 'de':
                            tagDict[german[0][0]][(english[1],tuple(word[1] for word in german))].add(german[0][2])
                except KeyError:
                    # Record references an id missing from either resource.
                    pass
    language = 'english' if lang == 'en' else 'german'
    print('Linked %s nouns:'%(language),len(tagDict['NOUN']))
    print('Linked %s verbs:'%(language),len(tagDict['VERB']))
    print('Linked %s adjectives:'%(language),len(tagDict['ADJ']))
    # Keep only pairs whose selected-language word is polysemous.
    tagDict= {wordtype:{(english,german):tagDict[wordtype][(english,german)] for english,german in tagDict[wordtype] if isPolysemous(english,german[0],wordtype)} for wordtype in tagDict}
    print('Polysemous linked %s nouns:'%(language),len(tagDict['NOUN']))
    print('Polysemous linked %s verbs:'%(language),len(tagDict['VERB']))
    print('Polysemous linked %s adjectives:'%(language),len(tagDict['ADJ']))
    # A pair is disambiguatable when exactly one sense survives.
    tagDict = {wordtype:{wordpair:tagDict[wordtype][wordpair] for wordpair in tagDict[wordtype] if len(tagDict[wordtype][wordpair])==1} for wordtype in tagDict}
    print('Polysemous linked %s nouns that can be disambiguated:'%(language),len(tagDict['NOUN']))
    print('Polysemous linked %s verbs that can be disambiguated:'%(language),len(tagDict['VERB']))
    print('Polysemous linked %s adjectives that can be disambiguated:'%(language),len(tagDict['ADJ']))
    # Expand each German orthography tuple into one entry per single form.
    tagDict = {wordtype:{(english,german):tagDict[wordtype][english,g_set] for english, g_set in tagDict[wordtype] for german in g_set} for wordtype in tagDict}
    return dict(tagDict), polysemous_e, polysemous_g
if __name__ == '__main__':
    # Build the mapping in both directions and report how many entries remain
    # after expanding the German orthography tuples.
    dictio,_,_ = create_mapping('interLingualIndex_DE-EN_GN110.xml', 'sqlite-30.db', 'germanet-11.0/GN_V110_XML/')
    print('Entries after expanding german orthography:',sum([len(dictio[k1]) for k1 in dictio]))
    dictio,_,_ = create_mapping('interLingualIndex_DE-EN_GN110.xml', 'sqlite-30.db', 'germanet-11.0/GN_V110_XML/','de')
    print('Entries after expanding german orthography:',sum([len(dictio[k1]) for k1 in dictio]))
| k0rmarun/semantikws1617 | ili_mapping.py | ili_mapping.py | py | 5,102 | python | en | code | 1 | github-code | 50 |
39078099243 | # modulesDemo1.py
# Does not use modules
# Creates a face and displays it
# The face can either smile or frown
from Tkinter import *
#######################
# makeFace and drawFace
#######################
def makeFace(canvas, left, top, right, bottom, isSmiley):
    # Model a face as a plain dict holding the canvas, its bounding box,
    # and whether it smiles or frowns.
    return {"canvas": canvas,
            "left": left,
            "top": top,
            "right": right,
            "bottom": bottom,
            "isSmiley": isSmiley}
def drawFace(face):
    """Draw one face (head, eyes, nose, mouth) on the face's canvas.

    ``face`` is a dict produced by makeFace. NOTE: this file uses Python 2
    style Tkinter imports, and the ``/`` divisions below rely on integer
    division for pixel coordinates -- confirm before porting to Python 3.
    """
    # extract the values from the "face" dict
    canvas = face["canvas"]
    isSmiley = face["isSmiley"]
    (x0, y0, x1, y1) = (face["left"], face["top"], face["right"], face["bottom"])
    # center point and width/height of the bounding box
    (cx, cy) = ( (x0 + x1)/2, (y0 + y1)/2 )
    (dx, dy) = ( (x1 - x0), (y1 - y0) )
    # draw the head
    canvas.create_oval(x0, y0, x1, y1, fill="yellow")
    # draw the eyes: two ovals placed symmetrically about the center
    eyeRx = dx/8
    eyeRy = dy/8
    eyeCx1 = cx - dx/5
    eyeCx2 = cx + dx/5
    eyeCy = y0 + dy/3
    canvas.create_oval(eyeCx1-eyeRx, eyeCy-eyeRy,
                       eyeCx1+eyeRx, eyeCy+eyeRy,
                       fill="black")
    canvas.create_oval(eyeCx2-eyeRx, eyeCy-eyeRy,
                       eyeCx2+eyeRx, eyeCy+eyeRy,
                       fill="black")
    # draw the nose: a narrow oval just below the center
    noseRx = eyeRx/2
    noseRy = eyeRy
    noseCx = cx
    noseCy = cy + dy/24
    canvas.create_oval(noseCx-noseRx, noseCy-noseRy,
                       noseCx+noseRx, noseCy+noseRy,
                       fill="black")
    # draw the mouth as an arc: opening downward for a smile, upward for a frown
    mouthCx = cx
    mouthCy = y0 + dy*4/5
    mouthRx = dx/4
    mouthRy = dy/8
    mx0 = mouthCx - mouthRx
    mx1 = mouthCx + mouthRx
    if (isSmiley):
        # draw arc across bottom half of upper-mouth rectangle
        my0 = mouthCy - 3*mouthRy
        my1 = mouthCy + mouthRy
        canvas.create_arc(mx0, my0, mx1, my1,
                          start=180, extent=180,
                          style="arc", width=mouthRy/4)
    else:
        # draw arc across top half of lower-mouth rectangle
        my0 = mouthCy - mouthRy
        my1 = mouthCy + 3*mouthRy
        canvas.create_arc(mx0, my0, mx1, my1,
                          start=0, extent=180,
                          style="arc", width=mouthRy/4)
#######################
# redrawAll and init
#######################
def redrawAll(canvas):
    """Clear the canvas and redraw the title text plus both faces."""
    canvas.delete(ALL)
    # Draw the demo info
    font = ("Arial", 16, "bold")
    msg = "Modules Demo1: No Module"
    canvas.create_text(canvas.width/2, 25, text=msg, font=font)
    # Draw the face dicts stashed in canvas.data by init()
    drawFace(canvas.data["face1"])
    drawFace(canvas.data["face2"])
def init(canvas):
    """Create the two faces (smiley left, frowny right) and draw them."""
    # Subtract 4 for the canvas border so drawings fit the visible area.
    canvas.width = canvas.winfo_reqwidth() - 4
    canvas.height = canvas.winfo_reqheight() - 4
    canvas.data["face1"] = makeFace(canvas,
                                    0, 50,
                                    canvas.width/2, canvas.height,
                                    True) # True = smiley
    canvas.data["face2"] = makeFace(canvas,
                                    canvas.width/2, 50,
                                    canvas.width, canvas.height,
                                    False) # False = frowny
    redrawAll(canvas)
########### copy-paste below here ###########
def run():
    """Build the Tk window, initialize the demo, and enter the event loop."""
    # create the root and the canvas
    root = Tk()
    root.resizable(width=FALSE, height=FALSE)
    canvas = Canvas(root, width=300, height=200)
    canvas.pack(fill=BOTH, expand=YES)
    # Store canvas in root and in canvas itself for callbacks
    root.canvas = canvas.canvas = canvas
    # Set up canvas data and call init
    canvas.data = { }
    init(canvas)
    # set up events (disabled in this demo -- no mouse/key/timer handlers)
    # root.bind("<Button-1>", leftMousePressed)
    # root.bind("<KeyPress>", keyPressed)
    # timerFired(canvas)
    # and launch the app
    root.mainloop()  # This call BLOCKS (so your program waits until you close the window!)
run() | Sirrie/112work | termProject_backup_copy/gamePart/modulesDemo1.py | modulesDemo1.py | py | 3,882 | python | en | code | 0 | github-code | 50 |
39586778358 | #!/usr/bin/env python
'''Client to standardize access to information regarding services
Simplifies changing server names, and updating them in code. Code should
never include hardcoded server names/urls, etc.
'''
import os
joinp = os.path.join
import yaml
class ServiceInfo(dict):
    '''A dict of service info whose keys tolerate environment-name aliases.'''
    # Alias -> canonical environment name; canonical ones are
    # 'dev'/'staging'/'prod'.
    NAME_MAPS = {'devel': 'dev',
                 'development': 'dev',
                 'preprod': 'prod',
                 'production': 'prod',
                 }

    @staticmethod
    def _map_name(name):
        '''Translate an aliased environment name to its canonical form'''
        return ServiceInfo.NAME_MAPS.get(name, name)

    def __getitem__(self, name):
        return dict.__getitem__(self, self._map_name(name))

    def __contains__(self, name):
        return dict.__contains__(self, self._map_name(name))

    def get(self, k, d=None):
        '''dict.get that honours the environment-name aliases'''
        if k in self:
            return self[k]
        return d
def _get_yaml_files():
    '''return the yaml list of files'''
    # Service descriptions live in the package-local ``data`` directory.
    data_dir = joinp(os.path.dirname(__file__), 'data')
    yaml_files = []
    for entry in os.listdir(data_dir):
        if entry.endswith('yaml'):
            yaml_files.append(joinp(data_dir, entry))
    return yaml_files
def _parse_yaml(yaml_file):
    '''return a dictionary of properties based from yaml file'''
    # Wrap the parsed mapping in ServiceInfo so lookups accept env aliases.
    with open(yaml_file) as fd:
        return ServiceInfo(yaml.safe_load(fd))
def get_services():
    '''returns a dict of services
    Parses the data/.yaml files, and creates returns a dictionary of
    structure::
        {
        'task_service': {
            'properties': {
                'confluence': '....',
                'description': 'Task service, runs tasks (aka PlatformTaskManager)',
                'puppet_url': '....',
                'ports': ['8000(nginx auth)', ...],
                'other_service':.
                },
            #the environments
            'dev': {
                'human_url': 'http://bbpsrvi35:8000/ui/',
                'machine': 'bbpsrvi35',
                'oauth_dev': 'dev',
                'url': 'http://bbpsrvi35:8000'},
            'prod': (same as dev, but for prod),
            'staging': (same as dev, but for staging)}
        }
    Thus, you can easily pick the service you want to connect to:
    >>> import bbp_services.client as bsc
    >>> services = bsc.get_services()
    >>> env = 'dev' # or prod, or picked by the command line
    >>> oauth_url = services['oauth_service'][env]['url']
    '''
    # One entry per yaml file, keyed by the file's base name without extension.
    return {os.path.splitext(os.path.basename(path))[0]: _parse_yaml(path)
            for path in _get_yaml_files()}
def get_environments():
    '''get the available environments known to bbp_services
    We `voted <http://www.polljunkie.com/poll/bkwgbd/environment-naming/view>`_:
    9 responses::
        * dev: 88%, development: 11%
        * staging: 88%, preprod: 11%
        * prod: 66%, production: 33%
    So internally, our services are referred to by: dev/staging/prod
    >>> import bbp_services.client as bsc
    >>> bsc.get_environments()
    ['prod', 'staging', 'dev']
    '''
    canonical_environments = ['prod', 'staging', 'dev']
    return canonical_environments
def get_environment_aliases():
    '''get all the available environment names

    These consist of the environments defined by get_environments()
    plus all their aliases, returned as a tuple in arbitrary order.
    '''
    # dict.keys() returns a view on Python 3, which cannot be concatenated
    # to a list with ``+``; build the union as a set instead (works on both
    # Python 2 and 3, and preserves the original tuple-of-unique-names result).
    return tuple(set(ServiceInfo.NAME_MAPS) | set(get_environments()))
def confluence_services_table():  # pylint: disable=R0912
    '''create a confluence markup table about our services

    Builds one row per service with columns: Name, the machine/link for each
    environment (dev/staging/prod), Ports, Puppet and Confluence links.
    Missing values are rendered as '-'.
    '''
    services = get_services()
    HEADINGS = ['Name', 'Dev', 'Staging', 'Prod', 'Ports', 'Puppet', 'Confluence']
    # Header row uses Confluence's ``||`` heading-cell syntax.
    ret = ['||' + '||'.join(HEADINGS) + '||']
    def confluence_url(url):
        '''create confluence urls from full urs'''
        # Links into our own space are shortened to their page title.
        our_space = 'https://bbpteam.epfl.ch/project/spaces/display/BBPWFA/'
        if url.startswith(our_space):
            url = str(url[len(our_space):]).replace('+', ' ')
        return '[%s]' % url
    for name in services.keys():
        service = services[name]
        # Leading '' produces the row-opening '|' when joined below.
        row = ['', name]
        for env in ('dev', 'staging', 'prod'):
            if env not in service:
                row.append('-')
                continue
            serv_env = service[env]
            if 'machine' in serv_env and 'human_url' in serv_env:
                # Render the machine name as a link to its human-facing URL.
                row.append('[%s|%s]' % (serv_env['machine'],
                                        serv_env['human_url']))
            elif 'machine' in serv_env:
                row.append(serv_env['machine'])
            else:
                row.append('-')
        props = service['properties']
        if 'ports' in props:
            row.append(', '.join([str(i) for i in props['ports']]))
        else:
            row.append('-')
        if 'puppet_url' in props:
            row.append(confluence_url(props['puppet_url']))
        else:
            row.append('-')
        if 'confluence' in props:
            row.append(confluence_url(props['confluence']))
        else:
            row.append('-')
        row.append('')  # ensure ending |
        ret.append(' | '.join(row))
    return '\n'.join(ret)
if __name__ == '__main__':
    # Print the Confluence services table when run as a script.
    print(confluence_services_table())
| dcam0050/NRP_Docker | NRP_Edits/user-scripts/config_files/VirtualCoach/platform_venv/bbp_services/client.py | client.py | py | 5,552 | python | en | code | 1 | github-code | 50 |
33111719591 | import os
import shutil
import pandas as pd
import argparse
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
import matplotlib.pyplot as plt
from official.nlp import optimization # to create AdamW optimizer
from string import Template
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorboard.plugins.hparams import api as hp
from model.data_processing import load_data
from model.conversion import convert_to_unstructure
from model.conversion import convert_to_unstructure_for_pretraining
from model.utils import print_my_examples
from model.custom_loss import Custom_CE_Loss
from model.training import train_and_evaluate
from model.utils import Params
# Command-line interface: --model_dir points at the experiment folder
# that contains the params.json hyperparameter file.
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/test',
                    help="Experiment directory containing params.json")
if __name__ == '__main__':
AUTOTUNE = tf.data.AUTOTUNE
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(
json_path), "No json configuration file found at {}".format(json_path)
params = Params(json_path)
seed = 42
log_dir = 'experiments/language_model'
run_name = 'post_pretune_final'
#Load Data
dataframe = load_data("data/visits", "/visitdataclassification-4300.csv")
dataframe.head()
train_x, hold_x = train_test_split(dataframe, test_size=0.20)
#data for pretrain
convert_to_unstructure_for_pretraining(dataframe)
processedFolder = "data/visits/train"
if(os.path.exists(processedFolder) == False):
convert_to_unstructure(train_x)
convert_to_unstructure(hold_x, False)
#
raw_train_ds = tf.keras.utils.text_dataset_from_directory(
'data/visits/train',
batch_size=params.batch_size,
validation_split=0.25,
subset='training',
seed=seed)
class_names = raw_train_ds.class_names
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = tf.keras.utils.text_dataset_from_directory(
'data/visits/train',
batch_size=params.batch_size,
validation_split=0.25,
subset='validation',
seed=seed)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = tf.keras.utils.text_dataset_from_directory(
'data/visits/test',
batch_size=params.batch_size)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
data_set = {
'train_ds': train_ds,
'val_ds': val_ds,
'test_ds': test_ds,
}
for text_batch, label_batch in train_ds.take(1):
for i in range(10):
print(f'Review: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({class_names[label]})')
#Select Model
bert_model_name = 'small_bert/bert_en_uncased_L-4_H-512_A-8'
map_name_to_handle = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3',
'bert_en_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3',
'bert_multi_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3',
'small_bert/bert_en_uncased_L-2_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1',
'small_bert/bert_en_uncased_L-2_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1',
'small_bert/bert_en_uncased_L-2_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1',
'small_bert/bert_en_uncased_L-2_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1',
'small_bert/bert_en_uncased_L-4_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1',
'small_bert/bert_en_uncased_L-4_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1',
'small_bert/bert_en_uncased_L-4_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1',
'small_bert/bert_en_uncased_L-4_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1',
'small_bert/bert_en_uncased_L-6_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1',
'small_bert/bert_en_uncased_L-6_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1',
'small_bert/bert_en_uncased_L-6_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1',
'small_bert/bert_en_uncased_L-6_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1',
'small_bert/bert_en_uncased_L-8_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1',
'small_bert/bert_en_uncased_L-8_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1',
'small_bert/bert_en_uncased_L-8_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1',
'small_bert/bert_en_uncased_L-8_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1',
'small_bert/bert_en_uncased_L-10_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1',
'small_bert/bert_en_uncased_L-10_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1',
'small_bert/bert_en_uncased_L-10_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1',
'small_bert/bert_en_uncased_L-10_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1',
'small_bert/bert_en_uncased_L-12_H-128_A-2':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1',
'small_bert/bert_en_uncased_L-12_H-256_A-4':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1',
'small_bert/bert_en_uncased_L-12_H-512_A-8':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1',
'small_bert/bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1',
'albert_en_base':
'https://tfhub.dev/tensorflow/albert_en_base/2',
'electra_small':
'https://tfhub.dev/google/electra_small/2',
'electra_base':
'https://tfhub.dev/google/electra_base/2',
'experts_pubmed':
'https://tfhub.dev/google/experts/bert/pubmed/2',
'experts_wiki_books':
'https://tfhub.dev/google/experts/bert/wiki_books/2',
'talking-heads_base':
'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1',
}
map_model_to_preprocess = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'bert_en_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-2_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-4_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-6_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-8_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-10_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-128_A-2':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-256_A-4':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-512_A-8':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'small_bert/bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'bert_multi_cased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3',
'albert_en_base':
'https://tfhub.dev/tensorflow/albert_en_preprocess/3',
'electra_small':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'electra_base':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'experts_pubmed':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'experts_wiki_books':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
'talking-heads_base':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
}
tfhub_handle_encoder = map_name_to_handle[bert_model_name]
tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name]
print(f'BERT model selected : {tfhub_handle_encoder}')
print(f'Preprocess model auto-selected: {tfhub_handle_preprocess}')
bert_model = hub.KerasLayer(tfhub_handle_encoder)
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess)
checkpoint_path = "trained-models/pretraining_output/model.ckpt-20"
# Configure Hyperparameter for Language Model
HP_DROPOUT = hp.HParam('dropout_rate', hp.Discrete([params.dropout_rate]))
HP_LEARNINGRATE = hp.HParam('learning_rate', hp.Discrete([params.learning_rate]))
METRIC_ACCURACY = 'accuracy'
hparams = {
HP_DROPOUT: params.dropout_rate,
HP_LEARNINGRATE: params.learning_rate
}
def build_BERT_model():
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
model = tf.keras.Model(text_input, net)
checkpoint = tf.train.Checkpoint(model)
checkpoint.restore(checkpoint_path)
#model.load_weights(checkpoint_path)
return model
def build_classifier_model(classes,params, HP_DROPOUT):
drop_out = params[HP_DROPOUT]
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
net = tf.keras.layers.Dropout(drop_out)(net)
net = tf.keras.layers.Dense(classes, activation="softmax", name='classifier')(net)
model = tf.keras.Model(text_input, net)
checkpoint = tf.train.Checkpoint(model)
checkpoint.restore(checkpoint_path)
return model
#bert_model = build_BERT_model()
#save model
#bert_model.save('bertmodel')
language_model = build_classifier_model(len(class_names), hparams, HP_DROPOUT)
#bert_raw_result = language_model(tf.constant(text_test))
#print(tf.sigmoid(bert_raw_result))
#tf.keras.utils.plot_model(language_model)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False,ignore_class=None,
name='sparse_categorical_crossentropy')
#loss = Custom_CE_Loss(gamma=0.1)
#metrics = tf.keras.metrics.Recall()
metrics = tf.keras.metrics.SparseCategoricalAccuracy('accuracy', dtype=tf.float32)
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
num_train_steps = steps_per_epoch * params.num_epochs
num_warmup_steps = int(0.1*num_train_steps)
#init_lr = 1e-3
learning_rate = hparams[HP_LEARNINGRATE]
optimizer = optimization.create_optimizer(init_lr=learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
optimizer_type='adamw')
language_model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
print(f'Training model with {tfhub_handle_encoder}')
#checkpoint
posttraining_checkpoint_path = "trained-models/language/training_CustomLoss_2/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
#One Sample Testing
examples = [
#'A 29 months old, 50 lb, Male Dalmatian-Canine is checked-in on Monday in hospital for Exam Annual due to Vaccines.', # this is the same sentence tried earlier
'A 45 months old, 72 lb, Male Labrador Retriever Mix-Canine is checked-in on Tuesday in hospital for Technician Appointment due to Exam.'
]
original_results = tf.sigmoid(language_model(tf.constant(examples)))
#Train and evaluate
run_name+='_lr-'+str(learning_rate)+'_ep-'+str(params.num_epochs)
#language_model.summary()
train_and_evaluate(language_model, data_set, log_dir, hparams, params, run_name)
post_finetunning_results = tf.sigmoid(language_model(tf.constant(examples)))
#Uncomment below if you want to visualize the results pre and post training
'''
print('-----Model prediction-----')
print('Results from the model without training:')
print(original_results)
print_my_examples(examples, original_results)
plt.plot(original_results[0], linestyle = 'dotted')
plt.title('Visit Time Window before Fine Tunning distribution')
plt.show()
print('Results from the saved model:')
print(post_finetunning_results)
print_my_examples(examples, post_finetunning_results)
plt.plot(post_finetunning_results[0], linestyle = 'dotted')
plt.title('Visit Time Window Post Fine Tunning distribution')
plt.show()
''' | saurabh-malik/patient-visittime-model | train_vlm.py | train_vlm.py | py | 16,424 | python | en | code | 0 | github-code | 50 |
16164119838 | import matplotlib.pyplot as plt
import numpy as np
def discount_rewards(r,gamma=0.95,normalize_rewards=False):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r,dtype=np.float32)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = float(running_add)
if normalize_rewards and not np.all(discounted_r==0):
dd = discounted_r
dd -= np.mean(discounted_r)
dd /= np.std(discounted_r)
discounted_r = dd
return discounted_r
class training_log():
"""
Takes care of organizing data for each game and batches of game
Instructions:
1. Call add_turns after each step to store turn info
2. At the end of each game call add_games to store game info
3. When batch_size games have been logged, add_batch will be called
automatically and will create a summary for whole batch
4. To produce a dictionary for training, call get_training_data
5. Call get_performance_record to get played-won-lost-drew-steps stats
6. Plot performance across entire training using plot_stats
"""
def __init__(self,nrows,ncols,max_steps,batch_size,num_episodes):
self.nrows = nrows
self.ncols = ncols
self.max_turns = max_steps
self.batch_size = batch_size
self.num_episodes = num_episodes
self.max_batches = int(num_episodes/batch_size)
self.reset_batch_log()
def add_turn(self,current_state,action,reward,next_state=None):
"""Adds information about current turn to turns log"""
if self.nturns==self.max_turns:
print('Warning: game length is',self.nturns+1,'which exceeds max_steps [',self.max_turns,'] call add_game to make new game.')
return 0
if self.ngames==self.batch_size:
# print('Info: batch',self.nbatches+1,'/',self.max_batches,'is complete. Storing info and making new batch.')
self.reset_game_log()
self.turns_cstate[self.nturns,:,:] = np.array(current_state)
self.turns_action[self.nturns] = action
self.turns_reward[self.nturns] = reward
if next_state!=None:
self.turns_nstate[self.nturns,:,:] = np.array(next_state)
# print('Batch:',self.nbatches+1,'\tGame:',self.ngames+1,'\tTurn:',self.nturns+1)
self.nturns+=1
return self.nturns
def add_game(self,game_outcome,ep,dr_gamma=0.95,norm_r=False):
"""Adds information about current game to games log"""
if self.nturns==0:
print('Error: game had zero moves. Nothing was added to game_log')
return 0
if self.ngames==self.batch_size:
print('Error: trying to add game number',self.ngames+1,'to batch',self.nbatches+1,'[ batch size =',self.batch_size,']')
return 0
i0 = self.total_in_batch
discounted_r = discount_rewards(self.turns_reward,dr_gamma,norm_r)
running_reward = np.cumsum(self.turns_reward)
self.running_reward = running_reward[0:self.nturns]
for i in range(self.nturns):
self.games_cstate[i0+i,:,:] = self.turns_cstate[i,:,:]
self.games_action[i0+i] = self.turns_action[i]
self.games_reward[i0+i] = discounted_r[i]
self.games_running_reward[i0+i] = running_reward[i]
self.games_total_reward[self.ngames] = np.sum(self.turns_reward)
self.games_length[self.ngames] = self.nturns
self.games_record[self.ngames] = game_outcome
# update total batch size by length of last game
self.total_in_batch+=self.nturns
self.ngames+=1
# clear turn logs
self.reset_turn_log()
if self.ngames==self.batch_size:
# print('Info: batch',self.nbatches+1,'/',self.max_batches,'is complete. Storing info and making new batch.')
self.add_batch()
return self.ngames
def add_game_performance(self,num_in_batch,loss,cross_entropy=-1):
""" stores some extra data such as loss and cross entropy"""
if num_in_batch>=self.batch_size or num_in_batch<0:
print('Error: cannot add NN performance data for game',num_in_batch,'[ >',self.batch_size,']')
return False
self.games_loss[num_in_batch]=loss
if cross_entropy>=0:
self.games_cross_entropy[num_in_batch]=cross_entropy
return True
def add_batch(self):
"""Adds summary information across multiple batches to batch log
- Note that calling this function ends the batch and deletes game info
"""
if self.total_in_batch==0:
print('Error: batch has zero games. Nothing was added to batch_log')
return 0
if self.ngames!=self.batch_size:
print('Warning: batch has size',self.ngames+1,'but size',self.batch_size,'was expected..')
unique, counts = np.unique(self.games_record, return_counts=True)
self.batch_record[self.nbatches,0] = len(self.games_record)
key = [1,2,0,-1]
for i,k in enumerate(key):
if len(counts[unique==k]):
self.batch_record[self.nbatches,i+1] = int(counts[unique==k])
iend = self.total_in_batch
self.batch_ave_reward[self.nbatches] = np.mean(self.games_total_reward[0:iend])
self.batch_std_reward[self.nbatches] = np.std(self.games_total_reward[0:iend])
self.batch_ave_turns[self.nbatches] = np.mean(self.games_length[0:iend])
self.batch_std_turns[self.nbatches] = np.std(self.games_length[0:iend])
self.batch_ave_loss[self.nbatches] = np.mean(self.games_loss[0:iend])
self.batch_std_loss[self.nbatches] = np.std(self.games_loss[0:iend])
self.batch_ave_ce[self.nbatches] = np.mean(self.games_cross_entropy[0:iend])
self.batch_std_ce[self.nbatches] = np.std(self.games_cross_entropy[0:iend])
self.nbatches+=1
def get_training_data(self,ngames=-1):
""" returns states,rewards and actions for ngames games"""
i0 = 0
nturns = int(self.total_in_batch)
iend = nturns
# select ngames most recent games
if ngames<0:
pass # negative input defaults to all games in batch
elif ngames>=0 and ngames<=self.ngames:
nturns = int(np.sum(self.games_length[self.ngames-ngames:self.ngames]))
i0 = int(iend - nturns)
else:
print('Error: ngames =',ngames,'is more than total_in_batch [ =',self.total_in_batch,']')
return 0,0,0
states = np.zeros([nturns,self.nrows,self.ncols,1])
for i in range(nturns):
states[i,:,:,0] = self.games_cstate[i0+i,:,:]
# appropriate dimensions are taken care of in split_board with argument self.batch
# sep_states = my_models.split_board(self.games_cstate,self.total_in_batch)
actions = self.games_action[i0:iend]
rewards = self.games_reward[i0:iend]
# return raw_states,sep_states,actions,rewards
return states,actions,rewards
def get_batch_record(self,fetch_batches=[-1],percentage=True,sum_batches=False):
""" returns game performance stats
eg. won 70, lost 3, drew 25, out-of-steps 2, played 100
stored as elements in stats:- 0=w,1=l,2=d,3=s,4=tot
returns: summary stats for all batches in fetch_batches
"""
# if these results are already stored it is easy to sum multiple batches
if hasattr(fetch_batches,"__len__"):
nfetch = len(fetch_batches)
else:
nfetch=1
fetch_batches = [fetch_batches]
stats = np.zeros([nfetch,5])
for i,bat in enumerate(fetch_batches):
indx = 0
if np.abs(bat)>self.nbatches+1:
print('Error: cannot get performance stats for batch ',bat,'.. [ max =',self.nbatches,']')
return stats
elif bat<0: indx = self.nbatches+bat
else: indx = bat
stats[i,:] = self.batch_record[indx,:]
if sum_batches:
stats = np.sum(stats,axis=0)
tot,won,lost,drew,step = [],[],[],[],[]
if percentage:
if sum_batches:
stats[1:]*=100.0/stats[0]
tot,won,lost,drew,step = stats
else:
for j in range(nfetch):
stats[j,1:]*=100.0/stats[j,0]
tot = stats[:,0]
won = stats[:,1]
lost = stats[:,2]
drew = stats[:,3]
step = stats[:,4]
return tot,won,lost,drew,step
def get_running_reward(self,all_batch=False):
if all_batch:
return self.games_running_reward.copy()
else: # only for most recent game
return self.running_reward.copy()
def get_batch_rewards(self,fetch_batches=[-1],sum_batches=False):
""" returns reward data
"""
if hasattr(fetch_batches,"__len__"):
nfetch = len(fetch_batches)
else:
nfetch=1
fetch_batches = [fetch_batches]
avg_rew = np.zeros(nfetch)
std_rew = np.zeros(nfetch)
for i,bat in enumerate(fetch_batches):
indx = 0
if np.abs(bat)>self.total_in_batch+1:
print('Error: cannot get rewards for batch ',bat,'.. [ >',self.nbatches,']')
return 0,0
elif bat<0: indx = self.nbatches+bat
else: indx = bat
avg_rew[i] = self.batch_ave_reward[indx]
std_rew[i] = self.batch_std_reward[indx]
if sum_batches: # return a single value for avg and std
avg_rew = np.mean(avg_rew)
std_rew = np.mean(std_rew)
return avg_rew,std_rew
def reset_turn_log(self):
self.nturns=0
# pre-declared numpy containers that can contain up to max_steps elements
self.turns_cstate = np.zeros([self.max_turns,self.nrows,self.ncols])
self.turns_nstate = np.zeros([self.max_turns,self.nrows,self.ncols])
self.turns_action = np.zeros(self.max_turns)
self.turns_reward = np.zeros(self.max_turns)
def reset_game_log(self):
self.ngames=0
self.total_in_batch=0
max_in_batch = self.max_turns*self.batch_size
# pre-declared numpy containers that can contain up to max_batch elements
self.games_cstate = np.zeros([max_in_batch,self.nrows,self.ncols])
self.games_nstate = np.zeros([max_in_batch,self.nrows,self.ncols])
self.games_action = np.zeros(max_in_batch)
self.games_reward = np.zeros(max_in_batch)
self.games_running_reward = np.zeros(max_in_batch)
self.running_reward=0 # easy-to-use variable size container
self.games_total_reward = np.zeros(self.batch_size)
self.games_record = np.zeros(self.batch_size)
self.games_length = np.zeros(self.batch_size)
self.games_loss = np.zeros(self.batch_size)
self.games_cross_entropy = np.zeros(self.batch_size)
self.reset_turn_log()
def reset_batch_log(self):
self.nbatches=0
self.batch_record = np.zeros([self.max_batches,5])
self.batch_ave_reward = np.zeros(self.max_batches)
self.batch_std_reward = np.zeros(self.max_batches)
self.batch_ave_turns = np.zeros(self.max_batches)
self.batch_std_turns = np.zeros(self.max_batches)
self.batch_ave_loss = np.zeros(self.max_batches)
self.batch_std_loss = np.zeros(self.max_batches)
self.batch_ave_ce = np.zeros(self.max_batches)
self.batch_std_ce = np.zeros(self.max_batches)
self.reset_game_log()
def regroup(self,x,naverage=1):
""" re-groups a set of points into a smaller group of averages"""
if naverage<=1:
return x
elif naverage>len(x):
print('Error: Cannot re-group',len(x),'points into %.0f'%naverage,'points.')
return x
new_length = round(len(x)/naverage)
ave_x = np.zeros([new_length])
sum_x = 0.0
j = 0
for i in range(len(x)):
sum_x+=x[i]
if i%naverage==0:
ave_x[j]=sum_x/float(naverage)
j+=1
sum_x=0.0
return ave_x
def plot_stats(self,game_name,ngroup=-1):
# plot variables which coincide with episodes [eps] array
eps = range(0,self.num_episodes,self.batch_size)
x = self.regroup(eps,ngroup)
fig = plt.figure(figsize=(10,8), dpi=90)
fig.patch.set_facecolor('white')
fig.suptitle(game_name+' Training', fontsize=20, fontweight='bold')
ax = fig.add_subplot(221)
ax.set_xlabel('Number of Games', fontsize=14)
ax.set_ylabel('Average Reward', fontsize=14)
y = self.regroup(self.batch_ave_reward,ngroup)
dy = self.regroup(self.batch_std_reward,ngroup)
plt.plot(x,y,'k-')
plt.fill_between(x,y-dy,y+dy,color='b',alpha=0.2)
ax = fig.add_subplot(222)
ax.set_xlabel('Number of Games', fontsize=14)
ax.set_ylabel('Average Loss', fontsize=14)
y = self.regroup(self.batch_ave_loss,ngroup)
dy = self.regroup(self.batch_std_loss,ngroup)
plt.plot(x,y,'k-')
plt.fill_between(x,y-dy,y+dy,color='r',alpha=0.2)
ax = fig.add_subplot(223)
ax.set_xlabel('Number of Games', fontsize=14)
ax.set_ylabel('Average Turns', fontsize=14)
y = self.regroup(self.batch_ave_turns,ngroup)
dy = self.regroup(self.batch_std_turns,ngroup)
plt.plot(x,y,'k-')
plt.fill_between(x,y-dy,y+dy,color='g',alpha=0.2)
ax = fig.add_subplot(224)
ax.set_xlabel('Number of Games', fontsize=14)
ax.set_ylabel('Performance per batch', fontsize=14)
_,won,lost,drew,step = self.get_batch_record(fetch_batches=np.arange(self.nbatches),
sum_batches=False,percentage=True)
w = self.regroup(won,ngroup)
l = self.regroup(lost,ngroup)
d = self.regroup(drew,ngroup)
s = self.regroup(step,ngroup)
plt.plot(x,w,'g-',label='won')
plt.plot(x,l,'r-',label='lost')
plt.plot(x,d,'b-',label='drew')
plt.plot(x,s,'k:',label='out-of-steps')
plt.legend(fontsize=9)
# plt.show()
plt.pause(30)
plt.savefig('training_'+game_name)
| steffencruz/mofo | my_stats.py | my_stats.py | py | 14,656 | python | en | code | 1 | github-code | 50 |
27005185640 | import matplotlib.pyplot as plt
import numpy as np
#define data
labels = ['Coats','Jeans','Jackets','Trousers','Joggers','Suits','Hoodies','T-Shirts',
'Shorts','Polo Shirts']
IR = [75,68,20,18,12,11,9,6,4,2]
CP = [0.33,0.64,0.72,0.8,0.86,0.91,0.95,0.97,0.99,1]
c1='#5B9BD5'
c2='#ED7D31'
csfont = {'fontname':'Calibri'}
width = 0.99
id_label = np.arange(len(labels))
yy = np.linspace(0,1,11)
f = plt.figure(figsize=(10,8), dpi=100)
ax = plt.subplot()
ax.bar(id_label + width/2, IR, width, color=c1)
ax.set_xlim(0,10)
ax.set_xticks(id_label + width/2)
ax.set_xticklabels(labels,style='italic')
ax.set_xlabel('Product',weight='bold',fontsize=12)
ax.set_ylim(0,80)
ax.set_ylabel('Items Returned',rotation=90,weight='bold',fontsize=12)
ax.set_title('Returns & Refunds',color='k',fontsize=20,weight='bold',**csfont)
for idx,val in enumerate(IR):
ax.text(idx+width/2,val+1,val,color='k',ha='center',weight='bold')
ax1 = ax.twinx()
ax.tick_params(axis='both',
which='both',
top=False,
bottom=False,
left=False,
right=False,
labelleft=True,
labelbottom=True)
ax1.plot(id_label + width/2,CP,color=c2,linewidth=4,marker='o',label='Cummulative Return')
ax1.set_ylim(0,1.01)
ax1.set_yticks(yy)
ax1.set_yticklabels(['{:.0%}'.format(val) for val in yy])
for idx,val in enumerate(CP):
ax1.text(idx+width/2,val+0.03,'{:.0%}'.format(val),color='r',ha='center',weight='bold')
for spine in ax.spines.keys():
ax.spines[spine].set_visible(False)
ax1.spines[spine].set_visible(False)
ax1.tick_params(axis='both',
which='both',
top=False,
bottom=False,
left=False,
right=False,
labelright=True,
labelbottom=False) | cwk0507/MSDM | MSDM5002/Assignment_4/Working/Q2.py | Q2.py | py | 1,930 | python | en | code | 0 | github-code | 50 |
30820993716 | """
File: asteroids.py
Original Author: Br. Burton
Designed to be completed by others
This program implements the asteroids game.
"""
"""Completed by Nelson Georges"""
import arcade
import random
import math
from abc import ABC, abstractmethod
# These are Global constants to use throughout the game
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
BULLET_RADIUS = 30
BULLET_SPEED = 10
BULLET_LIFE = 60
SHIP_TURN_AMOUNT = 5
SHIP_THRUST_AMOUNT = 0.25
SHIP_RADIUS = 30
INITIAL_ROCK_COUNT = 5
INTERMEDIATE_LEVEL_ROCK_COUNT = 10
HARD_LEVEL_ROCK_COUNT = 15
BIG_ROCK_SPIN = 1
BIG_ROCK_SPEED = 1.5
BIG_ROCK_RADIUS = 15
MEDIUM_ROCK_SPIN = -2
MEDIUM_ROCK_RADIUS = 5
MEDIUM_ROCK_SPEED = 1.5
SMALL_ROCK_SPIN = 5
SMALL_ROCK_RADIUS = 2
SMALL_ROCK_SPEED = 1.5
SCORE_HIT = 2
class Point:
def __init__(self):
self.x = 0.0
self.y = 0.0
class Velocity:
def __init__(self):
self.dx = 0
self.dy = 0
class FlyingObject(ABC):
def __init__(self, img):
self.center = Point()
self.velocity = Velocity()
self.alive = True
self.img = img
self.texture = arcade.load_texture(self.img)
self.width = self.texture.width
self.height = self.texture.height
self.radius = 0
self.angle = 0
self.speed = 0
self.direction = 0
def advance(self):
self.center.x += self.velocity.dx
self.center.y += self.velocity.dy
# This is for screen wrapping on edges
if self.center.x > SCREEN_WIDTH:
self.center.x -= SCREEN_WIDTH
if self.center.x < 0:
self.center.x += SCREEN_WIDTH
# This is for screen wrapping on top and bottom
if self.center.y > SCREEN_HEIGHT:
self.center.y -= SCREEN_HEIGHT
if self.center.y < 0:
self.center.y += SCREEN_HEIGHT
def is_alive(self):
return self.alive
def draw(self):
arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 255)
class Asteroid(FlyingObject):
def __init__(self, img):
super().__init__(img)
self.radius = 0
def Spin(self, spin):
# Make the asteroid spin
self.spin = spin
self.angle += self.spin
def draw(self):
arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 255)
if not self.alive:
self.img = "images/explode.jpg"
self.texture = arcade.load_texture(self.img)
self.width = self.texture.width
self.height = self.texture.height
arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 255)
class SmallAsteroid(Asteroid):
def __init__(self):
super().__init__("images/meteorGrey_small1.png")
self.radius = SMALL_ROCK_RADIUS
self.spin = SMALL_ROCK_SPIN
self.speed = SMALL_ROCK_SPEED
self.center.x = random.randint(1, 50)
self.center.y = random.randint(1, 150)
self.direction = random.randint(1, 50)
self.velocity.dx = math.cos(math.radians(self.direction)) * self.speed
self.velocity.dy = math.cos(math.radians(self.direction)) * self.speed
def break_apart(self, asteroids):
self.alive = False
class MediumAsteroid(Asteroid):
def __init__(self):
super().__init__("images/meteorGrey_med1.png")
self.radius = MEDIUM_ROCK_RADIUS
self.speed = MEDIUM_ROCK_SPEED
self.center.x = random.randint(1, 50)
self.center.y = random.randint(1, 150)
self.direction = random.randint(1, 50)
self.velocity.dx = math.cos(math.radians(self.direction)) * self.speed
self.velocity.dy = math.cos(math.radians(self.direction)) * self.speed
self.spin = MEDIUM_ROCK_SPIN
def break_apart(self, asteroids):
# Create a small asteroid
sma_ast1 = SmallAsteroid()
sma_ast1.center.x = self.center.x
sma_ast1.center.y = self.center.y
sma_ast1.velocity.dy = self.velocity.dy + 1.5
sma_ast1.velocity.dx = self.velocity.dx + 1.5
# Create a second small asteroid
sma_ast2 = SmallAsteroid()
sma_ast2.center.x = self.center.x
sma_ast2.center.y = self.center.y
sma_ast1.velocity.dy = self.velocity.dy - 1.5
sma_ast1.velocity.dx = self.velocity.dx - 1.5
# Add the small asteroids to the ist of Asteroids
asteroids.append(sma_ast1)
asteroids.append(sma_ast2)
self.alive = False
class LargeAsteroid(Asteroid):
def __init__(self):
super().__init__("images/meteorGrey_big1.png")
self.radius = BIG_ROCK_RADIUS
self.center.x = random.randint(1, 50)
self.center.y = random.randint(1, 150)
self.direction = random.randint(1, 50)
self.speed = BIG_ROCK_SPEED
self.velocity.dx = math.cos(math.radians(self.direction)) * self.speed
self.velocity.dy = math.cos(math.radians(self.direction)) * self.speed
self.spin = BIG_ROCK_SPIN
def break_apart(self, asteroids):
#create a medium asteroid
med_ast1 = MediumAsteroid()
med_ast1.center.x = self.center.x
med_ast1.center.y = self.center.y
med_ast1.velocity.dy = self.velocity.dy + 2
# Create a second medium asteroid
med_ast2 = MediumAsteroid()
med_ast2.center.x = self.center.x
med_ast2.center.y = self.center.y
med_ast2.velocity.dy = self.velocity.dy - 2
# Create a small asteroid
sma_ast = SmallAsteroid()
sma_ast.center.x = self.center.x
sma_ast.center.y = self.center.y
sma_ast.velocity.dx = self.velocity.dx + 5
# Add the asteroids being created to the list of asteroids
asteroids.append(med_ast1)
asteroids.append(med_ast2)
asteroids.append(sma_ast)
self.alive = False
class Bullet(FlyingObject):
def __init__(self, ship_ang, ship_x, ship_y):
super().__init__("images/laserBlue01.png")
self.angle = ship_ang
self.center.x = ship_x
self.center.y = ship_y
self.radius = BULLET_RADIUS
self.alive = BULLET_LIFE
self.speed = BULLET_SPEED
def fire(self, ship_dx, ship_dy):
self.velocity.dx -= ship_dx + math.sin(math.radians(self.angle)) * BULLET_SPEED
self.velocity.dy += ship_dy + math.cos(math.radians(self.angle)) * BULLET_SPEED
def advance(self):
super().advance()
self.alive -= 1
if (self.alive <= 0):
self.alive = False
class Ship(FlyingObject):
    """The player's ship.

    While alive it is drawn normally; once dead, draw() switches to a
    "game over" screen (damaged ship, banners, play-again prompt).
    """
    def __init__(self):
        super().__init__("images/playerShip1_orange.png")
        # Start near-vertical at the center of the screen.
        self.angle = 1
        self.center.x =(SCREEN_WIDTH/2)
        self.center.y = (SCREEN_HEIGHT/2)
        self.radius = SHIP_RADIUS
    def draw(self):
        """Draw the ship, or the full game-over screen when dead."""
        if (self.alive):
            arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 255)
        if not self.alive:
            # Draw the damaged ship
            # NOTE(review): load_texture() is called here on every frame of
            # the game-over screen; caching these textures would avoid
            # repeated disk loads.  Left as-is to preserve behavior.
            img = "images/damaged_ship2.png"
            self.texture = arcade.load_texture(img)
            arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 255)
            # Re-center and stop the wreck so it stays put on screen.
            self.center.x =(SCREEN_WIDTH/2)
            self.center.y = (SCREEN_HEIGHT/2)
            self.velocity.dx = 0
            self.velocity.dy = 0
            # self.img = "images/ship_explode.jpeg"
            # self.texture = arcade.load_texture(self.img)
            # self.width = self.texture.width
            # self.height = self.texture.height
            # arcade.draw_texture_rectangle(self.center.x, self.center.y, self.width, self.height, self.texture, self.angle, 60)
            # Draw Game over at the top of the screen
            img = "images/gameover.jpeg"
            texture = arcade.load_texture(img)
            arcade.draw_texture_rectangle(SCREEN_WIDTH - 400, SCREEN_HEIGHT - 70, 400, 200, texture, 0, 255)
            # draw a message on the screen
            img2 = "images/broken_ship_message.png"
            texture2 = arcade.load_texture(img2)
            arcade.draw_texture_rectangle(SCREEN_WIDTH - 400, SCREEN_HEIGHT - 200, 400, 100, texture2, 0, 255)
            # Draw Play Again at the bottom of the screen
            img3 = "images/play_again.png"
            texture3 = arcade.load_texture(img3)
            arcade.draw_texture_rectangle(SCREEN_WIDTH - 400, SCREEN_HEIGHT - 500, 400, 200, texture3, 0, 255)
            # NOTE(review): finish_render() ends the frame from inside draw();
            # anything the caller draws after this is skipped.  TODO confirm
            # this is the intended way to freeze the game-over screen.
            arcade.finish_render()
    def rotate_right(self):
        # Make the Ship rotate to the right direction
        self.angle -= SHIP_TURN_AMOUNT
    def rotate_left(self):
        # Make the Ship rotate to the left direction
        self.angle += SHIP_TURN_AMOUNT
    def thrust_forward(self):
        # Thrust the ship forward along its heading (-sin, +cos).
        self.velocity.dx -= math.sin(math.radians(self.angle)) * SHIP_THRUST_AMOUNT
        self.velocity.dy += math.cos(math.radians(self.angle)) * SHIP_THRUST_AMOUNT
    def thrust_backward(self):
        # Thrust the ship backward (opposite of thrust_forward).
        self.velocity.dx += math.sin(math.radians(self.angle)) * SHIP_THRUST_AMOUNT
        self.velocity.dy -= math.cos(math.radians(self.angle)) * SHIP_THRUST_AMOUNT
class Game(arcade.Window):
    """
    This class handles all the game callbacks and interaction
    This class will then call the appropriate functions of
    each of the above classes.
    """
    def __init__(self, width, height):
        """
        Sets up the initial conditions of the game
        :param width: Screen width
        :param height: Screen height
        """
        super().__init__(width, height)
        arcade.set_background_color(arcade.color.SMOKY_BLACK)
        self.score = 0
        self.held_keys = set()
        # Game state: live bullets, the player's ship and the asteroid field.
        self.bullets = []
        self.ship = Ship()
        self.asteroids = []
        # Play the victory fanfare only once, not on every frame after the
        # last asteroid is destroyed (see remove_dead_asteroids).
        self.congrats_played = False
        # Begin the Game with a number of Asteroids
        # INITIAL_ROCK_COUNT: for easy mode
        # INTERMEDIATE_LEVEL_ROCK_COUNT: for Intermediate mode
        # HARD_LEVEL_ROCK_COUNT: for HARD mode
        for i in range(INITIAL_ROCK_COUNT):
            big = LargeAsteroid()
            self.asteroids.append(big)
        # Game sound effect
        self.bullet_sound = arcade.load_sound("sound/bullet.wav")
        self.asteroid_sound = arcade.load_sound("sound/asteroid.wav")
        self.ship_sound = arcade.load_sound("sound/ship.wav")
        self.game_over_sound = arcade.load_sound("sound/game_over.wav")
        self.congrats_sound = arcade.load_sound("sound/congratulations.wav")
        self.ship_rotation_sound = arcade.load_sound("sound/rotation.wav")
    def on_draw(self):
        """
        Called automatically by the arcade framework.
        Handles the responsibility of drawing all elements.
        """
        # clear the screen to begin drawing
        arcade.start_render()
        self.ship.draw()
        for asteroid in self.asteroids:
            asteroid.draw()
        if self.asteroids == []:
            # Draw Congratulations at the top of the screen
            img = "images/congratulations.png"
            texture = arcade.load_texture(img)
            arcade.draw_texture_rectangle(SCREEN_WIDTH /2, SCREEN_HEIGHT /2, SCREEN_WIDTH - 150, SCREEN_WIDTH - 150, texture, 0, 255)
            # NOTE(review): finish_render() mid-draw ends the frame early, so
            # bullets are not drawn on the victory screen.  Kept as-is.
            arcade.finish_render()
        for bullet in self.bullets:
            bullet.draw()
        # Collisions are also checked in update(); checking here as well keeps
        # hit detection responsive between physics updates.
        self.check_collisions()
        self.draw_score()
    def draw_score(self):
        """
        Puts the current score on the screen
        """
        score_text = "Score: {}".format(self.score)
        start_x = 10
        start_y = SCREEN_HEIGHT - 20
        arcade.draw_text(score_text, start_x=start_x, start_y=start_y, font_size=15, color=arcade.color.WHITE)
    def remove_dead_bullets(self):
        """Remove all bullets that are dead.

        Rebuilds the list instead of calling list.remove() while iterating
        over it, which silently skipped the element after every removal.
        """
        self.bullets = [bullet for bullet in self.bullets if bullet.alive]
    def remove_dead_asteroids(self):
        """Remove all asteroids that are dead.

        Rebuilds the list (removing while iterating skipped elements) and
        plays the congratulations sound exactly once when the field is
        cleared -- previously it was played twice per call, every frame,
        for the rest of the game.
        """
        self.asteroids = [asteroid for asteroid in self.asteroids if asteroid.alive]
        if self.asteroids == [] and not self.congrats_played:
            self.congrats_played = True
            arcade.play_sound(self.congrats_sound)
    def check_collisions(self):
        """
        Checks to see if there is an asteroid and bullet colision,
        and asteroid and ship colison
        :return:
        """
        for asteroid in self.asteroids:
            for bullet in self.bullets:
                if ((bullet.alive) and (asteroid.alive)):
                    # Axis-aligned box test using the summed radii: a cheap
                    # approximation of a circle-circle collision.
                    distance_x = abs(asteroid.center.x - bullet.center.x)
                    distance_y = abs(asteroid.center.y - bullet.center.y)
                    max_distance = asteroid.radius + bullet.radius
                    if ((distance_x < max_distance) and (distance_y < max_distance)):
                        """We have an asteroid and a bullet collision!!"""
                        bullet.alive = False
                        asteroid.break_apart(self.asteroids)
                        self.score += SCORE_HIT
                        #Play an asteroid explosion sound
                        arcade.play_sound(self.asteroid_sound)
                        asteroid.draw()
            if ((asteroid.alive) and (self.ship.alive)):
                distance_x = abs(asteroid.center.x - self.ship.center.x)
                distance_y = abs(asteroid.center.y - self.ship.center.y)
                max_distance = asteroid.radius + self.ship.radius
                if ((distance_x < max_distance) and (distance_y < max_distance)):
                    """We have an asteroid and the ship collision!!"""
                    self.ship.alive = False
                    self.score = 0
                    # Play the Ship explosion sound
                    arcade.play_sound(self.ship_sound)
                    # Play a game-over sound
                    arcade.play_sound(self.game_over_sound)
    def update(self, delta_time):
        """
        Update each object in the game.
        :param delta_time: tells us how much time has actually elapsed
        """
        self.check_keys()
        # Tell everything to advance or move forward one step in time
        for asteroid in self.asteroids:
            asteroid.advance()
            asteroid.Spin(asteroid.spin)
        for bullet in self.bullets:
            bullet.advance()
        self.remove_dead_bullets()
        self.remove_dead_asteroids()
        self.ship.advance()
        # Check for collisions
        self.check_collisions()
    def check_keys(self):
        """
        This function checks for keys that are being held down.
        """
        if arcade.key.LEFT in self.held_keys:
            self.ship.rotate_left()
        if arcade.key.RIGHT in self.held_keys:
            self.ship.rotate_right()
        if arcade.key.UP in self.held_keys:
            self.ship.thrust_forward()
        if arcade.key.DOWN in self.held_keys:
            self.ship.thrust_backward()
        # Machine gun mode...
        #if arcade.key.SPACE in self.held_keys:
        #    pass
    def on_key_press(self, key: int, modifiers: int):
        """
        Puts the current key in the set of keys that are being held.
        Firing happens here so one press produces exactly one bullet.
        """
        if self.ship.alive:
            self.held_keys.add(key)
            if key == arcade.key.SPACE:
                # Fire a bullet from the ship's position, inheriting its velocity.
                bullet = Bullet(self.ship.angle, self.ship.center.x, self.ship.center.y)
                self.bullets.append(bullet)
                bullet.fire(self.ship.velocity.dx, self.ship.velocity.dy)
                #Make a bullet sound
                arcade.play_sound(self.bullet_sound)
    def on_key_release(self, key: int, modifiers: int):
        """
        Removes the current key from the set of held keys.
        """
        if key in self.held_keys:
            self.held_keys.remove(key)
# Creates the game and starts it going.
# (Dataset metadata that had been fused onto the arcade.run() line removed.)
window = Game(SCREEN_WIDTH, SCREEN_HEIGHT)
arcade.run()
def is_prime(data: int) -> bool:
    """Print whether *data* is prime and return the verdict.

    Fixes over the original: numbers below 2 (0, 1, negatives) are no
    longer reported as prime, the leftover debug ``print(count)`` is gone,
    and trial division stops at sqrt(data) -- any composite number has a
    divisor no larger than its square root.
    """
    if data < 2:
        print("it is not prime number")
        return False
    for i in range(2, int(data ** 0.5) + 1):
        if data % i == 0:
            print("it is not prime number")
            return False
    print("it is prime number")
    return True


is_prime(10)
# --- source: Abhihugar/DSApython / basic/isprime.py (python) ---
import math

# Read N points and print the largest pairwise Euclidean distance.
N = int(input())
x = []
y = []
for i in range(N):
    X, Y = map(int, input().split())
    x.append(X)
    y.append(Y)


def norm2(x1, y1, x2, y2):
    """Squared Euclidean distance between (x1, y1) and (x2, y2)."""
    return (x1-x2)**2+(y1-y2)**2


# Compare squared distances and take a single sqrt at the end.  Only pairs
# i < j are needed: distance is symmetric and d(i, i) == 0, so the original
# full N*N double loop did twice the work for the same answer.
max2 = 0
for i in range(N):
    for j in range(i + 1, N):
        max2 = max(norm2(x[i], y[i], x[j], y[j]), max2)
print(math.sqrt(max2))
# --- source: prettyhappycatty/problems / abc234_b.py (python) ---
class Solution:
    """Partition a string into all lists of palindromic substrings (LeetCode 131).

    Fixed: dataset metadata had been fused onto the final ``return`` line,
    breaking the syntax; also ``return(ans)`` -> ``return ans``.
    """

    def isPalindrome(self, s: str) -> bool:
        """Return True if *s* is a palindrome, ignoring case and non-alphanumerics."""
        s = ''.join(filter(str.isalnum, s.lower()))
        L, R = 0, len(s) - 1
        while L < R:
            if s[L] != s[R]:
                return False
            L += 1
            R -= 1
        return True

    def backtrack(self, s, i, ans, res):
        """Extend the partial partition *res* with every palindromic prefix of s[i:]."""
        if i == len(s):
            # Consumed the whole string: res is one complete partition.
            ans.append(res.copy())
            return
        for j in range(i + 1, len(s) + 1):
            if self.isPalindrome(s[i:j]):
                res.append(s[i:j])
                self.backtrack(s, j, ans, res)
                res.pop()  # undo the choice before trying a longer prefix

    def partition(self, s: str) -> List[List[str]]:
        """Return every way to split *s* into palindromic substrings."""
        ans = []
        self.backtrack(s, 0, ans, [])
        return ans
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 13:52:27 2020
@author: fedor.goncharov.ol@gmail.com

Smoke test for the transmission-EM reconstruction routines: build a ring
phantom, simulate a noisy X-ray transmission sinogram, then compare the
two convex EM variants (nr1 vs nr2) visually via matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, "../ver-python/utilities")
from radon_transform_matrix import radon_transform2d_xray_matrix
from sinogram_noise_generator import generate_noise_xray_transmission
from em_transmission import em_transmission_convex_nr1, em_transmission_convex_nr2
# make an image - spherical layer with radiuses r_out = 0.5, r_in = 0.25
# (64x64 grid on [-1, 1]^2; the ring has attenuation 1, background 0)
lin = np.linspace(-1., 1., 64)
[XX, YY] = np.meshgrid(lin, lin)
RR = np.sqrt(XX**2 + YY**2)
image = np.zeros((64,64))
image[RR < 0.5] = 1.
image[RR < 0.25] = 0.
# compute matrix for the Radon transform (this make take a while)
rt_system_matrix = radon_transform2d_xray_matrix(64, 64, 64, 1.0)
# compute denoised sinogram and add poisson noise
# (image is flattened column-vector style so the system matrix applies)
ray_transforms_vector = rt_system_matrix.dot(np.reshape(image, (64*64, 1)))
noise_ray_transforms_vector = generate_noise_xray_transmission(ray_transforms_vector, avg_intensity=1e3, T=1.0,
                                                               sc_intensity=1e1)
noise_ray_transforms = np.reshape(noise_ray_transforms_vector, (64, 64))
# run EM-algorithm
# avg_scattered matches sc_intensity above -- TODO confirm they must agree
avg_scattered = 1e1*np.ones((64,64))
max_iterations = 100
relative_err_level = 1e-3
init_point = np.ones((64,64))
reconstruction_em_nr1 = em_transmission_convex_nr1(noise_ray_transforms, rt_system_matrix, 1e3*np.ones((64,64)),
                                                   avg_scattered, max_iterations,
                                                   relative_err_level, init_point)
# nr1 - algorithm has a tendency to be numerically unstable when estimating attenuation values near zero
# in this example iterations from 1 to 8 give reasonable images, then the process completely diverges
fig1 = plt.figure()
plt.imshow(reconstruction_em_nr1)
# testing EM_emission_algorithm_mlem3
reconstruction_em_nr2 = em_transmission_convex_nr2(noise_ray_transforms, rt_system_matrix, 1e3*np.ones((64,64)),
                                                   avg_scattered, max_iterations,
                                                   relative_err_level, init_point)
fig2 = plt.figure()
plt.imshow(reconstruction_em_nr2)
# plt.close(fig1)
# plt.close(fig2)
# --- source: fedor-goncharov/wrt-project / em-algorithms/test_em_transmission.py (python) ---
import torch
import torch.nn as nn

from .darknet import Darknet
from .network_blocks import BaseConv
class YOLOFPN(nn.Module):
    """
    YOLOFPN module. Darknet 53 is the default backbone of this model.

    Builds a 3-level feature pyramid by repeatedly reducing the deepest
    backbone feature with a 1x1 conv, upsampling 2x, concatenating with the
    next shallower feature, and refining with a YOLOv3-style conv stack.
    """
    def __init__(
        self,
        depth=53,
        in_features=["dark3", "dark4", "dark5"],
    ):
        # NOTE(review): the mutable default list is shared across instances;
        # it is never mutated here, so this is safe, but worth knowing.
        super().__init__()
        self.backbone = Darknet(depth)
        self.in_features = in_features
        # out 1: reduce the deepest feature (512 ch, per _make_cbl below)
        # to 256 ch, then fuse with the middle feature after upsampling.
        self.out1_cbl = self._make_cbl(512, 256, 1)
        self.out1 = self._make_embedding([256, 512], 512 + 256)
        # out 2: reduce the fused branch to 128 ch and fuse with the
        # shallowest feature.
        self.out2_cbl = self._make_cbl(256, 128, 1)
        self.out2 = self._make_embedding([128, 256], 256 + 128)
        # upsample: 2x nearest-neighbor, applied before each concatenation.
        self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
    def _make_cbl(self, _in, _out, ks):
        # Single conv block with leaky-ReLU activation (see BaseConv).
        return BaseConv(_in, _out, ks, stride=1, act="lrelu")
    def _make_embedding(self, filters_list, in_filters):
        # Five alternating 1x1 / 3x3 conv blocks -- the classic YOLOv3 neck.
        m = nn.Sequential(
            *[
                self._make_cbl(in_filters, filters_list[0], 1),
                self._make_cbl(filters_list[0], filters_list[1], 3),
                self._make_cbl(filters_list[1], filters_list[0], 1),
                self._make_cbl(filters_list[0], filters_list[1], 3),
                self._make_cbl(filters_list[1], filters_list[0], 1),
            ]
        )
        return m
    def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"):
        """Load pretrained backbone weights from *filename* (mapped to CPU)."""
        with open(filename, "rb") as f:
            state_dict = torch.load(f, map_location="cpu")
        print("loading pretrained weights...")
        self.backbone.load_state_dict(state_dict)
    def forward(self, inputs):
        """
        Args:
            inputs (Tensor): input image.
        Returns:
            Tuple[Tensor]: FPN output features, ordered shallow to deep
            (out_dark3, out_dark4, x0).
        """
        # backbone
        out_features = self.backbone(inputs)
        x2, x1, x0 = [out_features[f] for f in self.in_features]
        # yolo branch 1: deepest feature -> 1x1 reduce -> upsample -> concat
        x1_in = self.out1_cbl(x0)
        x1_in = self.upsample(x1_in)
        x1_in = torch.cat([x1_in, x1], 1)
        out_dark4 = self.out1(x1_in)
        # yolo branch 2: fused branch -> 1x1 reduce -> upsample -> concat
        x2_in = self.out2_cbl(out_dark4)
        x2_in = self.upsample(x2_in)
        x2_in = torch.cat([x2_in, x2], 1)
        out_dark3 = self.out2(x2_in)
        outputs = (out_dark3, out_dark4, x0)
        return outputs
# --- source: Megvii-BaseDetection/YOLOX / yolox/models/yolo_fpn.py (python) ---
import csv
import threading

import requests
from bs4 import BeautifulSoup

from info_retriever import get_info

# from sshfs import SSHFileSystem
def get_courses(link):
    """Scrape course detail pages reachable from *link* and write Results.csv.

    The listing page is fetched, "Course" cards are filtered out of all
    product cards, and detail scraping is split across two threads to
    roughly halve the wall time.  Each row comes from ``get_info``.

    :param link: URL of a Coursera collection/listing page
    """
    print(f'initializing Scraping from: {link}')
    site = requests.get(link)
    html = site.content
    # create the beautifulSoup object
    soup = BeautifulSoup(html, "html.parser")
    subjects = soup.find_all(class_='nostyle collection-product-card')
    # Keep only cards that advertise a course (the listing also contains
    # specializations, certificates, etc.).
    courses = []
    for subject in subjects:
        if "Course" in subject.text:
            courses.append(subject)
    # Multithreading the process to lower execution time.
    print('Progress: ')
    info_1 = []
    info_2 = []
    half = len(courses) // 2
    t1 = threading.Thread(target=multi_thread, args=(courses[:half], info_1, "1",))
    t2 = threading.Thread(target=multi_thread, args=(courses[half:], info_2, "2",))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    # Join both lists from threads
    info = info_1 + info_2
    # csv file headers
    fields = ['Course Name', 'First Instructor Name', 'Course Description', '# of students enrolled', '# of ratings']
    # Write to a csv file.  newline='' is required by the csv module;
    # without it every row is followed by a blank line on Windows.
    with open('Results.csv', 'w', newline='') as f:
        write = csv.writer(f)
        write.writerow(fields)
        write.writerows(info)
    # sending file to the server
    # Connect with a password
    # !!!!normally this value shall be stored in .env file for securty for the needs of the porject .env file is not created
    # fs = SSHFileSystem(
    #     '127.0.0.1',
    #     username='90536',
    #     password='ayk12290'
    # )
def multi_thread(courses, info=None, thread=0):
    """Scrape every course in *courses*, appending one result row to *info*.

    :param courses: course card tags whose ``href`` points at a detail page
    :param info: list that collects results; callers pass a shared list so
        rows survive the worker thread.  A fresh list is created when
        omitted -- the original used a mutable default argument
        (``info=[]``), which silently accumulated rows across calls.
    :param thread: label used in the progress output
    :return: the populated *info* list
    """
    if info is None:
        info = []
    for i in range(len(courses)):
        link = f"https://www.coursera.org{courses[i]['href']}"
        print(f'thread {thread} progress is :', i + 1, '/', len(courses))
        info.append(get_info(link=link))
    return info
# --- source: kerembay9/Scraping / Course_finder.py (python) ---
import os
import pathlib
import re
import sys
from math import floor

import mysql.connector
import openai
import pinecone
import tiktoken
from dotenv import load_dotenv
from tqdm.auto import tqdm

# Load environment variables
load_dotenv()

# Pinecone settings
index_name = os.getenv("PINECONE_INDEX_NAME")
upsert_batch_size = 50  # how many embeddings to insert at once in the db

# OpenAI settings
embed_model = "text-embedding-ada-002"  # embedding model compatible with gpt3.5
max_tokens_model = 8191  # max tokens accepted by embedding model
encoding_model = "cl100k_base"  # tokenizer compatible with gpt3.5
# https://platform.openai.com/docs/guides/embeddings/how-can-i-tell-how-many-tokens-a-string-has-before-i-embed-it
def num_tokens_from_string(string: str) -> int:
    """Count how many tokens *string* occupies under the configured encoding."""
    tokenizer = tiktoken.get_encoding(encoding_model)
    return len(tokenizer.encode(string))
# Convert date to string
def date_converter(o):
    """Format a date/datetime as an ISO ``YYYY-MM-DD`` string."""
    return "{:%Y-%m-%d}".format(o)
def get_daily_report_data():
    """Fetch the 100 most recent daily-report tasks from MySQL.

    Returns a list of dicts with ``id``, ``text``, ``report_date`` and
    ``username`` keys, ready to be embedded and upserted into Pinecone.
    Connection settings come from environment variables (see .env).
    """
    # Connect to database
    connection = mysql.connector.connect(
        host=os.getenv('DB_HOST'),
        user=os.getenv('DB_USER'),
        password=os.getenv('DB_PASSWORD'),
        database=os.getenv('DB_NAME'),
        port=3306
    )
    cursor = connection.cursor()
    # Query daily report data
    query = "SELECT dr.id AS daily_report_id, dr.report_date AS report_date, t.content, u.username FROM daily_reports dr JOIN tasks t ON dr.id = t.daily_report_id JOIN users u ON dr.user_id = u.id ORDER BY dr.report_date DESC LIMIT 100"
    cursor.execute(query)
    rows = cursor.fetchall()
    # Convert the rows into the metadata dicts expected by the indexer.
    records = [
        {
            'id': f"daily_report_{report_id}",
            'text': content,
            'report_date': date_converter(report_date),
            'username': username
        }
        for report_id, report_date, content, username in rows
    ]
    # Close connection
    connection.close()
    return records
# Initialize connection to Pinecone.
api_key = os.getenv("PINECONE_API_KEY")
env = os.getenv("PINECONE_ENVIRONMENT")
# Fixed: the keyword was misspelled "enviroment", so the environment setting
# was silently ignored by pinecone.init().
pinecone.init(api_key=api_key, environment=env)
index = pinecone.Index(index_name)
new_data = get_daily_report_data()
print(f"Extracted {len(new_data)} from rs database: {os.getenv('DB_NAME')}")
print(f"Example data: {new_data[1:5]}")
# Create embeddings and upsert the vectors to Pinecone
print(f"Creating embeddings and uploading vectors to database")
for i in tqdm(range(0, len(new_data), upsert_batch_size)):
    # process source text in batches
    i_end = min(len(new_data), i+upsert_batch_size)
    meta_batch = new_data[i:i_end]
    ids_batch = [x['id'] for x in meta_batch]
    texts = [x['text'] for x in meta_batch]
    # compute embeddings using OpenAI API
    embedding = openai.Embedding.create(input=texts, engine=embed_model)
    embeds = [record['embedding'] for record in embedding['data']]
    # clean metadata before upserting
    meta_batch = [{
        'id': x['id'],
        'text': x['text'],
        'report_date': x['report_date'],
        'username': x['username']
    } for x in meta_batch]
    # upsert vectors
    to_upsert = list(zip(ids_batch, embeds, meta_batch))
    index.upsert(vectors=to_upsert)
# Print final vector count
vector_count = index.describe_index_stats()['total_vector_count']
print(f"Database contains {vector_count} vectors.")
# --- source: hapodiv/database-pipeline-gpt-demo / database/index_docs.py (python) ---
import copy
import datetime
import os

from agent.app import App
from agent.master import MasterHandler
from api.capacity_service import Capacity
from api.config_service import Config
from common.password import get_password
from common.step_status import StepStatus
from db.crypto import Crypto
from db.db_operation import DWOperation
from db.db_operation import MSOperation
from log.logger import Logger
from TransferData import TransferData
class Feedback(object):
    def __init__(self, meta, params=None, init_flag=False, logger=None):
        """
        # sync feedback data from RDP side to IRIS(OSA) side by incremental via event_key.
        # 1, only sync those vendor&retailer which applied OSA Service.
        # 2, for Those new vendor&retailer, copy all historical data when initialization.
        :param meta: [mandatory] config data from config.properties file
        :param params: 2 cases here. depends on whether sync rdp feedback for whole RDP or new customer. see below.
            1, rdp_id: if rdp_id was given, then sync all data for this given RDP. otherwise, sync data from all related RDPs.
               Noted: rdp_id will be passed when calling this service via REST API.
            2, vendor_key: mandatory only when init_flag is True.
               retailer_key: mandatory only when init_flag is True
               Noted: These 2 parameters will not be passed from REST API but called directly by deploy scripts.
        :param init_flag:
            if init_flag is True: then only sync feedback data for given vendor & retailer. This is used when introducing new customer.
            if init_flat is False: sync all customers' data from RDP periodically(e.g. sync daily).
        :param logger:
        """
        self.meta = meta
        self._params = {} if params is None else params
        self._rdp_id = self._params.get("rdpId", None)
        self._fact_type = 'fdbk'
        self._init_flag = init_flag
        self._vendor_key = self._params.get("vendor_key", None)
        self._retailer_key = self._params.get("retailer_key", None)
        self._debug = self._params.get('debug', 'N')
        self._default_rdp = "RDP_AUX"
        # One log file per RDP per day.
        self._log_file = './log/sync_fdbk_%s_%s.log' % (self._rdp_id, datetime.datetime.now().strftime('%Y%m%d'))
        self.logger = logger if logger else Logger(log_level="debug", target="console|file",
                                                   vendor_key=-1, retailer_key=-1,
                                                   log_file=self._log_file, sql_conn=None)
        # App-DB (SQL Server) and OSA DW (Vertica) connections.
        self.osa_app_conn = MSOperation(meta=self.meta, logger=self.logger)
        self.osa_dw_conn = DWOperation(meta=self.meta, logger=self.logger)
        # High-water mark of the incremental sync; set while loading data.
        self.max_event_key = None
        # we already know feedback table name of RDP
        self.source_table_rdp = "DS_FACT_FEEDBACK"  # source table in RDP side.
        self.staging_import_table_osa = "STAGE_FACT_FEEDBACK_RDP"  # used to store sync data from RDP table (same structure as table DS_FACT_FEEDBACK)
        self.target_table_osa = "FACT_FEEDBACK"  # final table in OSA side
        self.capacity = Capacity(meta=meta)
        self.dct_sync_data = copy.deepcopy(self.meta)  # required for calling sync_data module
        self.dct_sync_data["meta"] = self.meta  # required for calling sync_data module
        self.dct_sync_data["target_osa_conn"] = self.osa_dw_conn
        self.dct_sync_data["target_dw_schema"] = self.meta['db_conn_vertica_common_schema']
        self.dct_sync_data["target_dw_table"] = self.staging_import_table_osa
        self.dct_sync_data["logger"] = self.logger
        # [True|False(default)] True: direct connection between Vertica clusters. False: using vsql.
        self.dct_sync_data["dw_conn_vertica"] = False
        # self.dct_sync_data["dw_conn_vertica"] = True
        self.transfer = TransferData(dct_sync_data=self.dct_sync_data)
    def _populate_source_config(self, source_config):
        """
        Build self.dct_sync_data["source_config"] and open the source Vertica
        connection (self.dct_sync_data["source_dw"]) from the given RDP config.

        Credentials configured in config.properties take precedence over the
        values embedded in the RDP config; passwords are kept encrypted so
        they can be handed to the sync_data module as-is.
        :param source_config: dict of RDP connection settings (dw.* keys)
        """
        self.logger.debug("The source config is: %s" % source_config)
        _src_config = {}
        # Scratch directory for transfer files, per platform.
        if os.name == 'nt':
            _src_config["temp_file_path"] = "d:"
        elif os.name == 'posix':
            _src_config["temp_file_path"] = "/tmp"
        # Getting user account from config.properties file first.
        if self.meta.get("db_conn_vertica_rdp_username"):
            _src_config["dw.etluser.id"] = self.meta.get("db_conn_vertica_rdp_username")
            if self.meta.get("db_conn_vertica_rdp_password"):
                _src_config["dw.etluser.password"] = self.meta.get("db_conn_vertica_rdp_password")
            else:
                _pmp_pwd = get_password(username=self.meta.get("db_conn_vertica_rdp_username"), meta=self.meta)
                # The pwd should be encrypted in order to: 1, align with else part, 2, pass it to db.sync_data module
                _src_config["dw.etluser.password"] = Crypto().encrypt(_pmp_pwd)
        # if not configed then get them directly from RDP config.
        else:
            _src_config["dw.etluser.id"] = source_config.get("dw.etluser.id")
            # the pwd is encrypted
            _src_config["dw.etluser.password"] = source_config.get("dw.etluser.password")
        # required info for calling sync_data module.
        _src_config["dw.server.name"] = source_config.get("dw.server.name")
        _src_config["dw.db.name"] = source_config.get("dw.db.name")
        _src_config["dw.db.portno"] = source_config.get("dw.db.portno", 5433)
        _src_config["dw.schema.name"] = source_config.get("dw.schema.name")
        self.logger.debug("srouce config is: %s" % _src_config)
        self.dct_sync_data["source_config"] = _src_config
        # Create the connection to RDP Vertica Cluster. which is the source Vertica cluster
        rdp_meta = copy.deepcopy(self.meta)
        tmp_rdp_meta = {'db_conn_vertica_servername': _src_config["dw.server.name"],
                        'db_conn_vertica_port': _src_config["dw.db.portno"],
                        'db_conn_vertica_dbname': _src_config["dw.db.name"],
                        'db_conn_vertica_username': _src_config["dw.etluser.id"],
                        'db_conn_vertica_password': _src_config["dw.etluser.password"],
                        'db_conn_vertica_password_encrypted': "true"
                        }
        rdp_meta.update(tmp_rdp_meta)
        self.logger.debug("rdp config is: %s" % rdp_meta)
        rdp_connection = DWOperation(meta=rdp_meta)
        self.dct_sync_data["source_dw"] = rdp_connection
def main_process(self):
try:
# if not introducing new customer and _rdp_id was given,
# then we will sync all feedback data from given RDP for registered users.
if self._init_flag is False and self._rdp_id:
try:
rdp_config = Config(meta=self.meta, hub_id=self._rdp_id).json_data
if not rdp_config['configs']:
raise Warning("There is no configs returned for RDP: %s."
"Please check if this RDP registered in CP with below URL."
"%s/properties/rdps?factType=fdbk" % (self._rdp_id, self.meta["api_config_str"]))
# exit(StepStatus.SUCCESS.value)
_rdp_schema = rdp_config['configs'].get('dw.schema.name')
self.logger.info("Started to sync data from rdp: %s" % _rdp_schema)
# self.dct_sync_data["source_config"] = rdp_config['configs']
self._populate_source_config(rdp_config['configs'])
self.initialize()
_flag = self.load_data()
if _flag:
# if no data, then no need to process & update variables table.
self.process_data()
sql = """
IF NOT EXISTS(SELECT * FROM VARIABLES WHERE VARIABLE_NAME = '{eventType}')
INSERT INTO VARIABLES (VARIABLE_NAME, VARIABLE_VALUE, PREVIOUS_VALUE, INSERT_TIME, UPDATE_TIME)
VALUES ('{eventType}', '{value}', '', getdate(), getdate())
ELSE
UPDATE VARIABLES
SET PREVIOUS_VALUE = VARIABLE_VALUE, VARIABLE_VALUE = '{value}',UPDATE_TIME = getdate()
WHERE VARIABLE_NAME = '{eventType}'
""".format(eventType=_rdp_schema, value=self.max_event_key)
self.logger.info(sql)
self.osa_app_conn.execute(sql)
self.logger.info("Data sync done for RDP: %s" % _rdp_schema)
except Exception as e:
self.logger.warning(e)
raise
# exit(StepStatus.SUCCESS.value) # exit(0) otherwise Docker container will fail.
# Else we will get all RDPs from REST API: http://10.172.36.75/config/properties/rdps?factType=fdbk
# There could be multi RDPs(e.g. for SVR & WM). if so, loop all RDPs
elif self._init_flag is False and self._rdp_id is None:
try:
rdp_configs = Config(meta=self.meta, rdp_info=True, rdp_fact_type=self._fact_type).json_data
if not rdp_configs:
raise Warning("No feedback related RDP found."
"Please check if any data returned from below URL."
"%s/properties/rdps?factType=fdbk" % (self.meta["api_config_str"]))
# exit(StepStatus.SUCCESS.value)
for rdp_config in rdp_configs:
_rdp_schema = rdp_config['configs'].get('dw.schema.name')
self.logger.info("Started to sync data from rdp: %s" % _rdp_schema)
# self.dct_sync_data["source_config"] = rdp_config['configs']
self._populate_source_config(rdp_config['configs'])
self.initialize()
_flag = self.load_data()
if _flag:
# if no data, then no need to process & update variables table.
self.process_data()
sql = """
IF NOT EXISTS(SELECT * FROM VARIABLES WHERE VARIABLE_NAME = '{eventType}')
INSERT INTO VARIABLES (VARIABLE_NAME, VARIABLE_VALUE, PREVIOUS_VALUE, INSERT_TIME, UPDATE_TIME)
VALUES ('{eventType}', '{value}', '', getdate(), getdate())
ELSE
UPDATE VARIABLES
SET PREVIOUS_VALUE = VARIABLE_VALUE, VARIABLE_VALUE = '{value}',UPDATE_TIME = getdate()
WHERE VARIABLE_NAME = '{eventType}'
""".format(eventType=_rdp_schema, value=self.max_event_key)
self.logger.info(sql)
self.osa_app_conn.execute(sql)
self.logger.info("Data sync done for RDP: %s" % _rdp_schema)
except Exception as e:
self.logger.warning(e)
raise
elif self._init_flag is True:
if self._vendor_key is None or self._retailer_key is None:
self.logger.warning("vendor_key and retailer_key are required when initilize feedback for new customer")
raise ValueError
# getting fdbk related rdps.
try:
rdp_configs = Config(meta=self.meta, rdp_info=True, rdp_fact_type=self._fact_type).json_data
if not rdp_configs:
self.logger.warning("No feedback related RDP found."
"Please check if any data returned from below URL."
"%s/properties/rdps?factType=fdbk" % (self.meta["api_config_str"]))
exit(StepStatus.SUCCESS.value)
fdbk_rdps = [str(rdp_config["rdpId"]).upper() for rdp_config in rdp_configs]
# change table name in case conflict with normal sync process
self.dct_sync_data["target_dw_table"] = "{0}_{1}_{2}".format(self.staging_import_table_osa, self._vendor_key, self._retailer_key)
_silo_config = Config(meta=self.meta, vendor_key=self._vendor_key, retailer_key=self._retailer_key).json_data
_silo_type = _silo_config['configs'].get('etl.silo.type', 'SVR')
_rdp_id = _silo_config['configs'].get('rdp.db.name')
# RDP_AUX is default rdp id for feedback etl on PRODUCTION.
# 1, if there is no RDP for given silo. then exit.
if not _rdp_id or str(_rdp_id).strip() == '':
self.logger.warning("There is no RDP silo configed for the given vendor:%s "
"and retailer:%s. So no need to sync feedback."
% (self._vendor_key, self._retailer_key))
exit(StepStatus.SUCCESS.value)
# 2, Getting configed RDP list, and check if there are feedback related RDPs.
_tmp_rdp_lst = str(_rdp_id).upper().split(sep=",")
_rdp_lst = [_tmp.strip() for _tmp in _tmp_rdp_lst]
# common_rdps is RDP silo configed for syncing feedback data for given silo(vendor&retailer)
common_rdps = list(set(_rdp_lst).intersection(fdbk_rdps))
if common_rdps is None:
self.logger.warning("There is no RDP silo configed for the given vendor:%s "
"and retailer:%s. So no need to sync feedback."
% (self._vendor_key, self._retailer_key))
exit(StepStatus.SUCCESS.value)
# If there is 1 or more than 1 feedback related rdps configed, then loop them to sync feedback data,
# Normally, there should be only 1. or no feedback rdp configed.
for common_rdp in common_rdps:
_rdp_id = common_rdp
self.logger.info("Started to sync data from rdp: %s for given vendor:%s and retailer:%s. "
% (_rdp_id, self._vendor_key, self._retailer_key))
# if RDP is not RDP_AUX, Won't exit but log a warning.
if _rdp_id != self._default_rdp:
self.logger.warning("Please be noted: The RDP is:%s. It is not RDP_AUX." % _rdp_id)
# WM silos are also following above logic.
# all hosted silo are ultilizing RDP_AUX to transfer feedback data. not sure about Walmart.
# if str(_silo_type).upper() in ["WMSSC", "WMCAT", "SASSC", "WMINTL"]:
# _rdp_id = self._default_rdp # WM rdp is RDP_AUX as well?
rdp_config = Config(meta=self.meta, hub_id=_rdp_id).json_data
if not rdp_config['configs']:
self.logger.warning("There is no configs for RDP: %s. Please check following URL:"
"%s/properties/%s/%s" % (_rdp_id, self.meta["api_config_str"], _rdp_id, _rdp_id) )
exit(StepStatus.SUCCESS.value)
_rdp_schema = rdp_config['configs'].get('dw.schema.name')
self.logger.info("Started to init feedback data from rdp: %s for "
"given vendor:%s and retailer:%s " % (_rdp_schema, self._vendor_key, self._retailer_key))
# self.dct_sync_data["source_config"] = rdp_config['configs']
self._populate_source_config(rdp_config['configs'])
self.initialize()
_flag = self.load_data()
if _flag:
# if no data, then no need to process.
self.process_data()
self.logger.info("Data sync done for RDP: %s" % _rdp_id)
except Exception as e:
self.logger.warning(e)
self.logger.warning("Please check if any warning or error messages when doing the initialization!")
finally:
if self.osa_app_conn:
self.osa_app_conn.close_connection()
if self.osa_dw_conn:
self.osa_dw_conn.close_connection()
    def initialize(self):
        """
        Create local temp tables , and DDLs required to process this fact type.

        Drops and recreates the staging table (same layout as the RDP's
        DS_FACT_FEEDBACK) in the common schema; UNSEGMENTED ALL NODES keeps a
        full copy on every Vertica node for this relatively small table.
        :return:
        """
        self.logger.info("Initialize...")
        # recreate this table for every RDP. no need to truncate any longer.
        # sql = "TRUNCATE TABLE {cmnSchema}.{targetTable}"\
        #     .format(cmnSchema=self.dct_sync_data["target_dw_schema"], targetTable=self.staging_import_table_osa)
        # self.logger.info(sql)
        # self.osa_dw.execute(sql)
        sql = """
        --Store data from RDP table.
        DROP TABLE IF EXISTS {cmnSchema}.{importTable};
        CREATE TABLE {cmnSchema}.{importTable}
        (
            EVENT_KEY int NOT NULL,
            RETAILER_KEY int,
            VENDOR_KEY int,
            STORE_VISIT_DATE date,
            PERIOD_KEY int NOT NULL,
            TYPE varchar(1),
            TYPE_DATE varchar(10),
            ALERT_ID int,
            ALERT_TYPE varchar(64),
            MERCHANDISER_STORE_NUMBER varchar(512),
            STORE_ID varchar(512),
            MERCHANDISER_UPC varchar(512),
            INNER_UPC varchar(512),
            MERCHANDISER varchar(100),
            STORE_REP varchar(1000),
            SOURCE varchar(1000),
            BEGIN_STATUS varchar(255),
            ACTION varchar(255),
            FEEDBACK_DESCRIPTION varchar(255),
            FEEDBACK_HOTLINEREPORTDATE date,
            FEEDBACK_ISININVENTORY varchar(5),
            ZIP_CODE varchar(64),
            ARTS_CHAIN_NAME varchar(255),
            UPC_STATUS varchar(255),
            MSI varchar(255)
        )
        UNSEGMENTED ALL NODES;
        """.format(cmnSchema=self.dct_sync_data["target_dw_schema"],
                   importTable=self.dct_sync_data["target_dw_table"])
        self.logger.info(sql)
        self.osa_dw_conn.execute(sql)
def load_data(self):
"""
# Load data from RDP table ds_fact_feedback to local temp tables.
There is an column event_key which is incremental for all customers in ds_fact_feedback table.
we can save the snapshot of this column to variable table, and do the incremental every time based on this column.
There are few cases here:
1, Routinely, There will be a scheduled job to sync the whole feedback data for valid customers from related RDP silo.
And save the snapshot of the event_key from previous loading for next incremental loading.
2, if on-boarding a new vendor & retailer customer. Getting rdp_event_key from variable for related RDP silo. (rdp_event_key is from previous loading)
and then sync feedback data from related RDP silo only for this given customer when event_key < rdp_event_key.
Then case1 will take care the rest of feedback data.
:return:
"""
rdp_schema = self.dct_sync_data["source_config"].get('dw.schema.name')
# rdp_aux.ds_fact_feedback
source_table = "{rdpSchema}.{rdptableName}"\
.format(rdpSchema=rdp_schema,
rdptableName=self.source_table_rdp)
# common.stage_fact_feedback_rdp
target_table = "{dwSchema}.{importTable}"\
.format(dwSchema=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"])
self.logger.info("Ready to load Data from {srouceTable} to {targetTable}"
.format(targetTable=target_table,
srouceTable=source_table))
insert_columns = " EVENT_KEY, RETAILER_KEY, VENDOR_KEY, STORE_VISIT_DATE, PERIOD_KEY, TYPE, TYPE_DATE," \
" ALERT_ID, ALERT_TYPE, MERCHANDISER_STORE_NUMBER, STORE_ID, MERCHANDISER_UPC, INNER_UPC," \
" MERCHANDISER, STORE_REP, SOURCE, BEGIN_STATUS, ACTION, FEEDBACK_DESCRIPTION," \
" FEEDBACK_HOTLINEREPORTDATE, FEEDBACK_ISININVENTORY, ZIP_CODE, ARTS_CHAIN_NAME, UPC_STATUS, MSI "
try:
self.logger.info("Getting the previous Event_key from last run for incremental load.")
_event_sql = "SELECT VARIABLE_VALUE FROM variables " \
"WHERE VARIABLE_NAME = '{rdpName}'".format(rdpName=rdp_schema)
self.logger.info(_event_sql)
event_key = self.osa_app_conn.query_scalar(_event_sql)
self.logger.info("Getting customer info which only applied OSA services as filter")
sql = "SELECT DISTINCT retailer_key, vendor_key FROM AP_ALERT_CYCLE_MAPPING " \
"UNION " \
"SELECT DISTINCT retailer_key, vendor_key FROM AP_ALERT_CYCLE_RC_MAPPING"
self.logger.info(sql)
results = self.osa_app_conn.query(sql)
if not results:
raise Warning("There is no data in table AP_ALERT_CYCLE_MAPPING. Please check sql: %s" % sql)
# exit(StepStatus.SUCCESS.value)
user_filters = ['SELECT ' + str(result.retailer_key) + ',' + str(result.vendor_key) for result in results]
user_filter_str = ' UNION ALL '.join(user_filters)
self.logger.info("Customer filters are: %s" % user_filter_str)
# incremental filter from RDP table
where_sql = "EVENT_KEY > {eventKey} AND SOURCE != 'ARIA' " \
"AND (RETAILER_KEY, VENDOR_KEY) in ({userFilter})"\
.format(eventKey=event_key,
userFilter=user_filter_str)
# TODO2DONE: how to set default value? use -1
# copy all if there is no value in variables table.
if not event_key:
self.logger.warning("There is no value set in variables table for RDP:{name}, "
"So copy the whole table".format(name=rdp_schema))
where_sql = " SOURCE != 'ARIA' AND (RETAILER_KEY, VENDOR_KEY) in ({userFilter})"\
.format(eventKey=event_key,
userFilter=user_filter_str)
event_key = -1 # check if this is the first run.
if self._init_flag is True:
if event_key == -1: # event_key is None
self.logger.warning("There is no event_key logged in variables table for the given RDP: %s."
"So Let's wait for the routine job to sync the whole rdp feedback data"
% rdp_schema)
return False
self.logger.info("Generating init feedback filters")
where_sql = "EVENT_KEY <= {eventKey} AND SOURCE != 'ARIA' " \
"AND (RETAILER_KEY, VENDOR_KEY) in ({userFilter}) " \
"AND RETAILER_KEY={retailerKey} AND VENDOR_KEY={vendorKey} "\
.format(eventKey=event_key,
userFilter=user_filter_str,
retailerKey=self._retailer_key,
vendorKey=self._vendor_key)
self.logger.debug("The filters are: %s" % where_sql)
# form the fetch query from RDP and then Insert into the target table
fetch_query = """
SELECT /*+ label(GX_IRIS_SYNCFEEDBACK)*/ {insertQuery} FROM {sourceTable}
WHERE {whereSql}
""".format(insertQuery=insert_columns,
sourceTable=source_table,
whereSql=where_sql)
self.logger.info("fetch_query is : %s" % fetch_query)
self.logger.info(">>Loading {factType} Data from event_key:{eventKey} start at: {timestamp}<<"
.format(factType=self._fact_type, eventKey=event_key, timestamp=datetime.datetime.now()))
self.dct_sync_data["target_column"] = insert_columns
self.dct_sync_data["source_sql"] = fetch_query
row_count = self.transfer.transfer_data(dct_sync_data=self.dct_sync_data)
self.logger.info(">>Done loaded {cnt} rows from event_key:{eventKey} completed at: {timestamp}<<"
.format(cnt=row_count,
factType=self._fact_type,
eventKey=event_key, timestamp=datetime.datetime.now())
)
# if no data transfered, then update variables with previous value.
sql = "SELECT /*+ label(GX_IRIS_SYNCFEEDBACK)*/ nvl(max(event_key), {oldEventKey}) " \
"FROM {schemaName}.{importTable} "\
.format(schemaName=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"],
oldEventKey=event_key)
self.logger.info(sql)
self.max_event_key = self.osa_dw_conn.query_scalar(sql)
# max_event_key = -1 # testing purpose
if self.max_event_key == -1:
self.logger.warning("There is no feedback data in RDP table: {0}".format(source_table))
return False
return True
except Exception as e:
self.logger.warning(e)
raise
finally:
pass
def process_data(self):
"""
after load_data part completes. sync data from temp table to related schemas.
:return:
"""
try:
self.logger.info("Processing feedback start...")
# loop retailer to insert feedback data
sql = "SELECT DISTINCT retailer_key " \
"FROM {cmnSchema}.{importTable}"\
.format(cmnSchema=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"])
self.logger.info(sql)
retailers = self.osa_dw_conn.query(sql)
if retailers.rowcount == 0:
self.logger.warning("There is no data in table {cmnSchema}.{importTable}."
"It could be no incremental data. Please check fetch_query against RDP database"
.format(cmnSchema=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"]))
for retailer in retailers:
retailer_key = retailer.retailer_key
osa_schema = self.capacity.get_retailer_schema_name(retailer_key)
# Finally, run the sql
msg = "Processing fdbk data within retailer {retailerKey}:{retailerName}"\
.format(retailerKey=retailer_key, retailerName=osa_schema)
self.logger.info(msg)
# Normally, There should NOT be duplicated alert_id transfered by incremental.
# But should consider this case here. Delete existing alertid from target table
# TODO: delete could have performance issue. consider using switch partition
delete_sql = "DELETE FROM {osaSchema}.{targetTable} " \
"WHERE alert_id IN (SELECT alert_id FROM {cmnSchema}.{importTable} )"\
.format(targetTable=self.target_table_osa,
osaSchema=osa_schema,
cmnSchema=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"])
self.logger.info(delete_sql)
self.osa_dw_conn.execute(delete_sql)
# inserting feedback data into final table fact_feedback from processed table.
sql = """
INSERT INTO {osaSchema}.{targetTable}
(EVENT_KEY, RETAILER_KEY, VENDOR_KEY, STORE_VISITED_PERIOD_KEY, PERIOD_KEY,
ALERT_ID, STORE_KEY, MERCHANDISER_STORE_NUMBER,
STORE_ID, ITEM_KEY, MERCHANDISER_UPC, UPC, MERCHANDISER, STORE_REP, SOURCE,
BEGIN_STATUS, ACTION, FEEDBACK_DESCRIPTION,
ON_HAND_PHYSICAL_COUNT, ON_HAND_CAO_COUNT
)
SELECT stage.EVENT_KEY, stage.RETAILER_KEY, stage.VENDOR_KEY,
TO_CHAR(stage.STORE_VISIT_DATE, 'YYYYMMDD')::int AS STORE_VISITED_PERIOD_KEY,
stage.PERIOD_KEY,
stage.ALERT_ID,
store.STORE_KEY AS STORE_KEY,
stage.MERCHANDISER_STORE_NUMBER,
COALESCE(store.STORE_ID , alert.STOREID, stage.STORE_ID) AS STORE_ID,
item.ITEM_KEY AS ITEM_KEY,
stage.MERCHANDISER_UPC,
COALESCE(item.UPC, alert.UPC, stage.INNER_UPC, stage.MERCHANDISER_UPC) AS UPC,
stage.MERCHANDISER, stage.STORE_REP, stage.SOURCE,
stage.BEGIN_STATUS, stage.ACTION, stage.FEEDBACK_DESCRIPTION,
0 AS ON_HAND_PHYSICAL_COUNT,
0 AS ON_HAND_CAO_COUNT
FROM {cmnSchema}.{importTable} stage
LEFT JOIN {osaSchema}.FACT_PROCESSED_ALERT alert
ON stage.alert_id = alert.alert_id AND alert.issuanceid = 0
AND alert.retailer_key = {retailerKey} AND stage.vendor_key = alert.vendor_key
INNER JOIN {cmnSchema}.DIM_PRODUCT item
ON item.retailer_key = {retailerKey} AND alert.vendor_key = item.vendor_key
AND item.item_key = alert.item_key
INNER JOIN {cmnSchema}.DIM_STORE store
ON store.retailer_key = {retailerKey} AND alert.vendor_key = store.vendor_key
AND store.store_key = alert.store_key
WHERE stage.retailer_key = {retailerKey}
""".format(osaSchema=osa_schema,
targetTable=self.target_table_osa,
cmnSchema=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"],
retailerKey=retailer_key)
self.logger.info("SQL used to load data to related schema. %s" % sql)
self.osa_dw_conn.execute(sql)
self.logger.info("Processing feedback ended...")
except Exception as e:
self.logger.warning("Process data for RDP {0} failed: {1}".format(self._rdp_id, e))
raise
finally:
if self._debug.upper() == 'N':
_drop_sql = "DROP TABLE IF EXISTS {schemaName}.{importTable};" \
.format(schemaName=self.dct_sync_data["target_dw_schema"],
importTable=self.dct_sync_data["target_dw_table"])
self.logger.info(_drop_sql)
self.osa_dw_conn.execute(_drop_sql)
class FeedbackNanny(Feedback):
def __init__(self, meta, request_body, logger=None):
logger = logger if logger else Logger(log_level="info", vendor_key=-1, retailer_key=-1, module_name="syncRDPFeedbackNanny")
__debug = request_body.get("debug", 'N')
__log_level = 'DEBUG' if str(__debug).upper() == 'Y' else 'INFO'
logger.set_level(log_level=__log_level)
init_fdbk = False # True is only for initial rdp feedback of new customer
Feedback.__init__(self, meta={**meta}, params={**request_body}, init_flag=init_fdbk, logger=logger)
class FeedbackHandler(MasterHandler):
def set_service_properties(self):
self.service_name = 'syncRDPFeedbackNanny'
class FeedbackApp(App):
'''
This class is just for testing, osa_bundle triggers it somewhere else
'''
def __init__(self, meta):
App.__init__(self, meta=meta, service_bundle_name='syncRDPFeedbackNanny')
if __name__ == '__main__':
#import configparser, sys
#with open("{}/../../../config/config.properties".format(sys.argv[0])) as fp:
# cp = configparser.ConfigParser()
# cp.read_file(fp)
# meta = dict(cp.items("DEFAULT"))
#
## case1: sync all vendors feedback data from RDP.
## params = dict(rdpId="RDP_AUX_CAPABILITY_KATHERINE", debug='Y')
#params = dict(rdpId=None, debug='Y')
#f = Feedback(meta=meta, params=params)
#
## case2: init new customer data for given vendor & retailer
## params = dict(vendor_key=684, retailer_key=158, debug='Y')
## f = Feedback(meta=meta, params=params, init_flag=True)
#
#
#f.main_process()
'''REQUEST BODY
{
"jobId": 1234, # mandatory. passed from JobScheduler
"stepId": 3, # mandatory. passed from JobScheduler
"batchId": 0, # mandatory. passed from JobScheduler
"retry": 0, # mandatory. passed from JobScheduler
"groupName": 22,
"rdpId": "RDP_AUX", # optional - will process data with all feedback related RDPs(get RDPs from cp) if no rdp pa
"debug":"N" # char: optional [Y|N]
}
'''
import os
SEP = os.path.sep
cwd = os.path.dirname(os.path.realpath(__file__))
generic_main_file = cwd + SEP + '..' + SEP + 'main.py'
CONFIG_FILE = cwd + SEP + '..' + SEP + '..' + SEP + 'config' + SEP + 'config.properties'
exec(open(generic_main_file).read())
app = FeedbackApp(meta=meta) #************* update services.json --> syncRDPFeedbackNanny.service_bundle_name to syncRDPFeedbackNanny before running the script
app.start_service()
| kenshinsee/common | script/sync_rdp_feedback/SyncFeedbackFromRDP.py | SyncFeedbackFromRDP.py | py | 34,517 | python | en | code | 0 | github-code | 50 |
34348457966 | """http://practice.geeksforgeeks.org/problems/product-of-primes/0"""
import fileinput
import math
import collections
import functools
inputLines = fileinput.input()
testCases = int(inputLines.readline())
for l in range(testCases):
s, n = list(map(int,inputLines.readline().strip().split()))
root = int(math.sqrt(n+1)) + 1
pri = {i:True for i in range(3,root,2)}
pri[2] = True
out = {i:True for i in range(s,n+1)}
for i in range(3,root):
if i in pri:
for j in range(2*i,root,i):
pri.pop(j,None)
for i in pri:
div = s//i
for j in range(i*div,n+1,i):
if j!= i:
out.pop(j,None)
prod = functools.reduce(lambda x,y : (x*y)%(10**9+7),out)
print(prod)
| dbausher/practice | Algorithms/primeProduct.py | primeProduct.py | py | 767 | python | en | code | 0 | github-code | 50 |
26667792307 | import scrapping
import string
#50 пунктов на странице
#между первой и второй частью разделитель - это номер страницы
URL_GOS_USLUGI_REESTR1 = "http://www.zakupki.gov.ru/epz/contract/quicksearch/search.html?morphology=on&pageNumber="
URL_GOS_USLUGI_REESTR2 ="&sortDirection=true&recordsPerPage=_50&sortBy=PO_DATE_OBNOVLENIJA&fz44=on&priceFrom=0&priceTo=200000000000&contractStageList_0=on&contractStageList_3=on&contractStageList=0%2C3®ionDeleted=true"
#в конце ставится номер заказа
URL_COMMON_INFO = 'http://www.zakupki.gov.ru/epz/contract/contractCard/common-info.html?reestrNumber='
URL_OBJ_INFO = 'http://www.zakupki.gov.ru/epz/contract/contractCard/payment-info-and-target-of-order.html?reestrNumber='
URL_TABLE = 'http://goszakaz.ru/tenders'
#количесвто обрабатываемых страниц
n_pages = 33
#получаем номера заказов
#for i in range(n_pages) :
# page_url = URL_GOS_USLUGI_REESTR1 + i.__str__() + URL_GOS_USLUGI_REESTR2
# nums_of_orders = []
# nums_of_orders.append(scrapping.get_nums_of_orders(page_url))
URL_LIST_1part = 'http://goszakaz.ru'
URL_LIST_2part ='/page'
URL_LIST_3part = '/?order=startdate'
dictionary = {}
dictionary = scrapping.get_links_of_table(URL_TABLE)
orders = []
for val in dictionary :
links = dictionary[val][0]
nums = dictionary[val][2]
for k in range(links.__len__()) :
if (int(nums[k]) > 30):
border = int(nums[k])
pages = border//30
i = 2
while (i < pages ) and ( i < 33) :
str = URL_LIST_1part + links[k] + URL_LIST_2part + i.__str__() + URL_LIST_3part
orders = scrapping.get_orders_names(str)
i +=1
else:
str = URL_LIST_1part + links[k]
orders = scrapping.get_orders_names(str)
f = open(val + '.txt', 'a')
f.write(orders.__str__() + '\n')
f.close() | alexandrbektashev/SimplePython | scraper/main1.py | main1.py | py | 2,021 | python | ru | code | 0 | github-code | 50 |
43105096604 | # Lab 3 GRADED exercises
# Return only this script file
def listInsert (l2,x):
l2.append(x)
l2.sort()
l2.reverse()
return l2
def tupleLast3 (t2):
assert len(t2)>3
return t2[-3]
def str2tuple (s3,s4):
return tuple(s3+s4)
#############################################
# !!! DO NOT MODIFY THE CODE BELOW !!!
#############################################
l2 = [2, 5, 7, 8, 11]
x = 6
print ("The result of listInsert ",listInsert (l2,x))
t2= ("u", "a", "b", "2", "3", "4", "c", "i", "s")
print ("The result of tupleLast3 ",tupleLast3 (t2))
s3 = 'Hello'
s4 = "World"
print ("The result of str2tuple ",str2tuple (s3,s4)) | pendlm1/Python | lab3_graded.py | lab3_graded.py | py | 673 | python | en | code | 0 | github-code | 50 |
11889287468 | import math
from constants import PIXEL_UM_RATIO
# BACTERIA SIMULATION #
NUMBER_BACTERIA = 50
TUMBLE_DIRECTION_CHANGE_SPLIT = 5
# BIOLOGICAL DIMENSIONS #
AVG_BACTERIA_RADIUS = 1 # microns
BACTERIA_RADIUS_PX = AVG_BACTERIA_RADIUS * PIXEL_UM_RATIO
# E. COLI #
E_COLI_RUN_TIME = 0.81 # s
E_COLI_RUN_TIME_UNCERTAINTY = 0.7 # s
E_COLI_TUMBLE_TIME = 0.14 # s
E_COLI_TUMBLE_TIME_UNCERTAINTY = 0.01 # s
E_COLI_RUN_VELOCITY = 11.4 # um/s
E_COLI_RUN_VELOCITY_UNCERTAINTY = 3.4 # um/s
E_COLI_ANGULAR_VELOCITY = math.radians(38 / E_COLI_TUMBLE_TIME)
E_COLI_ANGULAR_VELOCITY_UNCERTAINTY = math.radians(25 / E_COLI_TUMBLE_TIME)
E_COLI_TUMBLE_VELOCITY = E_COLI_RUN_VELOCITY / 5
E_COLI_TUMBLE_VELOCITY_UNCERTAINTY = E_COLI_RUN_VELOCITY_UNCERTAINTY / 5
# M. Marinus #
M_MAR_RUN_TIME = 0.6 # s
M_MAR_RUN_TIME_UNCERTAINTY = 0.2 # s
M_MAR_TUMBLE_TIME = M_MAR_RUN_TIME / 0.74 * 0.26 # s
M_MAR_TUMBLE_TIME_UNCERTAINTY = M_MAR_RUN_TIME_UNCERTAINTY / 0.74 * 0.26 # s
M_MAR_RUN_VELOCITY = 100 # um/s
M_MAR_RUN_VELOCITY_UNCERTAINTY = 15 # um/s
M_MAR_TUMBLE_VELOCITY = M_MAR_RUN_VELOCITY / 5
M_MAR_TUMBLE_VELOCITY_UNCERTAINTY = M_MAR_RUN_VELOCITY_UNCERTAINTY / 5
M_MAR_ANGULAR_VELOCITY = math.radians(360)
M_MAR_ANGULAR_VELOCITY_UNCERTAINTY = math.radians(30)
# Run Time #
SIMULATION_TIME = 30 * 60 # 30 min | dragonmushu/BacteriaMotion | src/simulations/bacteria/constants.py | constants.py | py | 1,301 | python | en | code | 0 | github-code | 50 |
42606774106 | #import packages
import sys
import statistics
import csv
def compute_stats(values):
"""Computes the minimum, maximum, mean and median for a list of values
Parameters
----------
values: a list of the values
Returns
-------
tuple: A tuple of the minimum, maximum, mean and median value of the list
"""
#check if the list is empty
if len(values)==0:
Average =None
maximum =None
minimum =None
median =None
#calculate the statistics
else:
Average =statistics.mean(values)
maximum =max(values)
minimum =min(values)
median =statistics.median(values)
#return a tuple of the statisitcs
tuple=(minimum,maximum,Average,median)
return tuple
def main():
"""Takes a txt file with space seperated columns and creates a list of values
from a specific row then sorts the list.
Parameters
----------
No parameters
Returns
-------
values: a list of the values from a specific colum
"""
#define variable
global values
values=[]
column = int(sys.argv[1])-1
data_file = csv.register_dialect("space",delimiter=' ', skipinitialspace = True)
#add values to list if stdin used
if len(sys.argv)==2:
data = csv.reader(sys.stdin, "space")
for row in data:
values+=[float(row[column])]
#add values to list if stdin is not used
if len(sys.argv)==3:
with open(sys.argv[-1]) as input:
data = csv.reader(input, "space")
for row in data:
values+=[float(row[column])]
#remove the "missing" values
for i in values:
if i==-9999.0:
values.remove(-9999.0)
if i==-99.000:
values.remove(-99.000)
#sort values
values.sort()
print(compute_stats(values))
return values
if __name__=='__main__':
main()
| Abby-w/Python-software-dev-3006- | week2 HW/compute_stats2.py | compute_stats2.py | py | 1,873 | python | en | code | 0 | github-code | 50 |
27962381713 | from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor, Compose
class CIFAR10GAN(CIFAR10):
def __init__(self,
root: str,
class_name: str,
train: bool = True,
transform: Compose = Compose([ToTensor()]),
download: bool = False,
) -> None:
'''
Dataset limiting original CIFAR10 to specified class_name
Attributes
----------
class_name: str
limits records by given class name f.e. cat
root_dir: str
path to CIFAR10 dataset content
train: bool = True
select train or test part of CIFAR10
transform: Compose
set of transformation performed on images
download: bool = False
download dataset from torchvision or used existing data located
in root_dir folder
'''
super().__init__(root = root, train = train, transform = transform, download = download)
self.class_name = class_name
self.class_id = self.class_to_idx[self.class_name]
self.data, self.targets = self._filter_by_class_name()
def _filter_by_class_name(self):
'''
returns data and targets limited to given class_name
'''
# find elements index for class name
elements_indices = [index for index, id in enumerate(self.targets) if id == self.class_id]
# limit data and target
data = [self.data[index] for index in elements_indices]
targets = [self.targets[index] for index in elements_indices]
return data, targets | KonWski/DCGAN_CIFAR10 | dataset.py | dataset.py | py | 1,642 | python | en | code | 1 | github-code | 50 |
40134586100 | import FWCore.ParameterSet.Config as cms
process = cms.Process("rpcDqmClient")
## InputFile = DQM root file path
process.readMeFromFile = cms.EDAnalyzer("ReadMeFromFile",
InputFile = cms.untracked.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/data/Express/121/964/DQM_V0001_R000121964__StreamExpress__BeamCommissioning09-Express-v2__DQM.root'),
)
####################################### DO NOT CHANGE #############################################
process.load("DQM.RPCMonitorClient.RPCDqmClient_cfi")
process.rpcdqmclient.RPCDqmClientList = cms.untracked.vstring("RPCBxTest")
###################################################################################################
## RMSCut = maximum RMS allowed
## EntriesCut = minimum entries allowed
## DistanceFromZeroBx = maximum distance from BX 0 in absolute value (Rolls that will be written in file)
process.rpcdqmclient.RMSCut = cms.untracked.double(1.1)
process.rpcdqmclient.EntriesCut = cms.untracked.int32(10)
process.rpcdqmclient.DistanceFromZeroBx = cms.untracked.double(1.5)
####################################### DO NOT CHANGE #############################################
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1))
process.load("Geometry.MuonCommonData.muonIdealGeometryXML_cfi")
process.load("Geometry.RPCGeometry.rpcGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.dqmEnv.subSystemFolder = 'RPC'
process.dqmSaver.convention = 'Online'
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('ERROR')
),
debugModules = cms.untracked.vstring('rpcbxtest')
)
process.p = cms.Path(process.readMeFromFile*process.rpcdqmclient*process.dqmEnv*process.dqmSaver)
####################################################################################################
| cms-sw/cmssw | DQM/RPCMonitorClient/test/rpcBXStudies.py | rpcBXStudies.py | py | 2,251 | python | en | code | 985 | github-code | 50 |
36488086540 | def num_unique_emails(emails):
res_emails = set()
for email in emails:
local, domain = email.split("@")
local = local.split("+")[0]
local = local.replace(".", "")
res_emails.add(local + "@" + domain)
return len(res_emails) | emilycheera/coding-challenges | unique_emails.py | unique_emails.py | py | 281 | python | en | code | 1 | github-code | 50 |
11702789695 | """
Created on Tue Sep 17 12:10:19 2015
@author: Max W. Y. Lam
"""
import sys
sys.path.append("../")
from models import basketball_model
while(1):
bas = basketball_model()
bas.load_data()
bas.train_winning_team_model()
bas.train_player_models()
| MaxInGaussian/TLGProb | experiment-up-to-date/auto_train_model.py | auto_train_model.py | py | 263 | python | en | code | 2 | github-code | 50 |
24958859599 | from PRP import PRPReader
from .GeomTable import GeomTable
from .GeomStats import GeomStats
from .GeomHeader import GeomHeader
from .GeomPropertiesVisitor import GeomPropertiesVisitor
from GMS.TDB.TypeDataBase import TypeDataBase
from typing import Optional, Any
import logging
import struct
import json
import zlib
class GameScene:
def __init__(self, gms_path: str, buf_path: str, prp_path: str, tdb_path: str):
self._gms_path: str = gms_path
self._buf_path: str = buf_path
self._prp_path: str = prp_path
self._tdb_path: str = tdb_path
self._gms_buffer: bytes = bytes()
self._buf_buffer: bytes = bytes()
self._gms_geom_table: Optional[GeomTable] = None
self._gms_geom_stats: Optional[GeomStats] = None
self._prp_reader: PRPReader = PRPReader(prp_path)
self._tdb: TypeDataBase = TypeDataBase(tdb_path)
self._scene_props: Any = None
def prepare(self) -> bool:
# Read GMS
try:
# Load & decompress GMS body
with open(self._gms_path, "rb") as gms_file:
whole_gms: bytes = gms_file.read()
uncompressed_size, buffer_size, is_not_compressed = struct.unpack('<iib', whole_gms[0:9])
is_compressed = not is_not_compressed
if is_compressed:
real_size: int = (uncompressed_size + 15) & 0xFFFFFFF0
self._gms_buffer = zlib.decompress(whole_gms[9:], wbits=-15, bufsize=real_size)
else:
self._gms_buffer = whole_gms[9:]
except Exception as of_ex:
print(f"Failed to open GMS file {self._gms_path}. Reason: {of_ex}")
return False
# Read BUF
try:
with open(self._buf_path, "rb") as buf_file:
self._buf_buffer = buf_file.read()
except Exception as of_ex:
print(f"Failed to open BUF file {self._buf_path}. Reason: {of_ex}")
self._gms_buffer = bytes()
return False
# Read properties
try:
self._prp_reader.parse()
except Exception as e:
print(f"Failed to prepare PRP file {self._prp_path}. Reason: {e}")
return False
# Prepare types database
if not self._tdb.load():
print(f"Failed to load types database from file {self._tdb_path}")
return False
# Prepare GMS body
return self._prepare_gms()
def dump(self, out_file: str) -> bool:
if self._scene_props is None:
return False
try:
with open(out_file, "w") as out_scene_file:
print("Dumping to json... (it's very slow process, cuz Python is so stupid)")
scene_dump: dict = dict()
scene_dump["flags"] = self._prp_reader.flags
scene_dump["is_raw"] = self._prp_reader.is_raw
scene_dump["defines"] = [x.__dict__() for x in self._prp_reader.definitions]
scene_dump["scene"] = self._scene_props
json.dump(scene_dump, out_scene_file, indent=2)
print(f"Scene dump saved to file {out_file} successfully!")
return True
except IOError as ioe:
print(f"Failed to save scene file to {out_file}. IOError: {ioe}")
return False
@property
def properties(self) -> PRPReader:
return self._prp_reader
@property
def geoms(self) -> [GeomHeader]:
return self._gms_geom_table.entries
@property
def geom_stats(self) -> GeomStats:
return self._gms_geom_stats
@property
def type_db(self) -> TypeDataBase:
return self._tdb
def _prepare_gms(self) -> bool:
# Load entries
self._gms_geom_table = GeomTable(self._gms_buffer, self._buf_buffer)
self._gms_geom_stats = GeomStats(self._gms_buffer)
# Load properties for each entry
visitor: GeomPropertiesVisitor = GeomPropertiesVisitor(self.geoms, self.properties)
visited_geoms = visitor.visit(self.type_db, 'ROOT', GeomPropertiesVisitor.ZROOM)
print(f" --- DECOMPILE FINISHED ({len(self.geoms)} GEOMS) --- ")
print(f" Ignored instructions: {visitor.total_instructions - visitor.current_instruction - 1} (0 - 1 is OK; More - FAILURE)")
self._scene_props = visited_geoms
return True
| ReGlacier/HBM_GMSTool | GMS/GameScene.py | GameScene.py | py | 4,427 | python | en | code | 1 | github-code | 50 |
15799248202 | import os, sys, logging, discord, platform, simplimod
from dotenv import load_dotenv
print(f"""
_____ _ ___ __ ___ __
/ ___/(_)___ ___ ____ / (_) |/ /___ ____/ /
\__ \/ / __ `__ \/ __ \/ / / /|_/ / __ \/ __ /
___/ / / / / / / / /_/ / / / / / / /_/ / /_/ /
/____/_/_/ /_/ /_/ .___/_/_/_/ /_/\____/\__,_/
/_/
/_/ {simplimod.__version__}
Copyright © 2022 Rafael Galvan
discord.py {discord.__version__} by rapptz
python-dotenv by Saurabh Kumar
{platform.system()} {platform.release()} {os.name}
""")
load_dotenv()
log_channel = os.getenv("LOG_CHANNEL")
log_level = os.getenv("LOG_LEVEL")
logger = logging.getLogger("simplimod")
logger.setLevel(logging.DEBUG)
log_formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(name)s | %(message)s')
log_stream_handler = logging.StreamHandler(sys.stdout)
log_stream_handler.setLevel(logging.DEBUG)
log_stream_handler.setFormatter(log_formatter)
log_file_handler = logging.FileHandler('simplimod.log')
log_file_handler.setLevel(logging.DEBUG)
log_file_handler.setFormatter(log_formatter)
logger.addHandler(log_stream_handler)
logger.addHandler(log_file_handler)
app_name = os.getenv("APP_NAME")
app_debug = os.getenv("APP_DEBUG")
class SimpliMod(discord.Client):
async def on_ready(self):
logging.info('')
async def on_message(self, message):
pass
intents = discord.Intents.all()
if __name__ == '__main__':
client = SimpliMod(intents)
client.run() | Zentro/SimpliMod | simplimod.py | simplimod.py | py | 1,596 | python | en | code | 0 | github-code | 50 |
74775264796 | # File: api_search_terms.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from api_classes.api_caller import ApiCaller
class ApiSearchTerms(ApiCaller):
endpoint_url = '/search/terms'
endpoint_auth_level = ApiCaller.CONST_API_AUTH_LEVEL_RESTRICTED
request_method_name = ApiCaller.CONST_REQUEST_METHOD_POST
params_map = {
'file_type_substring': 'filetype_desc',
'environment_id': 'env_id',
'av_detection': 'av_detect',
'av_family_substring': 'vx_family',
'hashtag': 'tag',
'similar_samples': 'similar_to',
'imphash': 'imp_hash',
'file_type': 'filetype',
'file_name': 'filename',
}
verdict_map = {
'whitelisted': 1,
'no verdict': 2,
'no specific threat': 3,
'suspicious': 4,
'malicious': 5
}
def map_params(self, params):
for old, new in self.params_map.iteritems():
if old in params:
params[new] = params[old]
del params[old]
if 'verdict' in params:
params['verdict'] = self.verdict_map[params['verdict']]
return params
| phantomcyber/phantom-apps | Apps/phvxstream/api_classes/api_search_terms.py | api_search_terms.py | py | 1,185 | python | en | code | 81 | github-code | 50 |
12712376360 | # -*- coding: utf-8 -*-
"""
@author: japeach
Conversion of TELMOS2_v2.2 vb scripts
"""
from typing import List, Union
import numpy as np
import pandas as pd
def odfile_to_matrix(in_file: str,
num_columns: int = 1,
delimiter: str = ",",
header: bool = None
) -> np.array:
# Assumes that dat is ordered
data = pd.read_csv(in_file, sep=delimiter, index_col=[0, 1], header=header)
return_data = []
for col in range(num_columns):
return_data.append(np.array(data[col + 2].unstack()))
if num_columns > 1:
return return_data
else:
return return_data[0]
def matrix_to_odfile(data: Union[np.array, List[np.array]],
out_file: str,
num_columns: int = 1,
delimiter: str = ","
) -> None:
# data is a NumPy array or list of NumPy arrays
def stack_matrix(matrix):
df = pd.DataFrame(matrix).stack().reset_index()
df.loc[:, :"level_1"] += 1
return df
if num_columns > 1:
dfs = []
for matrix in data:
dfs.append(stack_matrix(matrix))
df = pd.concat([dfs[0].loc[:, :"level_1"]] + [x[0]
for x in dfs], axis=1)
else:
df = stack_matrix(data)
df.to_csv(out_file, index=None, columns=None, header=None)
| TransportScotland/tmfs18-trip-end-model | data_functions.py | data_functions.py | py | 1,494 | python | en | code | 3 | github-code | 50 |
70071799837 | def main():
positions = readFile(input().strip())
if positions is None:return -1
rev_pos = [reverseOrder(position) for position in positions]
ranks = [getRanks(lst) for lst in rev_pos]
with open("output.txt","w") as file:
for line in ranks:
print(line)
file.write(str(line)+"\n")
def getRanks(lst):
l = len(lst)
ranks = [i for i in range(1,l+1)]
temp = ranks.copy()
for i in range(l-1,-1,-1):
new_pos = i+lst[i]
temp.insert(new_pos,temp.pop(i))
return temp
#print(getRanks([1,1,0,0]))
def reverseOrder(position):
revLst = position.copy()
for i in range(len(position)):
temp = revLst.pop(i)
new_pos = i-temp
revLst.insert(new_pos,temp)
return revLst
#print(reverseOrder([0,1,2,0,1]))
def readFile(inpFile):
try:
with open(inpFile) as file:
pos = [list(map(int,line.strip().split())) for line in file]
except Exception as e:
print(e)
return
return pos
main()
| samitha278/UoM-Labs | Programming Assignment 2/uom 2018 pp2/uom 2018 pp2 8/uom 2018 pp2 8.py | uom 2018 pp2 8.py | py | 1,217 | python | en | code | 0 | github-code | 50 |
18246443346 | #!/usr/bin/python # This is client.py file
import socket # Import socket module
import time
s = socket.socket() # Create a socket object
host = "192.168.43.169" # Get local machine name
port = 12345 # port
s.connect((host, port))
while True:
file = open("/home/pi/data.txt","r+")
mesaj = file.read()
s.send(mesaj.encode('utf-8'))
time.sleep(0.01)
s.close()
time.sleep(1)
| sertugan/PID-position-control-TCP | TCP.py | TCP.py | py | 471 | python | en | code | 0 | github-code | 50 |
39975316537 | from ..Classes import MathSpec
from typing import List, TypedDict
def write_out_space(space: TypedDict) -> str:
    """Render one space as an HTML fragment: an <h3> name plus its annotated fields."""
    # Fixed: pair each field with its type via .items() -- the original zipped
    # .keys() with .values(), which only works because both views share order.
    annotations = space.__annotations__
    fields = ",<br/>".join(
        "{}: {}".format(field, typ.__name__) for field, typ in annotations.items()
    )
    return "<h3>{}</h3><p>{{{}}}</p>".format(space.__name__, fields)
def write_out_spaces(ms: MathSpec, spaces: List[str]) -> str:
    """Render the named spaces of *ms* as one HTML section."""
    sections = [write_out_space(ms.spaces[name]) for name in spaces]
    return "<h2>Spaces</h2>" + "".join(sections)
26811048341 | """Custom (partially nested) dataclasses describing configurations of individual components."""
# pylint: disable=C0103
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
from ecgan.config.nested_dataclass import nested_dataclass
from ecgan.utils.custom_types import (
DiscriminationStrategy,
LatentDistribution,
MetricOptimization,
SamplingAlgorithm,
TrackerType,
Transformation,
WeightInitialization,
)
from ecgan.utils.miscellaneous import generate_seed
@dataclass
class OptimizerConfig:
"""Type hints for Optimizer dicts."""
_name = 'optimizer'
NAME: str
LR: float # Learning rate
WEIGHT_DECAY: Optional[float] = None
MOMENTUM: Optional[float] = None
DAMPENING: Optional[float] = None
BETAS: Optional[Tuple[float, float]] = None
EPS: Optional[float] = None
ALPHA: Optional[float] = None
CENTERED: Optional[bool] = None
@nested_dataclass
class InverseModuleConfig:
"""Type hints for the module config of an inverse mapping module."""
KERNEL_SIZES: List[int]
LOSS: str
NAME: str
OPTIMIZER: OptimizerConfig
@nested_dataclass
class ReconstructionConfig:
"""Type hints for ReconstructionType dicts."""
STRATEGY: str
@nested_dataclass
class EmbeddingConfig:
"""Type hints for ReconstructionType dicts."""
CREATE_UMAP: bool
LOAD_PRETRAINED_UMAP: bool
@nested_dataclass
class LatentWalkReconstructionConfig(ReconstructionConfig):
"""Type hints for latent walk reconstructions."""
MAX_RECONSTRUCTION_ITERATIONS: int
EPSILON: float
LATENT_OPTIMIZER: OptimizerConfig
CRITERION: str
ADAPT_LR: bool
LR_THRESHOLD: float
VERBOSE_STEPS: Optional[int] = None
@dataclass
class LossConfig:
"""Type hints for a generic loss configuration."""
NAME: str
GRADIENT_PENALTY_WEIGHT: Optional[float] = None
CLIPPING_BOUND: Optional[float] = None
REDUCTION: Optional[str] = None
@dataclass
class BaseCNNConfig:
"""Generalized configuration of an CNN module."""
HIDDEN_CHANNELS: List[int]
@dataclass
class BaseRNNConfig:
"""Generalized configuration of an RNN module."""
HIDDEN_DIMS: int # Amount of layers
HIDDEN_SIZE: int # Size of each layer
@dataclass
class TrackingConfig:
"""Config for tracking and logging information."""
TRACKER_NAME: str
ENTITY: str
PROJECT: str
EXPERIMENT_NAME: str
LOCAL_SAVE: bool
SAVE_PDF: bool
S3_CHECKPOINT_UPLOAD: bool # Currently only supported for W&B tracker
LOG_LEVEL: str = 'info'
@property
def tracker_name(self) -> TrackerType:
return TrackerType(self.TRACKER_NAME)
@nested_dataclass
class ExperimentConfig:
"""
Parameters regarding the experiment itself.
Includes information on the experiment, the used dataset and the directory from where the dataset is loaded.
"""
_name = 'experiment'
TRACKER: TrackingConfig
DATASET: str
MODULE: str
LOADING_DIR: str
TRAIN_ON_GPU: bool
@staticmethod
def configure( # pylint: disable=R0913
entity: str,
project: str,
experiment_name: str,
module: str,
dataset: str,
tracker: str = TrackerType.LOCAL.value,
local_save: bool = False,
save_pdf: bool = False,
loading_dir: str = 'data',
train_on_gpu: bool = True,
s3_checkpoint_upload: bool = False,
log_level: str = 'info',
) -> Dict:
"""Return a default experiment configuration."""
return {
'experiment': {
'TRACKER': {
'TRACKER_NAME': tracker,
'PROJECT': project,
'EXPERIMENT_NAME': experiment_name,
'ENTITY': entity,
'LOCAL_SAVE': local_save,
'SAVE_PDF': save_pdf,
'S3_CHECKPOINT_UPLOAD': s3_checkpoint_upload,
'LOG_LEVEL': log_level,
},
'MODULE': module,
'DATASET': dataset,
'LOADING_DIR': loading_dir,
'TRAIN_ON_GPU': train_on_gpu,
}
}
@property
def name(self):
return self._name
@dataclass
class PreprocessingConfig:
"""Create a preprocessing config object."""
_name = 'preprocessing'
LOADING_DIR: str
NUM_WORKERS: int
WINDOW_LENGTH: int
WINDOW_STEP_SIZE: int
RESAMPLING_ALGORITHM: SamplingAlgorithm
TARGET_SEQUENCE_LENGTH: int
LOADING_SRC: Optional[str]
NUM_SAMPLES: int
@staticmethod
def configure(
loading_src: Optional[str],
target_sequence_length: int,
loading_dir: str = 'data',
num_workers: int = 4,
window_length: int = 0,
window_step_size: int = 0,
resampling_algo: str = 'lttb',
num_samples: int = 0,
):
"""Return a default preprocessing configuration."""
return {
'preprocessing': {
'LOADING_DIR': loading_dir,
'LOADING_SRC': loading_src,
'NUM_WORKERS': num_workers,
'WINDOW_LENGTH': window_length,
'WINDOW_STEP_SIZE': window_step_size,
'RESAMPLING_ALGORITHM': resampling_algo,
'TARGET_SEQUENCE_LENGTH': target_sequence_length,
'NUM_SAMPLES': num_samples,
}
}
@property
def name(self):
return self._name
@property
def resampling_algorithm(self) -> SamplingAlgorithm:
return SamplingAlgorithm(self.RESAMPLING_ALGORITHM)
@dataclass
class SyntheticPreprocessingConfig(PreprocessingConfig):
"""Preprocessing configuration for synthetic datasets."""
RANGE: Tuple[int, int]
ANOMALY_PERCENTAGE: float
NOISE_PERCENTAGE: float
SYNTHESIS_SEED: int
@staticmethod
def configure( # pylint: disable=R0913, W0221
loading_src: Optional[str],
target_sequence_length: int,
loading_dir: str = 'data',
num_workers: int = 4,
window_length: int = 0,
window_step_size: int = 0,
resampling_algo: str = 'lttb',
num_samples: int = 0,
data_range: Tuple[int, int] = (0, 25),
anomaly_percentage: float = 0.2,
noise_percentage: float = 0.5,
synthesis_seed: int = 1337,
) -> Dict:
"""Provide a default configuration for a synthetic dataset."""
result_dict: Dict = PreprocessingConfig.configure(
loading_src=loading_src,
target_sequence_length=target_sequence_length,
loading_dir=loading_dir,
num_workers=num_workers,
window_length=window_length,
window_step_size=window_step_size,
resampling_algo=resampling_algo,
num_samples=num_samples,
)
update_dict: Dict = {
"RANGE": data_range,
"ANOMALY_PERCENTAGE": anomaly_percentage,
"NOISE_PERCENTAGE": noise_percentage,
"SYNTHESIS_SEED": synthesis_seed,
}
result_dict['preprocessing'].update(update_dict)
return result_dict
@dataclass
class SinePreprocessingConfig(SyntheticPreprocessingConfig):
"""Preprocessing config for the synthetic sine dataset."""
AMPLITUDE: float = 3.0
FREQUENCY: float = 3.0
PHASE: float = 5.0
VERTICAL_TRANSLATION: float = 1.0
@staticmethod
def configure( # pylint: disable=W0221, R0913
loading_src: Optional[str],
target_sequence_length: int,
loading_dir: str = 'data',
num_workers: int = 4,
window_length: int = 0,
window_step_size: int = 0,
resampling_algo: str = 'lttb',
num_samples: int = 0,
data_range: Tuple[int, int] = (0, 25),
anomaly_percentage: float = 0.2,
noise_percentage: float = 0.5,
synthesis_seed: int = 1337,
amplitude: float = 3,
frequency: float = 3,
phase: float = 5,
vertical_translation: float = 1,
) -> Dict:
"""Return the default configuration for the sine dataset."""
result_dict = SyntheticPreprocessingConfig.configure(
loading_src=loading_src,
target_sequence_length=target_sequence_length,
loading_dir=loading_dir,
num_workers=num_workers,
window_length=window_length,
window_step_size=window_step_size,
resampling_algo=resampling_algo,
num_samples=num_samples,
data_range=data_range,
anomaly_percentage=anomaly_percentage,
noise_percentage=noise_percentage,
synthesis_seed=synthesis_seed,
)
update_dict = {
"AMPLITUDE": amplitude,
"FREQUENCY": frequency,
"PHASE": phase,
"VERTICAL_TRANSLATION": vertical_translation,
}
result_dict['preprocessing'].update(update_dict)
return result_dict
@dataclass
class TrainerConfig:
    """Used to initialize a config for training."""

    # Section key under which this config lives in the assembled config dict.
    _name = "trainer"

    NUM_WORKERS: int
    CHANNELS: Union[int, List[int]]
    EPOCHS: int
    BATCH_SIZE: int
    TRANSFORMATION: str
    SPLIT_PATH: str
    SPLIT_METHOD: str
    SPLIT: Tuple[float, float]
    TRAIN_ONLY_NORMAL: bool
    CROSS_VAL_FOLDS: int
    CHECKPOINT_INTERVAL: int
    SAMPLE_INTERVAL: int
    BINARY_LABELS: bool
    MANUAL_SEED: int

    @staticmethod
    def configure(  # pylint: disable=R0913
        transformation: Transformation = Transformation.NONE,
        num_workers: int = 4,
        epochs: int = 500,
        batch_size: int = 64,
        split_path: str = 'split.pkl',
        split_method: str = 'random',
        split: Tuple[float, float] = (0.85, 0.15),
        cross_val_folds: int = 5,
        checkpoint_interval: int = 10,
        sample_interval: int = 1,
        train_only_normal: bool = True,
        binary_labels: bool = True,
        channels: Union[int, List[int]] = 0,
        manual_seed: Optional[int] = None,
    ):
        """Return a default configuration for the trainer.

        Fix: *manual_seed* now defaults to None and a fresh seed is drawn per
        call. The previous default ``generate_seed()`` was evaluated once at
        import time, so every call without an explicit seed silently shared
        the same value.
        """
        if manual_seed is None:
            manual_seed = generate_seed()
        return {
            'trainer': {
                'NUM_WORKERS': num_workers,
                'CHANNELS': channels,
                'EPOCHS': epochs,
                'BATCH_SIZE': batch_size,
                'TRANSFORMATION': transformation.value,
                'SPLIT_PATH': split_path,
                'SPLIT_METHOD': split_method,
                'SPLIT': split,
                'CROSS_VAL_FOLDS': cross_val_folds,
                'CHECKPOINT_INTERVAL': checkpoint_interval,
                'SAMPLE_INTERVAL': sample_interval,
                'TRAIN_ONLY_NORMAL': train_only_normal,
                'BINARY_LABELS': binary_labels,
                'MANUAL_SEED': manual_seed,
            }
        }

    @property
    def name(self):
        """Section key of this config ('trainer')."""
        return self._name

    @property
    def transformation(self) -> Transformation:
        """Return an instance of the internal enum class `Transformation`."""
        return Transformation(self.TRANSFORMATION)
@dataclass
class WeightInitializationConfig:
"""Base weight initialization config."""
NAME: str
@property
def weight_init_type(self) -> WeightInitialization:
return WeightInitialization(self.NAME)
@dataclass
class NormalInitializationConfig(WeightInitializationConfig):
"""Base weight initialization config for drawing from a normal distribution."""
MEAN: float
STD: float
@dataclass
class UniformInitializationConfig(WeightInitializationConfig):
"""Base weight initialization config for drawing from a uniform distribution."""
LOWER_BOUND: float
UPPER_BOUND: float
@nested_dataclass
class ModuleConfig:
"""Generalized configuration of a module."""
_name = "module"
@property
def name(self):
return self._name
@nested_dataclass
class BaseNNConfig(ModuleConfig):
"""Generic neural network configuration."""
OPTIMIZER: OptimizerConfig
LOSS: LossConfig
LAYER_SPECIFICATION: Union[BaseCNNConfig, BaseRNNConfig]
WEIGHT_INIT: Union[WeightInitializationConfig, NormalInitializationConfig, UniformInitializationConfig]
SPECTRAL_NORM: bool = False
INPUT_NORMALIZATION: Optional[str] = None
@nested_dataclass
class AutoEncoderConfig(ModuleConfig):
"""Generalized configuration of a AE module."""
LATENT_SIZE: int
ENCODER: BaseNNConfig
DECODER: BaseNNConfig
TANH_OUT: bool
LATENT_SPACE: str
@property
def latent_distribution(self) -> LatentDistribution:
"""Convenience conversion to internal enum type."""
return LatentDistribution(self.LATENT_SPACE)
@nested_dataclass
class VariationalAutoEncoderConfig(AutoEncoderConfig):
"""Generalized configuration of a VAE module."""
KL_BETA: float
@nested_dataclass
class GeneratorConfig(BaseNNConfig):
"""Generic generator configuration."""
TANH_OUT: bool = False
@nested_dataclass
class GANModuleConfig(ModuleConfig):
"""Generalized configuration of a GAN module."""
LATENT_SIZE: int
GENERATOR: GeneratorConfig
DISCRIMINATOR: BaseNNConfig
GENERATOR_ROUNDS: int
DISCRIMINATOR_ROUNDS: int
LATENT_SPACE: str
@property
def latent_distribution(self) -> LatentDistribution:
"""Convenience conversion to internal enum type."""
return LatentDistribution(self.LATENT_SPACE)
@nested_dataclass
class EncoderGANConfig(GANModuleConfig):
"""Generalized configuration for the BeatGAN module."""
ENCODER: BaseNNConfig
@nested_dataclass
class VAEGANConfig(EncoderGANConfig):
"""VAEGAN config."""
KL_WARMUP: int
KL_ANNEAL_ROUNDS: int
KL_BETA: int
@nested_dataclass
class AdExperimentConfig:
"""Basic experimental settings for the anomaly detection process."""
_name = 'ad_experiment'
TRACKER: TrackingConfig
RUN_URI: str
RUN_VERSION: str
FOLD: int
SAVE_DIR: str
@property
def name(self):
return self._name
@dataclass
class DetectionConfig:
"""Generalized configuration of a detection object."""
_name = "detection"
DETECTOR: str
BATCH_SIZE: int
NUM_WORKERS: int
AMOUNT_OF_RUNS: int
SAVE_DATA: bool
@property
def name(self) -> str:
return self._name
@nested_dataclass
class ReconstructionDetectionConfig(DetectionConfig):
"""Generalized configuration of a reconstruction based detection config."""
EMBEDDING: EmbeddingConfig
@nested_dataclass
class GANDetectorConfig(ReconstructionDetectionConfig):
"""Base config for GAN based anomaly detection."""
DISCRIMINATION_STRATEGY: str
AD_SCORE_STRATEGY: str
NORMALIZE_ERROR: bool
RECONSTRUCTION: Union[ReconstructionConfig, LatentWalkReconstructionConfig]
@property
def ad_score_strategy(self) -> MetricOptimization:
return MetricOptimization(self.AD_SCORE_STRATEGY)
@property
def discrimination_strategy(self) -> DiscriminationStrategy:
return DiscriminationStrategy(self.DISCRIMINATION_STRATEGY)
@nested_dataclass
class InverseDetectorConfig(GANDetectorConfig):
"""Config for anomaly detectors utilizing GAN inversion."""
RECONSTRUCTION: ReconstructionConfig
INVERSE_MAPPING_URI: Optional[str]
@nested_dataclass
class GANLatentWalkConfig(GANDetectorConfig):
"""Config for anomaly detectors utilizing latent walks to approximate the reconstructed series."""
RECONSTRUCTION: LatentWalkReconstructionConfig
INVERSE_MAPPING_URI: Optional[str]
| emundo/ecgan | ecgan/config/dataclasses.py | dataclasses.py | py | 15,605 | python | en | code | 8 | github-code | 50 |
11440208029 | #!/usr/bin/env python3
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import os
from word2doc.wikiextractor import wiki_extractor
from word2doc.retriever import build_db
from word2doc.retriever import build_tfidf
from word2doc.util import constants
from word2doc.util import logger
from word2doc.util import init_project
logger = logger.get_logger()
# ------------------------------------------------------------------------------
# Data pipeline that builds the processes the data for the retriever.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # CLI for the retriever data pipeline: wiki dump -> sqlite db -> tf-idf.
    parser = argparse.ArgumentParser()
    parser.add_argument('data_path', type=str, help='/path/to/wikidump')
    parser.add_argument('--preprocess', type=str, default=None,
                        help=('File path to a python module that defines '
                              'a `preprocess` function'))
    parser.add_argument('--ngram', type=int, default=2,
                        help=('Use up to N-size n-grams '
                              '(e.g. 2 = unigrams + bigrams)'))
    parser.add_argument('--hash-size', type=int, default=int(math.pow(2, 24)),
                        help='Number of buckets to use for hashing ngrams')
    parser.add_argument('--tokenizer', type=str, default='simple',
                        help=("String option specifying tokenizer type to use "
                              "(e.g. 'corenlp')"))
    parser.add_argument('--num-workers', type=int, default=None,
                        help='Number of CPU processes (for tokenizing, etc)')
    args = parser.parse_args()
    # Init project (worker pool sizing etc.)
    init_project.init(args.num_workers)
    save_path = constants.get_db_path()
    # Extract text from wikipedia dump; skipped when the extract dir exists.
    if not os.path.isdir(constants.get_wiki_extract_path()):
        wiki_extractor.extract_wiki(args.data_path, output=constants.get_wiki_extract_path(), json=True, references=True)
    # Build database if it does not already exist
    if not os.path.isfile(save_path):
        logger.info('No database found. Building database...')
        build_db.store_contents(constants.get_wiki_extract_path(), save_path, args.preprocess)
    else:
        logger.info('Existing database found. Using database.')
    # Calculate tfidf data: raw counts -> tf-idf matrix -> word-doc freqs.
    logger.info('Counting words...')
    count_matrix, doc_dict = build_tfidf.get_count_matrix(
        args, 'sqlite', {'db_path': save_path}
    )
    logger.info('Making tfidf vectors...')
    tfidf = build_tfidf.get_tfidf_matrix(count_matrix)
    logger.info('Getting word-doc frequencies...')
    freqs = build_tfidf.get_doc_freqs(count_matrix)
    # Save to disk
    build_tfidf.save_tfidf(args, tfidf, freqs, doc_dict)
    logger.info('Done.')
| jundl77/word2doc | src/build-doc-retriever-model.py | build-doc-retriever-model.py | py | 2,859 | python | en | code | 2 | github-code | 50 |
# NOTE(review): the leading "26148261925 | " on the next line looks like
# pasted-in junk and breaks the import -- confirm and remove.
26148261925 | from sys import stdin
# UVa 119 "Greedy Gift Givers": for each group, report each person's net
# money (received + remainder kept - total given).
r = stdin.readline
l = r().strip()
while l:
    n = int(l)                 # number of givers in this group
    flist = r().split()        # names of everyone in the group
    c = dict()
    # Per person: [total given, remainder kept, total received].
    for e in flist:
        c[e] = [0,0,0]
    for e in range(n):
        # Line format: giver amount n_friends friend1 friend2 ...
        gl = r().strip().split()
        gl[1] = int(gl[1])
        gl[2] = int(gl[2])
        c[gl[0]][0] += gl[1]
        # With zero friends the giver keeps the full amount.
        c[gl[0]][1] += gl[1]%gl[2] if gl[2] else gl[1]
        am = gl[1]//gl[2] if gl[2] else 0
        for i in range(gl[2]):
            c[gl[i+3]][2] += am
    for e in flist:
        print("{} {}".format(e,c[e][2]+c[e][1]-c[e][0]))
    l = r().strip()
    if l:
        # Blank line between consecutive groups, but not after the last one.
        print()
| michaelgy/PROBLEMS_PROGRAMMING | UVA/119.py | 119.py | py | 586 | python | en | code | 0 | github-code | 50 |
24169772306 | from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
    """Return a static HTML greeting (connectivity smoke test)."""
    greeting = "<h1>Hello world !</h1>"
    return HttpResponse(greeting)
def get_temp(request):
    """Render the calculator form (GET) or evaluate a submitted expression (POST)."""
    if request.method == 'GET':
        return render(request, 'test.html')
    elif request.method == 'POST':
        # Raw form fields arrive as strings; `op` selects the operation code.
        a = request.POST['num_a']
        b = request.POST['num_b']
        op = request.POST['op']
        req_dict = {
            "value_a": a,
            "value_b": b,
            "op": op,
            # NOTE(review): no input validation -- int() raises ValueError on
            # non-numeric input and op '3' can divide by zero.
            "result": get_tempt_res(int(a), op, int(b))
        }
        return render(request, 'test.html', req_dict)
def get_tempt_res(x, op, y):
    """Apply the arithmetic operation selected by *op* ('0'..'3') to x and y.

    Returns None for an unrecognized op code; division by zero propagates
    to the caller unchanged.
    """
    operations = {
        '0': lambda a, b: a + b,
        '1': lambda a, b: a - b,
        '2': lambda a, b: a * b,
        '3': lambda a, b: a / b,
    }
    handler = operations.get(op)
    return handler(x, y) if handler is not None else None
def get_if_for(request):
req_dict = {
'a': 10
}
return render(request, 'test2.html', req_dict)
def get_img(request):
return render(request, 'imagetest.html')
def play_with_cookies(request):
    """Return a minimal response that sets two short-lived demo cookies."""
    response = HttpResponse()
    # max_age is in seconds: both cookies expire after 10 minutes.
    response.set_cookie(key="dj", value="wei in the house new", max_age=60*10)
    response.set_cookie(key="sessionid", value="951120", max_age=60*10)
    response.content = "123"
    return response
| fuzz123123/fuzzTest | Fuzzland/views.py | views.py | py | 1,260 | python | en | code | 0 | github-code | 50 |
71209822555 | import settings
import helpers.translateheper as translatehelper
import helpers.loggerhelper as loggerhelper
from modeles import Team
team_msg = None
teams = (Team('Bleu'), Team('Rouge'))
# ----------------------------------------------------------
# ----------------------------------------------------------
# FONCTIONS UTILITAIRE POUR LA GESTIOND ES EQUIPES
# ----------------------------------------------------------
# ----------------------------------------------------------
# reset teams
def reset_teams():
global teams
teams = (Team('Bleu'), Team('Rouge'))
# Recupere la team en fonctions de la couleur voulut
def get_team_by_name(name):
    """Return the registered team called *name* (IndexError when unknown)."""
    global teams
    matching = [candidate for candidate in teams if candidate.name == name]
    return matching[0]
# Recupere la current team
def get_current_team():
global teams
return [team for team in teams if team.current][0]
# Recupere la team adverse a la current
def get_other_team():
global teams
return [team for team in teams if not team.current][0]
# retourne si le joueur est dans la team actuelle
def is_player_in_current_team(player):
global teams
current_team = get_current_team()
return current_team.is_player_in_team(player)
def set_current_team(player):
    """Mark the team whose roster contains *player* as current and return its name.

    Does NOT update the opposing team's flag. Returns None when the player
    belongs to neither roster (the original fell through implicitly).
    Teams are checked in registration order ('Bleu' first), matching the
    original blue-then-red checks.
    """
    global teams
    for team in teams:
        # Substring match against roster entries, mirroring the original
        # duplicated `any(player in p ...)` checks for each colour.
        if any(player in p for p in team.players):
            team.current = True
            return team.name
    return None
def swap_current_team():
    """Toggle the `current` flag on both teams and return the new current one."""
    global teams
    for team in teams:
        team.current = not team.current
    return get_current_team()
# Mets toutes les teams a current = False
def reset_current_team():
global teams
for t in teams:
t.current = False
# Incremente le score de l'equipe n'étant pas la current team
def update_score_other_team():
get_other_team().score += settings.POINTS_VICTOIRE
# Incremente le score de la current team
def update_score_current_team(points=1):
get_current_team().score += points
# Ajoute le malus de point à l'équipe
def add_malus():
get_current_team().score -= 1
# ----------------------------------------------------------
# ----------------------------------------------------------
# GESTION DE LA CREATION DES EQUIPES AVEC LES EMOJIS
# ----------------------------------------------------------
# ----------------------------------------------------------
#Fonction qui afffiche le menu de choix des equipes
async def welcome(bot):
global team_msg
channel = bot.get_channel(settings.CHANNEL_ID)
message_bot = await channel.send(translatehelper.get_guidelines('welcome'))
team_msg = message_bot
# ajout des emojis
await message_bot.add_reaction("\U0001F534")
await message_bot.add_reaction("\U0001F535")
# Adding a reaction == joining a team.
async def on_raw_reaction_add(payload):
    """Discord hook: a reaction added to the team-selection message joins a team.

    Red circle joins 'Rouge', blue circle joins 'Bleu'. The roster entry is
    the Discord handle with its '#discriminator' suffix stripped.
    """
    global team_msg, teams
    # Only react to emojis on the selection message posted by welcome().
    if team_msg is not None and payload.message_id == team_msg.id:
        print(f"""Selectionne : {team_msg.id}""")
        if payload.emoji.name == '\U0001F534':
            # Test scaffolding: deliberately allows being in both teams.
            # TODO: drop `if True` and restore the membership check below
            # outside of tests.
            if True:
            #if str(payload.member) not in red_team:
                loggerhelper.log_reaction_added(team_msg.id, payload.member)
                player_red=str(payload.member).split('#')[0]
                red_team = get_team_by_name('Rouge')
                red_team.players.append(player_red)
        if payload.emoji.name == '\U0001F535':
            # Same test scaffolding as the red branch above.
            if True:
            # if str(payload.member) not in blue_team:
                loggerhelper.log_reaction_added(team_msg.id, payload.member)
                player_blue=str(payload.member).split('#')[0]
                blue_team = get_team_by_name('Bleu')
                blue_team.players.append(player_blue)
# Removing a reaction == leaving a team.
async def on_raw_reaction_remove(self, payload):
    """Discord hook: removing a reaction from the team message leaves that team.

    Raw reaction payloads only carry ``user_id``, so the user object is
    fetched explicitly before logging and roster removal.
    """
    global team_msg, teams
    if team_msg is not None and payload.message_id == team_msg.id:
        # Resolve the Discord user from the raw payload's user_id.
        user_name = await self.fetch_user(payload.user_id)
        red_team = get_team_by_name('Rouge')
        blue_team = get_team_by_name('Bleu')
        # Fixed: raw payloads have no `.user` attribute (the original
        # `payload.user` raised AttributeError); log the fetched user.
        loggerhelper.log_reaction_delete(team_msg.id, user_name)
        if payload.emoji.name == '\U0001F534' and red_team is not None:
            player_red = str(user_name).split('#')[0]
            red_team.players.remove(player_red)
        if payload.emoji.name == '\U0001F535' and blue_team is not None:
            player_blue = str(user_name).split('#')[0]
            blue_team.players.remove(player_blue)
33753505171 | #!/usr/bin/env python
# -*-coding: utf-8-*-
class Point(object):
    """A 3-D point whose coordinates are coerced to float when written out."""

    def __init__(self, x, y, z):
        super().__init__()
        self.x = x
        self.y = y
        self.z = z

    def write_data(self, fp=None):
        """Coerce the coordinates to float and print them in E15.7 format.

        Writes to *fp* (any file-like object) or to stdout when fp is None.
        Note: mutates self -- the coordinates stay converted afterwards.
        """
        self.x, self.y, self.z = float(self.x), float(self.y), float(self.z)
        print("%15.7E %15.7E %15.7E" % (self.x, self.y, self.z), file=fp)
class ModeShape(Point):
    """A mode-shape sample point; identical to Point apart from the type name."""

    def __init__(self, x, y, z):
        super().__init__(x, y, z)
if __name__ == "__main__":
point = Point(1.0, 2.0, 3.0)
point.write_data()
| mtldswz/ModeInter | ModeInter/Point.py | Point.py | py | 592 | python | en | code | 5 | github-code | 50 |
25277438911 | code = input("Enter 12 digit code: ")
def checkDigit(upc):  # validates the UPC-A check digit
    """Validate the mod-10 check digit of a 12-digit UPC-A code.

    Digits at indices 0,2,...,10 are weighted 3, the remaining payload digits
    are weighted 1; the weighted total plus the check digit (index 11) must
    be a multiple of 10. (The original expression had broken parentheses and
    compared the check-digit *string* to the int 10, so it was almost always
    truthy.)
    """
    odd_sum = sum(int(upc[i]) for i in range(0, 11, 2))
    even_sum = sum(int(upc[i]) for i in range(1, 10, 2))
    total = 3 * odd_sum + even_sum + int(upc[11])
    if total % 10 == 0:
        print("mod10 digit is correct")
    else:
        print("mod10 digit is incorrect")
def checkLoop(code):
    """Loop-based UPC-A mod-10 validation (same rule as checkDigit).

    Fixes in this version: the parity test is on the index i, not on the
    digit string (`code[i]%2` raised TypeError); the loop covers all 11
    payload digits (the original `range(10)` skipped index 10); and the
    check digit is converted to int before use (`+ code[11]` mixed str/int).
    """
    total = 0
    for i in range(11):
        if i % 2 == 0:
            total += int(code[i]) * 3
        else:
            total += int(code[i])
    # (total + check) % 10 == 0 also handles the total%10 == 0 / check == 0
    # case, which `total % 10 + check == 10` got wrong.
    if (total + int(code[11])) % 10 == 0:
        print("mod10 digit is correct")
    else:
        print("mod10 digit is incorrect")
checkLoop(code) | tornadoluna/mod10check | main.py | main.py | py | 806 | python | en | code | 0 | github-code | 50 |
12733043632 | import scipy.sparse as ss
import numpy as np
import math
def calculateSimilarity(data, removeWalletsPercentile=None, removeContractsPercentile=None, removeContracts=None):
    """Compute pairwise cosine similarity between wallets by shared contracts.

    Parameters
    ----------
    data : pandas.DataFrame with columns ``signer_account_id``,
        ``receiver_account_id`` and ``interactions_num``.
    removeWalletsPercentile : drop signer rows whose interactions_num exceeds
        this percentile of the (unfiltered) distribution.
    removeContractsPercentile : keep only receivers at or below this
        popularity percentile (computed on the unfiltered frame).
    removeContracts : explicit set of receiver ids to exclude.

    Returns
    -------
    list of (wallet_a, wallet_b, similarity) tuples (not a sparse matrix,
    despite the stale comment on the original signature).
    """
    # Both cutoffs are computed on the unfiltered frame, as before.
    if removeWalletsPercentile:
        interactions_cutoff = np.percentile(data.interactions_num, removeWalletsPercentile)
        print("remove signer interactions percentile : " + str(interactions_cutoff))
    if removeContractsPercentile:
        receiver_interactions_count = data[["signer_account_id", "receiver_account_id"]].groupby("receiver_account_id")\
            .count().sort_values(by="signer_account_id", ascending=False)
        # Fixed: honour the parameter -- the original hard-coded 99.9 here.
        receiver_cutoff = np.percentile(receiver_interactions_count.signer_account_id, removeContractsPercentile)
        leaveContracts = set(receiver_interactions_count[receiver_interactions_count.signer_account_id <= receiver_cutoff]
                             .reset_index()["receiver_account_id"].tolist())
        print(receiver_interactions_count.head(10))
        print("remove receiver interactions percentile : " + str(receiver_cutoff))

    # Apply the filters. The original duplicated these expressions across
    # eight nested branches; applying the masks sequentially is equivalent.
    if removeWalletsPercentile:
        data = data[data.interactions_num <= interactions_cutoff]
    if removeContractsPercentile:
        if removeContracts:
            print("remove contracts : " + str(removeContracts))
            leaveContracts = leaveContracts - removeContracts
        data = data[data.receiver_account_id.isin(leaveContracts)]
    elif removeContracts:
        print("remove contracts : " + str(removeContracts))
        data = data[~data.receiver_account_id.isin(removeContracts)]
    # Self-interactions never contribute to similarity.
    data = data[data.signer_account_id != data.receiver_account_id]

    # Assign a dense integer index to every distinct signer / receiver.
    signers = data["signer_account_id"].drop_duplicates().reset_index().drop("index", axis=1).reset_index()
    print("signers num : " + str(len(signers)))
    receivers = data["receiver_account_id"].drop_duplicates().reset_index().drop("index", axis=1).reset_index()
    print("receivers num : " + str(len(receivers)))
    data = data.set_index("signer_account_id") \
        .join(signers.set_index("signer_account_id"), how="left") \
        .reset_index(drop=True) \
        .set_index("receiver_account_id") \
        .join(receivers.set_index("receiver_account_id"),
              how="left", lsuffix="_signer", rsuffix="_receiver") \
        .reset_index(drop=True) \
        .drop("interactions_num", axis=1) \
        .drop_duplicates()

    row = np.array(data["index_signer"])
    col = np.array(data["index_receiver"])
    d = np.ones(len(data))
    print("creating matrices")
    # Binary signer-by-receiver incidence matrix; m1 * m1^T counts shared
    # contracts per signer pair.
    m1 = ss.coo_matrix((d, (row, col))).astype(np.uintc).tocsr()
    m2 = m1.transpose()
    print("multiplying matrices")
    common_contracts = m1.dot(m2).tocoo()

    # sqrt of each signer's distinct-contract count is the norm of its
    # binary row, so d / (a[r] * a[c]) below is cosine similarity.
    # (groupby().size() replaces the fragile math.sqrt(single-row-Series)
    # coercion of the original.)
    a = {idx: math.sqrt(cnt) for idx, cnt in data.groupby("index_signer").size().items()}
    signers_index = signers.set_index("index").to_dict()["signer_account_id"]
    print("number of entries : " + str(len(common_contracts.data)))
    print("calculating similarity")
    row = [signers_index[idx] for idx in common_contracts.row]
    print("replaced row indexes with wallets")
    col = [signers_index[idx] for idx in common_contracts.col]
    print("replaced column indexes with wallets")
    data_similarity = [(d/(a[c]*a[r])) for r, c, d in zip(common_contracts.row, common_contracts.col, common_contracts.data)]
    print("calculated similarity")
    return list(zip(row, col, data_similarity))
42013992258 | import networkx as nx
def add_node(graph, areaId, node, **details):
if node in graph:
if areaId not in graph.nodes[node]['areas']:
graph.nodes[node]['areas'].append(areaId)
else:
graph.add_node(node, areas=[areaId], **details)
def add_edge(graph, r1, r2, interface_id):
_interface_id = get_interface_id(interface_id)
if r2 in graph[r1]:
for edge in graph[r1][r2]:
if 'intf' in graph[r1][r2][edge] and graph[r1][r2][edge]['intf'] == _interface_id:
return
graph.add_edge(r1, r2, intf=_interface_id)
def update_topology(graph, lsdb):
'''
Update the NetworkX topology according to the LSDB
'''
for area in lsdb['areaScopedLinkStateDb']:
if len(area['lsa']) == 0:
raise Exception('Topology not available!')
routers = filter(
lambda device: device["type"] == "Router", area['lsa'])
for router in routers:
add_node(graph, area['areaId'],
router['advertisingRouter'], type='Router')
for neighbor in router['lsaDescription']:
if neighbor['neighborRouterId'] != router['advertisingRouter']:
add_node(graph, area['areaId'],
neighbor['neighborRouterId'], type='Router')
add_edge(graph, router['advertisingRouter'],
neighbor['neighborRouterId'], neighbor['interfaceId'])
add_edge(graph, neighbor['neighborRouterId'],
router['advertisingRouter'], neighbor['neighborInterfaceId'])
def get_interface_id(interface_id):
    """Extract the numeric interface id from a dotted-quad string.

    Returns the argument unchanged when it is not a four-part dotted string
    (e.g. already an int, or a plain interface name).
    """
    try:
        last_part = interface_id.split('.')[3]
        return int(last_part)
    except Exception:
        return interface_id
def map_interfaces(graph, interfaces, router_id):
    '''
    Map interface IDs to names: annotate each edge of *graph* out of
    *router_id* with an 'if_name' key when its 'intf' value matches an
    entry's interfaceId.

    Note: mutates *interfaces* in place (the loopback entry is removed).
    '''
    # Fixed: `del(interfaces['lo'])` raised KeyError when no loopback entry
    # was present; pop with a default is a no-op in that case.
    interfaces.pop('lo', None)
    for intf_name, intf_data in interfaces.items():
        for neighbor in graph[router_id]:
            for link in graph[router_id][neighbor]:
                conn = graph[router_id][neighbor][link]
                if conn['intf'] == intf_data['interfaceId']:
                    graph[router_id][neighbor][link]['if_name'] = intf_name
def attribute_routes(graph, routes):
    '''
    Attach loopback-derived prefixes ('fcff:...') to the graph node of
    their advertising router.
    '''
    loopback_prefix = 'fcff:'
    for prefix, info in routes.items():
        if not prefix.startswith(loopback_prefix):
            continue
        advertiser = info['lsAdvertisingRouter']
        graph.nodes[advertiser]['prefix'] = prefix
| maurohirt/Docker_GNS3 | routers/src/topology_extractor.py | topology_extractor.py | py | 2,539 | python | en | code | 0 | github-code | 50 |
9659237038 | from datetime import datetime
from dateutil import parser
from src import db_util
def query_db(limit, offset, statement, log, config, query_data=None):
    """Run *statement* against the configured DB and return rows as dicts.

    When both *limit* and *offset* are truthy strings they are appended to
    the statement; 'open'/'close' columns are reformatted as 'HH:MM AM/PM'
    strings (parsed with dateutil).
    """
    # NOTE(review): limit/offset are concatenated into the SQL text -- if
    # they can come from untrusted input this is an injection risk; prefer
    # bound parameters via query_data.
    if limit and offset:
        statement = statement + ' offset ' + offset + ' limit ' + limit
    log.info('statement:' + statement)
    conn = db_util.db_get_conn(config, log)
    # db_execute takes optional bound parameters only when provided.
    if query_data:
        cur = db_util.db_execute(conn, statement, log, query_data)
    else:
        cur = db_util.db_execute(conn, statement, log)
    rows = list(cur.fetchall())
    data = []
    for row in rows:
        row = dict(row)
        if row.get('open'):
            row['open'] = datetime.strftime(parser.parse(str(row.get('open'))), '%H:%M %p')
        if row.get('close'):
            row['close'] = datetime.strftime(parser.parse(str(row.get('close'))), '%H:%M %p')
        data.append(row)
    return data
| UranusLin/BuyingFrenzy | src/utils.py | utils.py | py | 860 | python | en | code | 0 | github-code | 50 |
23593975153 | import pathlib
import astropy.units
from lsst.ts.xml import utils
"""This library defines common variables and functions used by the various
XML test suite generator scripts.
"""
# =========
# Variables
# =========
"""Defines the list of Commandable SAL Components, or CSCs."""
# Each entry must match a directory name under sal_interfaces/
# (see get_xmlfile_csc_topic below).
subsystems = [
    "ATAOS",
    "MTAirCompressor",
    "ATBuilding",
    "ATCamera",
    "ATDome",
    "ATDomeTrajectory",
    "ATHeaderService",
    "ATHexapod",
    "ATMCS",
    "ATMonochromator",
    "ATOODS",
    "ATPneumatics",
    "ATPtg",
    "ATSpectrograph",
    "ATWhiteLight",
    "Authorize",
    "GCHeaderService",
    "CCCamera",
    "CCHeaderService",
    "CCOODS",
    "CBP",
    "DIMM",
    "DREAM",
    "DSM",
    "EAS",
    "Electrometer",
    "ESS",
    "FiberSpectrograph",
    "GenericCamera",
    "GIS",
    "Guider",
    "HVAC",
    "LaserTracker",
    "LEDProjector",
    "LinearStage",
    "LOVE",
    "MTAOS",
    "MTCamera",
    "MTDome",
    "MTDomeTrajectory",
    "MTEEC",
    "MTHeaderService",
    "MTHexapod",
    "MTM1M3",
    "MTM1M3TS",
    "MTM2",
    "MTMount",
    "MTOODS",
    "MTPtg",
    "MTRotator",
    "MTVMS",
    "OCPS",
    "PMD",
    "Scheduler",
    "Script",
    "ScriptQueue",
    "SummitFacility",
    "Test",
    "TunableLaser",
    "Watcher",
    "WeatherForecast",
]
"""Define the list of Generic Commands."""
# Commands every CSC supports; combined with generic_events into
# generic_topics below.
generic_commands = [
    "abort",
    "enable",
    "disable",
    "standby",
    "exitControl",
    "start",
    "enterControl",
    "setLogLevel",
    "setAuthList",
]
"""Define the list of Generic Events."""
generic_events = [
"authList",
"clockOffset",
"configurationApplied",
"configurationsAvailable",
"errorCode",
"heartbeat",
"logLevel",
"logMessage",
"simulationMode",
"softwareVersions",
"statusCode",
"summaryState",
"authList",
"largeFileObjectAvailable",
]
# Full SAL topic names ("command_*" / "logevent_*") of the generic interface.
generic_topics = set(
    [f"command_{val}" for val in generic_commands]
    + [f"logevent_{val}" for val in generic_events]
)
"""Define the list of AddedGenerics categories."""
added_generics_categories = ["configurable", "csc"]
"""Define the list of AddedGenerics commands that are not mandatory."""
added_generics_commands = ["abort", "enterControl"]
"""Define the list of AddedGenerics events that are not mandatory."""
added_generics_events = ["clockOffset", "largeFileObjectAvailable", "statusCode"]
"""Define the list of AddedGenerics mandatory commands."""
added_generics_mandatory_commands: list[str] = []
"""Define the list of AddedGenerics mandatory events."""
added_generics_mandatory_events = [
    "heartbeat",
    "logLevel",
    "logMessage",
    "softwareVersions",
]
"""Define the full set of mandatory topics not needed in AddedGenerics."""
added_generics_mandatory_topics = set(
    [f"command_{val}" for val in added_generics_mandatory_commands]
    + [f"logevent_{val}" for val in added_generics_mandatory_events]
)
"""Define list of commands for csc category."""
added_generics_csc_commands = [
    "disable",
    "enable",
    "exitControl",
    "setAuthList",
    "setLogLevel",
    "standby",
    "start",
]
"""Define list of events for csc category."""
added_generics_csc_events = [
    "authList",
    "errorCode",
    "simulationMode",
    "summaryState",
]
"""Define list of commands for configurable category."""
added_generics_configurable_commands: list[str] = []
"""Define list of events for configurable category."""
added_generics_configurable_events = [
    "configurationApplied",
    "configurationsAvailable",
]
"""Define the full set of approved AddedGenerics items."""
# Union of the category names plus every command/event topic listed above.
added_generics_items = set(
    added_generics_categories
    + [f"command_{val}" for val in added_generics_commands]
    + [f"logevent_{val}" for val in added_generics_events]
    + [f"command_{val}" for val in added_generics_csc_commands]
    + [f"logevent_{val}" for val in added_generics_csc_events]
    + [f"command_{val}" for val in added_generics_configurable_commands]
    + [f"logevent_{val}" for val in added_generics_configurable_events]
)
"""Define the lists of IDL and MySQL Reserved Words"""
idl_reserved = [
"ABSTRACT",
"ANY",
"ATTRIBUTE",
"BOOLEAN",
"CASE",
"CHAR",
"COMPONENT",
"CONST",
"CONSUMES",
"CONTEXT",
"CUSTOM",
"DEC",
"DEFAULT",
"DOUBLE",
"EMITS",
"ENUM",
"EVENTTYPE",
"EXCEPTION",
"EXIT",
"FACTORY",
"FALSE",
"FINDER",
"FIXED",
"FLOAT",
"GETRAISES",
"HOME",
"IMPORT",
"IN",
"INOUT",
"INTERFACE",
"LIMIT",
"LOCAL",
"LONG",
"MODULE",
"MULTIPLE",
"NATIVE",
"OBJECT",
"OCTET",
"ONEWAY",
"OUT",
"PRIMARYKEY",
"PRIVATE",
"PROVIDES",
"PUBLIC",
"PUBLISHES",
"RAISES",
"READONLY",
"SEQUENCE",
"SETRAISES",
"SHORT",
"STRING",
"STRUCT",
"SUPPORTS",
"SWITCH",
"TRUE",
"TRUNCATABLE",
"TYPEDEF",
"TYPEID",
"TYPEPREFIX",
"UNION",
"UNSIGNED",
"USES",
"VALUEBASE",
"VALUETYPE",
"VOID",
"WCHAR",
"WSTRING",
]
"""Define the list of IDL Types"""
idl_types = [
"boolean",
"byte",
"short",
"int",
"long",
"long long",
"unsigned short",
"unsigned int",
"float",
"double",
"string",
]
db_critical_reserved = ["TIME"]
db_optional_reserved = [
"ALL",
"ALTER",
"ANALYZE",
"ANY",
"AS",
"ASC",
"BEGIN",
"BY",
"CREATE",
"CONTINUOUS",
"DATABASE",
"DATABASES",
"DEFAULT",
"DELETE",
"DESC",
"DESTINATIONS",
"DIAGNOSTICS",
"DISTINCT",
"DROP",
"DURATION",
"END",
"EVERY",
"EXPLAIN",
"FIELD",
"FOR",
"FROM",
"GRANT",
"GRANTS",
"GROUP",
"GROUPS",
"IN",
"INF",
"INSERT",
"INTO",
"KEY",
"KEYS",
"KILL",
"LIMIT",
"SHOW",
"MEASUREMENT",
"MEASUREMENTS",
"NAME",
"OFFSET",
"ON",
"ORDER",
"PASSWORD",
"POLICY",
"POLICIES",
"PRIVILEGES",
"QUERIES",
"QUERY",
"READ",
"REPLICATION",
"RESAMPLE",
"RETENTION",
"REVOKE",
"SELECT",
"SERIES",
"SET",
"SHARD",
"SHARDS",
"SLIMIT",
"SOFFSET",
"STATS",
"SUBSCRIPTION",
"SUBSCRIPTIONS",
"TAG",
"TO",
"USER",
"USERS",
"VALUES",
"WHERE",
"WITH",
"WRITE",
]
# Field names used by SAL, and so forbidden in ts_xml
sal_reserved = [
"SALINDEX",
]
"""Define string attributes that are NOT unitless"""
strings_with_units = [
"azPositions",
"elPositions",
"rotPositions",
"localTimeString",
"raString",
"decString",
]
# =========
# Functions
# =========
def get_xmlfile_csc_topic() -> list[tuple[pathlib.Path, str, str]]:
    """Return one (xml file, CSC name, topic name) triple per interface file."""
    data_dir = utils.get_data_dir()
    triples: list[tuple[pathlib.Path, str, str]] = []
    for csc in subsystems:
        csc_dir = data_dir / "sal_interfaces" / csc
        # Interface files are named "<CSC>_<topic>.xml".
        for xml_file in csc_dir.glob(f"{csc}_*.xml"):
            triples.append((xml_file, csc, xml_file.stem.split("_")[1]))
    return triples
def check_unit(unit_str: str) -> "astropy.units.Quantity":
    """Validate that *unit_str* names a real astropy unit.

    Returns
    -------
    astropy.units.Quantity
        A quantity of 1 in the validated unit.  (The original annotation said
        ``None`` even though the quantity was returned; fixed.)

    Raises
    ------
    TypeError
        If *unit_str* is purely numeric.
    ValueError
        If astropy does not recognise the unit.
    Exception
        For any other astropy parsing failure.
    """
    if unit_str.isnumeric():
        raise TypeError(f"Units={unit_str!r} cannot be a number")
    try:
        return astropy.units.Quantity(1, unit_str)
    except ValueError as err:
        # Chain the cause so the original astropy error is not lost.
        raise ValueError(f"Units={unit_str!r} is not a valid unit.") from err
    except Exception as err:
        raise Exception(f"Units={unit_str!r} error: {err!r}.") from err
| lsst-ts/ts_xml | python/lsst/ts/xml/testutils.py | testutils.py | py | 7,572 | python | en | code | 3 | github-code | 50 |
# Run as `python bludiste-solution.py` in the folder containing bludiste.txt,
# or pass the maze file path as the first argument.  For a different word,
# add a second argument: python bludiste-solution.py bludiste-priklad.txt losi
from typing import List, Tuple, Set
import sys
# Target word whose letters must be collected along the path (upper-cased).
alfabet = "INTERLOS" if len(sys.argv) < 3 else sys.argv[2].upper()
def parse() -> List[List[str]]:
    """Load the maze file (bludiste.txt, or argv[1]) as a grid of characters."""
    maze_path = 'bludiste.txt' if len(sys.argv) < 2 else sys.argv[1]
    with open(maze_path) as maze_file:
        return [list(row.rstrip('\n\r')) for row in maze_file]
def is_correct(letters: List[str]) -> bool:
    """True while no letter was collected more often than it occurs in alfabet."""
    return all(letters.count(ch) <= alfabet.count(ch) for ch in alfabet)
def is_accepted(letters: List[str]) -> bool:
    """True once every letter of alfabet was collected exactly the right number of times."""
    return all(letters.count(ch) == alfabet.count(ch) for ch in alfabet)
def in_bound(row: int, col: int, lines: List[List[str]]) -> bool:
    """True when (row, col) lies inside the grid and is not a wall ('#')."""
    if not (0 <= row < len(lines)):
        return False
    if not (0 <= col < len(lines[row])):
        return False
    return lines[row][col] != '#'
def solve(
    data: List[List[str]],
    visited: Set[Tuple[int, int]],
    remainder: List[str],
    row: int,
    col: int
) -> List[Tuple[int, int]]:
    """Depth-first search from (row, col) to the bottom-right corner.

    Moves jump two cells at a time; the cell in between ("peek") acts as a
    wall/door.  *remainder* accumulates the letters collected so far; a
    branch is pruned as soon as it cannot still spell the target word.
    Returns the path in reverse order (goal first), or [] on failure.
    Mutates *data* and *visited* in place and restores them on backtrack.
    """
    if not is_correct(remainder):
        return []
    if row == len(data) - 1 and col == len(data[0]) - 1:
        return [(row, col)]
    # Word fully collected: further letters no longer matter.
    if is_accepted(remainder):
        remainder = []
    visited.add((row, col))
    revert_changes: List[Tuple[int, int, str]] = []
    directions: List[Tuple[int, int]] = [(-2, 0), (0, 2), (2, 0), (0, -2)]
    for (diff_row, diff_column) in directions:
        new_row = row + diff_row
        new_col = col + diff_column
        # The intermediate cell between the current and the target cell.
        peek_row = row + diff_row // 2
        peek_col = col + diff_column // 2
        if not in_bound(new_row, new_col, data) \
                or data[peek_row][peek_col] == '#' \
                or (new_row, new_col) in visited:
            continue
        remainder.append(data[new_row][new_col].upper())
        prev = remainder.copy()
        path = solve(data, visited, remainder, new_row, new_col)
        if path:
            path.append((row, col))
            return path
        else:
            # Undo the letter we tentatively collected.
            remainder = prev
            remainder.pop()
        # Dead branch: wall off the passage, remembering how to restore it.
        revert_changes.append((peek_row, peek_col, data[peek_row][peek_col]))
        data[peek_row][peek_col] = '#'
    visited.remove((row, col))
    for change_row, change_col, value in revert_changes:
        data[change_row][change_col] = value
    return []
maze = parse()
# solve() returns the path goal-first, so reversed() walks start -> goal.
solution = solve(maze, {(0, 0)}, [alfabet[0]], 0, 0)
print('Full Path:', "".join(maze[i][j] for (i, j) in reversed(solution)), end='\n\n')
# Only the uppercase cells are the collected word letters.
print('Solution:', "".join(maze[i][j] for (i, j) in reversed(solution) if maze[i][j].isupper()))
| zverinec/interlos-web | public/download/years/2021/reseni/bludiste-solution.py | bludiste-solution.py | py | 2,719 | python | en | code | 1 | github-code | 50 |
21512176052 | # Prometeus Python initialiZation
# By Pierre-Etienne ALBINET
# Started 20190206
# Changed 20190206
import api
from bson import ObjectId
def checks():
    """Ensure the base Prometeus items exist, creating any that are missing.

    Verifies the config item ('promCFG') and one template per model type,
    creating them through the `api` module when absent.
    """
    # --- Config item check ---
    print('Checking Config...')
    cfg = api.ritm('*', 0, 'cfg', 'promCFG', 'server')
    if cfg[0]['_id'] == 'not found':
        cfgId = api.citm(0, 'cfg', 'promCFG', 0, 'server')['result']
        print('Config Item created')
    else:
        cfgId = cfg[0]['_id']
        print('Config OK')
    # --- Template item check ---
    print('Checking Templates...')
    tpl = api.ritm('*', 0, 'tmplt', '*', 'server')
    # Template names: organization, person, system, object.
    model = ['orgzt', 'prson', 'systm', 'objct']
    tmpIds = {}
    # NOTE(review): admId is assigned but never used below -- possibly a
    # leftover or used by code outside this function; confirm.
    admId = 0
    action = False
    for x in model:
        found = False
        for y in tpl:
            # No templates at all: stop scanning immediately.
            if tpl[0]['_id'] == 'not found':
                break
            elif y['val'] == x:
                found = True
                break
        if not found:
            action = True
            # NOTE(review): `id` shadows the builtin, and the printed message
            # is missing a space ("Template Xcreated").
            id = api.citm(0, 'tmplt', x, 0, 'server')['result']
            print('Template ' + x + 'created')
            tmpIds[x] = id
        else:
            tmpIds[x] = y['_id']
    if not action:
        print('Templates OK')
| theoneandonly4/prom | init.py | init.py | py | 1,181 | python | en | code | 0 | github-code | 50 |
3347445256 | '''
Enumerating Oriented Gene Orderings
Rosalind ID: SIGN
http://rosalind.info/problems/sign/
Goal: The total number of signed permutations of length n, followed by a list of all such permutations (you may list the signed permutations in any order).
'''
import sys
import math
def add_gene(existing_gene, the_genes):
    """Extend every partial signed permutation by one more gene.

    A gene may be appended only if its magnitude (|gene|) is not already
    present in the partial permutation, so each position is used once.
    """
    extended = list()
    for prefix in existing_gene:
        # Magnitudes already used in this partial permutation.
        used = [math.sqrt(int(tok) ** 2) for tok in str(prefix).split()]
        for gene in the_genes:
            if math.sqrt(int(gene) ** 2) not in used:
                extended.append(str(prefix) + " " + str(gene))
    return extended
# Permutation length n, read from the first line of the input file.
num_enumerations = int(open(sys.argv[1]).readlines()[0].strip())
possible_nums = list()
# Candidate genes: -n..-1 and 1..n (zero excluded).
for i in range(-1 * num_enumerations, num_enumerations + 1):
    if i != 0:
        possible_nums.append(i)
change_nums = possible_nums
# Grow the partial permutations one gene at a time until they have length n.
for i in range(num_enumerations - 1):
    change_nums = add_gene(change_nums, possible_nums)
# Output format: the count, then one signed permutation per line.
final_output = str(len(change_nums)) + "\n"
for i in change_nums:
    final_output += i + "\n"
submit_file = open("submit.txt","w")
submit_file.write(final_output)
#Ali Razzak
class Wezel:
    """Singly linked list node ("wezel" = Polish for "node")."""
    def __init__(self,val = None):
        self.val = val    # payload value
        self.next = None  # next node, or None at the tail
class Lista:
    """Singly linked list ("lista" = Polish for "list") with cycle helpers."""
    def __init__(self):
        # Sentinel node with val=None marks an empty list.
        self.head = Wezel()
    def dodaj(self, dane):
        """Append *dane* at the tail ("dodaj" = add)."""
        dostawiany = Wezel(dane)
        if self.head.val == None:
            self.head = Wezel(dane)
            return
        zmienna = self.head
        # Walk to the tail.  NOTE(review): `poprzednik` (predecessor) is
        # assigned but never used here.
        while zmienna.next != None:
            poprzednik = zmienna
            zmienna = zmienna.next
        zmienna.next = dostawiany
    def wyswietl(self):
        """Print every value, head first ("wyswietl" = display)."""
        print(self.head.val)
        zmienna = self.head
        while zmienna.next != None:
            zmienna = zmienna.next
            print(zmienna.val)
    def cykl(self):
        """Link the tail back to the head, creating a full cycle ("cykl" = cycle)."""
        if self.head.val == None:
            return
        zmienna = self.head
        while zmienna.next != None:
            poprzednik = zmienna
            zmienna = zmienna.next
        zmienna.next = self.head
    def znjadz(self):
        """Detect a cycle via slow/fast pointers ("znajdz" = find; name has a typo).

        Returns a node count related to the cycle entry, or False when the
        list is empty or acyclic.  NOTE(review): mutates self.head while
        locating the entry point -- the list is not restored afterwards.
        """
        zmienna = self.head          # slow pointer (one step per turn)
        zmienna2 = self.head.next.next  # fast pointer (two steps per turn)
        if self.head.val == None:
            return False
        napis = ''
        while zmienna.next is not None and zmienna2.next.next is not None:
            if zmienna == zmienna2.next.next:
                zmienna3 = zmienna2.next.next
                licznik = 0
                # Collect node reprs between the meeting points into `napis`.
                while zmienna.next != zmienna3:
                    zmienna = zmienna.next
                    napis+= str(zmienna) + str(zmienna2)
                # Advance head until its repr appears in the collected string,
                # counting the steps ("licznik" = counter).
                while str(self.head) not in napis:
                    self.head = self.head.next
                    licznik += 1
                return licznik
            zmienna = zmienna.next
            zmienna2 = zmienna2.next.next
        return False
def elementprzed(tablica):
    """Return the value of the element just before the cycle entry
    ("element przed" = element before), or False for an empty list.

    NOTE(review): destructive -- the cycle is broken (zm1.next = None) and
    never restored; also assumes a cycle exists (infinite/crashing loop
    otherwise).
    """
    if tablica.head == None:
        return False
    else:
        zm1 = tablica.head          # slow pointer
        zm2 = tablica.head.next.next  # fast pointer
        # Floyd-style meeting loop.  NOTE(review): the `or zm2 == None` /
        # `or zm1 == None` clauses can never rescue the loop -- advancing a
        # None pointer would already raise before they are checked.
        while zm1 != zm2 or zm2 == None or zm1 == None:
            zm1 = zm1.next
            zm2 = zm2.next.next
        if zm1 == zm2 and zm1 != None:
            # Break the cycle at the meeting point so walks below terminate.
            b = zm1.next
            zm1.next = None
            # `prev` trails one node behind `w` from a dummy pre-head node.
            prev = Wezel()
            prev.next = tablica.head
            w = tablica.head
            a = b
            while True:
                # Scan the broken-off segment for the current `w` node.
                while a is not None:
                    if a == w:
                        return prev.val
                    a = a.next
                w = w.next
                prev = prev.next
                a = b
# Build the list 9 -> 8 -> 7 -> 10 -> 20 -> 17.
w = Lista()
w.dodaj(9)
w.dodaj(8)
w.dodaj(7)
w.dodaj(10)
w.dodaj(20)
w.dodaj(17)
# Create a cycle: link the tail (17) back to the node holding 7.
c = w.head
while c.next is not None:
    if c.val == 7:
        a = c
    c = c.next
c.next = a
print(elementprzed(w))
| Halankedemanke/aaaaaa | zadanie25.py | zadanie25.py | py | 2,656 | python | pl | code | 0 | github-code | 50 |
27390807825 | from collections import deque
import copy
ans = 0
# n: queue size; m: number of elements to extract in order.
n, m = map(int, input().split(' '))
orderList = list(map(int, input().split(' ')))
# The queue initially holds 1..n.
storage = deque(list(i for i in range(1,n+1)))
while m and orderList:
    left = 0
    right = 0
    # Cost of rotating left until the wanted element reaches the front.
    tempStorageleft = deque(copy.deepcopy(storage))
    while orderList and orderList[0] != tempStorageleft[0]:
        tempStorageleft.append(tempStorageleft.popleft())
        left += 1
    # Cost of rotating right until the wanted element reaches the front.
    tempStorageRight = deque(copy.deepcopy(storage))
    while orderList and orderList[0] != tempStorageRight[0]:
        tempStorageRight.appendleft(tempStorageRight.pop())
        right += 1
    # Keep the cheaper direction (ties favour rotating left), pop the
    # element, and accumulate the rotation cost.
    if right >= left:
        storage = deque(copy.deepcopy(tempStorageleft))
        ans += left
        storage.popleft()
        if orderList:
            orderList.pop(0)
        m -= 1
    else:
        storage = deque(copy.deepcopy(tempStorageRight))
        ans += right
        storage.popleft()
        if orderList:
            orderList.pop(0)
        m -= 1
print(ans)
29475331979 | from flask import Flask, render_template, request, flash, redirect, url_for
import os
import boto3
from werkzeug.utils import secure_filename
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# Pre-trained ImageNet classifier, loaded once at import time.
model = ResNet50(weights='imagenet')
# NOTE(review): img_path appears unused in this module -- possibly leftover.
img_path = 'elephant.jpg'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
# S3 handle used to archive uploads into the 'cat-image-bucket' bucket.
s3 = boto3.resource('s3')
app = Flask(__name__)
def allowed_file(filename):
    """True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def is_cat(classification, confidence) -> str:
    """Map an ImageNet class name and confidence to a human-readable cat verdict."""
    cat_classes = ("tabby", "tiger_cat", "Persian_cat", "Siamese_cat", "Egyptian_cat")
    if classification not in cat_classes:
        print("Classification " + classification + " does not match a cat!")
        return "No, not a cat"
    print("Classification " + classification + " matches a cat!")
    # Confidence arrives as a string; anything above 0.15 counts as certain.
    if float(confidence) > 0.15:
        return "Yes, definitely a cat in this picture"
    return "There's probably a cat in this picture"
@app.route("/")
def root():
return render_template('index.html')
@app.route("/classify/")
@app.route("/classify/<filename>")
def classify(filename=None):
if filename is None:
return "<p>No picture to found to classify</p>"
img = image.load_img(os.path.join('/tmp', filename), target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
dpred = decode_predictions(preds, top=1)[0]
predClass = str(dpred[0][1])
predConfidence = str(dpred[0][2])
cat = is_cat(predClass, predConfidence)
return "<p>"+cat+"</p>" + "<p>Class: " + predClass + "</p><p>Confidence: " + predConfidence + "</p>"
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """Accept an image upload, save it to /tmp and S3, then redirect to /classify."""
    if request.method == 'POST':
        # The form field must be named 'potential-cat-pic'.
        if 'potential-cat-pic' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['potential-cat-pic']
        # If the user does not select a file, the browser submits an
        # empty file without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # Saved locally so the /classify endpoint can read it back.
            file.save(os.path.join('/tmp', filename))
            s3.Bucket('cat-image-bucket').put_object(Key=filename, Body=file)
            return redirect(url_for('classify', filename=filename))
    # NOTE(review): GET requests (and rejected uploads) fall through and
    # return None, which Flask treats as an error -- a form-rendering
    # return may be missing here; confirm against the full file.
| srkiNZ84/hascat | app.py | app.py | py | 2,683 | python | en | code | 0 | github-code | 50 |
31013317840 | from django.urls import path
from staff import views
# Staff-facing routes.  NOTE(review): 'staff_completed_order' is missing a
# trailing slash, inconsistent with every sibling route -- confirm intended.
urlpatterns = [
    # Authentication and dashboard.
    path('', views.staff_login, name='staff_login'),
    path('staff_dashboard/', views.staff_dashboard, name='staff_dashboard'),
    # Catalogue views.
    path('staff_products/', views.staff_products, name='staff_products'),
    path('staff_category/', views.staff_category, name='staff_category'),
    path('staff_shop/', views.staff_shop, name='staff_shop'),
    # Order lifecycle.
    path('staff_add_order/', views.staff_add_order, name='staff_add_order'),
    path('staff_view_order/', views.staff_view_order, name='staff_view_order'),
    path('staff_pending_order/', views.staff_pending_order, name='staff_pending_order'),
    path('staff_completed_order', views.staff_completed_order, name='staff_completed_order'),
    path('staff_logout/', views.staff_logout, name='staff_logout'),
    path('staff_edit_order/<int:id>/', views.staff_edit_order, name='staff_edit_order'),
    path('staff_delete_order/<int:id>/', views.staff_delete_order, name='staff_delete_order'),
    path('st_edit_staff/', views.st_edit_staff, name='st_edit_staff'),
    path('st_view_order_list/<int:id>/', views.st_view_order_list, name='st_view_order_list'),
    path('staff_order_list_edit/<int:id>/', views.staff_order_list_edit, name='staff_order_list_edit'),
    path('staff_order_list_delete/<int:id>/', views.staff_order_list_delete, name='staff_order_list_delete'),
]
24003019207 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
    "name": "Transform Normal Constraint",
    "author": "marvin.k.breuer",
    "version": (0, 1),
    # NOTE(review): (2, 7, 5) looks like it was meant to be Blender 2.75,
    # normally written (2, 75, 0) -- confirm before changing.
    "blender": (2, 7, 5),
    "location": "View3D > Tools > Transform > Transform with Normal Axis Contraint",
    "description": "Transform Objects with Normal Axis Contraint",
    "warning": "",
    # The original dict listed "url" twice, so the first entry (the forum
    # thread) was silently discarded by Python; it is kept as "wiki_url".
    "wiki_url": "http://www.blenderartists.org/forum/showthread.php?380673-Transform-with-Normal-Axis-Contraint&p=2932621#post2932621",
    "url": "https://plus.google.com/u/0/+MarvinKBreuer/posts",
    "category": "User Interface"
}
import bpy
########### Menu #######################
class WKST_N_Transform_Menu(bpy.types.Menu):
    """Normal Transform Menu"""
    bl_label = "Normal Transform Menu"
    bl_idname = "wkst.normal_transform_menu"

    def draw(self, context):
        layout = self.layout
        # One submenu per normal-constrained transform mode.
        for menu_id, label in (
            ("translate.normal_menu", "N-Translate"),
            ("rotate.normal_menu", "N-Rotate"),
            ("resize.normal_menu", "N-Scale"),
        ):
            layout.menu(menu_id, text=label)
        # The face-rotation operator only makes sense while editing a mesh.
        if context.mode == 'EDIT_MESH':
            layout.separator()
            layout.operator('mesh.rot_con', 'Face-Rotation')
class Translate_Normal_Menu(bpy.types.Menu):
    """Translate Normal Constraint"""
    bl_label = "Translate Normal Constraint"
    bl_idname = "translate.normal_menu"

    def draw(self, context):
        layout = self.layout
        # One translate entry per axis, constrained to NORMAL orientation.
        for label, axis_mask in (
            ("X-Axis", (True, False, False)),
            ("Y-Axis", (False, True, False)),
            ("Z-Axis", (False, False, True)),
        ):
            props = layout.operator("transform.transform", text=label)
            props.mode = 'TRANSLATION'
            props.constraint_axis = axis_mask
            props.constraint_orientation = 'NORMAL'
            props.snap_target = 'ACTIVE'
class Resize_Normal_Menu(bpy.types.Menu):
    """Resize Normal Constraint"""
    bl_label = "Resize Normal Constraint"
    bl_idname = "resize.normal_menu"

    def draw(self, context):
        layout = self.layout
        # One scale entry per axis combination, constrained to NORMAL orientation.
        for label, axis_mask in (
            ("X-Axis", (True, False, False)),
            ("Y-Axis", (False, True, False)),
            ("Z-Axis", (False, False, True)),
            ("XY-Axis", (True, True, False)),
        ):
            props = layout.operator("transform.resize", text=label)
            props.constraint_axis = axis_mask
            props.constraint_orientation = 'NORMAL'
            props.snap_target = 'ACTIVE'
class Rotate_Normal_Menu(bpy.types.Menu):
    """Rotate Normal Constraint"""
    bl_label = "Rotate Normal Constraint"
    bl_idname = "rotate.normal_menu"

    def draw(self, context):
        layout = self.layout
        # One rotate entry per axis, constrained to NORMAL orientation.
        for label, axis_mask in (
            ("X-Axis", (True, False, False)),
            ("Y-Axis", (False, True, False)),
            ("Z-Axis", (False, False, True)),
        ):
            props = layout.operator("transform.rotate", text=label)
            props.constraint_axis = axis_mask
            props.constraint_orientation = 'NORMAL'
            props.snap_target = 'ACTIVE'
class AlignNormal(bpy.types.Operator):
    """Align selected Mesh to active Face in Normal Z Direction"""
    bl_idname = "mesh.align_normal"
    bl_label = "Align to Normal"
    bl_options = {'REGISTER', 'UNDO'}

    manipul = bpy.props.BoolProperty(name="Set Normal Orientation", description="Orientation", default=False)

    def execute(self, context):
        # Pivot around the active element, then flatten along the normal Z axis.
        bpy.ops.view3d.pivot_active()
        bpy.ops.transform.resize(value=(1, 1, 0), constraint_axis=(False, False, True), constraint_orientation='NORMAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
        # 'manipul' is a boolean, so the original range() loop ran 0 or 1 times.
        if self.manipul:
            bpy.ops.space.normal()
        return {'FINISHED'}
# -------------------------------------------------
def transform_normal_draw(self, context):
    """Draw the normal-constrained transform entries inside a tools panel."""
    column = self.layout.column(align=True)
    for menu_id, label in (
        ("translate.normal_menu", "N-Translate"),
        ("rotate.normal_menu", "N-Rotate"),
        ("resize.normal_menu", "N-Scale"),
    ):
        column.menu(menu_id, text=label)
    # The align operator only applies while editing a mesh.
    if context.mode == 'EDIT_MESH':
        column.operator("mesh.align_normal", text="N-Align")
    column.separator()
######------------################################################################
###### Registry ################################################################
def register():
    """Register all classes and hook the draw function into the panels/menus."""
    for cls in (Translate_Normal_Menu, Resize_Normal_Menu, Rotate_Normal_Menu, AlignNormal):
        bpy.utils.register_class(cls)
    for panel in (
        bpy.types.VIEW3D_PT_tools_transform,
        bpy.types.VIEW3D_PT_tools_transform_mesh,
        bpy.types.VIEW3D_PT_tools_transform_curve,
        bpy.types.VIEW3D_PT_tools_transform_surface,
        bpy.types.VIEW3D_PT_tools_mballedit,
        bpy.types.VIEW3D_PT_tools_armatureedit_transform,
        bpy.types.VIEW3D_PT_tools_latticeedit,
    ):
        panel.append(transform_normal_draw)
    # The two transform menus get the entries at the top instead of the bottom.
    bpy.types.VIEW3D_MT_transform_object.prepend(transform_normal_draw)
    bpy.types.VIEW3D_MT_transform.prepend(transform_normal_draw)
def unregister():
    """Undo everything register() did: unhook the draw function, drop the classes."""
    for cls in (Translate_Normal_Menu, Resize_Normal_Menu, Rotate_Normal_Menu, AlignNormal):
        bpy.utils.unregister_class(cls)
    for panel in (
        bpy.types.VIEW3D_PT_tools_transform,
        bpy.types.VIEW3D_PT_tools_transform_mesh,
        bpy.types.VIEW3D_PT_tools_transform_curve,
        bpy.types.VIEW3D_PT_tools_transform_surface,
        bpy.types.VIEW3D_PT_tools_mballedit,
        bpy.types.VIEW3D_PT_tools_armatureedit_transform,
        bpy.types.VIEW3D_PT_tools_latticeedit,
    ):
        panel.remove(transform_normal_draw)
    bpy.types.VIEW3D_MT_transform_object.remove(transform_normal_draw)
    bpy.types.VIEW3D_MT_transform.remove(transform_normal_draw)
if __name__ == "__main__":
register()
| JT-a/blenderpython279 | scripts/addons_extern/sfc_workstation/wkst_transform_normal.py | wkst_transform_normal.py | py | 8,078 | python | en | code | 5 | github-code | 50 |
34143009182 | import csv
import json
import re
from collections import defaultdict
import glob
import os
# Mapping of old tag -> new tag, applied to every recipe record.
# The original leaked this file handle (open(...).read() with no close).
with open("data/change_tags_nw.txt", "r") as tag_file:
    change = json.loads(tag_file.read())

for src_path in glob.glob("data/raw_jl/epicurious*"):
    f_name = os.path.basename(src_path)
    records = []
    # Read one JSON object per non-blank line (JSON Lines format), using
    # `with` so handles are closed even when a line fails to parse.
    with open(src_path, "r") as src:
        for line in src:
            line = line.strip()
            if line == "":
                continue
            record = json.loads(line)
            if "tags" in record:
                # Remap each tag, keeping it unchanged when unknown.
                record["tags"] = [change.get(tag, tag) for tag in record["tags"]]
            records.append(record)
    # Write the rewritten records alongside the raw data.
    with open("data/processed_jl/processed_" + f_name, "w", encoding="utf-8") as dst:
        for record in records:
            dst.write(json.dumps(record) + "\n")
21093738406 | from model.member_list import MemberList
from model.member import Member
from model.water_consumption import WaterConsumption
from datetime import datetime
class WaterBillingService:
    """Registers members and their water consumption, and totals their debt."""

    # Price per unit of water (not used by the methods below).
    price = 3

    def __init__(self, title):
        self._title = title
        self.members = MemberList()
        self.consumptionDB = []

    @property
    def title(self):
        """Read-only service title."""
        return self._title

    def register_member(self, nombre):
        """Create a Member named *nombre* and add it to the member list."""
        self.members.add_member(Member(nombre))

    def find_member_id(self, nombre):
        """Look up the id of the member named *nombre*."""
        return self.members.get_member_id(nombre)

    def register_consumption(self, member_id, consumption):
        """Record a consumption entry for *member_id*, stamped with today's date."""
        entry = WaterConsumption(member_id, consumption, datetime.today())
        self.consumptionDB.append(entry)

    def calculate_debt(self, member_id):
        """Sum every recorded consumption belonging to *member_id*."""
        return sum(
            entry.mes_consumption
            for entry in self.consumptionDB
            if entry.id == member_id
        )
if __name__ == '__main__':
    # Smoke test: register members, record consumptions, report a total debt.
    waterBill1 = WaterBillingService('Facturacion de agua')
    waterBill1.register_member('Richard')
    waterBill1.register_member('Jhony')
    waterBill1.register_member('oso')
    print(waterBill1.find_member_id('oso'))
    for member in waterBill1.members.members:
        print(member)
    # Member 1001 accumulates three entries (total 150); 1002 gets one.
    waterBill1.register_consumption(1001, 50)
    waterBill1.register_consumption(1002, 100)
    waterBill1.register_consumption(1001, 50)
    waterBill1.register_consumption(1001, 50)
    for consumo in waterBill1.consumptionDB:
        print(consumo)
    print(f'Deuda total: {waterBill1.calculate_debt(1001)}')
38673563595 | import csv, json, os
# Expected tree.csv columns: Subgroup,Family,Subfamily,Members
path, filename = os.path.split(os.path.realpath(__file__))
# The app name is the directory containing this script.
appname = path.split("/")[-1]
csvPath = f'src/{appname}/data/tree.csv'
jsonPath = f'src/{appname}/data/classification.json'
# Root entity name; presumably matches the CSV's first data row -- confirm.
root_name = "FPVR"
def getvalue(s):
    """Return the second '@'-separated field of *s* (e.g. 'Foo' in 'id3@Foo')."""
    parts = s.split('@')
    return parts[1]
def fixProtein(p):
    """Return the protein label unchanged.

    An earlier version truncated synonyms (e.g. 'DMPK1 (DMPK)' -> 'DMPK1');
    the full name-plus-synonym form is now kept on purpose.
    """
    return p
def classification_csv_to_json():
    """Build the subgroup -> family -> subfamily tree from tree.csv.

    Returns a list of subgroup dicts, each with nested 'nodes' for families
    and subfamilies.  Each node carries an id ('id<row>@<name>'), its weblogo
    image path (file name minus '.png'), and a ';'-split members list.
    """
    def create_entity(entity_type,value,row):
        # Build one tree node.  NOTE(review): the 'subgroup' and 'family'
        # branches are identical; also ValueError(entity_type) below is
        # constructed but never raised -- almost certainly missing `raise`.
        entity = None
        if entity_type == "subgroup":
            entity ={'id': "id" + str(idx) + "@" + value,
                     'type' : entity_type,
                     'value':value,
                     'path': row['Weblogo'][:-4] if 'Weblogo' in row else value,
                     'members':row['Members'].split(";"),
                     'nodes': []}
        elif entity_type == "family":
            entity = {'id':"id" + str(idx) + "@" + value,
                    'type' : entity_type,
                    'value':value,
                    'path': row['Weblogo'][:-4] if 'Weblogo' in row else value,
                    'members':row['Members'].split(";"),
                    'nodes': []}
        elif entity_type == "subfamily":
            entity ={'id':"id" + str(idx) + "@" + value,
                    'type' : entity_type,
                    'value': value,
                    'members':row['Members'].split(";"),
                    'path':row['Weblogo'][:-4] if 'Weblogo' in row else value,
                    'nodes':[]
                    }
        else:
            ValueError(entity_type)
        return entity
    subgroups = []
    interested_rows = []
    root = None #the first line
    with open(csvPath) as f:
        csvreader = csv.DictReader(f)
        root = next(csvreader) #ignore the first subgroup for now, because we don't need it in the treeview hierarchy, we'll use it later
        for row in csvreader:
            # if ('Weblogo' in row and row["Weblogo"] == root_name + ".png") or ():
            #     break
            interested_rows.append(row)
    # Pass 1: create one node per distinct Subgroup value.
    #Subgroup
    idx = 0
    for row in interested_rows:
        idx += 1
        subgroup = row['Subgroup']
        if not any(g['value'] == subgroup for g in subgroups): #if subgroup not already added to the subgroups
            entity = create_entity("subgroup",subgroup,row)
            subgroups.append(entity)
    # Pass 2: attach each distinct Family under its Subgroup.
    #Family
    idx = 0
    for row in interested_rows:
        idx += 1
        family = row['Family']
        if family != '': #and not family in subgroup.nodes:
            subgroup = next(x for x in subgroups if x['value'] == row['Subgroup']) #find the first (and the only) subgroup having the subgroup name
            if not any(x for x in subgroup['nodes'] if x['value'] == family): #in subgroup['nodes']['text']:
                entity = create_entity("family",family,row)
                subgroup['nodes'].append(entity)
    # Pass 3: attach each distinct Subfamily under its Family.
    #Subfamily
    idx = 0
    for row in interested_rows:
        idx += 1
        subfamily = row['Subfamily']
        if subfamily != '':
            subgroup = next(x for x in subgroups if x['value'] == row['Subgroup'])
            if any(x for x in subgroup['nodes'] if x['value'] == row['Family']):
                family = next(x for x in subgroup['nodes'] if x['value'] == row['Family'])
                if not any(x for x in family['nodes'] if x['value'] == subfamily):
                    entity = create_entity("subfamily",subfamily,row)
                    family['nodes'].append(entity)
    # add one row for all to the beginning of the file
    # subgroups.insert(0, {
    #     "id": "id@" + root_name,
    #     "value": root_name,
    #     "path": root_name,
    #     "members": root['Members'].split(";"),
    #     "nodes": [],
    # })
    return subgroups
def write_classification(data):
    """Serialize the classification tree to jsonPath as indented JSON."""
    with open(jsonPath, 'w') as out_file:
        json.dump(data, out_file, indent=4)
    print(f"Classification {jsonPath} created.")
def prettyjson(cols,jsonPath):
    """Write *cols* to *jsonPath* as one JSON object with one "key": value per line.

    NOTE(review): the layout is produced by string surgery -- each entry is
    dumped, its outer braces stripped, and the final trailing ",\n" trimmed.
    This is fragile if a serialized value ever contains '{' or '}'; a plain
    json.dump(cols, ..., indent=...) would be safer if the exact layout is
    not required.
    """
    all_json="{"
    for prot in cols:
        # Round-trip through json.loads to get canonical key/value formatting.
        elem = json.loads("{\"" + prot + "\":" + json.dumps(cols[prot])+"}")
        all_json += '{},\n'.format(json.dumps(elem)).replace("{","").replace("}","")
    with open(jsonPath, 'w') as f_write:
        # Drop the final ",\n" and close the object.
        f_write.write(all_json[:-2] + "}")
def numbering_csv_to_json():
    """Convert numbering.csv into a per-column JSON file of int-or-null lists."""
    csvPath = f'src/{appname}/data/numbering.csv'
    jsonPath = f'src/{appname}/data/numbering.json'
    cols = dict()
    # First pass: read one row just to discover the column names.
    #Build Columns
    with open(csvPath) as f:
        csvreader = csv.DictReader(f)
        row = next(csvreader)
        for col in row:
            cols.update({col:[]})
    #cols = cols[1:]
    del cols['Align_Position'] #Remove the first column (alignment)
    # Second pass: collect every value per remaining column, with empty
    # cells stored as None.
    with open(csvPath) as f:
        csvreader = csv.DictReader(f)
        for row in csvreader:
            for el in cols:
                cols.setdefault(el,[]).append(None if not row[el] else int(row[el]))
    prettyjson(cols,jsonPath)
    print("Numbering {0} created.".format(jsonPath))
if __name__ == "__main__":
cl = classification_csv_to_json()
write_classification(cl)
numbering_csv_to_json()
| prokino/kinview | helpers/tyrosinekinase/app-csv-to-json.py | app-csv-to-json.py | py | 5,252 | python | en | code | 1 | github-code | 50 |
29380759614 | import json
import logging
from hashlib import sha256
from its_client.mobility import kmph_to_mps
TIMESTAMP_ITS_START = 1072915195000 # its timestamp starts at 2004/01/01T00:00:00.000Z
def station_id(uuid: str) -> int:
    """Derive a deterministic numeric station id from *uuid*.

    The id is the integer value of the first 6 hex digits (24 bits) of the
    SHA-256 digest of the UTF-8 encoded uuid.
    """
    logging.debug("we compute the station id for " + uuid)
    digest = sha256(uuid.encode("utf-8")).hexdigest()
    return int(digest[:6], 16)
class CooperativeAwarenessMessage:
    """ETSI-style Cooperative Awareness Message (CAM) built from SI-unit inputs.

    Converts floating-point sensor values into the integer fixed-point units
    used on the wire and serializes the full message as JSON via to_json().
    """
    def __init__(
        self,
        uuid,
        timestamp,
        latitude=0.0,
        longitude=0.0,
        altitude=0.0,
        speed=0.0,
        acceleration=None,
        heading=0.0,
    ):
        """Store the message fields, scaling each to its integer wire unit.

        Parameters
        ----------
        uuid : str
            Source identifier; also hashed into ``station_id``.
        timestamp : float
            Seconds since the Unix epoch; stored in milliseconds.
        latitude, longitude : float
            Degrees; stored as 1e-7 degree integers.
        altitude : float
            Metres; stored in centimetres.
        speed : float
            km/h; converted via kmph_to_mps then stored in cm/s.
        acceleration : float or None
            m/s^2; stored in 0.1 m/s^2 steps.  161 when unavailable —
            presumably the ETSI "unavailable" sentinel; confirm against
            the CAM specification.
        heading : float
            Degrees; stored in 0.1 degree steps.
        """
        self.uuid = uuid
        self.timestamp = int(round(timestamp * 1000))
        self.latitude = int(round(latitude * 10000000))
        self.longitude = int(round(longitude * 10000000))
        self.altitude = int(round(altitude * 100))
        self.speed = int(round(kmph_to_mps(speed) * 100))
        self.acceleration = (
            int(round(acceleration * 10)) if acceleration is not None else 161
        )
        self.heading = int(round(heading * 10))
        self.station_id = station_id(uuid)
    def generation_delta_time(self) -> int:
        """Milliseconds since the ITS epoch, wrapped to 16 bits (0..65535)."""
        return (self.timestamp - TIMESTAMP_ITS_START) % 65536
    def to_json(self) -> str:
        """Serialize the CAM fields into their JSON envelope and return it."""
        cam_json = {
            "type": "cam",
            "origin": "self",
            "version": "1.1.1",
            "source_uuid": self.uuid,
            "timestamp": self.timestamp,
            "message": {
                "protocol_version": 1,
                "station_id": self.station_id,
                "generation_delta_time": self.generation_delta_time(),
                "basic_container": {
                    # station_type 5: hard-coded; meaning not established by
                    # this file — presumably "passenger car"; confirm.
                    "station_type": 5,
                    "reference_position": {
                        "latitude": self.latitude,
                        "longitude": self.longitude,
                        "altitude": self.altitude,
                    },
                    # Confidence values below are fixed constants, not measured.
                    "confidence": {
                        "position_confidence_ellipse": {
                            "semi_major_confidence": 10,
                            "semi_minor_confidence": 50,
                            "semi_major_orientation": 1,
                        },
                        "altitude": 1,
                    },
                },
                "high_frequency_container": {
                    "heading": self.heading,
                    "speed": self.speed,
                    "longitudinal_acceleration": self.acceleration,
                    "drive_direction": 0,
                    "vehicle_length": 40,
                    "vehicle_width": 20,
                    "confidence": {"heading": 2, "speed": 3, "vehicle_length": 0},
                },
                "low_freq_container": {"vehicle_role": 2},
            },
        }
        return json.dumps(cam_json)
| Orange-OpenSource/its-client | python/its-client/its_client/cam.py | cam.py | py | 2,902 | python | en | code | 7 | github-code | 50 |
40341425538 | from stack_handler import stdout_handler, stderr_handler
import logging
from flask import Flask
# init a logger
stack_logger = logging.getLogger('stack_logger')
stack_logger.setLevel(logging.DEBUG)
# add stdout_handler、stderr_handler to logger
stack_logger.addHandler(stderr_handler)
stack_logger.addHandler(stdout_handler)
stack_logger.info('this a info message')
stack_logger.error('this is a error message')
'''
output:
{"timestamp": "2018-12-25T07:23:31.888437Z", "severity": 40, "message": "this is a error message"}
{"timestamp": "2018-12-25T07:23:31.887942Z", "severity": 20, "message": "this a info message"}
'''
# or add handler to root logger
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(stderr_handler)
root.addHandler(stdout_handler)
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint responding with a static greeting."""
    greeting = "Hello World!"
    return greeting
# default,app.logger will send log to stderr
app.run(host='0.0.0.0', port=4001)
'''
output:
* Serving Flask app "test" (lazy loading)
* Environment: production
WARNING: Do not use the development server in a production environment.
Use a production WSGI server instead.
* Debug mode: off
{"timestamp": "2018-12-25T07:26:59.281661Z", "severity": 20, "message": " * Running on http://0.0.0.0:4001/ (Press CTRL+C to quit)"}
{"timestamp": "2018-12-25T07:27:04.344244Z", "severity": 20, "message": "127.0.0.1 - - [25/Dec/2018 15:27:04] \"GET / HTTP/1.1\" 200 -"}
'''
| GHQiuJun/Python-Logger-Handler-For-StackDriver | test.py | test.py | py | 1,439 | python | en | code | 1 | github-code | 50 |
15453292678 | from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', 'core.views.home', name='home'),
url(r'^manage_team/(?P<team_id>\d+)/$', 'core.views.manage_team', name='manage-team'),
url(r'^player_search/$', 'core.views.player_search', name='player-search'),
#League
url(r'^league/(?P<league_id>\d+)/division/SOMETHINGHERE/create_team/$', 'core.views.create_team', name='create-team'),
)
| mburst/django-league | league/core/urls.py | urls.py | py | 456 | python | en | code | 7 | github-code | 50 |
20425488602 | import sys, math
# UVa 11723 "Numbering Roads": nums[0] roads must be labelled using nums[1]
# distinct numbers, each reusable with one suffix letter A..Z; print the
# minimum number of suffix letters needed, or "impossible" if more than 26.
i=1  # running test-case counter for the "Case N:" prefix
for line in sys.stdin:
    nums = []
    for word in line.split():
        nums.append(int(word))
    # A "0 0" line terminates the input per the problem statement.
    if(nums[0]==0 and nums[1]==0):
        break
    else:
        print("Case "+str(i)+": ",end="")
        # Enough plain numbers for every road: no suffix letters needed.
        if(nums[1]>nums[0]):
            print(0)
        elif(nums[0]>=nums[1]):
            res=nums[0]-nums[1]  # roads left after the unsuffixed ones
            res2=res/nums[1]     # extra (possibly fractional) passes over the numbers
            # More than 26 passes would exhaust the alphabet.
            if(res2>26):
                print("impossible")
            else:
                print(math.ceil(res2))
        i+=1
| kevinlllR/Competitive-programming | uva/11723 - Numbering Roads.py | 11723 - Numbering Roads.py | py | 378 | python | en | code | 0 | github-code | 50 |
14268476799 | import pandas as pd
from utils.ljqpy import LoadJsons,SaveJsons
import random
import unicodedata
import zhconv,emoji
dpath = './dataset/raw_data/train.csv'
df = pd.read_csv(dpath, sep='\t', encoding="utf-8")
def transfer_to_json(df,out_path):
    '''
    Convert the CSV dataframe into a JSON file for convenient later use.

    Each row of *df* becomes a dict with keys "id" (int, column 0), "text"
    (column 1) and "label" (column 2 split on commas), then the whole list
    is written out through the project-level SaveJsons helper.
    '''
    data = []
    for i in range(len(df)):
        l = {}
        l["id"] = int(df.iloc[i,0])
        l["text"] = df.iloc[i,1]
        l["label"] = df.iloc[i,2].split(",")
        # l = json.dumps(l, ensure_ascii=False)
        # fw.write(l + '\n')
        # fw.close()
        data.append(l)
    SaveJsons(data,out_path)
    return
def sep_data(file_path:str):
    '''
    Shuffle the data randomly and split it into a training set and a
    validation set; the first 5000 shuffled records become the validation
    set and the rest the training set.  Results are written to fixed paths
    under ./dataset/.
    '''
    data = []
    for xx in LoadJsons(file_path): # input is .json-formatted data
        # Clean the text at the same time (strip zero-width spaces, then
        # NFKC-normalize) and keep the result in a new 'text_normd' field.
        xx['text_normd'] = xx['text'].replace('\u200b','')
        xx['text_normd'] = unicodedata.normalize('NFKC', xx['text_normd'])
        data.append(xx)
    random.shuffle(data)
    train = data[5000:]; val = data[:5000]
    SaveJsons(train,'./dataset/train_normd.json')
    SaveJsons(val,'./dataset/val_normd.json')
cc = {'𝓪':'a','𝒶':'a','𝒜':'A','𝓐':'A','𝒂':'a','ⓐ':'a','𝐴':'A','𝑎':'a','𝗮':'a','𝗔':'A','𝟬':'0'}
fconv = {}
for x, y in cc.items():
mx = 10 if y == '0' else 26
for i in range(mx):
fconv[chr(ord(x)+i)] = chr(ord(y)+i)
def ConvertFlower(zz):
    '''
    Convert "flower" (stylized mathematical-alphanumeric) glyphs to plain
    characters by mapping each character through the module-level fconv
    table; unknown characters pass through unchanged.
    '''
    return ''.join(fconv.get(ch, ch) for ch in zz)
def Normalize(z):
    '''
    Normalize a string: convert flower glyphs to regular English letters and
    digits, remove emoji, and convert traditional Chinese to simplified.
    '''
    z = ConvertFlower(z)
    return zhconv.convert(emoji.replace_emoji(z, replace=''),'zh-cn')
if __name__ == '__main__':
random.seed(1305)
transfer_to_json(df,'./dataset/train.json')
sep_data('./dataset/train.json') | miiiiiko/wb_topic_final | datapreprocess.py | datapreprocess.py | py | 1,964 | python | en | code | 1 | github-code | 50 |
699928501 | from flask import Flask, request, Response, json, send_from_directory
import os
import pymongo
from flask_cors import cross_origin
from service import ibm_classification
from db.config import load_config
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
app = Flask(__name__, static_folder='build', static_url_path='')
config = load_config()
mongo_client = config['mongo_client']
app.config['ibm_client'] = config['ibm_client']
# Serve React App
@app.route('/', defaults={'path': ''})
#@app.route('/<path:path>')
def serve(path):
if path != "" and os.path.exists(app.static_folder + '/' + path):
return send_from_directory(app.static_folder, path)
else:
return send_from_directory(app.static_folder, 'index.html')
@app.route('/create-meeting')
def login():
return send_from_directory(app.static_folder, 'index.html')
@app.route('/get-summary')
def getSummary():
return send_from_directory(app.static_folder, 'index.html')
@app.route('/createMeeting', methods=["POST"])
@cross_origin()
def createMeeting():
req_data = request.json
meeting_id = str(req_data.get("meetingId"))
# if the meeting id already exists, return 300 response
if meeting_id in mongo_client.list_collection_names():
return Response(
response=json.dumps({'success': False}),
status=300,
mimetype="application/json"
)
# otherwise, create a new collection for this meeting and set it to active
else:
collection = mongo_client[meeting_id]
collection.insert_one({"active": True})
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/activeParticipants', methods=["GET"])
@cross_origin()
def get_active_participants():
    """Return {netId: name} for participants still pinging this meeting.

    A participant counts as dropped once their ping counter lags the current
    maximum by at least `inactivity_threshold`; dropped participants are
    deleted from the meeting collection as a side effect of this request.
    """
    meeting_id = request.args.get("meetingId")
    meeting_collection = mongo_client[str(meeting_id)]
    inactivity_threshold = 5  # max allowed ping-count lag before a peer counts as gone
    all_participants = list(meeting_collection.find({"type": "ping"}).sort("pingCount", pymongo.ASCENDING))
    active_participant_netids = []
    # if there are multiple participants, check ping count difference to determine if anyone has dropped
    if len(all_participants) > 1:
        max_ping_count = all_participants[-1]['pingCount']  # sorted ascending, so last entry holds the max
        for participant in all_participants:
            # if participant dropped, remove participant from database
            if max_ping_count - participant['pingCount'] >= inactivity_threshold:
                meeting_collection.delete_one({"type": "ping", "netId": participant["netId"]})
            # participant is active
            else:
                active_participant_netids.append(participant["netId"])
    elif len(all_participants) == 1:
        # a lone participant has no peer to compare against, so always active
        active_participant_netids = [all_participants[0]["netId"]]
    consent_collection = mongo_client["consent"]
    query = {"meetingId": meeting_id}
    records = list(consent_collection.find(query))
    # map names to netIDs
    active_participants = {}
    for entry in records:
        if entry["netId"] in active_participant_netids:
            active_participants[entry["netId"]] = entry["name"]
    # return names and netIDs of active participants
    return Response(
        response=json.dumps(active_participants),
        status=200,
        mimetype="application/json"
    )
@app.route('/userconsent', methods=["POST"])
@cross_origin()
def consent():
req_data = request.json
name = req_data.get("name")
net_id = req_data.get('netId')
meeting_id = req_data.get('meetingId')
# check if meeting id exists and is active
meeting_collection = mongo_client[meeting_id]
if meeting_id not in mongo_client.list_collection_names():
return Response(
response=json.dumps({'success': False}),
status=300,
mimetype="application/json"
)
else:
data = list(meeting_collection.find({"active": True}))
if not data:
return Response(
response=json.dumps({'success': False}),
status=300,
mimetype="application/json"
)
# insert participant into consent collection
consent_collection = mongo_client["consent"]
consent_collection.insert_one({"name": name, "netId": net_id, "meetingId": meeting_id})
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/submitChoices', methods=["POST"])
@cross_origin()
def submit_choices():
req_data = request.json
net_id = req_data.get('netId')
meeting_id = req_data.get('meetingId')
choices = req_data.get('choices')
timestamp = req_data.get("timestamp")
collection = mongo_client[str(meeting_id)]
# insert user choices in DB
collection.insert_one({"netId": net_id, "choices": choices, "timestamp": timestamp, "type": "choices"})
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/submittedParticipants', methods=["GET"])
@cross_origin()
def submitted_participants():
meeting_id = str(request.args.get("meetingId"))
collection = mongo_client[meeting_id]
query = {"type": {"$eq":"choices"}}
records = list(collection.find(query))
# get netIDs of participants that have submitted rankings
net_ids = []
for entry in records:
if entry["netId"] not in net_ids:
net_ids.append(entry["netId"])
consent_collection = mongo_client["consent"]
query = {"meetingId": meeting_id}
records = list(consent_collection.find(query))
# map names to netIDs
netIds_to_names = {}
for entry in records:
if entry["netId"] in net_ids:
netIds_to_names[entry["netId"]] = entry["name"]
return Response(
response=json.dumps(netIds_to_names),
status=200,
mimetype="application/json"
)
@app.route('/participantCounts', methods=["GET"])
@cross_origin()
def participant_counts():
meeting_id = str(request.args.get("meetingId"))
collection = mongo_client[meeting_id]
consent_collection = mongo_client["consent"]
query = {"meetingId": meeting_id}
consent_records = list(consent_collection.find(query))
query = {"type": {"$eq":"data"}}
records = list(collection.find(query))
word_counts = {}
turn_counts = {}
names = {}
# map each netID to the number of words they have spoken and turns they have taken
# we are treating each document in the meeting's collection as a turn
for entry in consent_records:
names[entry["netId"]] = entry["name"]
for entry in records:
net_id = entry["netId"]
num_words = len(entry["text"].split())
if net_id not in word_counts:
word_counts[net_id] = num_words
turn_counts[net_id] = 1
else:
word_counts[net_id] += num_words
turn_counts[net_id] += 1
# compute time silent
query = {"type": {"$eq":"silent"}}
records = list(collection.find(query))
time_silent_counts = {}
for entry in records:
net_id = entry['netId']
time_silent = entry['timeSilent']
# convert time silent in seconds to mm:ss string
seconds = str(time_silent%60)
mins = str(time_silent//60)
padded_seconds = '0'*(2-len(seconds)) + seconds
padded_mins = '0'*(2-len(mins)) + mins
time_silent_counts[net_id] = padded_mins + ':' + padded_seconds
# ensure that all netIDs are present for each count to ensure data consistency
all_net_ids = set()
for net_id in word_counts.keys():
all_net_ids.add(net_id)
for net_id in time_silent_counts.keys():
all_net_ids.add(net_id)
# hardcode 0 values for any missing netIds
for net_id in all_net_ids:
if net_id not in word_counts.keys():
word_counts[net_id] = 0
if net_id not in turn_counts.keys():
turn_counts[net_id] = 0
if net_id not in time_silent_counts.keys():
time_silent_counts[net_id] = '00:00'
return Response(
response=json.dumps({'wordCounts': word_counts,
'turnCounts': turn_counts,
'timeSilent': time_silent_counts,
'names': names }),
status=200,
mimetype="application/json"
)
@app.route('/pollconversation', methods=["POST"])
@cross_origin()
def poll_conversation():
req_data = request.json
net_id = req_data.get('netId')
meeting_id = req_data.get('meetingId')
text = req_data.get('text')
timestamp = req_data.get("timestamp")
collection = mongo_client[str(meeting_id)]
# If text is not provided or less than 3 words, do nothing
if not text or len(text.split()) <= 3:
return Response(
response=json.dumps({
'emotions': {
"excited": 0,
"frustrated": 0,
"impolite": 0,
"polite": 0,
"sad": 0,
"satisfied": 0,
"sympathetic": 0
},
}),
status=204,
mimetype="application/json"
)
else:
# store participant's speech in DB
collection.insert_one({"netId": net_id, "text": text, "timestamp": timestamp, "type": "data"})
# Get emotions of participant
result = ibm_classification.classify(text)
return Response(
response=json.dumps({'emotions': result}),
status=200,
mimetype="application/json"
)
@app.route('/incrementPingCount', methods=["POST"])
@cross_origin()
def increment_ping_count():
req_data = request.json
net_id = req_data.get('netId')
meeting_id = req_data.get('meetingId')
collection = mongo_client[str(meeting_id)]
query = {"type": {"$eq":"ping"}, "netId": {"$eq":net_id}}
record = collection.find_one(query)
# if user already has a ping count, increment it
if record:
update_query = {"$set": {"pingCount": record['pingCount']+1}}
collection.update_one(query, update_query)
# otherwise, add new ping entry
else:
query = {"type": "ping"}
record = collection.find_one(query)
# if no other ping counts exist, make ping count 1
if not record:
collection.insert_one({"netId": net_id, "pingCount": 1, "type": "ping"})
# otherwise, synchronize with existing ping count
else:
collection.insert_one({"netId": net_id, "pingCount": record['pingCount'], "type": "ping"})
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/setTimeSilent', methods=["POST"])
@cross_origin()
def set_time_silent():
req_data = request.json
net_id = req_data.get('netId')
meeting_id = req_data.get('meetingId')
time_silent = req_data.get('newTimeSilent')
collection = mongo_client[str(meeting_id)]
# update the time silent for given netID
query = {"type": {"$eq":"silent"}, "netId": {"$eq":net_id}}
if collection.find_one(query):
update_query = {"$set": {"timeSilent": time_silent}}
collection.update_one(query, update_query)
else:
collection.insert_one({"netId": net_id, "timeSilent": time_silent, "type": "silent"})
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/transcript', methods=["GET"])
@cross_origin()
def transcript():
    """Return the meeting's conversation as one newline-terminated string."""
    meeting_id = str(request.args.get("meetingId"))
    # Pull every speech record for this meeting in chronological order.
    records = list(
        mongo_client[meeting_id]
        .find({"type": "data"})
        .sort("timestamp", pymongo.ASCENDING)
    )
    # Stitch "<netId>: <text>" lines together, skipping empty utterances.
    lines = [
        record['netId'] + ": " + record["text"] + '\n'
        for record in records
        if record['text']
    ]
    body = json.dumps({'transcript': "".join(lines)})
    return Response(response=body, status=200, mimetype="application/json")
@app.route('/endMeeting', methods=["POST"])
@cross_origin()
def endMeeting():
req_data = request.json
meeting_id = str(req_data.get("meetingId"))
collection = mongo_client[meeting_id]
# signify end of meeting by setting 'active' to False
query = {"active": True}
new_values = {"$set": {"active": False}}
collection.update_one(query, new_values)
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/finish', methods=["POST"])
@cross_origin()
def finish():
req_data = request.json
netId = req_data.get('netId')
meetingId = req_data.get('meetingId')
collection = mongo_client[str(meetingId)]
# signify participant leaving meeting by deleting their ping count
query = {"type": {"$eq":"ping"}, "netId": {"$eq":netId}}
collection.delete_one(query)
return Response(
response=json.dumps({'success': True}),
status=200,
mimetype="application/json"
)
@app.route('/keywords', methods=["POST"])
@cross_origin()
def keywords():
req_data = request.json
meeting_id = req_data.get('meetingId')
# query text data and sort by timestamp
collection = mongo_client[str(meeting_id)]
data = list(collection.find({"type": "data"}).sort("timestamp", pymongo.ASCENDING))
if not data:
return Response(
response=json.dumps({'keywords': "Meeting not found"}),
status=404,
mimetype="application/json"
)
# patch text together
conversation = ""
for d in data:
conversation += d["text"] + "\n"
# extract keywords from the conversation
keywords = ibm_classification.extract_keywords(conversation)
return Response(
response=json.dumps({'keywords': keywords}),
status=200,
mimetype="application/json"
)
@app.route('/summary', methods=["POST"])
@cross_origin()
def summary():
    """Build an extractive summary of a meeting's recorded speech.

    Scores each sentence by the summed frequency of the non-stopword terms
    it contains and keeps sentences scoring above 1.2x the average score.
    Returns 404 if the meeting has no recorded speech.

    Fix: the original computed ``int(sumValues / len(sentence_value))``
    unconditionally, raising ZeroDivisionError (HTTP 500) whenever no
    sentence scored — e.g. when all recorded text is empty or stopwords.
    """
    req_data = request.json
    meetingId = req_data.get('meetingId')
    # query text data and sort by timestamp
    collection = mongo_client[str(meetingId)]
    data = list(collection.find({"type": "data"}).sort("timestamp", pymongo.ASCENDING))
    if not data:
        return Response(
            response=json.dumps({'summary': "Meeting not found"}),
            status=404,
            mimetype="application/json"
        )
    # patch the individual utterances together into one document
    conversation = "".join(d["text"] + "\n" for d in data)
    sp = set(stopwords.words("english"))
    words = word_tokenize(conversation)
    # term frequency over non-stopword tokens (case-folded)
    freqTable = dict()
    for word in words:
        word = word.lower()
        if word in sp:
            continue
        freqTable[word] = freqTable.get(word, 0) + 1
    sentences = sent_tokenize(conversation)
    # score each sentence by the summed frequency of the terms it contains
    sentence_value = dict()
    for sentence in sentences:
        lowered = sentence.lower()  # hoisted out of the inner loop
        for word, freq in freqTable.items():
            if word in lowered:
                sentence_value[sentence] = sentence_value.get(sentence, 0) + freq
    summary = ''
    if sentence_value:  # guard against division by zero (see docstring)
        average = int(sum(sentence_value.values()) / len(sentence_value))
        for sentence in sentences:
            if (sentence in sentence_value) and (sentence_value[sentence] > (1.2 * average)):
                summary += " " + sentence
    return Response(
        response=json.dumps({'summary': summary}),
        status=200,
        mimetype="application/json"
    )
if __name__ == '__main__':
app.run()
| mocup/conv-agent | convo-BE/app.py | app.py | py | 16,239 | python | en | code | 0 | github-code | 50 |
class Flower:
    """Minimal demo class carrying a single class-level default color."""

    # Class attribute; shadowed per-instance once assigned on an object.
    color = 'unknown'
rose = Flower()
rose.color = "red"
violet = Flower()
violet.color = "blue"
this_pun_is_for_you = "Darling, sweet I love you"
print("Roses are {},".format(rose.color))
print("violets are {},".format(violet.color))
print(this_pun_is_for_you)
class Dog:
    """Demo class converting a dog's calendar age into 'dog years'."""

    # Age in calendar years; class-level default until set on an instance.
    years = 0

    def dog_years(self):
        """Return the age multiplied by the classic 7:1 factor."""
        return 7 * self.years
fido=Dog()
fido.years=3
print(fido.dog_years())
class Person:
    """A person identified by a name, able to introduce itself."""

    def __init__(self, name):
        self.name = name

    def greeting(self):
        """Return "hi, my name is " followed by the name of the Person."""
        return f"hi, my name is {self.name}"
# Create a new instance with a name of your choice
some_person = Person("Luke")
# Call the greeting method
print(some_person.greeting())
class Person:
    """A named person whose greeting is printed rather than returned."""

    def __init__(self, name):
        self.name = name

    def greeting(self):
        """Outputs a message with the name of the person"""
        message = "Hello! My name is {name}.".format(name=self.name)
        print(message)
help(Person) | artemis-p/Python_practise | OOP_Classes.py | OOP_Classes.py | py | 992 | python | en | code | 0 | github-code | 50 |
38060423937 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
from funktion import main
from funktion import query_table
from funktion import insert_table_batch
from funktion import query_table_id
from funktion import delete_table_id
from funktion import query_table_ele
from funktion import gps_map_marker
server = "127.0.0.1"
user = "ATPbaum"
password = "ATPbaum"
database = "Baum"
mssql = main(server, user, password, database)
table_name= 'baum_test'
baum = {'tag_id': 'id00001',
'device_id': '23did1204',
'GPS': '50.783067, 6.045786',
'date': '09.01.2020 9:46:23'
}
baum2 = {'tag_id': 'id00001',
'device_id': 'rgs23451',
'GPS': '50.785067, 6.047786',
'date': '09.01.2020 9:50:23'
}
baum3 = {'tag_id': 'id00001',
'device_id': '23did1204',
'GPS': '50.783667, 6.049786',
'date': '09.01.2020 10:46:23'
}
baum4 = {'tag_id': 'id00001',
'device_id': '34523',
'GPS': '50.783067, 6.055786',
'date': '09.01.2020 18:46:23'
}
baum_list = []
baum_list.extend([baum, baum2, baum3, baum4])
tag_id = 'id00001'
delete_table_id(tag_id)
insert_table_batch(baum_list)
print(query_table(table_name))
print(query_table_ele('device_id', 'rgs23451'))
i = query_table_id(tag_id)
gps_map_marker(i)
| Muzhai/ATP | Baum/test.py | test.py | py | 1,267 | python | en | code | 0 | github-code | 50 |
38616204720 | # Даны два натуральных числа n и m.
# Сократите дробь (n / m), то есть выведите два других числа
# p и q таких, что (n / m) = (p / q) и дробь (p / q) — несократимая.
# Решение оформите в виде функции ReduceFraction(n, m),
# получающая значения n и m и возвращающей кортеж из двух чисел: return p, q.
# Тогда вывод можно будет оформить как print(*ReduceFraction(n, m)).
def gcd(a, b):
    """Return the greatest common divisor of non-negative integers a and b.

    Replaces the original hand-rolled recursion with the iterative Euclidean
    algorithm: identical results for all non-negative inputs (including
    gcd(0, 0) == 0) without any risk of hitting the recursion limit, and
    fewer special cases to read.
    """
    while b:
        a, b = b, a % b
    return a
def ReduceFraction(n, m):
    """Return (p, q) such that n/m == p/q and p/q is fully reduced.

    Uses the C-implemented math.gcd instead of the sibling recursive helper;
    results are identical for natural n and m, and this function no longer
    depends on another definition in the file.
    """
    from math import gcd as _gcd  # local import keeps the module surface unchanged
    d = _gcd(n, m)
    return n // d, m // d
x, y = int(input()), int(input())
print(*ReduceFraction(x, y))
| AnnaSmelova/Python_programming_basics_course | week4/16_reduce_fraction.py | 16_reduce_fraction.py | py | 932 | python | ru | code | 1 | github-code | 50 |
26297115808 | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Int16
from std_msgs.msg import Float32
# from sensor_msgs.msg import JointState
# from sensor_msgs.msg import Image
import cv2
from nav_msgs.msg import Odometry
# from mav_msgs.msg import Actuators
# from geometry_msgs.msg import Twist
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import Vector3
# from sensor_msgs.msg import Range
from sensor_msgs.msg import Imu
from geometry_msgs.msg import PoseStamped,Pose
from std_msgs.msg import Empty
# from trajectory_msgs.msg import MultiDOFJointTrajectory
from openai_ros.openai_ros_common import ROSLauncher
from rotors_control.srv import *
from visualization_msgs.msg import Marker
from tf import TransformListener
from geometry_msgs.msg import Point
import tf.transformations as transformations
from tf.transformations import euler_from_quaternion
from scipy.io import savemat
import numba as nb
import math
import os
from numba.typed import List
import tf
from gazebo_msgs.srv import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from gazebo_msgs.msg import ModelState
@nb.jit(nopython=True)
def parallel_process_point_cloud(trans, rot, data):
    """Transform an (N, 3) point cloud into the target frame (numba nopython).

    Applies the rotation described by quaternion ``rot`` followed by the
    translation ``trans`` to every row of ``data``.  The quaternion-to-matrix
    code from tf.transformations is inlined here rather than called —
    presumably because numba's nopython mode cannot call it; confirm.

    Args:
        trans: length-3 translation (x, y, z).
        rot: length-4 quaternion, in the (x, y, z, w) order produced by tf
             lookups — TODO confirm against the caller.
        data: (N, 3) array-like of points.

    Returns:
        list of [x, y, z] transformed points.
    """
    EPS = 2.220446049250313e-16 * 4.0  # same epsilon as tf.transformations
    new_points = []
    for i in range(data.shape[0]):
        pt = [data[i][0],data[i][1],data[i][2]]
        ##########################
        # adapt from https://answers.ros.org/question/249433/tf2_ros-buffer-transform-pointstamped/
        quat = [
            rot[0],
            rot[1],
            rot[2],
            rot[3]
        ]
        ##########################
        # Return homogeneous rotation matrix from quaternion from https://github.com/davheld/tf/blob/master/src/tf/transformations.py
        q = numpy.array(quat[:4], dtype=numpy.float64)
        nq = numpy.dot(q, q)
        if nq < EPS:
            # Near-zero quaternion: treat as no rotation.
            mat = numpy.identity(4)
        else:
            q *= math.sqrt(2.0 / nq)
            q = numpy.outer(q, q)
            mat = numpy.array((
                (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0),
                ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0),
                ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
                ( 0.0, 0.0, 0.0, 1.0)
                ), dtype=numpy.float64)
        ##########################
        # Rotate the homogeneous point, then add the translation component-wise.
        pt_np = [pt[0], pt[1], pt[2], 1.0]
        pt_in_map_np = numpy.dot(mat, numpy.array(pt_np))
        pt_in_map_x = pt_in_map_np[0] + trans[0]
        pt_in_map_y = pt_in_map_np[1] + trans[1]
        pt_in_map_z = pt_in_map_np[2] + trans[2]
        new_pt = [pt_in_map_x,pt_in_map_y,pt_in_map_z]
        ##########################
        # new_pt = transform_point(trans,rot, pt)
        new_points.append(new_pt)
    return new_points
class FireflyDroneEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new FireflyDroneEnv environment.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /Firefly_1/odometry_sensor1/odometry
* /Firefly_1/command/motor_speed
* /Firefly_2/odometry_sensor1/odometry
* /Firefly_2/command/motor_speed
Args:
"""
rospy.logdebug("Start FireflyDroneEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# Internal Vars
# Doesnt have any accesibles
self.counter = 0
self.counter1 = 0
self.controllers_list = []
self.shutdown_joy = 0
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(FireflyDroneEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
# ROSLauncher(rospackage_name="rotors_gazebo",
# launch_file_name="crazyflie2_swarm_transport_example_2_agents.launch",
# ros_ws_abspath=ros_ws_abspath)
# self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/firefly_1/ground_truth/imu", Imu, self._imu_callback1)
rospy.Subscriber("/firefly_1/odometry_sensor1/odometry", Odometry, self._odometry_callback1)
# self._cmd_motor_pub1 = rospy.Publisher('/firefly_1/command/motor_speed', Actuators, queue_size=1)
self._cmd_pos_pub1 = rospy.Publisher('/firefly_1/cmd_pos', PoseStamped, queue_size=1)
rospy.Subscriber("/firefly_2/ground_truth/imu", Imu, self._imu_callback2)
rospy.Subscriber("/firefly_2/odometry_sensor1/odometry", Odometry, self._odometry_callback2)
# self._cmd_motor_pub2 = rospy.Publisher('/firefly_2/command/motor_speed', Actuators, queue_size=1)
self._cmd_pos_pub2 = rospy.Publisher('/firefly_2/cmd_pos', PoseStamped, queue_size=1)
rospy.Subscriber("/goal_pos", Vector3, self._joy_goal_callback)
rospy.Subscriber("/shutdown_signal", Int16, self._shutdown_collect_callback)
rospy.Subscriber('/bar/ground_truth/odometry', Odometry, self._bar_callback)
# kinect cameras:top and front
# self.tf = TransformListener()
# rospy.Subscriber("/camera_ir_top/camera/depth/points", PointCloud2, self._point_callback_top)
# rospy.wait_for_service('/gazebo/set_model_state')
# self.set_state_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
# rospy.Timer(rospy.Duration(0.15), self.set_pos_callback)
# rospy.Timer(rospy.Duration(0.2), self.set_pos_callback_depth) #0.15
# self.image_sub = rospy.Subscriber("/camera_ir_top/camera/depth/image_raw",Image,self.depth_callback_realtime)
# self.bridge = CvBridge()
# self.image_sub = rospy.Subscriber("/camera_ir_top/camera/depth/image_raw",Image,self.depth_callback)
# self.bridge = CvBridge()
self.goal_pub_makers = rospy.Publisher('/goal_makers', Marker, queue_size=10)
self.goal_pub_makers_c = rospy.Publisher('/corrective_goal_makers', Marker, queue_size=10)
self.action_pub_makers_c = rospy.Publisher('/action_maker_c', Marker, queue_size=10)
self.action_pub_makers = rospy.Publisher('/action_maker', Marker, queue_size=10)
self.action_sequence_pub_makers = rospy.Publisher('/action_seq_maker', Marker, queue_size=100)
self.action_sequence_pub_makers1 = rospy.Publisher('/action_seq_maker1', Marker, queue_size=100)
self.pause_controller = rospy.Publisher('/pause_controller', Int16, queue_size=1)
self.wind_controller_x = rospy.Publisher('/wind_force_x', Float32, queue_size=1)
self.wind_controller_y = rospy.Publisher('/wind_force_y', Float32, queue_size=1)
self._check_all_publishers_ready()
self.gazebo.pauseSim()
self.goal_joy = numpy.array([1.0,0.0,1.0])
# self.space_3d = rospy.get_param("/firefly/3d_space")
self.xy = rospy.get_param("/firefly/xy")
rospy.logdebug("Finished FireflyEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def set_pos_callback(self,event):
data = self.get_bar_odometry()
b_pos = data.pose.pose.position
objstate = SetModelStateRequest() # Create an object of type SetModelStateRequest
# set red cube pose
objstate.model_state.model_name = "kinect_ros_3"
objstate.model_state.pose.position.x = b_pos.x
objstate.model_state.pose.position.y = b_pos.y
objstate.model_state.pose.position.z = 3.0
objstate.model_state.pose.orientation.w = 0.70738827
objstate.model_state.pose.orientation.x = 0
objstate.model_state.pose.orientation.y = 0.70682518
objstate.model_state.pose.orientation.z = 0
objstate.model_state.twist.linear.x = 0.0
objstate.model_state.twist.linear.y = 0.0
objstate.model_state.twist.linear.z = 0.0
objstate.model_state.twist.angular.x = 0.0
objstate.model_state.twist.angular.y = 0.0
objstate.model_state.twist.angular.z = 0.0
objstate.model_state.reference_frame = "world"
result = self.set_state_service(objstate)
#this callback function is for depth camera
def set_pos_callback_depth(self,event):
data = self.get_bar_odometry()
b_pos = data.pose.pose.position
state_msg = ModelState()
state_msg.model_name = 'kinect_ros_3'
state_msg.pose.position.x = b_pos.x
state_msg.pose.position.y = b_pos.y
state_msg.pose.position.z = 3.0
state_msg.pose.orientation.x = -0.5
state_msg.pose.orientation.y = 0.5
state_msg.pose.orientation.z = 0.5
state_msg.pose.orientation.w = 0.5
state_msg.twist.linear.x = 0.0
state_msg.twist.linear.y = 0.0
state_msg.twist.linear.z = 0.0
state_msg.twist.angular.x = 0.0
state_msg.twist.angular.y = 0.0
state_msg.twist.angular.z = 0.0
state_msg.reference_frame = "world"
result = self.set_state_service(state_msg)
def set_pos_callback_depth_loop(self):
data = self.get_bar_odometry()
b_pos = data.pose.pose.position
state_msg = ModelState()
state_msg.model_name = 'kinect_ros_3'
state_msg.pose.position.x = b_pos.x
state_msg.pose.position.y = b_pos.y
state_msg.pose.position.z = 3.0
state_msg.pose.orientation.x = -0.5
state_msg.pose.orientation.y = 0.5
state_msg.pose.orientation.z = 0.5
state_msg.pose.orientation.w = 0.5
state_msg.twist.linear.x = 0.0
state_msg.twist.linear.y = 0.0
state_msg.twist.linear.z = 0.0
state_msg.twist.angular.x = 0.0
state_msg.twist.angular.y = 0.0
state_msg.twist.angular.z = 0.0
state_msg.reference_frame = "world"
result = self.set_state_service(state_msg)
def set_pos_callback_cloud_loop(self):
data = self.get_bar_odometry()
b_pos = data.pose.pose.position
state_msg = ModelState()
state_msg.model_name = 'kinect_ros_3'
state_msg.pose.position.x = b_pos.x
state_msg.pose.position.y = b_pos.y
state_msg.pose.position.z = 3.0
state_msg.pose.orientation.x = 0
state_msg.pose.orientation.y = 0.70682518
state_msg.pose.orientation.z = 0
state_msg.pose.orientation.w = 0.70738827
state_msg.twist.linear.x = 0.0
state_msg.twist.linear.y = 0.0
state_msg.twist.linear.z = 0.0
state_msg.twist.angular.x = 0.0
state_msg.twist.angular.y = 0.0
state_msg.twist.angular.z = 0.0
state_msg.reference_frame = "world"
result = self.set_state_service(state_msg)
    def _bar_callback(self,data):
        """Odometry callback for the bar: cache the message and broadcast the
        TF frame of the tracking camera hovering above the bar."""
        self.bar_odometry = data
        b_pos = data.pose.pose.position
        br = tf.TransformBroadcaster()
        # br.sendTransform((b_pos.x, b_pos.y, 3.0),
        #             tf.transformations.quaternion_from_euler(0, 1.57, 0),
        #             rospy.Time.now(),
        #             "kinect_camera",
        #             "world")
        # world -> camera_link at 3.0 above the bar; RPY (0, pi, pi/2) —
        # presumably matches the Gazebo camera model pose; TODO confirm
        br.sendTransform((b_pos.x, b_pos.y, 3.0),
                    tf.transformations.quaternion_from_euler(0.0, 3.14, 1.57),
                    rospy.Time.now(),
                    "camera_link",
                    "world")
def _joy_goal_callback(self,data):
if data.x >0:
self.goal_joy[0] -= 0.03
elif data.x < 0:
self.goal_joy[0] += 0.03
else:
pass
if data.y >0:
self.goal_joy[1] += 0.03
elif data.y < 0:
self.goal_joy[1] -= 0.03
else:
pass
if data.z >0:
self.goal_joy[2] += 0.03
elif data.z < 0:
self.goal_joy[2] -= 0.03
else:
pass
    def _shutdown_collect_callback(self,data):
        # Joystick flag used to request shutdown of the data collection.
        self.shutdown_joy = data.data
    def _check_all_systems_ready(self):
        """
        Checks that all the sensors, publishers and other simulation systems are
        operational.

        Returns True once every sensor topic has published at least once.
        """
        self._check_all_sensors_ready()
        return True
    # CubeSingleDiskEnv virtual methods
    # ----------------------------
    def _check_all_sensors_ready(self):
        """Block until the IMU and odometry topics are all publishing."""
        rospy.logdebug("START ALL SENSORS READY")
        self._check_imu_ready()
        self._check_odometry_ready()
        rospy.logdebug("ALL SENSORS READY")
def _check_odometry_ready(self):
self.odometry1 = None
rospy.logdebug("Waiting for /firefly_1/odometry_sensor1/odometry")
self.odometry2 = None
rospy.logdebug("Waiting for /firefly_2/odometry_sensor1/odometry")
self.bar_odometry = None
rospy.logdebug("Waiting for /bar/ground_truth/odometry")
while self.odometry1 is None and not rospy.is_shutdown():
try:
self.odometry1 = rospy.wait_for_message("/firefly_1/odometry_sensor1/odometry", Odometry, timeout=5.0)
rospy.logdebug("Current/firefly_1/odometry_sensor1/odometry READY=>")
except:
rospy.logerr("Current /firefly_1/odometry_sensor1/odometry not ready yet, retrying for getting later")
while self.odometry2 is None and not rospy.is_shutdown():
try:
self.odometry2 = rospy.wait_for_message("/firefly_2/odometry_sensor1/odometry", Odometry, timeout=5.0)
rospy.logdebug("Current/firefly_2/odometry_sensor1/odometry READY=>")
except:
rospy.logerr("Current /firefly_2/odometry_sensor1/odometry not ready yet, retrying for getting later")
while self.bar_odometry is None and not rospy.is_shutdown():
try:
self.bar_odometry = rospy.wait_for_message("/bar/ground_truth/odometry", Odometry, timeout=5.0)
rospy.logdebug("Current/bar/ground_truth/odometry READY=>")
except:
rospy.logerr("Current /bar/ground_truth/odometry not ready yet, retrying for getting later")
def _check_imu_ready(self):
self.imu1 = None
rospy.logdebug("Waiting for /firefly_1/ground_truth/imu to be READY...")
self.imu2 = None
rospy.logdebug("Waiting for /firefly_2/ground_truth/imu to be READY...")
while self.imu1 is None and not rospy.is_shutdown():
try:
self.imu1 = rospy.wait_for_message("/firefly_1/ground_truth/imu", Imu, timeout=5.0)
rospy.logdebug("Current/firefly_1/ground_truth/imu READY=>")
except:
rospy.logerr(
"Current /firefly_1/ground_truth/imu not ready yet, retrying for getting imu")
while self.imu2 is None and not rospy.is_shutdown():
try:
self.imu2 = rospy.wait_for_message("/firefly_2/ground_truth/imu", Imu, timeout=5.0)
rospy.logdebug("Current/firefly_2/ground_truth/imu READY=>")
except:
rospy.logerr(
"Current /firefly_2/ground_truth/imu not ready yet, retrying for getting imu")
    def _imu_callback1(self, data):
        # Cache the newest IMU reading of firefly_1.
        self.imu1 = data
    def _imu_callback2(self, data):
        # Cache the newest IMU reading of firefly_2.
        self.imu2 = data
    def _odometry_callback1(self, data):
        # Cache the newest odometry of firefly_1.
        self.odometry1 = data
    def _odometry_callback2(self, data):
        # Cache the newest odometry of firefly_2.
        self.odometry2 = data
    def depth_callback(self,data):
        """Depth-image callback: normalise each frame to [0, 1] and dump it to
        a per-frame .mat file for offline processing.

        NOTE(review): the wind condition, number of agents and bar length L in
        the output path are hard coded — keep them in sync with the experiment.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "32FC1")
            cv_image_array = np.array(cv_image, dtype = np.dtype('f8'))
            # min-max normalisation in place into [0, 1]
            cv_image_norm = cv2.normalize(cv_image_array, cv_image_array, 0, 1, cv2.NORM_MINMAX)
            # cv2.imshow("Image window", cv_image_norm)
            # cv2.waitKey(3)
            folder_path = "/home/wawa/catkin_meta/src/MBRL_transport/depth_images"
            wind_condition_x = 0.8
            wind_condition_y = 0.0
            L = 1.2
            fileName = folder_path+"/wind"+ "_x"+str(wind_condition_x) + "_y"+str(wind_condition_y)
            fileName += "_" + str(2) + "agents"+"_"+"L"+str(L)
            if not os.path.exists(fileName):
                os.makedirs(fileName)
            # print(cv_image_norm.shape)
            dic_d = {"depth":cv_image_norm}
            # one file per frame, indexed by a running counter
            savemat(fileName+"/{0}.mat".format(self.counter1), dic_d)
            self.counter1+=1
        except CvBridgeError as e:
            print(e)
    def depth_callback_realtime(self,data):
        """Depth-image callback for online use: keep only the latest
        normalised depth map in memory (no disk I/O)."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "32FC1")
            cv_image_array = np.array(cv_image, dtype = np.dtype('f8'))
            cv_image_norm = cv2.normalize(cv_image_array, cv_image_array, 0, 1, cv2.NORM_MINMAX)
            self.cv_image_norm = cv_image_norm
            # cv2.imshow("Image window", cv_image_norm)
            # cv2.waitKey(3)
        except CvBridgeError as e:
            print(e)
    def get_depth_map(self):
        # Latest depth map stored by depth_callback_realtime.
        return self.cv_image_norm
def _point_callback_top(self, data):
# We get the laser scan data
u1_odm = self.get_odometry1()
u2_odm = self.get_odometry2()
bar_odm = self.get_bar_odometry()
b_roll, b_pitch, b_yaw = self.get_orientation_euler1(bar_odm.pose.pose.orientation)
b_pos = bar_odm.pose.pose.position
uav1_pos = u1_odm.pose.pose.position
uav2_pos = u2_odm.pose.pose.position
max_x = 4
max_y = 2
max_z = 2
#also track the two drones
observations = [round(uav1_pos.x,8)/max_x,
round(uav1_pos.y,8)/max_y,
round(uav1_pos.z,8)/max_z,
round(uav2_pos.x,8)/max_x,
round(uav2_pos.y,8)/max_y,
round(uav2_pos.z,8)/max_z,
round(b_pos.x,8)/max_x,
round(b_pos.y,8)/max_y,
round(b_pos.z,8)/max_z,
round(b_roll,8),
round(b_pitch,8),
round(b_yaw,8)]
configuration_sys = numpy.array(observations)
points_top = data
# transform points from camera_link to world
try:
(trans,rot) = self.tf.lookupTransform("/world", "/camera_link", rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("tf transform error!!!")
data_p = list(point_cloud2.read_points(points_top, field_names=('x', 'y', 'z'), skip_nans=True))
# # print(len(list(data_p)))
# # print(len(list(data_p))>0)
# data_p = list(data_p)
new_points = parallel_process_point_cloud(List(trans),List(rot),numpy.array(data_p))
# save data
# fileName = "/home/wawa/catkin_meta/src/MBRL_transport/point_clouds_and_configurations_additional/firefly_points_3d"
fileName = "/home/wawa/catkin_meta/src/MBRL_transport/point_clouds_obs/firefly_points_3d"
#wind speed: 0.0, 0.3, 0.5, 0.8
wind_condition_x = 0.0
wind_condition_y = 0.0
L = 0.6
fileName += "_wind"+ "_x"+str(wind_condition_x) + "_y"+str(wind_condition_y)
fileName += "_" + str(2) + "agents"+"_"+"L"+str(L)
if not os.path.exists(fileName):
os.makedirs(fileName)
fileName1 = fileName + "/"+str(self.counter)+".mat"
# we need to transform the points into the world coordinate before saving it
# Pxy1 = np.array(Pxy)[:,[1,0,2]]
# Pxy1[:,1] = -Pxy1[:,1]
mdic = {"configuration": configuration_sys, "top":numpy.array(new_points)}
savemat(fileName1, mdic)
self.counter+=1
    def _check_all_publishers_ready(self):
        """
        Checks that all the publishers are working
        (currently only the two position-command publishers).
        :return:
        """
        rospy.logdebug("START ALL SENSORS READY")
        self._check_cmd_pos_pub_connection()
        rospy.logdebug("ALL SENSORS READY")
def _check_cmd_pos_pub_connection(self):
rate1 = rospy.Rate(10) # 10hz
rate2 = rospy.Rate(10) # 10hz
while self._cmd_pos_pub1.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _cmd_pos_pub1 yet so we wait and try again")
try:
rate1.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_cmd_pos_pub1 Publisher Connected")
while self._cmd_pos_pub2.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _cmd_pos_pub2 yet so we wait and try again")
try:
rate2.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_cmd_pos_pub2 Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
    def _set_init_pose(self):
        """Sets the Robot in its init pose
        """
        raise NotImplementedError()
    def _init_env_variables(self):
        """Inits variables needed to be initialised each time we reset at the start
        of an episode.
        """
        raise NotImplementedError()
    def _compute_reward(self, observations, done):
        """Calculates the reward to give based on the observations given.
        """
        raise NotImplementedError()
    def _set_action(self, action):
        """Applies the given action to the simulation.
        """
        raise NotImplementedError()
    def _get_obs(self):
        """Returns the observation vector for the current step."""
        raise NotImplementedError()
    def _is_done(self, observations):
        """Checks if episode done based on observations given.
        """
        raise NotImplementedError()
def takeoff(self, L):
"""
Sends the takeoff command and checks it has taken of
It unpauses the simulation and pauses again
to allow it to be a self contained action
"""
self.gazebo.unpauseSim()
# time.sleep(5.0)
# create PoseStamped
pose1 = PoseStamped()
pose1.header.stamp = rospy.Time.now()
pose1.pose = Pose()
# pose1.pose.position.x = 1.3
if not self.xy:
pose1.pose.position.x = 1.0+L/2.0
pose1.pose.position.y = 0
pose1.pose.position.z = 1.6
pose1.pose.orientation.w = 0.0
# create PoseStamped
pose2 = PoseStamped()
pose2.header.stamp = rospy.Time.now()
pose2.pose = Pose()
# pose2.pose.position.x = 0.7
pose2.pose.position.x = 1.0-L/2.0
pose2.pose.position.y = 0
pose2.pose.position.z = 1.6
pose2.pose.orientation.w = 0.0
else:
pose1.pose.position.x = 1.0+L/2.0
pose1.pose.position.y = 1.0
pose1.pose.position.z = 1.6
pose1.pose.orientation.w = 0.0
# create PoseStamped
pose2 = PoseStamped()
pose2.header.stamp = rospy.Time.now()
pose2.pose = Pose()
# pose2.pose.position.x = 0.7
pose2.pose.position.x = 1.0-L/2.0
pose2.pose.position.y = 1.0
pose2.pose.position.z = 1.6
pose2.pose.orientation.w = 0.0
# send PoseStamped
self._cmd_pos_pub1.publish(pose1)
self._cmd_pos_pub2.publish(pose2)
time.sleep(12.0)
self.gazebo.pauseSim()
def move_pos_base(self, dp, L):
"""
accept real dx and dz [-0.5,0.5] [-1.0,1.0] metre
"""
self._check_cmd_pos_pub_connection()
assert(dp.shape[0] == 3)
uav1_odm = self.get_odometry1()
uav2_dom = self.get_odometry2()
uav1_pos = uav1_odm.pose.pose.position
uav2_pos = uav2_dom.pose.pose.position
goal = numpy.zeros(3)
goal[0] = (uav1_pos.x+uav2_pos.x)/2+dp[0]
goal[1] = (uav1_pos.y+uav2_pos.y)/2+dp[1]
goal[2] = (uav1_pos.z+uav2_pos.z)/2+dp[2]
# create PoseStamped
pose1 = PoseStamped()
pose1.header.stamp = rospy.Time.now()
pose1.pose = Pose()
pose1.pose.position.x = goal[0]+L/2
pose1.pose.position.y = goal[1]
pose1.pose.position.z = goal[2]
pose1.pose.orientation.w = 0.0
# create PoseStamped
pose2 = PoseStamped()
pose2.header.stamp = rospy.Time.now()
pose2.pose = Pose()
pose2.pose.position.x = goal[0]-L/2
pose2.pose.position.y = goal[1]
pose2.pose.position.z = goal[2]
pose2.pose.orientation.w = 0.0
# send PoseStamped
self._cmd_pos_pub1.publish(pose1)
self._cmd_pos_pub2.publish(pose2)
self.wait_time_for_execute_movement()
return goal
    def wait_time_for_execute_movement(self):
        """
        Because this Parrot Drone position is global, we really dont have
        a way to know if its moving in the direction desired, because it would need
        to evaluate the diference in position and speed on the local reference.
        """
        # fixed open-loop wait for the position controllers to act
        time.sleep(0.15)
    def check_array_similar(self, ref_value_array, check_value_array, epsilon):
        """
        Return True if check_value_array is element-wise within `epsilon`
        (absolute tolerance) of ref_value_array.
        """
        # log both arrays to ease debugging of convergence checks
        rospy.logwarn("ref_value_array="+str(ref_value_array))
        rospy.logwarn("check_value_array="+str(check_value_array))
        return numpy.allclose(ref_value_array, check_value_array, atol=epsilon)
    def get_imu1(self):
        # Latest IMU message of firefly_1 (set by _imu_callback1 / _check_imu_ready).
        return self.imu1
    def get_imu2(self):
        # Latest IMU message of firefly_2.
        return self.imu2
    def get_odometry1(self):
        # Latest odometry of firefly_1.
        return self.odometry1
    def get_odometry2(self):
        # Latest odometry of firefly_2.
        return self.odometry2
    def get_bar_odometry(self):
        # Latest ground-truth odometry of the carried bar.
        return self.bar_odometry
# def get_points_top(self):
# gen = point_cloud2.read_points(self.points_top, field_names=("x", "y", "z"), skip_nans=True)
# # time.sleep(1)
# return list(gen)
# # time.sleep(1)
# def get_points_top_and_configuration(self):
# # transform points from camera_link to world
# try:
# (trans,rot) = self.tf.lookupTransform("/world", "/camera_link", rospy.Time(0))
# except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
# rospy.logerr("tf transform error!!!")
# new_points = []
# for x, y, z in point_cloud2.read_points(self.points_top, field_names=('x', 'y', 'z'), skip_nans=True):
# pt = Point()
# pt.x, pt.y, pt.z = x, y, z
# new_pt = self.transform_point(trans,rot, pt)
# new_points.append(new_pt)
# return new_points,self.configuration_sys
# # time.sleep(1)
# def get_points_front(self):
# # transform points from camera_link1 to world
# try:
# (trans,rot) = self.tf.lookupTransform("/camera_link1", "/world", rospy.Time(0))
# except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
# rospy.logerr("tf transform error!!!")
# new_points = []
# for x, y, z in point_cloud2.read_points(self.points_front, field_names=('x', 'y', 'z'), skip_nans=True):
# pt = Point()
# pt.x, pt.y, pt.z = x, y, z
# new_pt = self.transform_point(trans,rot, pt)
# new_points.append(new_pt)
# return numpy.array(new_points)
@staticmethod
def get_orientation_euler1(quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
| kpister/prompt-linter | data/scraping/repos/wawachen~openai_ros/src~openai_ros~robot_envs~firefly_env.py | src~openai_ros~robot_envs~firefly_env.py | py | 29,640 | python | en | code | 0 | github-code | 50 |
import sys
n = int(input())
paint = sys.stdin.readline().rstrip()
# color[0] counts red runs, color[1] counts blue runs
color = [0, 0]
if paint[0] == 'R': color[0] += 1
else: color[1] += 1
# count a colour only when it starts a new run: a cell equal to its
# predecessor is covered by the same stroke and needs no extra count
for i in range(1, n):
    if paint[i] != paint[i-1]:
        # no need to repaint when the colour matches the previous cell
        if paint[i] == 'R': color[0] += 1
        else: color[1] += 1
print(min(color)+1)
| PJunyeong/Coding-Test | Baekjoon/20365_블로그2.py | 20365_블로그2.py | py | 451 | python | ko | code | 0 | github-code | 50 |
#Tyler Smith, Kymberly McLane, Emeke Nkadi
#tsmtih328@gatech.edu, kervin3@gatech.edu, enkadi3@gatech.edu
#A06
from Myro import *
def roboScript(fileIn):
    """Read robot commands from a text file and execute them on the Myro robot.

    Each line has the form "<cmd> <arg1> <arg2>" where cmd is one of
    fw/bw/tl/tr (speed, seconds) or bp (beep). Unknown commands and blank
    lines are skipped (the original crashed with IndexError on blank lines).
    """
    # map command keywords to the Myro calls they trigger
    commands = {
        'fw': lambda a, b: forward(a, b),
        'bw': lambda a, b: backward(a, b),
        'tl': lambda a, b: turnLeft(a, b),
        'tr': lambda a, b: turnRight(a, b),
        # NOTE(review): the original passed the arguments to beep() in
        # reverse order; behaviour preserved — confirm the script format
        'bp': lambda a, b: beep(b, a),
    }
    # context manager guarantees the file is closed even on errors
    with open(fileIn, 'r') as f:
        for command in f:
            comList = command.split()
            if not comList:
                continue
            # convert numeric arguments; leave non-numeric tokens untouched
            for i in range(1, len(comList)):
                try:
                    comList[i] = float(comList[i])
                except ValueError:
                    pass
            action = commands.get(comList[0])
            if action is not None:
                action(comList[1], comList[2])
import os
import sys
import time
import pprint
import math
from ROOT import *
import array
from makeTrackDiagrams import *
from collections import OrderedDict
#### Z position of staves, queried from the detector geometry helper
z1inner = GetLayerZ(1000,0)
z2inner = GetLayerZ(1000,2)
z3inner = GetLayerZ(1000,4)
z4inner = GetLayerZ(1000,6)
z1outer = GetLayerZ(1000,1)
z2outer = GetLayerZ(1000,3)
z3outer = GetLayerZ(1000,5)
z4outer = GetLayerZ(1000,7)
# NOTE(review): these module-level values are not referenced in this script's
# visible code — presumably kept for interactive use; confirm before removing.
# get the layer from the stave number and its z position in mm
def getStaveZ(stave):
    """Return the z position in mm for the given stave index (0-7).

    Returns the sentinel -99999.0 for any unknown stave index.
    """
    z_by_stave = {
        0: 3864.5125,
        1: 3876.5125,
        2: 3964.5125,
        3: 3976.5125,
        4: 4064.5125,
        5: 4076.5125,
        6: 4164.5125,
        7: 4176.5125,
    }
    return z_by_stave.get(stave, -99999.0)
def main():
    """Read the per-hit track dump given on the command line, keep layer-1
    signal positron hits, and fill the weighted energy histogram."""
    # give the input text file containing all the track information
    inTextFile = sys.argv[1]
    inputTrackInfo = open(inTextFile)
    ### open histogram to know the seed information
    plotSuffixName = ""
    # derive the output suffix from the input file name
    if (("_" in inTextFile) and ("WIS" in inTextFile)):
        eachName = inTextFile.split('.')[0].split('_')
        suffixName = "_".join(eachName[2:])
    else:
        suffixName = inTextFile.split('.')[0]
    outFile = TFile("seedingInformationSmallScript_"+suffixName+".root", "RECREATE")
    outFile.cd()
    # NOTE(review): only hSigEnergy is filled below; the other histograms are
    # declared but stay empty in this script — confirm before removing them.
    hAllPossible = TH1D("hAllPossible", "all possible track combination; bunch crossing; number of track combination", 9508, 0, 9508)
    hSeedPossible = TH1D("hSeedPossible", "seed track combination; bunch crossing; number of seed track", 9508, 0, 9508)
    hSeedMultiplicity = TH1D("hSeedMultiplicity", "hSeedMultiplicity", 50, 0, 50)
    hSigEnergy = TH1D("hSigEnergy", "hSigEnergy", 200, 0, 20)
    # all the track info is in the following list
    position = []
    # get the information from the text files
    # columns: bx, pdgId, trackId, stave, x, y, E, weight (inferred from the
    # indices used below — TODO confirm against the dump format)
    for lines in inputTrackInfo.readlines():
        lines = lines.rstrip()
        eachWord = lines.split()
        bxNumber = int(eachWord[0])
        trackId = int(eachWord[2])
        pdgId = int(eachWord[1])
        trackEnergy = float(eachWord[6])
        # keep positrons only
        if(pdgId!=-11): continue
        ### select if only background or signal tracks wanted
        if(trackId!=1): continue
        position.append([bxNumber, trackId, int(eachWord[3])-1000, float(eachWord[4]), float(eachWord[5]), float(eachWord[6]), float(eachWord[7])])
    for bxCounter in range(1,9509):
        # separate each bx now
        eachBXValue = []
        for tracks in position:
            ### the below is needed for e+laser hics setup
            if tracks[0] == bxCounter:
                eachBXValue.append(tracks)
        ### fill up the x,y, z and E values from each of the tracker layers
        allR1Inner = [];
        allR1Outer = [];
        for values in eachBXValue:
            zPosition = getStaveZ(values[2])
            ### x, y, z and E
            if (values[2] == 0):
                allR1Inner.append([values[3], values[4], zPosition, values[5], values[6]])
            elif (values[2] == 1):
                allR1Outer.append([values[3], values[4], zPosition, values[5], values[6]])
            else:
                print("stave not needed")
        ### removing the overlap region of inner and outer stave
        allR1Unique = allR1Inner
        for r1Out in allR1Outer:
            #### remove all points having an x overlap with inner stave: not 100% effective
            if r1Out[0] > (308.53 + 29.94176/2.): ### the x position of last chip on inner stave layer 1+half of the x size
                allR1Unique.append(r1Out)
        # fill the energy histogram, weighted by the hit weight
        for r1 in allR1Unique:
            hSigEnergy.Fill(r1[3], r1[4])
    outFile.Write()
    outFile.Close()
# Entry point: run the full processing and report the wall-clock time.
if __name__ == "__main__":
    start = time.time()
    main()
    print("-------- The processing time: ",time.time() - start, " s")
| LUXEsoftware/SeedingAlgorithm | makeEnergyPlots.py | makeEnergyPlots.py | py | 4,124 | python | en | code | 0 | github-code | 50 |
from fastapi import APIRouter,Depends, FastAPI, Header, HTTPException
from .api.routers import users, root
# Single FastAPI application instance; all routers are attached below.
app = FastAPI(
    title="FastApi Skeleton",
    description="A Boilerplate FastApi project",
    version="1.0",
)
# NOTE(review): this router is created but never populated or included —
# presumably a leftover; confirm before removing.
router = APIRouter()
app.include_router(root.router)
app.include_router(users.router, prefix="/users")  # user endpoints under /users
| ari-hacks/infra-pipeline | app/main.py | main.py | py | 329 | python | en | code | 1 | github-code | 50 |
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from . import tasks
from .models import Repo
GH_EVENTS = {
'pull_request': 'opened',
'pull_request_review': 'submitted',
}
@login_required
def setup_hook(request, repo_id):
    """Activate the GitHub webhook for the given repository.

    The hook installation itself runs asynchronously (Celery task); this view
    renders a redirect page back to the repository list with a status message.
    """
    try:
        repo = Repo.objects.get(github_id=repo_id)
    except Repo.DoesNotExist:
        # fixed: Django's attribute is DoesNotExist — the original
        # 'Repo.DoesNotExists' raised AttributeError instead of handling
        # the missing repository
        return HttpResponse('Repository not found!')
    context = {
        'url': reverse('webapp_repos')
    }
    if not repo.has_hooks:
        tasks.setup_hook.delay(request.user.id, repo_id)
        context['message'] = 'The webhook is being activated...'
    else:
        context['message'] = 'Webhook already activated'
    return render(request, 'redirect.html', context)
@csrf_exempt
def hook_pullrequest(request):
    '''
    Responds requests from Github trigged by pull_request and
    pull_request_review events.
    https://developer.github.com/v3/activity/events/types/
    '''
    data = json.loads(request.body.decode('utf-8'))
    gh_event = request.META.get('HTTP_X_GITHUB_EVENT', '')
    if gh_event == 'ping':
        # GitHub sends a ping event right after the hook is created
        return HttpResponse('pong')
    gh_action = data.get('action')
    # only handle the (event, action) pairs declared in GH_EVENTS
    if gh_event not in GH_EVENTS.keys() or gh_action != GH_EVENTS[gh_event]:
        return HttpResponse("I don't what to do :/")
    pr = data['pull_request']
    repo = Repo.objects.get(github_id=pr['head']['repo']['id'])
    # queue the review check for the first user linked to the repository
    tasks.check_pr_reviews.delay(repo.users.first().id, pr)
    return HttpResponse('ysnp')
| rougeth/youshallnotpass | ysnp/hook/views.py | views.py | py | 1,637 | python | en | code | 12 | github-code | 50 |
#small imports, fast building :D
import tkinter as tk
from tkinter.font import BOLD
import tkinter.messagebox as tkmessage
#simple function to toggle the value shown on the button
def cambio():
    """Toggle the GA3 button between occupied (red) and free (green), and pop
    a warning box telling the operator to alert the tower.

    The button text itself ("GA3 OCCUPATA"/"GA3 LIBERA") is the state.
    """
    if bottoneGA3['text'] =='GA3 OCCUPATA':
        bottoneGA3['text'] = 'GA3 LIBERA'
        bottoneGA3['background'] = 'green'
        tkmessage.showwarning('*AVVERTIRE TORRE*','Si è appena liberata GA3, AVVERTIRE LA TORRE!')
    elif bottoneGA3['text'] =='GA3 LIBERA':
        bottoneGA3['text'] = 'GA3 OCCUPATA'
        bottoneGA3['background'] = 'red'
        tkmessage.showwarning('*AVVERTIRE TORRE*','GA3 occupata, AVVERTIRE LA TORRE!')
#main frame: a small always-on-top, undecorated window in the top-right corner
window = tk.Tk()
w = 150 #w and h can be changed here
h = 38
window.maxsize(w,h)
screen_width = window.winfo_screenwidth() #recover screen information
x = (screen_width) - w - 5 # variables x and y can be changed to change position of the button
y = (0)
window.geometry('%dx%d+%d+%d' % (w, h, x, y))
window.iconbitmap('youricon.ico') #Add your icon's path HERE
# keep the window on top, without taskbar entry or window decorations
window.wm_attributes('-topmost', 'True',)
window.wm_attributes('-toolwindow','True')
window.wm_overrideredirect(True)
frame=tk.Frame(master=window,
               width=90,
               height=40,
               bg="black")
#pack the frame
frame.pack(fill=tk.BOTH,
           expand=True)
#main button: toggles the GA3 state via cambio()
bottoneGA3 = tk.Button(master=frame,
                    text="GA3 LIBERA",
                    background="black",
                    foreground="white",
                    command=cambio,
                    font=('Calibri',13,BOLD)
                    )
#closing button
bottonechiusura = tk.Button(master=frame,
                    text ='X',
                    background="black",
                    foreground="white",
                    command=window.destroy,
                    font=('Calibri',13,BOLD)
                    )
#placing the buttons on the main frame
bottoneGA3.place(x=0,y=1)
bottonechiusura.place(x=130,y=1)
frame.mainloop()
| MaurizioCarrara/AlertBox | AlertBox.py | AlertBox.py | py | 2,017 | python | en | code | 1 | github-code | 50 |
2846317200 | """
sub-module to analyse wheel movements based on dots visible in the side view.
"""
import os
import sys
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import multiprocessing
import subprocess
import signal
import glob
from scipy.ndimage import gaussian_filter1d, median_filter
from time import sleep
import matplotlib.pyplot as plt
import pickle
from twoppp import load, utils
from twoppp.behaviour.fictrac import get_mean_image
f_s = 100  # behaviour camera frame rate in Hz (assumed; TODO confirm)
r_wheel = 5  # wheel radius; its unit sets the unit of the returned velocity (presumably mm — verify)
def get_wheel_parameters(video_file, skip_existing=True, output_dir=None, y_min=240):
    """Fit the treadmill wheel in the side-view video and pre-compute boolean
    pixel masks for 0.5 degree angular bins along the wheel rim.

    Parameters
    ----------
    video_file : str
        path to the side-view (camera 1) video.
    skip_existing : bool, optional
        if True, load cached masks from "wheel_locations.pkl" in output_dir
        when present instead of recomputing, by default True
    output_dir : str, optional
        directory for diagnostic images and the cached mask file.
        NOTE(review): must not be None — it is used unconditionally for the
        cache path below; confirm callers always pass it.
    y_min : int, optional
        number of rows cropped from the top of the image (removes the fly),
        by default 240

    Returns
    -------
    np.ndarray
        stack of boolean masks, one per well-populated angular bin.
    """
    locations_file = os.path.join(output_dir, "wheel_locations.pkl")
    # fixed: the cache was previously only returned when skip_existing was
    # False, i.e. the flag acted inverted relative to its name and to the
    # skip_existing semantics of get_mean_image() below
    if os.path.isfile(locations_file) and skip_existing:
        with open(locations_file, "rb") as f:
            locations = pickle.load(f)
        return locations
    print("Computing mean image and detecting wheel boundaries.")
    mean_img = get_mean_image(video_file=video_file, skip_existing=skip_existing, output_name="camera_1_mean_image.jpg")
    N_y, N_x = mean_img.shape
    img = cv2.medianBlur(mean_img, 5)[y_min:,:]  # cut off top part of video with the fly and only keep wheel
    # pad with black below so the mostly off-screen wheel circle fits
    black = np.zeros_like(img)
    extended_img = np.concatenate((img,black,black,black,black),axis=0)
    circles = cv2.HoughCircles(extended_img, cv2.HOUGH_GRADIENT, 2, minDist=200, param1=20, param2=20, minRadius=500, maxRadius=1200)
    circles = np.round(circles[0, :]).astype(int)
    x, y, r_out = circles[0]  # TODO: implement way to check which of the circles is correct instead of assuming it is the first one found
    r_in = r_out - 100  # rim thickness of 100 px — empirical
    if output_dir is not None:
        # save a diagnostic image with the fitted rim circles and centre
        save_img = cv2.cvtColor(extended_img, cv2.COLOR_GRAY2BGR)
        cv2.circle(save_img, (x, y), r_out, (0, 0, 255), 1)
        cv2.circle(save_img, (x, y), r_in, (0, 0, 255), 1)
        cv2.rectangle(save_img, (x - 5, y - 5), (x + 5, y + 5), (255, 128, 255), -1)
        cv2.imwrite(os.path.join(output_dir, "camera_1_wheel_fit.jpg"), save_img)
    # per-pixel: flag pixels on the rim and compute their angle around the hub
    on_wheel = np.zeros_like(img, dtype=bool)
    angles = np.zeros_like(img, dtype=float)
    for i_x in np.arange(N_x):
        for i_y in np.arange(N_y//2):
            d = np.sqrt((i_x-x)**2+(i_y-y)**2)
            if d < r_out and d > r_in:
                on_wheel[i_y, i_x] = True
                angles[i_y, i_x] = np.dot([i_y-y, i_x-x], [0, d]) / d / d / np.pi * 180  # compute angles in °
    angles_rounded = np.round(angles * 2)  # each step is 0.5°
    if output_dir is not None:
        fig, axs = plt.subplots(2,1,figsize=(9.5,6))
        axs[0].imshow(angles_rounded)
        axs[1].imshow(angles_rounded, clim=[-5,5])
        fig.tight_layout()
        fig.savefig(os.path.join(output_dir, "camera_1_wheel_angles.jpg"))
    locations = []
    n_per_loc = []
    for angle in np.arange(-50,50):
        locations.append(np.logical_and(angles_rounded==angle, on_wheel))
        n_per_loc.append(np.sum(locations[-1]))
    locations = np.array(locations)
    # keep only well-populated angular bins (at least half the max pixel count)
    locations = locations[np.array(n_per_loc) > np.max(n_per_loc)/2]
    if output_dir is not None:
        with open(os.path.join(output_dir, "wheel_locations.pkl"), "wb") as f:
            pickle.dump(locations, f)
    return locations
def extract_line_profile(img, locations, y_min=240):
    """Average image intensity within each pre-computed wheel-rim mask.

    `img` is cropped from row `y_min` downwards (to match the crop used when
    the masks were computed); one mean value is returned per mask.
    """
    cropped = img[y_min:]
    return np.array([np.mean(cropped[mask]) for mask in locations])
def get_wheel_speed(video_file, line_locations, y_min=240, max_shift=10):
    """Estimate the wheel surface speed per frame by cross-correlating the
    rim intensity profiles of consecutive video frames.

    Parameters
    ----------
    video_file : str
        path to the side-view video.
    line_locations : sequence of bool masks
        angular-bin masks from get_wheel_parameters().
    y_min : int, optional
        top crop applied to each frame, by default 240
    max_shift : int, optional
        maximum profile shift (in 0.5 degree bins) tested per frame pair,
        by default 10

    Returns
    -------
    np.ndarray
        per-frame surface velocity (units of r_wheel per second); the entry
        for the last frame stays 0.
    """
    lines = []
    print("Read video to extract wheel patterns.")
    f = cv2.VideoCapture(video_file)
    while 1:
        rval, frame = f.read()
        if rval:
            # use a single colour channel (grayscale content)
            frame = frame[:, :, 0]
            lines.append(extract_line_profile(frame, line_locations, y_min=y_min))
        else:
            break
    f.release()
    print("Compute wheel velocity from wheel patterns.")
    possible_shifts = np.arange(-max_shift,max_shift+1).astype(int)
    # note: max_shift is rebound to the trimming margin used below
    max_shift = np.max(np.abs(possible_shifts)) + 1
    shifts = np.zeros(len(lines))
    corrs = np.zeros((len(possible_shifts)))
    for i_l, line in enumerate(tqdm(lines[:-1])):
        next_line = lines[i_l+1]
        for i_s, shift in enumerate(possible_shifts):
            # normalised cross-correlation of the trimmed profiles at this shift
            v1 = line[max_shift:-max_shift]
            v2 = next_line[max_shift+shift:-(max_shift-shift)]
            corrs[i_s] = v1.dot(v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
        shifts[i_l] = possible_shifts[np.argmax(corrs)]
    # each shift step is 0.5 degrees; convert to linear speed at the rim
    v = shifts / 2 / 180 * np.pi * r_wheel * f_s
    return v
def get_wheel_df(v=None, video_file=None, index_df=None, df_out_dir=None, sigma_gauss_size=20):
    """save the velocity of the wheel into a data frame. if not supplied, compute the wheel velocity.
    This computation is dependent on dots being drawn on the side of the wheel and supplying the correct camera.
    The result is added to the supplied index_df dataframe.

    Parameters
    ----------
    v : np.ndarray
        velocity vector. If None, will be computed from video_file, by default None
    video_file : str
        path to file of side view video with dots on side of the wheel clearly visible. only used in case v is None.
    index_df : pandas Dataframe or str
        pandas dataframe or path of pickle containing dataframe to which the wheel result is added.
        This could, for example, be a dataframe that contains indices for synchronisation with 2p data.
        Required: a None value raises NotImplementedError.
    df_out_dir : str, optional
        if specified, will save the dataframe as .pkl, by default None
    sigma_gauss_size : int, optional
        width of Gaussian kernel applied to the velocity, by default 20

    Returns
    -------
    pandas DataFrame
        index_df with added columns "v_raw" (per-frame velocity) and "v"
        (Gaussian-filtered velocity)

    Raises
    ------
    ValueError
        If the lengths of index_df and the velocity differ by more than 10
    NotImplementedError
        If no index_df is supplied
    """
    if isinstance(index_df, str) and os.path.isfile(index_df):
        index_df = pd.read_pickle(index_df)
    if index_df is not None:
        assert isinstance(index_df, pd.DataFrame)
    if v is None:
        print("Wheel velocity was not provided. Will compute it.")
        output_dir = os.path.dirname(video_file)
        line_locations = get_wheel_parameters(video_file, skip_existing=False, output_dir=output_dir, y_min=240)
        v = get_wheel_speed(video_file, line_locations, y_min=240, max_shift=10)
    v_filt = gaussian_filter1d(np.asarray(v).astype(float), sigma=sigma_gauss_size)
    if index_df is None:
        raise NotImplementedError("Please supply an index dataframe")
    if len(index_df) != len(v):
        if np.abs(len(index_df) - len(v)) <= 10:
            # fixed: the original built a Warning(...) instance without
            # raising or printing it — a silent no-op; also str(video_file)
            # avoids a TypeError when video_file is None
            print("Warning: Number of Thorsync ticks and length of wheel processing do not match. \n" +
                  "Thorsync has {} ticks, wheel processing file has {} lines. \n".format(len(index_df), len(v)) +
                  "video_file: " + str(video_file))
            print("Difference: {}".format(len(index_df) - len(v)))
            length = np.minimum(len(index_df), len(v))
            index_df = index_df.iloc[:length, :]
            # fixed: also truncate the velocities — otherwise assigning a
            # longer array to the shortened dataframe raises ValueError
            v = np.asarray(v)[:length]
            v_filt = v_filt[:length]
        else:
            raise ValueError("Number of Thorsync ticks and length of wheel processing file do not match. \n" +
                             "Thorsync has {} ticks, wheel processing file has {} lines. \n".format(len(index_df), len(v)) +
                             "video_file: " + str(video_file))
    df = index_df
    df["v_raw"] = v
    df["v"] = v_filt
    if df_out_dir is not None:
        df.to_pickle(df_out_dir)
    return df
if __name__ == "__main__":
    # Batch-process the listed trials: compute the wheel velocity from each
    # side-view video and append it to the trial's behaviour dataframe.
    trial_dirs = [
        # "/mnt/nas2/JB/221115_DfdxGCaMP6s_tdTom_CsChrimsonxPR/Fly1_part2/004_xz_wheel",
        # "/mnt/nas2/JB/221115_DfdxGCaMP6s_tdTom_CsChrimsonxPR/Fly1_part2/005_xz_wheel",
        "/mnt/nas2/JB/221115_DfdxGCaMP6s_tdTom_CsChrimsonxPR/Fly1_part2/006_xz_wheel",
        # "/mnt/nas2/JB/221117_DfdxGCaMP6s_tdTom_DNP9xCsChrimson/Fly1_part2/003_xz_wheel",
    ]
    for trial_dir in trial_dirs:
        video_file = os.path.join(trial_dir, "behData", "images", "camera_1.mp4")
        beh_df_dir = os.path.join(trial_dir, load.PROCESSED_FOLDER, "beh_df.pkl")
        # the dataframe is read from and written back to the same pickle
        _ = get_wheel_df(v=None, video_file=video_file, index_df=beh_df_dir, df_out_dir=beh_df_dir, sigma_gauss_size=20)
    pass
| NeLy-EPFL/twoppp | twoppp/behaviour/wheel.py | wheel.py | py | 8,539 | python | en | code | 1 | github-code | 50 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 10 23:53:51 2017
@author: vhm
"""
from model import unet_model_3d
import numpy as np
from keras.utils import plot_model
from keras import callbacks
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
from data_handling import load_train_data, load_validatation_data
from unet3d.model import isensee2017_model
from model import dice_coef_loss
from unet3d.training import load_old_model, train_model
import configs
# Training hyper-parameters; sizes come from the shared configs module.
patch_size = configs.PATCH_SIZE
batch_size = configs.BATCH_SIZE
config = dict()
config["pool_size"] = (2, 2, 2) # pool size for the max pooling operations
config["image_shape"] = (256, 128, 256) # This determines what shape the images will be cropped/resampled to.
config["patch_shape"] = (patch_size, patch_size, patch_size) # switch to None to train on the whole image
config["nb_channels"] = 1
# Input is channels-first: (channels, depth, height, width).
if "patch_shape" in config and config["patch_shape"] is not None:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["patch_shape"]))
else:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["image_shape"]))
config["n_labels"] = configs.NUM_CLASSES
config["n_base_filters"] = 16
config["all_modalities"] = ['t1']#]["t1", "t1Gd", "flair", "t2"]
config["training_modalities"] = config["all_modalities"] # change this if you want to only use some of the modalities
# nb_channels is overwritten here to track the selected modalities.
config["nb_channels"] = len(config["training_modalities"])
config["deconvolution"] = False # if False, will use upsampling instead of deconvolution
config["batch_size"] = batch_size
config["n_epochs"] = 500 # cutoff the training after this many epochs
config["patience"] = 10 # learning rate will be reduced after this many epochs if the validation loss is not improving
config["early_stop"] = 20 # training will be stopped after this many epochs without the validation loss improving
config["initial_learning_rate"] = 0.0001
config["depth"] = configs.DEPTH
config["learning_rate_drop"] = 0.5
# Prefix used for output file names (logs, saved models).
image_type = '3d_patches'
def train_and_predict():
    """Load train/validation volumes, build the Isensee-2017 3D U-Net and fit it.

    Side effects: writes a CSV training log, the best-validation-loss weights
    (``outputs/weights.h5``) and the final model under ``outputs/``.
    """
    banner = '-' * 30

    print(banner)
    print('Loading and preprocessing train data...')
    print(banner)
    x_train, y_train = load_train_data()
    # Reorder from channels-last to channels-first, as the model expects.
    x_train = np.transpose(x_train, (0, 4, 1, 2, 3))
    y_train = np.transpose(y_train, (0, 4, 1, 2, 3))

    print(banner)
    print('Loading and preprocessing validation data...')
    print(banner)
    x_val, y_val = load_validatation_data()
    x_val = np.transpose(x_val, (0, 4, 1, 2, 3))
    y_val = np.transpose(y_val, (0, 4, 1, 2, 3))

    print(banner)
    print('Creating and compiling model...')
    print(banner)
    # Build the 3D segmentation network with a Dice loss.
    model = isensee2017_model(input_shape=config["input_shape"], n_labels=config["n_labels"],
                              initial_learning_rate=config["initial_learning_rate"],
                              n_base_filters=config["n_base_filters"], loss_function=dice_coef_loss)
    model.summary()

    print(banner)
    print('Fitting model...')
    print(banner)
    print('training starting..')
    log_filename = 'outputs/' + image_type + '_model_train.csv'
    csv_log = callbacks.CSVLogger(log_filename, separator=',', append=True)
    checkpoint_filepath = 'outputs/' + 'weights.h5'
    checkpoint = callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_loss',
                                           verbose=1, save_best_only=True, mode='min')
    callbacks_list = [
        csv_log,
        checkpoint,
        ReduceLROnPlateau(factor=config["learning_rate_drop"], patience=config["patience"],
                          verbose=True),
        EarlyStopping(verbose=True, patience=config["early_stop"]),
    ]
    # NOTE(review): `nb_epoch` is the Keras 1.x keyword (renamed `epochs` in
    # Keras 2); kept as-is to match whatever Keras version this project pins.
    history = model.fit(x_train, y_train, batch_size=config["batch_size"],
                        nb_epoch=config["n_epochs"], verbose=1,
                        validation_data=(x_val, y_val), shuffle=True,
                        callbacks=callbacks_list)
    model_name = 'outputs/' + image_type + '_model_last'
    model.save(model_name)  # creates a HDF5 file
if __name__ == '__main__':
    # Entry point: run the full training pipeline.
    train_and_predict()
3763166040 |
from texttable import Texttable
def tcb(args):
    """Pretty-print a two-column table of argument names and their values."""
    params = vars(args)
    table = Texttable()
    table.add_rows([["Parameter", "Value"]])
    # One row per argument, sorted by name, with underscores humanized.
    table.add_rows([[name.replace("_", " ").capitalize(), params[name]]
                    for name in sorted(params.keys())])
    print(table.draw())
def cmc(node_properties):
    """Map each property value to its position in the input sequence."""
    mapping = {}
    for index, value in enumerate(node_properties):
        mapping[value] = index
    return mapping
'''
def tcb(args):
"""
Prints a table with the parameter names and their values.
"""
args = vars(args)
keys = sorted(args.keys())
t = Texttable()
t.add_rows([["Parameter", "Value"]])
t.add_rows([[f"{k.replace('_', ' ').capitalize()}", args[k]] for k in keys])
print(t.draw())
'''
| harsh2929/GNN | fxcn.py | fxcn.py | py | 675 | python | en | code | 0 | github-code | 50 |
30720123981 | from math import pi
from time import time
from poloniex import Poloniex
import pandas as pd
from bokeh.plotting import figure, output_file, show
import numpy as np
from sklearn.linear_model import LinearRegression
from bokeh.models import HoverTool, BoxSelectTool
import matplotlib.pyplot as plt
from pandas_datareader import data
# Change the number to move the left bound (how far back to fetch) if needed.
numOfDaysToGet = 30
windowLength = 14
currencyToGet = 'USDT_BTC'
# Poloniex API client (jsonNums=float parses numeric fields as floats).
api = Poloniex(timeout=None, jsonNums=float)
# Change the multiplier to move the right bound back from today if needed.
NumOfDaysToMoveBackFromToday = time() - api.DAY*0
# Period of candlesticks to receive (hours): 24, 4, 2, 0.5, 0.25, or 0.083.
period = api.HOUR * 4
# Fetch OHLCV candlestick data for the chosen window.
raw = api.returnChartData(currencyToGet, period=period, start=time() - api.DAY*numOfDaysToGet, end= NumOfDaysToMoveBackFromToday)
# Load the API response into a dataframe.
df = pd.DataFrame(raw)
# Convert epoch seconds from the API into datetime values.
df['date'] = pd.to_datetime(df["date"], unit='s')
# Hui-Heubel liquidity ratio: price range relative to turnover.
df['liquidity'] = ((df['high'] - df['low']) / df['low']) / (df['volume'] / (df['weightedAverage'] * df['quoteVolume']))
# Calculate a relative strength index (RSI) using an exponential moving
# average, since an EMA tracks recent price movements more closely than a
# simple mean (tortoise-vs-hare analogy).
close = df['close']
delta = close.diff()
delta = delta[1:]
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# BUG FIX: pd.stats.moments.ewma was deprecated in pandas 0.18 and removed by
# 0.23, so these lines crashed on any modern pandas. Series.ewm(...).mean() is
# the drop-in replacement; the old ewma(series, n) call passed n positionally
# as `com`, so com=windowLength preserves the original smoothing.
roll_up1 = up.ewm(com=windowLength).mean()
roll_down1 = down.abs().ewm(com=windowLength).mean()
RS1 = roll_up1 / roll_down1
df['rsi'] = 100.0 - (100.0 / (1.0 + RS1))
# Drop rows with NaNs (first diff / EMA warm-up rows).
df.dropna(inplace=True)
# Reorder the columns for readability.
df = df[['date', 'open', 'close', 'high', 'low', 'volume', 'rsi', 'quoteVolume','liquidity' ,'weightedAverage']]
# Print correlations and the last rows for a quick sanity check.
print(df.corr())
print(df.tail())
# Interactive tools shown on the Bokeh toolbar.
tools = "pan,wheel_zoom,box_zoom,reset,save, hover"
# Output goes to an HTML file named after the currency pair.
output_file(currencyToGet + ".html", title= currencyToGet + "-Poloniex")
# Generate the candlestick figure.
p = figure(x_axis_type="datetime", tools=tools, plot_width=1900, title=currencyToGet)
p.xaxis.major_label_orientation = pi / 4
p.grid.grid_line_alpha = 0.7
# Boolean masks: rising (green) vs falling (red) candles.
inc = df.close > df.open
dec = df.open > df.close
# High-low wicks ("shadows").
p.segment(df.date, df.high, df.date, df.low, color="black")
# Candle width in ms (period is in seconds), minus a small gap.
w = (period * 1000) - 5000
# Draw the green (up) and red (down) candle bodies.
p.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color="green", line_color="black")
p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color="red", line_color="black")
# Open the rendered chart in the browser.
show(p)
| milkman97/BitcoinScam | BokesheTest.py | BokesheTest.py | py | 2,772 | python | en | code | 0 | github-code | 50 |
18209767995 | from itertools import takewhile
class Solution(object):
    def nextPermutation(self, nums):
        """Rearrange nums into the lexicographically next permutation, in place.

        If nums is already the highest permutation, it is rearranged into the
        lowest (sorted ascending). Nothing is returned.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        size = len(nums)
        if size <= 1:
            return
        # Walk left from the end to find the pivot: the last index whose
        # value is smaller than its right neighbour.
        pivot = size - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Entire array is non-increasing: wrap around to the smallest.
            nums.reverse()
            return
        # The suffix right of the pivot is non-increasing, so scan from the
        # end for the rightmost element strictly greater than the pivot.
        swap_idx = size - 1
        while nums[swap_idx] <= nums[pivot]:
            swap_idx -= 1
        nums[pivot], nums[swap_idx] = nums[swap_idx], nums[pivot]
        # Reverse the suffix to make it the smallest possible tail.
        nums[pivot + 1:] = reversed(nums[pivot + 1:])
| stachenov/PyLeetCode | problems/next_permutation.py | next_permutation.py | py | 693 | python | en | code | 0 | github-code | 50 |
70943064155 | #имя проекта: task 38
#номер версии: 1.0
#имя файла: 38task
#автор и его учебная группа: Pollak Igor, ЭУ-120
#дата создания: 23.12.2019
#дата последней модификации: 23.12.2019
#связанные файлы: - numpy/array
#описание: Исключить M элементов, начиная с позиции K.
#версия Python: 3.8
import numpy as np
import array
import random
# Read the array size, the start position K and the count M of elements to drop.
N = int(input("Введите количество элементов массива "))
K = int(input("Позиция K "))
M = int(input("количество элементов для вычитания "))
# Build a random array of N integers in [0, 100].
A = [random.randint(0, 100) for i in range(0, N)]
print(A)
# BUG FIX: the original called A.insert(K, M) followed by A.delete(K, M);
# lists have no `delete` method, so the script crashed with AttributeError,
# and the insert contradicted the stated task ("exclude M elements starting
# at position K"). Slice deletion implements the task directly.
del A[K:K+M]
print(A)
| harry1pacman/Bussines-IT | 38task.py | 38task.py | py | 885 | python | ru | code | 0 | github-code | 50 |
def solution(myStr):
    """Split myStr on the delimiters 'a', 'b' and 'c'.

    Returns the list of non-empty fragments between delimiters, or ['EMPTY']
    when no fragment remains.
    """
    # Map every delimiter to a space in one C-level pass, then split on
    # whitespace — equivalent to the original char-by-char loop + join/split.
    tokens = myStr.translate(str.maketrans({'a': ' ', 'b': ' ', 'c': ' '})).split()
    return tokens if tokens else ['EMPTY']
25753765279 | #!/usr/bin/python3
"""
Class Base
"""
import json
import os.path
class Base:
    """Base model class that manages a unique ``id`` for every instance.

    Maintains a private class-level counter used whenever no explicit id is
    supplied, and provides JSON (de)serialization helpers shared by all
    subclasses (Rectangle, Square).
    """

    # Class-level counter of auto-assigned ids.
    __nb_objects = 0

    def __init__(self, id=None):
        """Initialize the id.

        Args:
            id (int): explicit id to use; when None, the next counter value
                is assigned instead.
        """
        # BUG FIX: the original tested `if id:`, so a falsy explicit id
        # (e.g. 0) was silently replaced by the counter. Only None means
        # "not provided".
        if id is not None:  # si el usuario pasa un id, lo asigna
            self.id = id
        else:  # si no pasa un id, se asigna el del contador
            Base.__nb_objects += 1
            self.id = Base.__nb_objects

    @staticmethod
    def to_json_string(list_dictionaries):
        """Return the JSON string for a list of dicts ("[]" if empty/None)."""
        if list_dictionaries is None or len(list_dictionaries) == 0:
            return ("[]")
        return json.dumps(list_dictionaries)

    @classmethod
    def save_to_file(cls, list_objs):
        """Write the JSON representation of list_objs to <ClassName>.json."""
        # Renamed from `list` to avoid shadowing the builtin.
        dicts = []
        if list_objs is not None:
            dicts = [item.to_dictionary() for item in list_objs]
        with open("{}.json".format(cls.__name__), "w") as file:
            file.write(cls.to_json_string(dicts))

    @staticmethod
    def from_json_string(json_string):
        """Return the list represented by a JSON string ([] if empty/None)."""
        if json_string is None or len(json_string) == 0:
            return ([])
        return json.loads(json_string)

    @classmethod
    def create(cls, **dictionary):
        """Return an instance with all attributes set from ``dictionary``."""
        # Dummy constructor arguments satisfy each subclass's signature
        # before `update` overwrites every attribute.
        if cls.__name__ == "Rectangle":
            holder = cls(1, 1)
        if cls.__name__ == "Square":
            holder = cls(1)
        holder.update(**dictionary)
        return holder

    @classmethod
    def load_from_file(cls):
        """Return a list of instances loaded from <ClassName>.json."""
        if not os.path.exists(cls.__name__ + ".json"):
            return []
        with open(cls.__name__ + ".json", "r") as file:
            stuff = cls.from_json_string(file.read())
        return [cls.create(**index) for index in stuff]
| Andrecast/holbertonschool-higher_level_programming | 0x0C-python-almost_a_circle/models/base.py | base.py | py | 1,887 | python | en | code | 0 | github-code | 50 |
42932966163 | import BeautifulSoup
import sys
if __name__ == '__main__':
    # Usage: pretty.py <html-file>; writes prettified HTML to ou_<html-file>.
    if len(sys.argv) != 2:
        sys.exit()
    filein = sys.argv[1]
    fileout = 'ou_' + filein
    # Use context managers so the files are closed even if reading/parsing
    # fails (the original left handles open on exceptions).
    with open(filein, 'r') as f:
        cont = f.read()
    b = BeautifulSoup.BeautifulSoup(cont)
    with open(fileout, 'w') as f:
        f.write(b.prettify())
24837615920 | import random
# Question shown to the player before each round.
TASK_DESCRIPTION = 'What is the result of the expression?'
# Inclusive bounds for the random operands.
LOWER_LIMIT = 1
UPPER_LIMIT = 100
def get_operator():
    """Pick one of the supported arithmetic operators at random."""
    return random.choice(('+', '*', '-'))
def get_expected_result(number_1, number_2, operation):
    """Return the value of ``number_1 <operation> number_2``.

    Args:
        number_1: left operand.
        number_2: right operand.
        operation: one of '+', '*' or '-'.

    Raises:
        ValueError: if ``operation`` is not a supported operator.
    """
    if operation == '+':
        return number_1 + number_2
    if operation == '*':
        return number_1 * number_2
    if operation == '-':
        return number_1 - number_2
    # Previously an unsupported operator crashed with an opaque
    # UnboundLocalError at the return statement; fail loudly instead.
    raise ValueError('Unsupported operation: {0}'.format(operation))
def get_task():
    """Build one round of the "math expression" game.

    Returns a tuple: the expected answer as a string, and the expression
    text shown to the player (e.g. ``'10 + 2'``).
    """
    operator_symbol = get_operator()
    left = random.randint(LOWER_LIMIT, UPPER_LIMIT)
    right = random.randint(LOWER_LIMIT, UPPER_LIMIT)
    answer = get_expected_result(left, right, operator_symbol)
    return str(answer), '{0} {1} {2}'.format(left, operator_symbol, right)
| ZDaria/python-project-lvl1 | brain_games/games/calc.py | calc.py | py | 1,111 | python | en | code | 0 | github-code | 50 |
43919741545 | """
Example of designing a shielded biplanar coil
===============================================
"""
import numpy as np
import matplotlib.pyplot as plt
from mayavi import mlab
import trimesh
from bfieldtools.mesh_conductor import MeshConductor, StreamFunction
from bfieldtools.contour import scalar_contour
from bfieldtools.viz import plot_3d_current_loops
from bfieldtools.utils import load_example_mesh, combine_meshes
# Set unit, e.g. meter or millimeter.
# This doesn't matter, the problem is scale-invariant
scaling_factor = 0.1
# Load simple plane mesh that is centered on the origin
planemesh = load_example_mesh("10x10_plane_hires")
planemesh.apply_scale(scaling_factor)
# Specify coil plane geometry
center_offset = np.array([0, 0, 0]) * scaling_factor
standoff = np.array([0, 4, 0]) * scaling_factor
# Create coil plane pairs: two copies of the plane offset along +/- y.
coil_plus = trimesh.Trimesh(
    planemesh.vertices + center_offset + standoff, planemesh.faces, process=False
)
coil_minus = trimesh.Trimesh(
    planemesh.vertices + center_offset - standoff, planemesh.faces, process=False
)
# mesh1 = inner (field) coil pair, mesh2 = scaled-up shield coil pair.
mesh1 = combine_meshes((coil_minus, coil_plus))
mesh2 = mesh1.copy()
mesh2.apply_scale(1.4)
coil1 = MeshConductor(mesh_obj=mesh1, basis_name="inner", N_sph=4)
coil2 = MeshConductor(mesh_obj=mesh2, basis_name="inner", N_sph=4)
# Self- and mutual inductance matrices of the two conductors.
M11 = coil1.inductance
M22 = coil2.inductance
M21 = coil2.mutual_inductance(coil1)
# Mapping from I1 to I2, constraining flux through mesh2 to zero
P = -np.linalg.solve(M22, M21)
# Spherical-harmonic couplings (alpha/beta coefficients) of both coils.
A1, Beta1 = coil1.sph_couplings
A2, Beta2 = coil2.sph_couplings
# Use lines below to get couplings with different normalization
# from bfieldtools.sphtools import compute_sphcoeffs_mesh
# A1, Beta1 = compute_sphcoeffs_mesh(mesh1, 5, normalization='energy', R=1)
# A2, Beta2 = compute_sphcoeffs_mesh(mesh2, 5, normalization='energy', R=1)
# Beta1 = Beta1[:, coil1.inner_vertices]
# Beta2 = Beta2[:, coil2.inner_vertices]
# Evaluation grid in the z=0 plane for field and potential plots.
x = y = np.linspace(-0.8, 0.8, 50)  # 150)
X, Y = np.meshgrid(x, y, indexing="ij")
points = np.zeros((X.flatten().shape[0], 3))
points[:, 0] = X.flatten()
points[:, 1] = Y.flatten()
# Coupling matrices: stream function -> B-field / scalar potential at points.
CB1 = coil1.B_coupling(points)
CB2 = coil2.B_coupling(points)
CU1 = coil1.U_coupling(points)
CU2 = coil2.U_coupling(points)
#%% Precalculations for the solution
# alpha[15] = 1
# Minimization of magnetic energy with spherical harmonic constraint
C = Beta1 + Beta2 @ P
M = M11 + M21.T @ P
from scipy.linalg import eigvalsh
# Largest generalized eigenvalue, used to scale the regularization below.
# NOTE(review): the `eigvals` keyword is deprecated in newer SciPy in favor
# of `subset_by_index` — confirm against the pinned SciPy version.
ssmax = eigvalsh(C.T @ C, M, eigvals=[M.shape[1] - 1, M.shape[1] - 1])
#%% Specify spherical harmonic and calculate corresponding shielded field
beta = np.zeros(Beta1.shape[0])
beta[7] = 1  # Gradient
# beta[2] = 1 # Homogeneous
# Minimum residual
_lambda = 1e3
# Minimum energy
# _lambda=1e-3
# Regularized least squares: inner-coil stream function that realizes the
# target harmonic; the shield currents follow from the zero-flux mapping P.
I1inner = np.linalg.solve(C.T @ C + M * ssmax / _lambda, C.T @ beta)
I2inner = P @ I1inner
s1 = StreamFunction(I1inner, coil1)
s2 = StreamFunction(I2inner, coil2)
# s = mlab.triangular_mesh(*mesh1.vertices.T, mesh1.faces, scalars=I1)
# s.enable_contours=True
# s = mlab.triangular_mesh(*mesh2.vertices.T, mesh2.faces, scalars=I2)
# s.enable_contours=True
# Fields and potentials produced by each coil on the evaluation grid.
B1 = CB1 @ s1
B2 = CB2 @ s2
U1 = CU1 @ s1
U2 = CU2 @ s2
#%% Plot
# Cross-sections of both coil pairs at z ~ 0 (for drawing their outlines).
cc1 = scalar_contour(mesh1, mesh1.vertices[:, 2], contours=[-0.001])
cc2 = scalar_contour(mesh2, mesh2.vertices[:, 2], contours=[-0.001])
cx10 = cc1[0][:, 1]
cy10 = cc1[0][:, 0]
cx20 = cc2[0][:, 1]
cy20 = cc2[0][:, 0]
cx11 = cc1[1][:, 1]
cy11 = cc1[1][:, 0]
cx21 = cc2[1][:, 1]
cy21 = cc2[1][:, 0]
# Total in-plane field; line width encodes (log-scaled) field magnitude.
B = (B1.T + B2.T)[:2].reshape(2, x.shape[0], y.shape[0])
lw = np.sqrt(B[0] ** 2 + B[1] ** 2)
lw = 2 * np.log(lw / np.max(lw) * np.e + 1.1)
xx = np.linspace(-1, 1, 16)
# seed_points = 0.56*np.array([xx, -np.sqrt(1-xx**2)])
# seed_points = np.hstack([seed_points, (0.56*np.array([xx, np.sqrt(1-xx**2)]))])
# seed_points = np.hstack([seed_points, (0.56*np.array([np.zeros_like(xx), xx]))])
# Streamline seeds: just inside both coil cross-sections plus a center line.
seed_points = np.array([cx10 + 0.001, cy10])
seed_points = np.hstack([seed_points, np.array([cx11 - 0.001, cy11])])
seed_points = np.hstack([seed_points, (0.56 * np.array([np.zeros_like(xx), xx]))])
# plt.streamplot(x,y, B[1], B[0], density=2, linewidth=lw, color='k',
#               start_points=seed_points.T, integration_direction='both')
# Normalized total scalar potential as the background contour map.
U = (U1 + U2).reshape(x.shape[0], y.shape[0])
U /= np.max(U)
plt.figure()
plt.contourf(X, Y, U.T, cmap="seismic", levels=40)
# plt.imshow(U, vmin=-1.0, vmax=1.0, cmap='seismic', interpolation='bicubic',
#           extent=(x.min(), x.max(), y.min(), y.max()))
plt.streamplot(
    x,
    y,
    B[1],
    B[0],
    density=2,
    linewidth=lw,
    color="k",
    start_points=seed_points.T,
    integration_direction="both",
    arrowsize=0.1,
)
# plt.plot(seed_points[0], seed_points[1], '*')
# Overlay the coil cross-sections in gray.
plt.plot(cx10, cy10, linewidth=3.0, color="gray")
plt.plot(cx20, cy20, linewidth=3.0, color="gray")
plt.plot(cx11, cy11, linewidth=3.0, color="gray")
plt.plot(cx21, cy21, linewidth=3.0, color="gray")
plt.axis("image")
plt.xticks([])
plt.yticks([])
#%%
# Contour levels for the inner-coil stream function: N evenly spaced values
# symmetric about zero, spaced by dd.
N = 20
mm = max(abs(s1))
dd = 2 * mm / N
vmin = -dd * N / 2 + dd / 2
vmax = dd * N / 2 - dd / 2
contour_vals1 = np.arange(vmin, vmax, dd)
# Shield-coil levels reuse the same spacing dd so loop spacing is comparable;
# force an odd count so zero sits between two levels.
mm = max(abs(s2))
N2 = (2 * mm - dd) // dd
if N2 % 2 == 0:
    N2 -= 1
vmin = -dd * N2 / 2
vmax = mm
contour_vals2 = np.arange(vmin, vmax, dd)
contours1 = scalar_contour(mesh1, s1.vert, contours=contour_vals1)
contours2 = scalar_contour(mesh2, s2.vert, contours=contour_vals2)
def setscene(scene1, coil):
    """Apply shared mayavi camera/render settings to a coil figure.

    coil == 1 positions the camera for the inner coil; any other value uses
    the farther camera tuned for the larger shield coil. The hard-coded
    positions/clipping ranges were captured interactively from mayavi.
    """
    scene1.actor.mapper.interpolate_scalars_before_mapping = True
    scene1.module_manager.scalar_lut_manager.number_of_colors = 32
    scene1.scene.y_plus_view()
    if coil == 1:
        scene1.scene.camera.position = [
            4.7267030067743576e-08,
            2.660205137153174,
            8.52196480605194e-08,
        ]
        scene1.scene.camera.focal_point = [
            4.7267030067743576e-08,
            0.4000000059604645,
            8.52196480605194e-08,
        ]
        scene1.scene.camera.view_angle = 30.0
        scene1.scene.camera.view_up = [1.0, 0.0, 0.0]
        scene1.scene.camera.clipping_range = [1.116284842928313, 2.4468228732691104]
        scene1.scene.camera.compute_view_plane_normal()
    else:
        scene1.scene.camera.position = [
            4.7267030067743576e-08,
            3.7091663385397116,
            8.52196480605194e-08,
        ]
        scene1.scene.camera.focal_point = [
            4.7267030067743576e-08,
            0.4000000059604645,
            8.52196480605194e-08,
        ]
        scene1.scene.camera.view_angle = 30.0
        scene1.scene.camera.view_up = [1.0, 0.0, 0.0]
        scene1.scene.camera.clipping_range = [2.948955346473114, 3.40878670176758]
        scene1.scene.camera.compute_view_plane_normal()
    scene1.scene.render()
    scene1.scene.anti_aliasing_frames = 20
    scene1.scene.magnification = 2
# Inner coil: discretized current loops plus stream-function color map
# (only the y > 0 half of the mesh is drawn).
fig = mlab.figure(bgcolor=(1, 1, 1), size=(400, 400))
fig = plot_3d_current_loops(
    contours1, tube_radius=0.005, colors=(0.9, 0.9, 0.9), figure=fig
)
m = abs(s1).max()
mask = mesh1.triangles_center[:, 1] > 0
faces1 = mesh1.faces[mask]
surf = mlab.triangular_mesh(
    *mesh1.vertices.T, faces1, scalars=s1.vert, vmin=-m, vmax=m, colormap="seismic"
)
setscene(surf, 1)
# Shield coil: same visualization, shared color scale m for comparability.
fig = mlab.figure(bgcolor=(1, 1, 1), size=(400, 400))
fig = plot_3d_current_loops(
    contours2, tube_radius=0.005, colors=(0.9, 0.9, 0.9), figure=fig
)
faces2 = mesh2.faces[mesh2.triangles_center[:, 1] > 0]
surf = mlab.triangular_mesh(
    *mesh2.vertices.T, faces2, scalars=s2.vert, vmin=-m, vmax=m, colormap="seismic"
)
setscene(surf, 2)
#%% Plot the coil surface and the field plane
fig = mlab.figure(bgcolor=(1, 1, 1))
surf = mlab.triangular_mesh(*mesh1.vertices.T, mesh1.faces, color=(0.8, 0.2, 0.2))
surf.actor.property.edge_visibility = True
surf.actor.property.render_lines_as_tubes = True
surf.actor.property.line_width = 1.2
surf = mlab.triangular_mesh(*mesh2.vertices.T, mesh2.faces, color=(0.2, 0.2, 0.8))
surf.actor.property.edge_visibility = True
surf.actor.property.render_lines_as_tubes = True
surf.actor.property.line_width = 1.2
# Plot plane
plane = mlab.triangular_mesh(
    np.array([x[0], x[-1], x[-1], x[0]]),
    np.array([x[0], x[0], x[-1], x[-1]]),
    np.zeros(4),
    np.array([[0, 1, 2], [2, 3, 0]]),
    color=(0.7, 0.7, 0.7),
    opacity=0.7,
)
42183819908 | from django.core.cache import get_cache
from django.db.models.query import QuerySet
from avocado.conf import settings
from .model import cache_key_func
PK_LOOKUPS = ('pk', 'pk__exact')
class CacheQuerySet(QuerySet):
    def filter(self, *args, **kwargs):
        """For primary-key-based lookups, instances may be cached to prevent
        excessive database hits. If this is a primary-key lookup, the cache
        will be checked and populated in the `_result_cache` if available.
        """
        clone = super(CacheQuerySet, self).filter(*args, **kwargs)

        opts = self.model._meta
        pk_name = opts.pk.name
        # Recognized lookups: `pk`, `pk__exact`, plus the concrete primary
        # key field name with and without `__exact`. First match wins.
        lookups = list(PK_LOOKUPS) + [pk_name, u'{0}__exact'.format(pk_name)]
        pk = next((kwargs[lookup] for lookup in lookups if lookup in kwargs),
                  None)

        if pk is not None:
            cache_key = cache_key_func([opts.app_label, opts.module_name, pk])
            obj = get_cache(settings.DATA_CACHE).get(cache_key)
            if obj is not None:
                # Pre-populate the result cache so evaluation skips the DB.
                clone._result_cache = [obj]
        return clone
return clone
| chop-dbhi/avocado | avocado/core/cache/query.py | query.py | py | 1,166 | python | en | code | 41 | github-code | 50 |
25216244181 | import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, render_to_response
from django.db.models import F
from django.template import RequestContext
from ui.models import Corpus, Sentence, SentenceAnnotation, UserCorpus
SENTENCE_BATCH_SIZE = 5
@login_required
def corpus_list_view(request):
    """Render the corpus overview page with per-corpus annotation counts."""
    memberships = UserCorpus.objects.filter(user=request.user).select_related('corpus')
    corpus_list = []
    for membership in memberships:
        corpus = membership.corpus
        corpus_list.append(corpus)
        # Attach the user's total and still-unprocessed annotation counts.
        annotations = SentenceAnnotation.objects.filter(annotator=request.user,
                                                        sentence__corpus=corpus)
        corpus.sentence_count = annotations.count()
        corpus.unprocessed_sentence_count = annotations.filter(
            variant_selected__isnull=True).count()
    return render_to_response('corpus.html', RequestContext(request, {'corpus_list': corpus_list, 'page': 'corpus'}))
@login_required
def load_sentences_view(request):
    """Return the next batch of unprocessed annotations as JSON.

    Response items have the shape:
        {"id": 1,
         "sentence": "Tallinn on Eesti pealinn .",
         "gap_start": 17, "gap_end": 25,
         "gap_correct": "Eesti", "gap_variant": "Rootsi"}
    """
    corpus_id = request.POST['corpus_id']
    # Oldest unprocessed annotations of this user for the requested corpus.
    annotations = (SentenceAnnotation.objects
                   .filter(sentence__corpus_id=corpus_id)
                   .filter(variant_selected__isnull=True)
                   .filter(annotator=request.user)
                   .select_related('sentence')
                   .order_by('order')[:SENTENCE_BATCH_SIZE])
    for annotation in annotations:
        sentence = annotation.sentence
        # Resolve the correct word and the distractor for the gap.
        annotation.gap_correct = sentence.text[sentence.gap_start:sentence.gap_end]
        annotation.gap_variant = sentence.variants[annotation.variant]
    return render(request, 'annotations.html', {'annotations': annotations},
                  content_type='application/json; charset=utf-8')
@login_required
def submit_sentences_view(request):
    """Persist a batch of submitted annotations and return the next batch.

    The request body is a JSON list of objects:
        [{"id": 1, "correct_variant_selected": true,
          "both_variants_fit": true, "time": 10, "corpus_id": 35}, ...]
    """
    submitted = json.loads(request.body.decode('utf-8'))
    for item in submitted:
        annotation = SentenceAnnotation(
            id=item['id'],
            # variant_selected stores whether the *distractor* was chosen.
            variant_selected=not item['correct_variant_selected'],
            both_variants_fit=item['both_variants_fit'],
            time=item['time'])
        annotation.save(force_update=True,
                        update_fields=['variant_selected', 'time', 'both_variants_fit'])
    # QueryDicts are immutable; copy before injecting the corpus id so the
    # request can be reused by load_sentences_view.
    request.POST = request.POST.copy()
    request.POST['corpus_id'] = int(submitted[0]["corpus_id"])
    return load_sentences_view(request)
| estnltk/gap-tagger | ui/views.py | views.py | py | 3,213 | python | en | code | 0 | github-code | 50 |
3137590282 | from preprocess_bwt import _get_first_occurence_fn, _get_count_fn
from bwt import burrows_wheeler_transform
from suffix_array import get_suffix_array
# THIS IS A STUB, YOU NEED TO IMPLEMENT THIS
#
# Construct the Burrows-Wheeler transform for given text
# also compute the suffix array
#
# Input:
# text: a string (character `$` assumed to be last character)
#
# Output:
# a tuple (bwt, suffix_array):
# bwt: string containing the Burrows-Wheeler transform of text
# suffix_array: the suffix array of text
def _construct(text):
    """Return (bwt, suffix_array) for *text* (assumed to end with '$')."""
    bwt = burrows_wheeler_transform(text)
    suffix_array = get_suffix_array(text)
    return bwt, suffix_array
# wrapper for the processing functions used to compute
# auxiliary data structures for efficient BWT matching
# see file `preprocess_bwt.py`
def _preprocess_bwt(bwt):
    """Build the (first_occurence, count) helper functions for *bwt*."""
    return _get_first_occurence_fn(bwt), _get_count_fn(bwt)
# class encapsulating exact matching with Burrows-Wheeler transform
#
# Fields:
# _text: string, the target string
# _bwt: string, the burrows-wheeler transform of target string
# _suffix_array: [int], suffix array of target string
# first_occurence: function returning first occurence of each symbol in
# first column of sorted rotation table for bwt, see below
# count: function returning number of occurences of each symbol up to
# a given position, see below
#
# Notes:
# After initializing: `bwt = BWT(target)`:
#
# `bwt.first_occurence(symbol)` returns the row in which symbol occurs first
# in the first column of the sorted rotation table corresponding to the BWT
# of target string
#
# `bwt.count(symbol, position)` returns the number of occurrences of symbol
# up to given position in BWT of target string
class BWT:
    # NOTE(review): this module is Python 2 code (it uses `xrange` below).
    def __init__(self, target):
        # Build the BWT, suffix array and the auxiliary lookup helpers once.
        self._text = target
        self._bwt, self._suffix_array = _construct(self._text)
        self.first_occurence, self.count = _preprocess_bwt(self._bwt)
        # Last-to-first column mapping used by backward search.
        self._l2f = BWT.last_to_first(self._bwt)

    # THIS IS A STUB, YOU NEED TO IMPLEMENT THIS
    #
    # return indices for positions in target string that match
    # query exactly
    #
    # Input:
    #   pattern: string, query string
    #
    # Output:
    #   [int], array of indices of exact matches of query in target
    #     array is empty if no exact matches found
    def get_matches(self, pattern):
        top, bottom = self._get_matching_rows(pattern)
        # (-1, -1) signals "no matching rows".
        if top == -1:
            return []
        matches = []
        # Each matching row's suffix-array entry is a match start position.
        for i in xrange(bottom - top + 1):
            # col = self.get_bwt_col(top + i)
            # matches.append(len(self._text) - col.find("$") + 1)
            matches.append(self._suffix_array[top + i])
        return matches

    @staticmethod
    def last_to_first(last_column):
        """For each row, map its last-column symbol to the row where that
        same occurrence appears in the (sorted) first column."""
        first_column = sorted(last_column)
        mapped_indexes = []
        for ch in last_column:
            i = first_column.index(ch)
            mapped_indexes.append(i)
            # Blank out the slot so repeated symbols map to distinct rows.
            first_column[i] = "\0"
        return mapped_indexes

    # THIS IS A STUB, YOU NEED TO IMPLEMENT THIS
    #
    # return top, bottom pointers for rows of sorted rotations table
    # that start with query
    #
    # Input:
    #   pattern: string, query string
    #
    # Output:
    #   tuple (top, bottom): top and bottom pointers for consecutive rows in
    #     sorted rotations table that start with exact matches to query string
    #     returns (-1, -1) if no matches are found
    def _get_matching_rows(self, pattern):
        # Classic BWT backward search: consume the pattern right-to-left,
        # narrowing the [top, bottom] row range at each step.
        top = 0
        bottom = len(self._bwt) - 1
        while top <= bottom:
            if len(pattern) > 0:
                symbol = pattern[-1:]
                pattern = pattern[:-1]
                substr = self._bwt[top: bottom + 1]
                if symbol in substr:
                    # First/last occurrence of symbol within the window,
                    # then follow the last-to-first mapping.
                    top_index = substr.index(symbol) + top
                    bottom_index = len(substr) - substr[::-1].index(symbol) + top - 1
                    top = self._l2f[top_index]
                    bottom = self._l2f[bottom_index]
                else:
                    return -1, -1
            else:
                return top, bottom
| Heanthor/rosalind | proj4/cmsc423_project4-master/cmsc423_project4-master-ed5d0fae5f139092241f814406dc136d09a08fb8/approximate_matcher/bwt/__init__.py | __init__.py | py | 4,194 | python | en | code | 0 | github-code | 50 |
32361139647 |
import RPi.GPIO as GPIO
import time
def init():
    """Configure GPIO for the drive motor (H-bridge style driver) and the
    steering servo, and start both PWM channels."""
    global in1, in2, en, p, servo
    # Motor driver pins (BOARD numbering): direction inputs + enable.
    in1 = 18
    in2 = 16
    en = 22
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(in1, GPIO.OUT)
    GPIO.setup(in2, GPIO.OUT)
    GPIO.setup(en, GPIO.OUT)
    # Both direction inputs low: motor stopped.
    GPIO.output(in1, GPIO.LOW)
    GPIO.output(in2, GPIO.LOW)
    # Motor speed PWM at 1 kHz, starting at 25% duty.
    p = GPIO.PWM(en, 1000)
    p.start(25)
    # Steering servo on pin 7, standard 50 Hz servo PWM.
    GPIO.setup(7, GPIO.OUT)
    servo = GPIO.PWM(7, 50)
    servo.start(0)
def forward():
    """Drive straight ahead at 75% speed."""
    # 7.15% duty appears to be the straight-ahead servo position
    # (same value as neutral()) — TODO confirm on hardware.
    servo.ChangeDutyCycle(7.15)
    p.ChangeDutyCycle(75)
    GPIO.output(in1, GPIO.HIGH)
    GPIO.output(in2, GPIO.LOW)
def reverse():
    """Drive straight backwards at 75% speed (direction pins inverted)."""
    servo.ChangeDutyCycle(7.15)
    p.ChangeDutyCycle(75)
    GPIO.output(in1, GPIO.LOW)
    GPIO.output(in2, GPIO.HIGH)
def forward_left():
    """Drive forward at reduced (50%) speed while steering left."""
    p.ChangeDutyCycle(50)
    GPIO.output(in1, GPIO.HIGH)
    GPIO.output(in2, GPIO.LOW)
    # 4.65% duty appears to be full-left steering — TODO confirm on hardware.
    servo.ChangeDutyCycle(4.65)
def backward_left():
    """Drive backwards at 75% speed while steering left."""
    servo.ChangeDutyCycle(4.65)
    p.ChangeDutyCycle(75)
    GPIO.output(in1, GPIO.LOW)
    GPIO.output(in2, GPIO.HIGH)
def backward_right():
    """Drive backwards at 75% speed while steering right."""
    # 12.15% duty appears to be full-right steering — TODO confirm on hardware.
    servo.ChangeDutyCycle(12.15)
    p.ChangeDutyCycle(75)
    GPIO.output(in1, GPIO.LOW)
    GPIO.output(in2, GPIO.HIGH)
def forward_right():
    """Drive forward at reduced (50%) speed while steering right."""
    p.ChangeDutyCycle(50)
    GPIO.output(in1, GPIO.HIGH)
    GPIO.output(in2, GPIO.LOW)
    servo.ChangeDutyCycle(12.15)
def neutral():
    """Stop the motor (both direction pins low) and center the steering."""
    p.ChangeDutyCycle(50)
    GPIO.output(in1, GPIO.LOW)
    GPIO.output(in2, GPIO.LOW)
    servo.ChangeDutyCycle(7.15)
# Set GPIO numbering mode
# Set pin 11 as an output, and define as servo1 as PWM pin
# Loop to allow user to set servo angle. Try/finally allows exit
# with execution of servo.stop and GPIO cleanup :)
| RakeshSubbaraman/12---Motor | control.py | control.py | py | 1,617 | python | en | code | 0 | github-code | 50 |
class DFSSolution:
    def solve(self, board):
        """Capture all regions of 'O's fully surrounded by 'X's (LeetCode 130).

        Any 'O' connected (horizontally/vertically) to a border 'O' survives;
        every other 'O' is flipped to 'X'. The board is modified in place and
        nothing is returned.

        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.
        """
        if not board or not board[0]:
            return
        rows, cols = len(board), len(board[0])
        # With two or fewer rows/columns every cell touches the border,
        # so nothing can be captured.
        if rows <= 2 or cols <= 2:
            return
        # Phase 1: flood-fill every border-connected 'O' with sentinel 'F'.
        for r in range(rows):
            for c in (0, cols - 1):
                if board[r][c] == 'O':
                    self.dfs(board, r, c, 'F')
        for c in range(cols):
            for r in (0, rows - 1):
                if board[r][c] == 'O':
                    self.dfs(board, r, c, 'F')
        # Phase 2: restore protected cells; capture the remaining 'O's.
        for r in range(rows):
            for c in range(cols):
                if board[r][c] == 'F':
                    board[r][c] = 'O'
                elif board[r][c] == 'O':
                    self.dfs(board, r, c, 'X')
        return

    def dfs(self, board, r, c, target):
        """Depth-first fill: overwrite the 'O' region containing (r, c)
        with *target*, stopping at 'X' and already-marked 'F' cells."""
        if not (0 <= r < len(board) and 0 <= c < len(board[0])):
            return
        if board[r][c] in ('X', 'F'):
            return
        # Marking with target doubles as the visited flag.
        board[r][c] = target
        for dr, dc in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            self.dfs(board, r + dr, c + dc, target)
        return
# Ad-hoc smoke tests: run the solver on two sample boards and print results.
s = DFSSolution()
board = [["O","X","X","O","X"],
         ["X","O","O","X","O"],
         ["X","O","X","O","X"],
         ["O","X","O","O","O"],
         ["X","X","O","X","O"]]
s.solve(board)
print(board)
# Checkerboard-like case: every 'O' connects to the border, nothing captured.
board = [["X","O","X","X"],
         ["O","X","O","X"],
         ["X","O","X","O"],
         ["O","X","O","X"],
         ["X","O","X","O"],
         ["O","X","O","X"]]
s.solve(board)
print(board)
| ljia2/leetcode.py | solutions/dfs/130.Surrounded.Regions.py | 130.Surrounded.Regions.py | py | 2,603 | python | en | code | 0 | github-code | 50 |
23853339544 | #primeirotermo = int(input('Primeiro termo: '))
#razao = int(input('Razão: '))
#c= primeirotermo
#while c <= (razao*9)+primeirotermo:
# print('{}'.format(c), end='-')
# c+= razao
#pergunta = str(input('\nDeseja mostrar mais alguns termos?(S/N) ')).upper().strip()
#if pergunta == 'S':
# quantos = int(input('Quantos termos? '))
# while c <= (razao*(9+quantos))+primeirotermo:
# print('{}'.format(c), end='-')
# c+= razao
#print('\nFIM')
# Arithmetic progression printer: show the first 10 terms, then keep asking
# how many more terms to append until the user answers 0.
primeirotermo = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
c = primeirotermo
cont = 0
mais = 10
total = 0
while mais != 0:
    total = total + mais
    # Off-by-one fix: `cont <= total` printed 11 terms in the first batch;
    # the commented-out earlier version of this exercise prints exactly 10.
    while cont < total:
        print('{}'.format(c), end='')
        # separator between terms; the last term of a batch gets none
        print(' - ' if cont < total - 1 else '', end='')
        c += razao
        cont += 1
    mais = int(input('\nDeseja adicionar mais quantos valores à sequencia: '))
print('FIM')
16409993795 | import requests
from flask import Flask, render_template, redirect, url_for, flash, jsonify, request
from flask_bootstrap import Bootstrap
from flask_restplus import reqparse, Api, Resource
from rank import *
from prediction import *
from comments import *
from matching_function import *
import json
# Flask app plus a flask-restplus API wrapper that also serves Swagger docs.
app = Flask(__name__)
api = Api(app, title='wine prediction system')
# Shared parser for the /main/value endpoint (note the capitalized keys).
parser = reqparse.RequestParser()
parser.add_argument('Country', type=str)
parser.add_argument('Variety', type=str)
parser.add_argument('Winery', type=str)
@api.route('/main/value')
class prediction(Resource):
    """POST /main/value: predict a wine's price and return recommendations."""
    @api.expect(parser, validate=True)
    def post(self):
        # Bug fix: this class is itself named `prediction`, which rebinds the
        # module-level name and shadows the star-imported prediction()
        # helper, so the original call instantiated this class instead of
        # calling the model. Import the module explicitly to reach the
        # function. (Assumes module `prediction` exports a `prediction`
        # callable, as implied by `from prediction import *` — TODO confirm.)
        import prediction as prediction_module
        args = parser.parse_args(request)
        country = args.get('Country')
        variety = args.get('Variety')
        winery = args.get('Winery')
        price = prediction_module.prediction(country, variety, winery)
        recomm = recommendation(country, variety, winery)
        return {'price': price, 'data': recomm}, 200
@api.route('/main/rank')
class rank(Resource):
    """POST /main/rank: return the top-N wines for a country/variety/price."""
    def post(self):
        parser = reqparse.RequestParser()
        for field in ('country', 'variety', 'price', 'top'):
            parser.add_argument(field, type=str)
        args = parser.parse_args()
        result = ranked(
            args.get('country'),
            args.get('variety'),
            args.get('price'),
            int(args.get('top')),
        )
        return jsonify(result), 200
@api.route('/main/show')
class show(Resource):
    """POST /main/show: return the stored reviews for a wine by name."""
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str)
        wine_name = parser.parse_args().get('name')
        return jsonify(show_reviews(wine_name)), 200
@api.route('/main/add')
class add(Resource):
    """POST /main/add: attach a review (comment + integer points) to a wine."""
    def post(self):
        parser = reqparse.RequestParser()
        for field in ('name', 'comments', 'points'):
            parser.add_argument(field, type=str)
        args = parser.parse_args()
        result = add_reviews(
            args.get('name'),
            args.get('comments'),
            int(args.get('points')),
        )
        return jsonify(result), 200
@api.route('/main/match')
class match(Resource):
    """POST /main/match: suggest wines matching palate/flavor traits and a type."""
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('palate', type=str, action='append')
        parser.add_argument('flavor', type=str, action='append')
        parser.add_argument('type', type=str)
        args = parser.parse_args()
        palate = args.get('palate')
        flavor = args.get('flavor')
        # renamed from `type` so the builtin type() is not shadowed
        wine_type = args.get('type')
        # NOTE(review): if either list field is absent, parse_args yields
        # None and this concatenation raises TypeError — confirm callers
        # always send both.
        traits = palate + flavor
        data = matching_function(wine_type, traits)
        return jsonify(data), 200
if __name__ == '__main__':
    # NOTE(review): debug mode and a 0.0.0.0 bind are development settings;
    # do not expose this configuration in production.
    app.debug = True
    app.run(host='0.0.0.0', port=3000)
| jeremyzhang741/wine_sales_project | apis/api.py | api.py | py | 3,004 | python | en | code | 0 | github-code | 50 |
35185421879 | #11004 K번째수
"""
BOJ #11004 — "K-th number"

Problem:
    N numbers A1, A2, ..., AN are given. Sort A in ascending order and
    print the K-th number from the front.
Input:
    First line: N (1 <= N <= 5,000,000) and K (1 <= K <= N).
    Second line: A1, A2, ..., AN (-10^9 <= Ai <= 10^9).
Output:
    The K-th number of A after sorting.
Example input 1      Example output 1
    5 2                  2
    4 1 2 3 5
"""
# sol 1 5124ms / 693504kb
""" import sys
input = sys.stdin.readline
n,k = map(int, input().split())
arr = sorted(input().split(),key=int)
print(arr[k-1]) """
#-----------------------------------------
# sol 2 4432ms 706240kb
""" import sys
input = sys.stdin.read
arr = input().split()
print(sorted(arr[2:],key=int)[int(arr[1])-1])
"""
# Same approach as sol 1, just phrased differently.
# Note: rebinding `input` to sys.stdin.readline shadows the builtin for speed.
import sys
input = sys.stdin.readline
n,k = map(int, input().split())
print(sorted(map(int, input().split()))[k-1])
20545642833 | import local_db as localdb
# Rolling sample buffers, one per sensor channel; flushed to the local DB
# once six readings have accumulated (see addReadings / clearReadings).
temperatures = []
humiditys = []
pressures = []
gases = []
def addReadings(reading):
    """Buffer one sensor reading; once 6 are collected, store their average.

    `reading` is a dict with "temperature", "humidity", "pressure" and
    "gas" keys.

    Bug fix: the original flushed the buffers when a 7th reading arrived
    and silently dropped that triggering reading, losing one sample per
    cycle. Now the reading is appended first and the flush happens as soon
    as the window holds exactly 6 samples.
    """
    global temperatures
    global humiditys
    global pressures
    global gases
    temperatures.append(reading["temperature"])
    humiditys.append(reading["humidity"])
    pressures.append(reading["pressure"])
    gases.append(reading["gas"])
    print(len(temperatures))
    if len(temperatures) >= 6:
        minuteAverage = averageReadings()
        localdb.insertMinuteReading(minuteAverage)
        clearReadings()
def averageReadings():
    """Return a dict with the average of each buffered sensor channel."""
    channels = {
        "temperature": temperatures,
        "humidity": humiditys,
        "pressure": pressures,
        "gas": gases,
    }
    return {name: averageList(values) for name, values in channels.items()}
def averageList(readingsList):
    """Return the arithmetic mean of ``readingsList``.

    Generalized: the original always divided by the fixed window size 6,
    which was only correct for full 6-sample windows. Dividing by the
    actual length gives the same result for those windows and a correct
    mean for any other size. An empty list yields 0.0 (matching the
    original's 0/6 result) instead of raising ZeroDivisionError.
    """
    if not readingsList:
        return 0.0
    return sum(readingsList) / len(readingsList)
def clearReadings():
    """Empty every sample buffer in place (list identity is preserved)."""
    for sample_buffer in (temperatures, humiditys, pressures, gases):
        sample_buffer.clear()
| auxcodes/pi-env-tracker | python/local_data.py | local_data.py | py | 1,329 | python | en | code | 0 | github-code | 50 |
38735874749 | import torch
import torch.nn as nn
from ..registry import HEADS
from .labelconverter import CTCLabelConverter
from ..builder import build_loss
@HEADS.register_module
class CTCHead(nn.Module):
    """CTC recognition head: a linear projection to the character classes
    plus a CTC loss (PyTorch, Baidu warp-ctc, or a configured loss).

    Args:
        input_size: feature dimension of the incoming sequence tensor.
        charsets: character set handed to CTCLabelConverter.
        batch_max_length: maximum label length (stored; not used below).
        use_baidu_ctc: use warpctc_pytorch.CTCLoss instead of torch's.
        loss: optional loss config dict for build_loss().
    """
    def __init__(self, input_size, charsets,batch_max_length=25,use_baidu_ctc=False,loss=None):
        super(CTCHead, self).__init__()
        self.converter = CTCLabelConverter(charsets)
        self.num_class = len(self.converter.character)
        self.batch_max_length = batch_max_length
        self.use_baidu_ctc = use_baidu_ctc
        if self.use_baidu_ctc:
            # need to install warpctc. see our guideline.
            from warpctc_pytorch import CTCLoss
            self.loss_func = CTCLoss()
        elif loss!=None:
            self.loss_func = build_loss(loss)
        else:
            self.loss_func = torch.nn.CTCLoss(zero_infinity=True)

        self.fc = nn.Linear(input_size,self.num_class)

    def forward(self,data:dict,return_loss:bool,**kwargs):
        """Dispatch to the training (loss) or inference (logits) path."""
        if return_loss:
            return self.forward_train(data)
        else:
            return self.forward_test(data)

    def postprocess(self,preds:torch.Tensor):
        """Greedy-decode (B, T, C) predictions into strings.

        NOTE(review): `scores` is always returned empty — confidence
        scores are not computed here.
        """
        batch_size = preds.size(0)
        # Select max probabilty (greedy decoding) then decode index to character
        preds_size = torch.IntTensor([preds.size(1)] * batch_size)
        _, preds_index = preds.max(2)
        # preds_index = preds_index.view(-1)
        preds_str = self.converter.decode(preds_index, preds_size)
        scores = []
        return preds_str, scores

    def forward_train(self,data:dict):
        """Compute the CTC loss for one batch.

        Expects data["img"] (B, T, F), data["label"] and data["length"].
        """
        img_tensor = data.get("img")
        batch_size = img_tensor.size(0)
        # print(img_tensor.shape)
        device = img_tensor.device  # NOTE(review): unused
        text = data["label"]
        length = data["length"]
        preds = self.fc(img_tensor)
        # every sample in the batch has the same (padded) time length
        preds_size = torch.IntTensor([preds.size(1)] * batch_size)
        if self.use_baidu_ctc:
            preds = preds.permute(1, 0, 2)  # to use CTCLoss format
            loss = self.loss_func(preds, text, preds_size, length) / batch_size
        else:
            length = length.long()
            preds_size= preds_size.long()
            # torch CTCLoss wants log-probs shaped (T, B, C)
            preds = preds.log_softmax(2).permute(1, 0, 2)
            loss = self.loss_func(preds, text, preds_size, length.view(batch_size))
        # print(loss)
        # Replace infinite losses with 6.9 — presumably an empirical cap
        # (~ln(1000)) to keep training stable; TODO confirm the rationale.
        loss = torch.where(torch.isinf(loss), torch.full_like(loss, 6.9), loss)
        return dict(
            loss=loss, ctc_loss=loss
        )

    def forward_test(self,data:dict):
        """Return raw per-timestep class logits for data["img"]."""
        img_tensor = data.get("img")
        preds = self.fc(img_tensor)
        return preds
class FocalCTCloss(torch.nn.Module):
    """CTC loss with focal weighting: alpha * (1 - p)^gamma * ctc_loss,
    where p = exp(-ctc_loss) is the sequence probability. Down-weights
    easy (high-probability) samples, as in focal loss for classification.
    """
    def __init__(self, alpha=0.5, gamma=2.0):
        super(FocalCTCloss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.torch_ctc_loss = torch.nn.CTCLoss(zero_infinity=True)

    def forward(self, log_probs, targets, input_lengths, target_lengths):
        ctc = self.torch_ctc_loss(log_probs, targets, input_lengths, target_lengths)
        probability = torch.exp(-ctc)
        return self.alpha * torch.pow(1 - probability, self.gamma) * ctc
| coldsummerday/text-detect-recognition-hub | texthub/modules/rec_heads/ctc_head.py | ctc_head.py | py | 3,200 | python | en | code | 4 | github-code | 50 |
5357870628 | # -*- coding: utf-8 -*-
"""Several path-related utilities."""
from pathlib import Path
from typing import Union
def nth_parent(src: Union[str, Path], n_times: int = 1) -> Path:
    """Ascend in the `src` path, `n_times`

    Bug fix: `src` is normalized to a `Path` up front, so calling with a
    plain string and ``n_times == 0`` no longer raises ``AttributeError``
    (strings have no ``.resolve()``).

    Args:
        src (Union[str, Path]): Original path.
        n_times (int, optional): How many parents to walk to. Defaults to 1.

    Returns:
        The n-th ancestor of the path (or the root folder if the hierarchy
        tree is smaller than `n_times`, since the root is its own parent).
    """
    path = src if isinstance(src, Path) else Path(src)
    if n_times == 0:
        return path.resolve()
    return nth_parent(path.parent, n_times - 1)
| pwoolvett/python_template | {{ cookiecutter.slug_name }}/{{ cookiecutter.slug_name }}/utils/io_/path_.py | path_.py | py | 724 | python | en | code | 0 | github-code | 50 |
34764191968 | from os import walk, mkdir, remove
from os.path import join, isfile, isdir
from datetime import datetime, timedelta, date
import settings
from settings import (
logger,
DIR_NAME_VIDEO_TIMED,
VIDEO_EXT,
TIMING_EXT,
DIR_NAME_VIDEO_TO_POST,
DIR_NAME_VIDEO_TIMING_PROCESSED,
DATETIME_FORMAT,
MAX_FILES_TO_POST,
DIR_NAME_VIDEO_CLIPPED,
)
from utility import (
get_files_list,
get_subdir_list,
read_file,
mv_file,
get_data_dir_path,
do_shell_command,
read_metadata,
save_metadata,
get_uuid_time,
get_mark,
write_file,
date_create_sort,
)
# Status codes returned by the processing helpers below.
DONE = 0
FAIL = 1
def cat_video():
    """Cut every timed video into clips per its timing file, then archive it.

    For each video in the 'timed' directory that has a matching timing
    file, run ffmpeg once per (start, finish) pair, then move both the
    source video and its timing file to the 'timing processed' directory.
    """
    dir_input_video = get_data_dir_path(DIR_NAME_VIDEO_TIMED)
    all_files = get_files_list(dir_input_video)
    video_files_list = filter_file_by_ext(all_files, VIDEO_EXT)
    # process oldest files first
    video_files_list = date_create_sort(dir_input_video, video_files_list)
    for video_file_name in video_files_list:
        logger.info(video_file_name)
        # skip videos that do not have a timing file yet
        if not isfile(get_input_timing_file_path(video_file_name)):
            continue
        cat_video_by_timing(video_file_name)
        mv_processed_files(video_file_name)
def mv_processed_files(video_file_name):
    """Move a video and its timing file from 'timed' to 'timing processed'."""
    input_video_file_path = get_input_video_file_path(video_file_name)
    processed_video_file_path = get_processed_video_file_path(video_file_name)
    input_timing_file_path = get_input_timing_file_path(video_file_name)
    processed_timing_file_path = get_processed_timing_file_path(video_file_name)
    mv_file(input_video_file_path, processed_video_file_path)
    mv_file(input_timing_file_path, processed_timing_file_path)
def get_input_video_file_path(video_file_name):
    """Absolute path of a video inside the 'timed' directory."""
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TIMED,
        video_file_name)
def get_processed_video_file_path(video_file_name):
    """Absolute path of a video inside the 'timing processed' directory."""
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TIMING_PROCESSED,
        video_file_name)
def get_clipped_video_file_path(video_file_name):
    """Absolute path of a video inside the 'clipped' directory."""
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_CLIPPED,
        video_file_name)
def get_to_post_video_file_path(video_file_name, sub_dir):
    """Target path in 'to post'/<sub_dir>, named <next_datetime>_<mark>.<ext>."""
    next_date = get_next_date_time_str(video_file_name)
    mark = get_mark(video_file_name)
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TO_POST,
        sub_dir,
        '{}_{}.{}'.format(next_date, mark, VIDEO_EXT)
    )
def get_timing_file_name(video_file_name):
    """Map a video file name to its timing file name (swap the extension)."""
    return video_file_name.replace(VIDEO_EXT, TIMING_EXT)
def get_input_timing_file_path(video_file_name):
    """Absolute path of the timing file paired with a 'timed' video."""
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TIMED,
        get_timing_file_name(video_file_name))
def get_processed_timing_file_path(video_file_name):
    """Absolute path of the timing file inside 'timing processed'."""
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TIMING_PROCESSED,
        get_timing_file_name(video_file_name))
def filter_file_by_ext(files_list, filter_ext):
    """Return the file names that contain *filter_ext*.

    Note: this is a substring match, not a strict suffix check — behavior
    preserved from the original implementation.
    """
    return [file_name for file_name in files_list if filter_ext in file_name]
def get_output_video_file_path(video_file_name):
    """Unique output path inside 'timed', named <uuid-time>_<mark>.<ext>."""
    mark = get_mark(video_file_name)
    return join(
        settings.project_dir,
        DIR_NAME_VIDEO_TIMED,
        '{}_{}.{}'.format(get_uuid_time(), mark, VIDEO_EXT))
def get_timing_lines(timing_data):
    """Split the raw timing-file text into its lines."""
    return timing_data.split('\n')
def get_start_and_finish_time(line_index, timing_lines):
    """Return the (start, finish) timestamp pair beginning at *line_index*."""
    return timing_lines[line_index], timing_lines[line_index + 1]
def timing_lines_len(timing_lines):
    """Number of usable start/finish pairs (the last line has no successor)."""
    return len(timing_lines) - 1
def cat_video_by_timing(video_file_name):
    """Cut one video into clips per its timing file; returns DONE.

    Each consecutive pair of timing-file lines is a (start, finish) range;
    pairs touching an empty line (e.g. a trailing newline) are skipped.
    """
    input_video_file_path = get_input_video_file_path(video_file_name)
    # NOTE(review): get_input_timing_file_path() already swaps the extension
    # via get_timing_file_name(), so the outer call here applies the swap
    # twice. The second replace is a no-op, but the call is redundant.
    timing_file_path = get_input_timing_file_path(get_timing_file_name(video_file_name))
    timing_data = read_file(timing_file_path)
    timing_lines = get_timing_lines(timing_data)
    logger.info('processing {}'.format(input_video_file_path))
    for line_index in range(timing_lines_len(timing_lines)):
        start_time, finish_time = get_start_and_finish_time(line_index, timing_lines)
        if '' in [start_time, finish_time]:
            continue
        output_video_file_path = get_output_video_file_path(video_file_name)
        ffmpeg_cat(input_video_file_path, output_video_file_path, start_time, finish_time)
    return DONE
def ffmpeg_cat(input_video_file_path, output_video_file_path, start_time, finish_time):
    """Run ffmpeg to stream-copy the [start_time, finish_time] range."""
    ffmpeg_shell_command = make_ffmpeg_shell_command(input_video_file_path, output_video_file_path, start_time, finish_time)
    do_shell_command(ffmpeg_shell_command)
def make_ffmpeg_shell_command(input_video_file_path, output_video_file_path, start_time, finish_time):
    """Build the ffmpeg stream-copy command for one [start, finish] clip."""
    return 'ffmpeg -ss {0} -to {1} -i "{2}" -c copy "{3}"'.format(
        start_time, finish_time, input_video_file_path, output_video_file_path)
def mark_in_video_file_name(video_file_name):
    """True when the file name's mark is declared under metadata['marks']."""
    mark = get_mark(video_file_name)
    marks = list(read_metadata()['marks'].keys())
    return mark in marks
def get_metadata_timer(video_file_name):
    """Return (timer_name, timer_data) for the timer listing this file's mark.

    NOTE(review): implicitly returns None when no timer matches, which
    makes the unpacking in the two helpers below raise TypeError.
    """
    mark = get_mark(video_file_name)
    metadata_timers = read_metadata()['timers']
    for name, data in metadata_timers.items():
        if mark in data['marks']:
            return name, data
def get_metadata_timer_value(video_file_name):
    """Timer data dict for this file's mark."""
    _, value = get_metadata_timer(video_file_name)
    return value
def get_metadata_timer_name(video_file_name):
    """Timer name for this file's mark."""
    name, _ = get_metadata_timer(video_file_name)
    return name
def date_in_future(next_date_time):
    """True if the datetime string is later than now."""
    date_time = str_to_date_time(next_date_time)
    return date_time > datetime.now()
def get_schedule_tomorrow(metadata_timers_value):
    """First scheduled slot of tomorrow, formatted as a datetime string."""
    schedule_first = metadata_timers_value['schedule'][0]
    # DATETIME_FORMAT[9:] is presumably the time-only tail of the
    # combined date-time format string — TODO confirm against settings.
    next_time = datetime.strptime(schedule_first, DATETIME_FORMAT[9:]).time()
    next_date = date.today() + timedelta(days=1)
    next_date_time = datetime.combine(next_date, next_time)
    return date_time_to_str(next_date_time)
def get_next_date_time_str(video_file_name):
    """Next posting datetime string: the stored one if still in the future,
    otherwise tomorrow's first scheduled slot."""
    value = get_metadata_timer_value(video_file_name)
    next_date_time = value['next_date_time']
    if date_in_future(next_date_time):
        return next_date_time
    return get_schedule_tomorrow(value)
def date_time_to_str(date_time):
    """Format a datetime using the project's DATETIME_FORMAT."""
    return datetime.strftime(date_time, DATETIME_FORMAT)
def str_to_date_time(str_date_time):
    """Parse a datetime string (inverse of date_time_to_str)."""
    return datetime.strptime(str_date_time, settings.DATETIME_FORMAT)
def scheduling_video():
    """Move marked videos from 'timed' into a 'to post' subfolder and
    advance the corresponding timer's next posting time."""
    dir_input_video = get_data_dir_path(DIR_NAME_VIDEO_TIMED)
    video_files_list = get_files_list(dir_input_video)
    video_files_list = date_create_sort(dir_input_video, video_files_list)
    for video_file_name in video_files_list:
        logger.info(video_file_name)
        # only videos whose mark is declared in the metadata are scheduled
        if not mark_in_video_file_name(video_file_name):
            continue
        mv_video_to_post_dir(video_file_name)
        save_next_video_date_time(video_file_name)
def get_to_post_sub_dir(video_file_name):
    """Pick an existing 'to post' subdir with free capacity, or create one."""
    to_post_dir_path = get_data_dir_path(DIR_NAME_VIDEO_TO_POST)
    subdir_list = get_subdir_list(to_post_dir_path)
    for subdir_name in subdir_list:
        if len(get_files_list(join(to_post_dir_path, subdir_name))) < MAX_FILES_TO_POST:
            return subdir_name
    return make_to_post_subdir(video_file_name)
def make_to_post_subdir(video_file_name):
    """Create a fresh 'to post' subdir named <date>_<uuid> and return its name."""
    # first 10 characters of the datetime string = the date part
    next_date = get_next_date_time_str(video_file_name)[:10]
    to_post_dir_path = get_data_dir_path(DIR_NAME_VIDEO_TO_POST)
    subdir_name = '{}_{}'.format(next_date, get_uuid_time())
    mkdir(join(to_post_dir_path, subdir_name))
    return subdir_name
def mv_video_to_post_dir(video_file_name):
    """Move one video from 'timed' into its scheduled 'to post' subdir."""
    input_video_file_path = get_input_video_file_path(video_file_name)
    sub_dir = get_to_post_sub_dir(video_file_name)
    to_post_video_file_path = get_to_post_video_file_path(video_file_name, sub_dir)
    mv_file(input_video_file_path, to_post_video_file_path)
def save_next_video_date_time(video_file_name):
    """Persist the next posting datetime for this file's timer in metadata."""
    next_video_date_time = make_next_video_date_time(video_file_name)
    next_video_date_time_str = date_time_to_str(next_video_date_time)
    metadata = read_metadata()
    timer_name = get_metadata_timer_name(video_file_name)
    metadata['timers'][timer_name]['next_date_time'] = next_video_date_time_str
    save_metadata(metadata)
def make_next_date(video_file_name):
    """Date of the next slot: today if a later slot remains, else tomorrow."""
    last_video_date_time = str_to_date_time(get_next_date_time_str(video_file_name))
    if last_video_date_time.time() < get_last_schedule_time(video_file_name):
        return extract_current_date(last_video_date_time)
    return extract_next_date(last_video_date_time)
def extract_current_date(date_time):
    """Calendar date of *date_time* (time of day discarded)."""
    return date_time.date()
def extract_next_date(date_time):
    """Calendar date of the day after *date_time*."""
    return date_time.date() + timedelta(days=1)
def get_last_schedule_time(video_file_name):
    """Latest time slot of the day for this file's timer."""
    return get_schedule(video_file_name)[-1]
def get_first_schedule_time(video_file_name):
    """Earliest time slot of the day for this file's timer."""
    return get_schedule(video_file_name)[0]
def get_schedule(video_file_name):
    """Sorted list of datetime.time slots configured for this file's timer."""
    value = get_metadata_timer_value(video_file_name)
    schedule_list = value['schedule']
    # NOTE(review): sorts the list stored inside the metadata dict in place.
    schedule_list.sort()
    schedule = []
    for t in schedule_list:
        schedule.append(datetime.strptime(t, DATETIME_FORMAT[9:]).time())
    return schedule
def make_next_time(video_file_name):
    """First slot strictly after the stored time, wrapping to the first slot."""
    schedule = get_schedule(video_file_name)
    last_video_date_time = str_to_date_time(get_next_date_time_str(video_file_name))
    for t in schedule:
        if last_video_date_time.time() < t:
            return t
    return get_first_schedule_time(video_file_name)
def make_next_video_date_time(video_file_name):
    """Combine the next posting date and the next slot into a datetime."""
    next_date = make_next_date(video_file_name)
    next_time = make_next_time(video_file_name)
    return datetime.combine(next_date, next_time)
def add_cover():
    """Append the configured closing clips to every video in 'timed'."""
    dir_input_video = get_data_dir_path(DIR_NAME_VIDEO_TIMED)
    video_files_list = get_files_list(dir_input_video)
    for video_file_name in video_files_list:
        logger.info(video_file_name)
        add_closings(video_file_name)
def get_closing_files(video_file_name):
    """Closing-clip paths configured for this file's mark (may be empty)."""
    mark = get_mark(video_file_name)
    return read_metadata()['marks'][mark].get('closings', [])
def mv_input_clipped_file(video_file_name):
    """Archive the pre-concat source video into the 'clipped' directory."""
    input_video_file_path = get_input_video_file_path(video_file_name)
    clipped_video_file_path = get_clipped_video_file_path(video_file_name)
    mv_file(input_video_file_path, clipped_video_file_path)
def make_concat_task_file(input_video_file_path, closing_file_path):
    """Write the two-line ffmpeg concat list file and return its name."""
    fname = 'concat_task.txt'
    data = "file '{}'\n".format(input_video_file_path)
    data += "file '{}'\n".format(closing_file_path)
    write_file(fname, data)
    return fname
def make_concat_shell_command(concat_task_file_path, output_video_file_path):
    """Build the ffmpeg concat-demuxer command for a prepared task file."""
    parts = ('ffmpeg -f concat -i', concat_task_file_path,
             '-c copy', output_video_file_path)
    return ' '.join(parts)
def concat_video(input_video_file_path, closing_file_path, output_video_file_path):
    """Concatenate a video with one closing clip via ffmpeg's concat demuxer."""
    concat_task_file_path = make_concat_task_file(input_video_file_path, closing_file_path)
    concat_shell_command = make_concat_shell_command(concat_task_file_path, output_video_file_path)
    do_shell_command(concat_shell_command)
    # the task file is temporary; remove it once ffmpeg has run
    remove(concat_task_file_path)
def add_closings(video_file_name):
    """Produce one output per configured closing clip, then archive the source.

    NOTE(review): mv_input_clipped_file() runs inside the loop, so with
    more than one closing the input file is gone before the second pass —
    confirm whether multiple closings are ever configured.
    """
    closing_files = get_closing_files(video_file_name)
    for closing in closing_files:
        closing_file_path = join(settings.project_dir, closing)
        input_video_file_path = get_input_video_file_path(video_file_name)
        output_video_file_path = get_output_video_file_path(video_file_name)
        concat_video(input_video_file_path, closing_file_path, output_video_file_path)
        mv_input_clipped_file(video_file_name)
if __name__ == '__main__':
    # Pipeline: cut timed videos, append closings, then schedule for posting.
    logger.info('app start')
    logger.info('project_dir: {}'.format(settings.project_dir))
    cat_video()
    add_cover()
    scheduling_video()
    logger.info('app stop')
| Akinava/oculus_blog | src/cutter.py | cutter.py | py | 11,980 | python | en | code | 0 | github-code | 50 |
1686079806 | import copy
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.transformers.transformer import inverse_sigmoid
@manager.TRANSFORMER_DECODERS.add_component
class DetectionTransformerDecoder(nn.Layer):
    """Implements the decoder in DETR3D transformer.
    Args:
        return_intermediate (bool): Whether to return intermediate outputs.
        coder_norm_cfg (dict): Config of last normalization layer. Default:
            `LN`.
    """

    def __init__(self,
                 transformerlayers=None,
                 num_layers=None,
                 return_intermediate=False):
        super(DetectionTransformerDecoder, self).__init__()
        # A single layer config is replicated num_layers times; a list must
        # already contain one config per layer.
        if isinstance(transformerlayers, dict):
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = nn.LayerList()
        for i in range(num_layers):
            # 'type_name' selects the registered layer class; note the pop()
            # mutates the config dict, and the rest becomes its kwargs.
            layer_name = transformerlayers[i].pop('type_name')
            decoder_layer = manager.TRANSFORMER_DECODER_LAYERS.components_dict[
                layer_name]
            params = transformerlayers[i]
            self.layers.append(decoder_layer(**params))
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm
        self.return_intermediate = return_intermediate
        self.fp16_enabled = False

    def forward(self,
                query,
                key,
                value,
                query_pos,
                reference_points,
                reg_branches=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `Detr3DTransformerDecoder`.
        Args:
            query (Tensor): Input query with shape
                `(num_query, bs, embed_dims)`.
            reference_points (Tensor): The reference
                points of offset. has shape
                (bs, num_query, 4) when as_two_stage,
                otherwise has shape ((bs, num_query, 2).
            reg_branch: (obj:`nn.ModuleList`): Used for
                refining the regression results. Only would
                be passed when with_box_refine is True,
                otherwise would be passed a `None`.
        Returns:
            Tensor: Results with shape [1, num_query, bs, embed_dims] when
                return_intermediate is `False`, otherwise it has shape
                [num_layers, num_query, bs, embed_dims].
        """
        output = query
        intermediate = []
        intermediate_reference_points = []
        # np.save("d_query.npy", query.numpy())
        # np.save("d_value.npy", kwargs['value'].numpy())
        for lid, layer in enumerate(self.layers):
            # only (x, y) of the 3D reference points feed the attention
            reference_points_input = reference_points[..., :2].unsqueeze(
                [2])  # BS NUM_QUERY NUM_LEVEL 2
            output = layer(
                output,
                key,
                value,
                query_pos,
                reference_points=reference_points_input,
                key_padding_mask=key_padding_mask,
                **kwargs)
            output = output.transpose([1, 0, 2])
            # np.save("d_output_{}.npy".format(lid), output.numpy())
            if reg_branches is not None:
                # iterative box refinement: the regression branch predicts
                # offsets in inverse-sigmoid space; note x/y come from
                # tmp[..., :2] while z comes from tmp[..., 4:5].
                tmp = reg_branches[lid](output)
                assert reference_points.shape[-1] == 3
                new_reference_points = paddle.zeros_like(reference_points)
                new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(
                    reference_points[..., :2])
                new_reference_points[..., 2:
                                     3] = tmp[..., 4:5] + inverse_sigmoid(
                                         reference_points[..., 2:3])
                # detach so refinement gradients do not flow across layers
                reference_points = F.sigmoid(new_reference_points).detach()
                # np.save("d_new_reference_points_{}.npy".format(lid), reference_points.numpy())
            output = output.transpose([1, 0, 2])
            if self.return_intermediate:
                intermediate.append(output)
                intermediate_reference_points.append(reference_points)
        if self.return_intermediate:
            return paddle.stack(intermediate), paddle.stack(
                intermediate_reference_points)
        return output, reference_points
| PaddlePaddle/Paddle3D | paddle3d/models/transformers/decoders.py | decoders.py | py | 4,556 | python | en | code | 479 | github-code | 50 |
30284596843 | import cv2
import mediapipe as mp
from pynput.keyboard import Key, Controller
# Virtual keyboard used to send arrow-key presses, and the default webcam.
keyboard = Controller()
cap = cv2.VideoCapture(0)
# Uncomment the correct code (alternatives kept from the original exercise)
#Width = int(cap.get(cv2.CAP_PROP_FRAME_Height))
#Height = int(cap.get(cv2.CAP_PROP_FRAME_Width))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#width = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# MediaPipe Hands pipeline and drawing utilities.
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
hands = mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5)
# MediaPipe landmark indices of the five fingertips (thumb..pinky).
tipIds = [4, 8, 12, 16, 20]
state = None  # NOTE(review): declared but never assigned elsewhere
# Define a function to count raised fingers and trigger scrolling
def countFingers(image, hand_landmarks, handNo=0):
    """If at least one finger is raised, press Up or Down depending on the
    index fingertip's position relative to the `height-250` threshold."""
    global state
    if hand_landmarks:
        # Get all landmarks of the FIRST VISIBLE hand
        landmarks = hand_landmarks[handNo].landmark
        # Count the raised fingers (thumb, index 4, is skipped below)
        fingers = []
        for lm_index in tipIds:
            # y values of the fingertip and the joint two landmarks below it
            finger_tip_y = landmarks[lm_index].y
            finger_bottom_y = landmarks[lm_index - 2].y
            # A fingertip above its lower joint (smaller y) counts as open
            if lm_index !=4:
                if finger_tip_y < finger_bottom_y:
                    fingers.append(1)
                    # print("FINGER with id ", lm_index, " is Open")
                if finger_tip_y > finger_bottom_y:
                    fingers.append(0)
                    # print("FINGER with id ", lm_index, " is Closed")
        totalFingers = fingers.count(1)
        # Control the presentation
        # Uncomment the correct code (alternatives kept from the exercise)
        #finger_tip_y = (landmarks[8].x)*width
        #finger_tip_x = (landmarks[8].y)*height
        #finger_tip_x = (landmarks[8].x)*height
        #finger_tip_y = (landmarks[8].y)*width
        finger_tip_x = (landmarks[8].x)*width
        finger_tip_y = (landmarks[8].y)*height
        #finger_tip_x = (landmarks[8].x)*Width
        #finger_tip_y = (landmarks[8].y)*Height
        if totalFingers >= 1:
            # NOTE(review): finger_tip_x (scaled by width) is compared
            # against a height-based threshold — the axes look conflated;
            # TODO confirm the intended coordinate.
            if finger_tip_x < height-250:
                print("Rolar para Cima")
                keyboard.press(Key.up)
            if finger_tip_x > height-250:
                print("Rolar para Baixo")
                keyboard.press(Key.down)
def drawHandLanmarks(image, hand_landmarks):
    """Draw the landmark connection graph for every detected hand."""
    if not hand_landmarks:
        return
    for single_hand in hand_landmarks:
        mp_drawing.draw_landmarks(image, single_hand, mp_hands.HAND_CONNECTIONS)
# Main capture loop: read a frame, detect hands, draw and act on them.
while True:
    success, image = cap.read()
    image = cv2.flip(image, 1)  # mirror so the preview matches the user
    # Detect the hand landmarks
    results = hands.process(image)
    # Get the landmark positions from the processed result
    hand_landmarks = results.multi_hand_landmarks
    # Draw the landmarks
    drawHandLanmarks(image, hand_landmarks)
    # Count fingers / trigger scrolling
    countFingers(image, hand_landmarks)
    cv2.imshow("Controlador de Mídia", image)
    # Exit when ESC (27) is pressed (the original comment said space bar,
    # but the code actually checks for ESC)
    key = cv2.waitKey(1)
    if key == 27:
        break
cv2.destroyAllWindows()
| Alice1Kamui/Projeto-130 | presentationControl.py | presentationControl.py | py | 3,516 | python | pt | code | 1 | github-code | 50 |
1151036120 | import numpy as np
from common import matrix_utils
# FastSLAM 2.0 implementation
def h(s, u, dt):
    """Motion model: advance robot state s = (x, y, heading) in SE(2) by
    control u = (v, w) over time step dt. The position update uses the
    pre-update heading; the input array is not modified.
    """
    v, w = u
    nxt = np.copy(s)
    theta = nxt[2]
    nxt[0] += v * np.cos(theta) * dt
    nxt[1] += v * np.sin(theta) * dt
    nxt[2] = theta + w * dt
    return nxt
def g(mu, s):
    """Measurement model: expected (distance, relative heading) of landmark
    mu = (x, y) as seen from robot state s = (x, y, h).

    Bug fix: the displacement must be landmark-minus-robot (mu - p). The
    original computed p - mu, which put the bearing off by pi and made g
    inconsistent with its inverse g_inv (g(g_inv(z, s), s) != z).
    """
    p = np.array([s[0], s[1]])
    disp = mu - p
    dh = np.arctan2(disp[1], disp[0]) - s[2]
    # wrap the relative heading into (-pi, pi]
    dh %= np.pi * 2
    if abs(dh) > np.pi:
        dh -= np.pi * 2
    return np.array([np.linalg.norm(disp), dh])
def g_inv(z, s):
    """Inverse measurement model: landmark position (x, y) implied by
    measurement z = (d, dh) taken from robot state s = (x, y, h).
    """
    d, dh = z
    absolute_heading = s[2] + dh
    offset = d * np.array([np.cos(absolute_heading), np.sin(absolute_heading)])
    return np.array([s[0], s[1]]) + offset
# Filter constants — presumably: R = measurement noise covariance,
# P = prior covariance, p0 = new-feature likelihood floor, rplus/rminus =
# existence-score increments. TODO confirm against the FastSLAM 2.0 paper.
R = np.eye(2) * 0.1
P = np.eye(2) * 0.1
p0 = 0.1
rplus = 0.3
rminus = 0.1
# Homogeneous square outline used to visualize a feature's covariance.
featureShape = np.array([
    [0.7, 0.7, 1],
    [-0.7, 0.7, 1],
    [-0.7, -0.7, 1],
    [0.7, -0.7, 1]
])
class Feature():
    """One landmark hypothesis: Gaussian position estimate + existence score."""
    def __init__(self):
        self.est = np.zeros(2) # X and Y mean (mu)
        self.cov = np.eye(len(self.est)) # covariance (sigma)
        self.exist = rplus # probability it actually exists (tau)

    def copy(self):
        """Return an independent copy of this feature."""
        f = Feature()
        f.est = np.copy(self.est)
        f.cov = np.copy(self.cov)
        f.exist = self.exist
        return f

    # derivative of g with respect to a feature
    # returns G :: R(2x2)
    # TODO: implement
    def G_th(self, s):
        """Jacobian of the measurement model w.r.t. the landmark position."""
        x, y = self.est
        sx, sy, _ = s
        dx = x - sx
        dy = y - sy
        distsqr = dx * dx + dy * dy
        dist = np.sqrt(distsqr)
        # G = [
        #     [ d(dist)/dx, d(dist)/dy ]
        #     [ d(head)/dx, d(head)/dy ]
        # ]
        G = [
            [dx / dist, dy / dist],
            [-dy / distsqr, dx / distsqr]
        ]
        return np.array(G)

    # derivative of g with respect to the robot state
    # returns G :: R(2x3)
    # TODO: implement
    def G_s(self, s):
        """Jacobian of the measurement model w.r.t. the robot state.

        NOTE(review): reads `s.x` / `s.y`, but every other method indexes
        the state as an array (s[0], s[1]); this raises AttributeError for
        ndarray states. TODO fix before use.
        """
        x, y = self.est
        dx = x - s.x
        dy = y - s.y
        distsqr = dx * dx + dy * dy
        dist = np.sqrt(distsqr)
        # G = [
        #     [ d(dist)/dx, d(dist)/dy, d(dist)/dh ]
        #     [ d(head)/dx, d(head)/dy, d(head)/dh ]
        # ]
        G = [
            [dx / dist, dy / dist, 0],
            [-dy / distsqr, dx / distsqr, -1]
        ]
        return np.array(G)

    # s :: x, y, h (robot state) [SE(2)]
    # z :: d, dh (distance and relative heading to one sensed landmark)
    # p :: likelihood that the landmark sensed corresponds to this feature
    def p_sensed(self, s, z):
        """Likelihood that measurement z corresponds to this feature.

        NOTE(review): work in progress — see the inline notes; this method
        cannot run as written.
        """
        Gth = self.G_th(s)
        Gs = self.G_s(s)
        Q = R + Gth @ self.cov @ Gth.T
        zH = g(self.est, s)
        s_pos = np.array([s[0], s[1]])
        # NOTE(review): `np.linalg.inv` is used as an operand of `+` without
        # being called (missing argument — probably inv(P)); this line
        # raises TypeError, and the 3x3/2x2 shapes do not line up either.
        sigma = np.linalg.inv(Gs.T @ np.linalg.inv(Q) @ Gs + np.linalg.inv)
        # mu = sigma @ Gs.T @ np.linalg.inv(Q) @ (z - zH) + s_pos
        # note: The paper says to sample a new robot state S from
        # the landmark probability distribution, but that
        # makes absolutely no sense, so I'm going to use
        # the state passed in to this function.
        # This is equivalent to using zH from above.
        # s ~ N(mu, sigma)
        # cholesky ~= sqrt
        z_diff = z - zH
        p = np.linalg.inv(np.linalg.cholesky(2 * np.pi * Q)) * np.exp((z_diff.T @ np.linalg.inv(Q) @ z_diff) / -2)
        return p

    def getPoints(self):
        """Outline points for drawing this feature.

        NOTE(review): `np.zeros(1, 3)` passes 3 as a dtype and raises
        TypeError (should be np.zeros((1, 3))), and the np.block row
        shapes are inconsistent — this method cannot run as written.
        """
        trans = matrix_utils.translation2d(self.est[0], self.est[1])
        covTf = np.block([
            [ self.cov, np.zeros(1, 3) ],
            [ np.zeros(3, 1), [1] ]
        ])
        return matrix_utils.tfPoints(featureShape, trans @ covTf)
class Particle():
    """One FastSLAM particle: a robot pose hypothesis plus its own map."""
    def __init__(self):
        self.pose = np.zeros(3) # SE(2): (x, y, heading)
        self.features = [] # list of features (above)

    def copy(self):
        """Deep-copy this particle (pose array and every feature).

        Bug fix: the original appended copied features to ``p.pose`` (a
        numpy array, which has no ``append``) instead of ``p.features``,
        so copying any particle with a non-empty map crashed.
        """
        p = Particle()
        p.pose = np.copy(self.pose)
        for f in self.features:
            p.features.append(f.copy())
        return p

    def copyTo(self, other):
        # NOTE(review): this shares the pose array by reference; a later
        # in-place mutation of self.pose would also change `other`.
        other.pose = self.pose

    # u :: v, w
    def act(self, u, dt):
        """Propagate the pose estimate forward in time (no noise injected)."""
        self.pose = h(self.pose, u, dt)
        # pose covariance is not tracked, so that's all folks

    # zs :: [d, dh] (distance and relative heading to sensed landmarks)
    # returns w :: double (weight of this particle)
    # modifies the current particle to incorporate sensor data
    def sense(self, zs):
        """Data-associate each measurement, update or add features, and
        return this particle's importance weight."""
        Ns = []
        for z in zs:
            # likelihood of each known feature, plus p0 for "new feature"
            P = [x.p_sensed(self.pose, z) for x in self.features]
            P.append(p0)
            Ns.append(np.argmax(P)) # index of most likely feature
        w = 0
        # handle observed features
        for i in range(len(Ns)):
            n = Ns[i]
            z = zs[i]
            if n < len(self.features): # Known feature case: EKF update
                f = self.features[n]
                f.exist += rplus
                # lots of the following are recomputations, can be optimized
                Gth = f.G_th(self.pose)
                Q = R + Gth @ f.cov @ Gth.T
                zH = g(f.est, self.pose)
                K = f.cov @ f.G_th(self.pose) @ np.linalg.inv(Q)
                f.est += K @ (z - zH)
                f.cov = (np.eye(len(f.est)) - K @ Gth) @ f.cov
                Gs = f.G_s(self.pose)
                # NOTE(review): `P` here is the likelihood *list* left over
                # from the association loop above (shadowing the module-level
                # covariance P), so this product is almost certainly not what
                # was intended — TODO fix before relying on the weights.
                L = Gs @ P @ Gs.T + Gth @ f.cov @ Gth.T + R
                zDiff = z - zH
                w += np.linalg.inv(np.linalg.cholesky(2 * np.pi * L)) * np.exp((zDiff.T @ np.linalg.inv(L) @ zDiff) / -2)
            elif n == len(self.features): # New feature case
                f = Feature()
                f.est = g_inv(z, self.pose)
                G_th = f.G_th(self.pose)
                f.cov = G_th @ np.linalg.inv(R) @ G_th.T
                # Bug fix: the freshly initialized feature was never stored,
                # so the particle's map could never grow.
                self.features.append(f)
                w += p0
        # later: handle unobserved features within sensor range
        return w
class SLAM():
    """Particle-filter SLAM front end: owns the particle set and resampling."""
    def __init__(self, nParticles=20):
        self.particles = [] # list of particles (above)
        for _ in range(nParticles):
            self.particles.append(Particle())
        # particle shown by getDrawnObjects(); refreshed on every sense()
        self.p_visualized = Particle()

    # u :: v, w
    def act(self, u, dt=0.001):
        """Propagate every particle's pose with control u over dt."""
        for p in self.particles:
            p.act(u, dt)

    def sense(self, zs):
        """Weight all particles by the measurements, then resample with
        replacement proportionally to the normalized weights."""
        ws = []
        for p in self.particles:
            ws.append(p.sense(zs))
        ws = np.array(ws, dtype="float64")
        ws *= (1.0/sum(ws)) # normalize weights
        # resample particles
        newP = []
        for _ in range(0, len(self.particles)):
            newP.append(np.random.choice(self.particles, replace=True, p=ws).copy())
        # set the visualized particle to the max probability particle
        i_vis = np.argmax(ws)
        self.particles[i_vis].copyTo(self.p_visualized)
        # set new particles
        self.particles = newP

    def getDrawnObjects(self):
        """Objects the renderer should draw for this SLAM estimate."""
        return [self.p_visualized]
| lessthantrue/RobotProjects | slam/slam.py | slam.py | py | 7,106 | python | en | code | 3 | github-code | 50 |
70523944156 | import time, threading
from pyndn import Key
from ChatNet import ChatNet, ChatServer
class ChatNoGUI(object):
    """Minimal console chat client.

    Prints every received message to stdout and broadcasts a numbered
    test message once per second, forever.
    """

    def callback(self, nick, text):
        # Invoked by ChatNet for each incoming message.
        print("<%s> %s" % (nick, text))

    def main(self):
        chatnet = ChatNet("/chat", self.callback)
        chatsrv = ChatServer("/chat")
        # Serve incoming chat traffic on a background thread.
        t = threading.Thread(target=chatsrv.listen)
        t.start()
        i = 0
        while True:
            message = "number %d" % i
            print("Sending: %s" % message)
            # Pull pending data so the receive callback fires; the return
            # value was previously bound to an unused local.
            chatnet.pullData()
            chatsrv.send_message(message)
            i += 1
            time.sleep(1)
# Run the client only when executed as a script, so importing this module
# for its classes does not start an infinite send loop.
if __name__ == "__main__":
    ui = ChatNoGUI()
    ui.main()
| cawka/packaging-PyNDN | examples/ndnChat/chatText.py | chatText.py | py | 531 | python | en | code | 0 | github-code | 50 |
# Creates a dashboard with two bar plots from user's choice: (Year) and (Number of countries)
import pandas as pd

imp_tiv = pd.read_csv(r'0-Downloaded_files/imp_tiv.csv')
exp_tiv = pd.read_csv(r'0-Downloaded_files/exp_tiv.csv')

# Prompts user to input how many countries they would like to display data for
# ("number of top countries", 2-40 inclusive).
while True:
    try:
        n_country = int(input('''Enter the number of "Top Importers" or "Top Exporters" you would like to display, (2 to 40): '''))
    except ValueError:  # narrow except: only non-numeric input is expected here
        print("Error: Numbers are the only valid inputs. Please try again.")
        continue
    if n_country in range(2, 41):
        break
    print("The number you entered is not in the valid range. Please try again.")

# Prompts user for the year to display. The original script referenced
# `year_input` below without ever defining it (NameError at runtime) and had
# the year-range error message misplaced in the country-count loop; this loop
# restores the prompt, validating against the years present as data columns.
while True:
    try:
        year_input = int(input("Enter the year you would like to display: "))
    except ValueError:
        print("Error: Numbers are the only valid inputs. Please try again.")
        continue
    if str(year_input) in imp_tiv.columns:
        break
    print("The year you selected is not in the data's range. Please try again.")

# Creates top "n_country" query for chosen year for both "imports" and "exports"
top_imp_values = imp_tiv.nlargest(int(n_country), str(year_input))
top_exp_values = exp_tiv.nlargest(int(n_country), str(year_input))
# Plots data with plotly: greatest arms importers and exporters for specific year (previously selected by user)
import plotly.express as px
from plotly.subplots import make_subplots
# Determines what data is to be displayed in each bar graph: one bar chart per
# trade direction, bars colored by TIV magnitude for the chosen year.
# NOTE(review): assumes `year_input`, `top_imp_values` and `top_exp_values`
# were defined earlier in this script — confirm before running this section alone.
fig1 = px.bar(top_imp_values, x="Country", y=str(year_input),
              text=str(year_input), color=top_imp_values[str(year_input)])
fig2 = px.bar(top_exp_values, x="Country", y=str(year_input),
              text=str(year_input), color=top_exp_values[str(year_input)])
# Defines the plot areas and graphic options: two stacked subplots sharing a
# single y-axis title ("Trend Indicator Values").
fig = make_subplots(rows=2, cols=1, shared_xaxes=False, horizontal_spacing=0.1,
                    subplot_titles=["Top " + str(year_input) + " Arms Imports Countries",
                                    "Top " + str(year_input) + " Arms Exports Countries"],
                    y_title="Trend Indicator Values"
                    )
# Copy the bar traces out of the two standalone figures into the subplot grid.
fig.add_trace(fig1['data'][0], row=1, col=1,)
fig.add_trace(fig2['data'][0], row=2, col=1,)
# Use one shared, fixed ('Portland') color scale for both charts.
fig.update_layout(coloraxis_autocolorscale=False)
fig.update_coloraxes(colorscale='Portland')
# Abbreviate bar labels (SI notation, e.g. 1.2k) and hide any label that
# would not fit at the minimum font size.
fig.update_traces(texttemplate='%{text:.2s}')
fig.update_layout(uniformtext_minsize=6, uniformtext_mode='hide')
fig.layout.coloraxis.colorbar.title = 'TIV'
# Overall figure title; legend is redundant with the color bar, so hide it.
fig.update_layout(showlegend=False,
                  title_text="STOCKHOLM INTERNATIONAL PEACE RESEARCH INSTITUTE<br>" +
                             "<i>Trend Indicator Values<i>")
fig.show()
| Magio94/Arms_trading_package1 | TIV_table_package/3a-python_tiv_plot_bar_year.py | 3a-python_tiv_plot_bar_year.py | py | 2,386 | python | en | code | 0 | github-code | 50 |
6574267034 | from pylixir.core.state import GameState
def assert_effect_changed(
    source: GameState,
    target: GameState,
    effect_index: int,
    amount: int,
) -> None:
    """Assert that ``target`` equals ``source`` after bumping the given board
    effect by ``amount``; a zero ``amount`` means the states must already match.

    Note: mutates ``source`` in place when ``amount`` is non-zero.
    """
    if amount != 0:
        source.board.modify_effect_count(effect_index=effect_index, amount=amount)
    assert source == target
| oleneyl/pylixir | tests/data/council/util.py | util.py | py | 344 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.