seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12178759835 | import os
import json
import pickle
import csv
import time
from collections import defaultdict
from typing import List, Dict, Set, Tuple, Union, Any, DefaultDict
from numpy import mean, median, ndarray
from corpus import Corpus as Cp
from embeddings import Embeddings, get_emb
from clustering import Clustering
from score import Scorer
from utility_functions import get_cmd_args, get_config, get_num_docs, \
get_docs, get_sim
"""
Script to generate a taxonomy.
Execute this script on a server with at least 10G of RAM.
Before executing configure the paths in 'configs.json' or
'configs_template.json'.
Example call -
For paths set for dblp corpus in server paths use:
python3 generate_taxonomy.py -c dblp -l server
"""
# Define global variables.
node_counter = 0
idx_to_term = {}
# {doc-id: {word-id: (term-freq, tfidf)}} doc-length is at word-id -1
doc_distr_type = DefaultDict[int, Union[Tuple[int, int], int]]
term_distr_type = DefaultDict[int, doc_distr_type]
term_distr_base: term_distr_type
def generate_taxonomy() -> None:
    """Generate a taxonomy for a preprocessed corpus.
    1. Set paths.
    2. Load data.
    3. Start recursive taxonomy generation.

    Reads the configuration and command line arguments, resolves all
    input/output paths (which differ between lemmatized and tokenized
    corpora and between embedding types), loads the corpus artifacts
    and then starts the recursion via rec_find_children. The resulting
    taxonomy is written to '<path_out>/hierarchy/taxonomy.csv'.
    """
    # Define globals.
    global idx_to_term
    global path_embeddings_global
    global path_term_distr
    global max_depth
    # Load cmd args and configs.
    print('Load and parse cmd args...')
    config = get_config()
    args = get_cmd_args()
    lemmatized = config['lemmatized']
    emb_type = config['embeddings']
    threshold = config['threshold']
    max_depth = config['max_depth']
    # Set paths.
    print('Set paths...')
    path_out = config['paths'][args.location][args.corpus]['path_out']
    if lemmatized:
        path_term_ids = os.path.join(
            path_out, 'processed_corpus/lemma_terms_idxs.txt')
        path_idx_to_term = os.path.join(
            path_out, 'indexing/idx_to_lemma.json')
        path_df = os.path.join(path_out, 'frequencies/df_lemmas.json')
        # path_tf = os.path.join(path_out, 'frequencies/tf_lemmas.json')
        # path_tfidf = os.path.join(
        #     path_out, 'frequencies/tfidf_lemmas.json')
        # NOTE(review): despite the '.json' suffix this file is read with
        # pickle.load below — confirm the producer writes a pickle.
        path_term_distr = os.path.join(
            path_out, 'frequencies/term_distr_lemmas.json')
        path_base_corpus = os.path.join(
            path_out, 'processed_corpus/pp_lemma_corpus.txt')
        path_base_corpus_ids = os.path.join(
            path_out, 'processed_corpus/lemma_idx_corpus.txt')
        # GloVe/Word2Vec embeddings are stored as '.vec' text files,
        # all other embedding types as pickles.
        if emb_type == 'GloVe' or emb_type == 'Word2Vec':
            path_embeddings_global = os.path.join(
                path_out, 'embeddings/embs_lemma_global_{}.vec'.format(
                    emb_type))
        else:
            path_embeddings_global = os.path.join(
                path_out, 'embeddings/embs_lemma_global_{}.pickle'.format(
                    emb_type))
    else:
        path_term_ids = os.path.join(
            path_out, 'processed_corpus/token_terms_idxs.txt')
        path_idx_to_term = os.path.join(
            path_out, 'indexing/idx_to_token.json')
        path_df = os.path.join(path_out, 'frequencies/df_tokens.json')
        # path_tf = os.path.join(path_out, 'frequencies/tf_tokens.json')
        # path_tfidf = os.path.join(path_out, 'frequencies/tfidf_tokens.json')
        path_term_distr = os.path.join(
            path_out, 'frequencies/term_distr_tokens.json')
        path_base_corpus = os.path.join(
            path_out, 'processed_corpus/pp_token_corpus.txt')
        path_base_corpus_ids = os.path.join(
            path_out, 'processed_corpus/token_idx_corpus.txt')
        if emb_type == 'GloVe' or emb_type == 'Word2Vec':
            path_embeddings_global = os.path.join(
                path_out, 'embeddings/embs_token_global_{}.vec'.format(
                    emb_type))
        else:
            path_embeddings_global = os.path.join(
                path_out, 'embeddings/embs_token_{}_avg.pickle'.format(
                    emb_type))
    # path_dl = os.path.join(path_out, 'frequencies/dl.json')
    path_taxonomy = os.path.join(path_out, 'hierarchy/taxonomy.csv')
    # newline='' is required by the csv module for correct line endings.
    tax_file = open(path_taxonomy, 'w', encoding='utf8', newline='')
    csv_writer = csv.writer(tax_file, delimiter=',')
    # Define starting variables.
    print('Load term-ids...')
    term_ids = load_term_ids(path_term_ids)
    print('Load idx-term mappings...')
    with open(path_idx_to_term, 'r', encoding='utf8') as f:
        idx_to_term_str = json.load(f)
    # JSON keys are strings; convert them back to int indices.
    idx_to_term = {int(k): v for k, v in idx_to_term_str.items()}
    print('Load global embeddings...')
    term_ids_to_embs_global = Embeddings.load_term_embeddings(
        term_ids, path_embeddings_global, idx_to_term)
    print('Load base corpus...')
    base_corpus = get_base_corpus(path_base_corpus)
    print('Load df-base...')
    with open(path_df, 'r', encoding='utf8') as f:
        # {word_id: [doc_id1, ...]}
        df_base_str = json.load(f)
    df_base = {int(k): [int(i) for i in v] for k, v in df_base_str.items()}
    print('load term distr file...')
    global term_distr_base
    term_distr_base = pickle.load(open(path_term_distr, 'rb'))
    # Free the raw JSON dict; the converted df_base is kept.
    del df_base_str
    # Start recursive taxonomy generation.
    rec_find_children(term_ids_local=term_ids, term_ids_global=term_ids,
                      base_corpus=base_corpus,
                      path_base_corpus_ids=path_base_corpus_ids,
                      cur_node_id=0, level=0, df_base=df_base, df=df_base,
                      # cur_repr_terms=[],
                      path_out=path_out,
                      cur_corpus=base_corpus,
                      csv_writer=csv_writer,
                      threshold=threshold,
                      term_ids_to_embs_global=term_ids_to_embs_global,
                      emb_type=emb_type, max_iter=config['max_iter'])
    tax_file.close()
    print('Done.')
def load_term_distr() -> Dict[int, Dict[int, Union[List[float], int]]]:
    """Read the pickled term-distribution mapping from disk.

    Uses the module-global ``path_term_distr`` which is set in
    ``generate_taxonomy``.
    """
    with open(path_term_distr, 'rb') as infile:
        term_distr = pickle.load(infile)
    return term_distr
def rec_find_children(term_ids_local: Set[int],
                      term_ids_global: Set[int],
                      term_ids_to_embs_global: Dict[int, List[float]],
                      df_base: Dict[int, List[int]],
                      # cur_repr_terms: List[Tuple[int, float]],
                      cur_node_id: int,
                      level: int,
                      threshold: float,
                      base_corpus: Set[int],
                      path_base_corpus_ids: str,
                      cur_corpus: Set[int],
                      path_out: str,
                      csv_writer: Any,
                      df: Dict[int, List[int]],
                      emb_type: str,
                      max_iter: int
                      ) -> None:
    """Recursive function to generate child nodes for parent node.
    Args:
        term_ids_local: The ids of the current cluster terms.
        term_ids_global: The ids of all terms.
        term_ids_to_embs_global: Maps all term_ids to their global
            embeddings.
        cur_node_id: The id of the current node in the Taxonomy. The
            current node is the node which contains all the terms in
            term_ids.
        level: The level or deepness of the taxonomy. The root node has
            level 0.
        threshold: The representativeness threshold for terms to be
            pushed up.
        path_base_corpus_ids: Path to the corpus file with all documents
            in index-representation.
        base_corpus: All doc_ids of the documents in the base corpus.
        cur_corpus: All doc_ids of the documents in the current corpus.
        df_base: df values for all terms in the base corpus, Form:
            {term_id: [doc1, ...]}
        path_out: The path to the output directory.
        csv_writer: csv-writer-object used to write taxonomy to file.
        df: Document frequencies of the form: {term-id: List of doc-ids}
        emb_type: The embedding type: 'Word2Vec', 'GloVe' or 'ELMo'.
        max_iter: The maximum number of iterations for adaptive
            spherical clustering.
    """
    # Recursion anchor: maximum depth reached or no terms left.
    if level > max_depth or len(term_ids_local) == 0:
        # write_tax_to_file(cur_node_id, {}, [], csv_writer, only_id=True)
        return None
    print(
        15 * '-' + ' level {} node {} '.format(level, cur_node_id) + 15 * '-')
    msg = 'Start recursion on level {} with node id {}...'.format(level,
                                                                  cur_node_id)
    print(msg)
    print('Number of candidate terms: {}'.format(len(term_ids_local)))
    print('Number of documents in corpus: {}'.format(len(cur_corpus)))
    print('Build corpus file...')
    corpus_path = build_corpus_file(cur_corpus, path_base_corpus_ids,
                                    cur_node_id, path_out)
    # m = size of the subcorpora; shrinks with increasing depth.
    lbc = len(base_corpus)
    m = int(lbc / (5 * (level + 1)))
    print('Length of basecorpus is at {}, m is at {}.'.format(lbc, m))
    print('Train embeddings...')
    # On level 0 the global embeddings are reused; deeper levels train
    # local embeddings on the node's own subcorpus.
    if level != 0:
        emb_path_local = train_embeddings(emb_type, corpus_path,
                                          cur_node_id, path_out,
                                          term_ids_local, cur_corpus)
        print('Get term embeddings...')
        term_ids_to_embs_local = Embeddings.load_term_embeddings(
            term_ids_local, emb_path_local, idx_to_term)
        # {id: embedding}
    else:
        term_ids_to_embs_local = term_ids_to_embs_global
    general_terms = []
    print('Start finding general terms...')
    # Adaptive clustering loop: cluster, score, push up general terms,
    # repeat until nothing is pushed up or max_iter is reached.
    i = 0
    while True:
        i += 1
        info_msg = ' level {} node {} iteration {} '
        print(5 * '-' + info_msg.format(level, cur_node_id, i) + 5 * '-')
        print('Cluster terms...')
        clusters = perform_clustering(term_ids_to_embs_local)
        if len(clusters) == 0:
            print('Stopping clustering because of no clusters entries!')
            break
        # Dict[int, Set[int]]
        cluster_sizes = [len(clus) for label, clus in clusters.items()]
        print('Cluster_sizes: {}'.format(cluster_sizes))
        cluster_centers = Cp.get_topic_embeddings(clusters,
                                                  term_ids_to_embs_global)
        print('Get subcorpora for clusters...')
        sc_scoring, sc_emb_training = Cp.get_subcorpora(
            cluster_centers, clusters, term_distr_base, m, path_out,
            term_ids_to_embs_local, df)
        print('Compute term scores...')
        term_scores = get_term_scores(clusters, cluster_centers, sc_scoring,
                                      term_distr_base, df, level)
        print('Get average and median score...')
        avg_pop, avg_con, avg_total = get_avg_score(term_scores)
        median_pop, median_con, median_total = get_median_score(term_scores)
        msg_avg = (' avg popularity: {:.3f}, avg concentation: {:.3f}, '
                   'avg score: {:.3f}')
        msg_median = (' median popularity: {:.3f}, median concentation: '
                      '{:.3f}, median score: {:.3f}')
        print(msg_avg.format(avg_pop, avg_con, avg_total))
        print(msg_median.format(median_pop, median_con, median_total))
        # print('Remove terms from clusters...')
        # if cur_node_id != 0:
        #     clusters, gen_terms_clus = separate_gen_terms(clusters,
        #                                                   term_scores,
        #                                                   threshold)
        #     general_terms.extend(gen_terms_clus)
        # else:
        #     gen_terms_clus = []
        clusters, gen_terms_clus = separate_gen_terms(clusters, term_scores,
                                                      threshold, level,
                                                      emb_type)
        general_terms.extend(gen_terms_clus)
        print('Terms pushed up: {}'.format(len(gen_terms_clus)))
        len_gtc = len(gen_terms_clus)
        num_loct = len(term_ids_to_embs_local)
        if len_gtc == 0 or num_loct == 0 or i >= max_iter:
            # 2. cond for the case if all terms have been pushed up.
            # print('Get subcorpora for local embedding training...')
            # sc_emb_training = Cp.get_subcorpora_emb_imp(cluster_centers,
            #                                             clusters,
            #                                             term_ids_to_embs_local,
            #                                             df)
            break
        # Keep only embeddings for terms that survived this iteration.
        term_ids_to_embs_local = update_title(term_ids_to_embs_local, clusters)
    # Start preparation of next iteration.
    child_ids = get_child_ids(clusters)
    print('The child ids of {} are {}'.format(cur_node_id, str(child_ids)))
    # Write terms to file.
    print('Write concept terms to file...')
    write_pushed_up_terms_to_file(path_out, cur_node_id, general_terms)
    write_term_scores(path_out, child_ids, clusters, term_scores)
    write_term_center_distances(path_out, child_ids, clusters,
                                cluster_centers, term_ids_to_embs_local)
    write_tax_to_file(cur_node_id, child_ids, [], csv_writer)
    # Free per-node data before recursing to limit peak memory usage.
    del term_scores
    del gen_terms_clus
    del term_ids_to_embs_local
    print('Start new recursion...')
    for label, clus in clusters.items():
        node_id = child_ids[label]
        subcorpus = sc_emb_training[label]
        # Skip degenerate branches with too few terms or documents.
        if len(clus) < 5 or len(subcorpus) < 5:
            print('Stopped recursion to few term or docs.')
            print('terms: {}, docs: {}'.format(len(clus), len(subcorpus)))
            continue
        rec_find_children(term_ids_local=clus, base_corpus=base_corpus,
                          path_base_corpus_ids=path_base_corpus_ids,
                          cur_node_id=node_id, level=level + 1, df=df_base,
                          df_base=df_base,
                          # cur_repr_terms=repr_terms[label],
                          threshold=threshold,
                          cur_corpus=subcorpus,
                          path_out=path_out,
                          csv_writer=csv_writer, max_iter=max_iter,
                          term_ids_to_embs_global=term_ids_to_embs_global,
                          term_ids_global=term_ids_global, emb_type=emb_type)
def get_child_ids(proc_clusters: Dict[int, Set[int]]) -> Dict[int, int]:
    """Assign a fresh taxonomy-node id to every cluster label.

    Increments the module-global node counter once per cluster so node
    ids are unique across the whole taxonomy.

    Args:
        proc_clusters: A dict of the form {label: Set of term-ids}
            where the set of term-ids is a cluster.
    Return:
        A dictionary mapping labels to child-node-ids.
        {label: child-node-id}
    """
    global node_counter
    label_to_node_id = {}
    for cluster_label in proc_clusters:
        node_counter += 1
        label_to_node_id[cluster_label] = node_counter
    return label_to_node_id
def write_tax_to_file(cur_node_id: int,
                      child_ids: Dict[int, int],
                      repr_terms: List[Tuple[int, float]],
                      csv_writer: Any,
                      only_id: bool = False
                      ) -> None:
    """Write the current node with terms and child-nodes to file.

    Each row has the form:
        node-id, child-node-id..., '<idx>|<term>|<score>'...
    The term score is the one which got the term pushed up (the highest
    one). With only_id=True a stub row [node-id, 'None'] is written.

    Args:
        cur_node_id: Id of the node being written.
        child_ids: Maps cluster labels to child-node-ids.
        repr_terms: Tuples of the form (term-idx, term-score).
        csv_writer: csv-writer-object used to write the taxonomy.
        only_id: If True, write only the node id and no children/terms.
    """
    if only_id:
        # Write the id as a string for consistency with the main branch.
        row = [str(cur_node_id), str(None)]
    else:
        concept_terms = []
        for idx, score in repr_terms:
            term = idx_to_term[idx]
            term_w_score = '{}|{}|{:.3f}'.format(idx, term, score)
            concept_terms.append(term_w_score)
        child_nodes = [str(c) for c in child_ids.values()]
        row = [str(cur_node_id)] + child_nodes + concept_terms
    # print('Write: {}'.format(row))
    csv_writer.writerow(row)
def write_pushed_up_terms_to_file(path_out: str,
                                  cur_node_id: int,
                                  general_terms: List[Tuple[int, float]]
                                  ) -> None:
    """Write the pushed up terms, belonging to a cluster to file.

    Args:
        path_out: Path to the output directory.
        cur_node_id: The id of the current node.
        general_terms: A list of terms of the form (term_id, score)
    Output:
        A file named '<cur_node_id>.txt' inside 'concept_terms/', with
        one term per line of the form:
            term_id SPACE term SPACE score NEWLINE
    """
    out_dir = os.path.join(path_out, 'concept_terms/')
    fpath = out_dir + str(cur_node_id) + '.txt'
    lines = ['{} {} {}\n'.format(term_id, idx_to_term[term_id], score)
             for term_id, score in general_terms]
    with open(fpath, 'w', encoding='utf8') as fout:
        fout.writelines(lines)
def write_term_center_distances(path_out: str,
                                child_ids: Dict[int, int],
                                clusters: Dict[int, Set[int]],
                                cluster_centers: Dict[int, ndarray],
                                term_ids_to_embs_local: Dict[int, ndarray]
                                ) -> None:
    """Write each term's similarity to its cluster center to file.

    One file per child node, '<node-id>_cnt_dists.txt' inside
    'concept_terms/', with one line per term of the form:
        term_id SPACE term SPACE similarity NEWLINE

    Args:
        path_out: The path to the output directory.
        child_ids: A dictionary mapping cluster labels to node ids.
        clusters: A dictionary mapping a cluster label to a set of
            term-ids.
        cluster_centers: Maps the cluster-label to the cluster center
            /topic-embedding.
        term_ids_to_embs_local: Maps term indices to the term's
            embedding.
    """
    out_dir = os.path.join(path_out, 'concept_terms/')
    for label, node_id in child_ids.items():
        center = cluster_centers[label]
        lines = []
        for term_id in clusters[label]:
            similarity = get_sim(center, term_ids_to_embs_local[term_id])
            lines.append('{} {} {}\n'.format(
                term_id, idx_to_term[term_id], similarity))
        fpath = os.path.join(out_dir, '{}_cnt_dists.txt'.format(node_id))
        with open(fpath, 'w', encoding='utf8') as fout:
            fout.writelines(lines)
def write_term_scores(path_out: str,
                      child_ids: Dict[int, int],
                      clusters: Dict[int, Set[int]],
                      term_scores: Dict[int, Tuple[float, float, float]]
                      ) -> None:
    """Write the final term-scores for all terms, not pushed up to file.

    One file per child node, '<node-id>_scores.txt' inside
    'concept_terms/', with one line per term of the form:
        term_id SPACE term SPACE total-score NEWLINE

    Args:
        path_out: The path to the output directory.
        child_ids: A dictionary mapping cluster labels to node ids.
        clusters: A dictionary mapping a cluster label to a set of
            term-ids.
        term_scores: A dictionary mapping a term-id to a tuple of the
            form: (pop, con, score)
    """
    out_dir = os.path.join(path_out, 'concept_terms/')
    for label, node_id in child_ids.items():
        fpath = os.path.join(out_dir, '{}_scores.txt'.format(node_id))
        lines = []
        for term_id in clusters[label]:
            # Index 2 is the combined (total) score.
            lines.append('{} {} {}\n'.format(
                term_id, idx_to_term[term_id], term_scores[term_id][2]))
        with open(fpath, 'w', encoding='utf8') as fout:
            fout.writelines(lines)
def separate_gen_terms(clusters: Dict[int, Set[int]],
                       term_scores: Dict[int, Tuple[float, float, float]],
                       threshold: float,
                       level,
                       emb_type: str
                       ) -> Tuple[Dict[int, Set[int]],
                                  List[Tuple[int, float]]]:
    """Remove general terms and unpopular terms from clusters.

    Terms whose total score falls below the representativeness
    threshold are pushed up to the parent node and removed from their
    cluster. For ELMo embeddings, if no term falls below the threshold,
    the lowest-scoring term of each cluster is pushed up instead.

    Args:
        clusters: A list of clusters. Each cluster is a set of doc-ids.
        term_scores: Maps each term-idx to its popularity and
            concentrations.
        threshold: The representativeness-threshold at which terms are
            pushed up.
        level: The current taxonomy level.
        emb_type: The embedding type.
    Return:
        proc_cluster: Same as the input variable 'clusters', but with
            terms removed.
        concept_terms: A list of tuples of the form (term-id, score).
    """
    # The root level uses a fixed threshold regardless of the config.
    if level == 0:
        threshold = 0.25
    print('Actual threshold: {}'.format(threshold))
    # Collect every term whose total score is below the threshold.
    concept_terms_scores = [
        (term_id, term_scores[term_id][2])
        for clus in clusters.values()
        for term_id in clus
        if term_scores[term_id][2] < threshold
    ]
    # ELMo fallback: if nothing was pushed up, push up the weakest term
    # of each cluster so the recursion still makes progress.
    if emb_type == 'ELMo' and not concept_terms_scores:
        for clus in clusters.values():
            weakest = min(((tid, term_scores[tid][2]) for tid in clus),
                          key=lambda pair: pair[1])
            concept_terms_scores.append(weakest)
    # Remove the pushed-up terms from every cluster.
    pushed_up = {tid for tid, _ in concept_terms_scores}
    proc_clusters = {label: clus - pushed_up
                     for label, clus in clusters.items()}
    return proc_clusters, concept_terms_scores
def build_corpus_file(doc_ids: Set[int],
                      path_base_corpus: str,
                      cur_node_id: int,
                      path_out: str
                      ) -> str:
    """Generate corpus file from document ids.

    Streams the base corpus once and copies only the documents whose
    index is in doc_ids. Documents are separated by a blank line.

    Args:
        doc_ids: The ids of the documents that make up the corpus.
        path_base_corpus: Path to the corpus file with all documents.
        cur_node_id: Id of the current node. Used for the name of the
            corpus file.
        path_out: Path to the output directory.
    Return:
        The path to the generated corpus file:
        'processed_corpora/<cur_node_id>_corpus.txt'
    """
    p_out = os.path.join(path_out, 'processed_corpus/{}.txt'.format(
        cur_node_id))
    # Buffer to store n number of docs. (less writing operations)
    docs_str = ''
    # yields sentences as strings
    with open(p_out, 'w', encoding='utf8') as f_out:
        for i, doc in enumerate(get_docs(path_base_corpus,
                                         word_tokenized=False)):
            if i in doc_ids:
                doc_str = ''
                for sent in doc:
                    line = sent + '\n'
                    doc_str += line
                # Blank line terminates the document.
                doc_str += '\n'
                docs_str += doc_str
            # Flush the buffer periodically to bound memory usage.
            if i % 1000 == 0:
                f_out.write(docs_str)
                docs_str = ''
        f_out.write(docs_str)
    return p_out
def train_embeddings(emb_type: str,
                     path_corpus: str,
                     cur_node_id: int,
                     path_out_dir: str,
                     term_ids: Set[int],
                     doc_ids: Set[int],
                     ) -> str:
    """Train embeddings of the configured type on the given corpus.

    Args:
        emb_type: The type of the embeddings: 'Word2Vec', 'GloVe' or 'ELMo'.
        path_corpus: The path to the corpus file.
        cur_node_id: Id of the current node. Used for the name of the
            embedding file.
        path_out_dir: The path to the output directory.
        term_ids: Ids of the terms to embed.
        doc_ids: Ids of the documents to train on.
    Return:
        The path to the embedding file, e.g.
        'embeddings/<cur_node_id>_w2v.vec'
    """
    emb_cls = get_emb(emb_type)
    emb_path = emb_cls.train(path_corpus, str(cur_node_id), path_out_dir,
                             term_ids, doc_ids)
    return emb_path
def perform_clustering(term_ids_to_embs: Dict[int, List[float]]
                       ) -> Dict[int, Set[int]]:
    """Cluster the given terms into 5 clusters.

    Args:
        term_ids_to_embs: A dictionary mapping term-ids to their
            embeddings.
    Return:
        A dictionary mapping each cluster label to its cluster.
        Each cluster is a set of term-ids.
    """
    # With fewer than 5 terms every term becomes a singleton cluster.
    if len(term_ids_to_embs) < 5:
        return {i: {tid} for i, tid in enumerate(term_ids_to_embs)}
    # Otherwise run spherical clustering on the embeddings.
    clusterer = Clustering()
    ids_and_embs = list(term_ids_to_embs.items())
    fit_result = clusterer.fit([emb for _, emb in ids_and_embs])
    print(' Density:', fit_result['density'])
    clusters = defaultdict(set)
    for (term_id, _), label in zip(ids_and_embs, fit_result['labels']):
        clusters[label].add(term_id)
    return clusters
def load_term_ids(path_term_ids: str) -> Set[int]:
    """Load the ids of all candidate terms.

    Args:
        path_term_ids: The path to the file containing term_ids. The
            file has one id per line.
    Return:
        The set of all term ids in the file.
    """
    with open(path_term_ids, 'r', encoding='utf8') as fin:
        return {int(line.strip('\n')) for line in fin}
def update_title(term_ids_to_embs_local: Dict[int, ndarray],
                 clusters: Dict[int, Set[int]]
                 ) -> Dict[int, ndarray]:
    """Update the term_ids_to_embs-variable (title).

    Build a new mapping restricted to the terms that still appear in
    any of the given clusters.

    Args:
        term_ids_to_embs_local: A dict mapping term_ids to embeddings.
        clusters: A dict mapping each cluster label to a cluster.
    Return:
        The filtered {term-id: embedding} mapping.
    """
    return {tid: term_ids_to_embs_local[tid]
            for clus in clusters.values()
            for tid in clus}
def get_avg_score(term_scores: Dict[int, Tuple[float, float, float]]
                  ) -> Tuple[float, float, float]:
    """Return the mean popularity, concentration and total score.

    Args:
        term_scores: Maps a term-id to (popularity, concentration, total).
    """
    pop_vals = [scores[0] for scores in term_scores.values()]
    con_vals = [scores[1] for scores in term_scores.values()]
    tot_vals = [scores[2] for scores in term_scores.values()]
    return float(mean(pop_vals)), float(mean(con_vals)), float(mean(tot_vals))
def get_median_score(term_scores: Dict[int, Tuple[float, float, float]]
                     ) -> Tuple[float, float, float]:
    """Return the median popularity, concentration and total score.

    Args:
        term_scores: Maps a term-id to (popularity, concentration, total).
    """
    pop_vals = [scores[0] for scores in term_scores.values()]
    con_vals = [scores[1] for scores in term_scores.values()]
    tot_vals = [scores[2] for scores in term_scores.values()]
    return (float(median(pop_vals)), float(median(con_vals)),
            float(median(tot_vals)))
def get_term_scores(clusters: Dict[int, Set[int]],
                    cluster_centers: Dict[int, List[float]],
                    subcorpora: Dict[int, Set[int]],
                    term_distr: term_distr_type,
                    df,
                    level: int
                    ) -> Dict[int, Tuple[float, float, float]]:
    """Get the popularity and concentration for each term in clusters.

    The popularity of a term is always the popularity for the cluster
    the term belongs to. The concentration is cluster-independent.
    Delegates the actual computation to the Scorer class.

    Args:
        clusters: A list of clusters. Each cluster is a set of term-ids.
        cluster_centers: Maps the cluster label to a vector as the
            center direction of the cluster.
        subcorpora: Maps each cluster label to the relevant doc-ids.
        term_distr: For description look in the type descriptions at the
            top of the file.
        df: Document frequencies of the form: {term-id: List of doc-ids}
        level: The recursion level.
    Return:
        A dictionary mapping each term-id to a tuple of the form:
        (popularity, concentration, total)
    """
    return Scorer(clusters, cluster_centers, subcorpora,
                  level).get_term_scores(term_distr, df)
def get_base_corpus(path_base_corpus: str) -> Set[int]:
    """Get the set of doc-ids making up the base corpus.

    Doc-ids are simply the indices 0..num_docs-1.

    Args:
        path_base_corpus: Path to the corpus file.
    Return:
        The set of all doc-ids.
    """
    # set(range(n)) avoids materializing an intermediate list.
    return set(range(get_num_docs(path_base_corpus)))
if __name__ == '__main__':
    # Time the full taxonomy generation run for logging purposes.
    start_time = time.time()
    generate_taxonomy()
    end_time = time.time()
    time_used = end_time - start_time
    print('Time used: {}'.format(time_used))
    print('Finished.')
| jagol/BA_Thesis | pipeline/generate_taxonomy.py | generate_taxonomy.py | py | 29,029 | python | en | code | 2 | github-code | 13 |
3190931939 | import sys
import copy
def count_num(ll, n, m):
    """Count the rows that hold the maximum in at least one column.

    For each of the m columns the column maximum is found and every row
    that attains it is flagged; the result is the number of flagged rows.

    Replaces the original implementation which used an unnecessary
    deepcopy, repeated O(n) `.index` scans, and a bare `except` as the
    loop terminator. Behavior is identical.

    Args:
        ll: The matrix as a list of n rows with m integers each.
        n: Number of rows.
        m: Number of columns.
    Return:
        The number of rows that are maximal in some column.
    """
    is_max_somewhere = [False] * n
    for col in range(m):
        column = [ll[row][col] for row in range(n)]
        col_max = max(column)
        for row, value in enumerate(column):
            if value == col_max:
                is_max_somewhere[row] = True
    return sum(is_max_somewhere)
# Read the whole input from stdin: the first line holds n and m,
# every following line is one matrix row. Stop at the first empty line.
ll = []
while True:
    line = sys.stdin.readline().strip()
    if not line:
        break
    line = list(map(int, line.split()))
    ll.append(line)
# The first parsed row carries the dimensions; the rest is the matrix.
n = ll[0][0]
m = ll[0][1]
ll.pop(0)
res = count_num(ll, n, m)
print(res)
1042385791 | import telebot
from telebot import types
from random import choice
bot = telebot.TeleBot('')
begin = 221
total = begin
limit = 28
@bot.message_handler(commands=['start'])
def star(message):
    """Handle /start: greet the user, show the rules and the lot button."""
    man = message.from_user.first_name
    bot.send_message(message.chat.id, f'Привет, {man}!')
    rules(message)
    button(message)
def rules(message):
    """Send the game rules (candy count and per-turn limit) to the chat."""
    rules = f'Будем играть в конфеты!\nПравила игры:\n\
Количество наших конфет: {total}.\n\
Берем по очереди не больше {limit} конфет.\n\
Кто заберет последние конфеты - тот и победил!'
    bot.send_message(message.chat.id, str(rules))
@bot.message_handler(commands=['button'])
def button(message):
    """Show a one-button reply keyboard for drawing lots."""
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    but = types.KeyboardButton('кинуть жребий')
    markup.add(but)
    bot.send_message(
        message.chat.id, 'Для начала кинем жребий!', reply_markup=markup)
@bot.message_handler(content_types=['text'])
def fate(message):
    """On any text message: randomly decide who moves first and begin."""
    lot = choice(['bot', 'man'])
    if lot == 'bot':
        bot.send_message(message.chat.id, 'Я хожу первым!')
        take_bot(message)
    else:
        take_man(message)
def take_bot(message):
    """Make the bot's move.

    If the bot can take everything (0 < total <= limit) it wins and the
    game resets; otherwise it leaves a multiple of (limit + 1) candies
    for the opponent.
    """
    global begin, total, limit
    bot.send_message(message.chat.id, f'Осталось {total} конфет.')
    if total > 0 and total <= limit:
        bot.send_message(
            message.chat.id, f'Я забираю все оставшиеся конфеты и побеждаю!')
        bot.send_message(message.chat.id, f'Если хочешь попробовать еще раз - введи любой символ!')
        # Reset the pile for the next game.
        total = begin
        return total
    elif total > limit:
        # NOTE(review): when total is a multiple of limit + 1 the bot
        # "takes" 0 candies — confirm this is the intended behavior.
        take = total % (limit + 1)
        total -= take
        bot.send_message(message.chat.id, f'Я беру {take} конфет.')
        take_man(message)
def count(message):
    """Parse the player's requested take and apply it to the pile.

    Fixes two issues in the original: the upper bound was hard-coded as
    29 instead of following the ``limit`` global, and non-numeric input
    crashed with an unhandled ValueError.
    """
    global total, limit
    try:
        take = int(message.text)
    except ValueError:
        take = None
    if take is not None and 1 <= take <= limit:
        total -= take
        take_bot(message)
    else:
        bot.send_message(message.chat.id, f'Можно взять от 1 до {limit} конфет.')
        take_man(message)
def take_man(message):
    """Prompt the human player's move, or declare their win and reset."""
    global begin, total, limit
    man = message.from_user.first_name
    bot.send_message(message.chat.id, f'Осталось {total} конфет.')
    if total > 0 and total <= limit:
        bot.send_message(
            message.chat.id, f'Поздравляю, {man}, ты победил! Забирай оставшиеся {total} конфет.')
        # Reset the pile for the next game.
        total = begin
        return total
    elif total > limit:
        bot.send_message(message.chat.id, f'Ok, {man}, твой ход!\nСколько хочешь забрать конфет?')
        # The next message from this chat is routed to count().
        bot.register_next_step_handler(message, count)
# Start the long-polling loop (blocks forever).
bot.infinity_polling()
| yakdd/python_seminars | bonbones/main.py | main.py | py | 2,943 | python | ru | code | 0 | github-code | 13 |
5348615799 | from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from collections import defaultdict
import copy
import numpy as np
import dgl
import torch
def set_atommap(mol, num = 0):
    """Assign sequential atom-map numbers num, num+1, ... to mol's atoms.

    NOTE(review): atoms are numbered sequentially starting at ``num``;
    confirm callers do not expect a single shared map number.
    """
    for offset, atom in enumerate(mol.GetAtoms()):
        atom.SetAtomMapNum(num + offset)
    return mol
#smiles->Mol
def get_mol(smiles):
    """Parse a SMILES string into a kekulized Mol; None on parse failure."""
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    Chem.Kekulize(mol)
    return mol
#Mol->smiles
def get_smiles(mol):
    """Serialize a Mol into its kekulized SMILES representation."""
    smiles = Chem.MolToSmiles(mol, kekuleSmiles = True)
    return smiles
#Mol->Mol (Error->None)
def sanitize(mol, kekulize = True):
    """Round-trip a Mol through SMILES to normalize it.

    Returns None when the round trip fails — this is deliberate
    best-effort behavior used throughout the decomposition code.
    """
    try:
        smiles = get_smiles(mol) if kekulize else Chem.MolToSmiles(mol)
        mol = get_mol(smiles) if kekulize else Chem.MolFromSmiles(smiles)
    except:
        # Any RDKit failure means the fragment is not a valid molecule.
        mol = None
    return mol
def is_aromatic_ring(mol):
    """Return True iff mol is a single ring whose bonds are all aromatic."""
    # A single simple ring has exactly as many bonds as atoms.
    if mol.GetNumAtoms() != mol.GetNumBonds():
        return False
    return all(b.GetBondType() == Chem.rdchem.BondType.AROMATIC
               for b in mol.GetBonds())
def copy_atom(atom, atommap = True):
    """Clone an atom (symbol and formal charge), optionally keeping its map number."""
    clone = Chem.Atom(atom.GetSymbol())
    clone.SetFormalCharge(atom.GetFormalCharge())
    if atommap:
        clone.SetAtomMapNum(atom.GetAtomMapNum())
    return clone
def copy_edit_mol(mol):
    """Deep-copy mol into an editable RWMol, preserving charges and atom maps."""
    new_mol = Chem.RWMol(Chem.MolFromSmiles(''))
    for atom in mol.GetAtoms():
        new_mol.AddAtom(copy_atom(atom))
    for bond in mol.GetBonds():
        begin_idx = bond.GetBeginAtom().GetIdx()
        end_idx = bond.GetEndAtom().GetIdx()
        new_mol.AddBond(begin_idx, end_idx, bond.GetBondType())
    return new_mol
def get_clique_mol(mol, atoms):
    """Extract the sub-molecule induced by the given atom indices.

    Args:
        mol: The parent molecule.
        atoms: Atom indices of the clique/fragment to extract.
    Return:
        The sanitized fragment Mol, or None if sanitization fails.
    """
    smiles = Chem.MolFragmentToSmiles(mol, atoms, kekuleSmiles = False)
    new_mol = Chem.MolFromSmiles(smiles, sanitize = False)
    new_mol = copy_edit_mol(new_mol).GetMol()
    new_mol = sanitize(new_mol, kekulize = False)
    #if tmp_mol is not None: new_mol = tmp_mol
    return new_mol
#Valence adjustment by hydrogen addition after decomposition
def add_Hs(rwmol, a1, a2, bond):
    """Saturate a1 and a2 with explicit hydrogens after breaking `bond`.

    For every unit of bond order removed, one hydrogen atom (map number
    0) is attached to each of the two former bond partners so their
    valences stay correct after the bond is deleted.

    Args:
        rwmol: Editable molecule the hydrogens are added to.
        a1, a2: The two atoms that shared `bond`.
        bond: The bond that is being removed.
    Return:
        The modified rwmol.
    Raises:
        ValueError: If the bond type is aromatic or otherwise
            unsupported. (The original printed an error message and then
            crashed with a NameError on the undefined bond order; an
            explicit exception makes the failure attributable.)
    """
    bond_orders = {'SINGLE': 1, 'DOUBLE': 2, 'TRIPLE': 3}
    bond_type = str(bond.GetBondType())
    if bond_type not in bond_orders:
        raise ValueError('add_Hs: unsupported bond type {}'.format(bond_type))
    for i in range(bond_orders[bond_type]):
        new_idx = rwmol.AddAtom(Chem.Atom(1))
        rwmol.GetAtomWithIdx(new_idx).SetAtomMapNum(0)
        rwmol.AddBond(new_idx, a1.GetIdx(), Chem.BondType.SINGLE)
        new_idx = rwmol.AddAtom(Chem.Atom(1))
        rwmol.GetAtomWithIdx(new_idx).SetAtomMapNum(0)
        rwmol.AddBond(new_idx, a2.GetIdx(), Chem.BondType.SINGLE)
    return rwmol
#Valence adjustment by removing hydrogen after connecting
def remove_Hs(rwmol, a1, a2, bond):
    """Remove explicit hydrogens from a1 and a2 before re-forming `bond`.

    Inverse of add_Hs: for every unit of bond order being restored, one
    hydrogen is removed from each of the two atoms so their valences
    stay correct after the connection.

    Args:
        rwmol: Editable molecule.
        a1, a2: The atoms that are being reconnected.
        bond: Either a Bond object or an integer bond-order code
            (0 -> single, 1 -> double, 2 -> triple).
    Return:
        The modified RWMol with hydrogens removed and the molecule
        re-sanitized.
    """
    try:
        if str(bond.GetBondType()) == 'SINGLE':
            num = 1
        elif str(bond.GetBondType()) == 'DOUBLE':
            num = 2
        elif str(bond.GetBondType()) == 'TRIPLE':
            num = 3
        elif str(bond.GetBondType()) == 'AROMATIC':
            print("error in remove_Hs 1")
        else:
            print("error in remove_Hs 2")
    except:
        # `bond` may also be given as an integer bond-order code.
        if bond == 0:
            num = 1
        elif bond == 1:
            num = 2
        elif bond == 2:
            num = 3
        else:
            raise
    rwmol = Chem.AddHs(rwmol)
    rwmol = Chem.RWMol(rwmol)
    # Tag the hydrogens attached to a1/a2 with large, disjoint atom-map
    # ranges so they can be located again after the AddHs reindexing.
    h_map1 = 2000000
    h_map2 = 3000000
    f_h_map1 = copy.copy(h_map1)
    f_h_map2 = copy.copy(h_map2)
    for b in rwmol.GetBonds():
        s_atom = b.GetBeginAtom()
        e_atom = b.GetEndAtom()
        if (e_atom.GetIdx() == a1.GetIdx()) and (s_atom.GetSymbol() == 'H'):
            s_atom.SetAtomMapNum(h_map1)
            h_map1 += 1
        elif (s_atom.GetIdx() == a1.GetIdx()) and (e_atom.GetSymbol() == 'H'):
            e_atom.SetAtomMapNum(h_map1)
            h_map1 += 1
        elif (e_atom.GetIdx() == a2.GetIdx()) and (s_atom.GetSymbol() == 'H'):
            s_atom.SetAtomMapNum(h_map2)
            h_map2 += 1
        elif (s_atom.GetIdx() == a2.GetIdx()) and (e_atom.GetSymbol() == 'H'):
            e_atom.SetAtomMapNum(h_map2)
            h_map2 += 1
    # Remove `num` tagged hydrogens from each of the two atoms.
    for i in range(num):
        try:
            for atom in rwmol.GetAtoms():
                if atom.GetAtomMapNum() == f_h_map1 + i:
                    rwmol.RemoveAtom(atom.GetIdx())
                    break
            for atom in rwmol.GetAtoms():
                if atom.GetAtomMapNum() == f_h_map2 + i:
                    rwmol.RemoveAtom(atom.GetIdx())
                    break
        except:
            print("Remove Hs times Error!!")
            raise
    rwmol = rwmol.GetMol()
    rwmol = sanitize(rwmol, kekulize = False)
    rwmol = Chem.RemoveHs(rwmol)
    rwmol = Chem.RWMol(rwmol)
    return rwmol
#Calculate frequency after decomposition
def count_fragments(mol):
    """Decompose mol at acyclic joint bonds and build fragment count-labels.

    Bonds are broken where (a) both atoms are in rings, or (b) one atom
    is in a ring and the other has degree > 1. Each resulting fragment
    is described by a tuple of its canonical SMILES followed by
    (atom-position, joint-order) pairs for every attachment point.

    Args:
        mol: The molecule to decompose.
    Return:
        A tuple (count_labels, fragments) where count_labels is a list
        of the label tuples described above and fragments is the list
        of fragment Mols.
    """
    mol = Chem.rdmolops.RemoveHs(mol)
    new_mol = Chem.RWMol(mol)
    # Use atom maps to remember the original atom indices.
    for atom in new_mol.GetAtoms():
        atom.SetAtomMapNum(atom.GetIdx())
    sep_sets = [] #Set of atom maps of joints
    # NOTE(review): set_idx appears to be unused in this function.
    set_idx = 10000 #Temporarily allocate a large Map
    for bond in mol.GetBonds():
        if bond.IsInRing(): continue
        a1 = bond.GetBeginAtom()
        a2 = bond.GetEndAtom()
        #If both are inside the ring, split there.
        if a1.IsInRing() and a2.IsInRing():
            sep_sets.append((a1.GetIdx(), a2.GetIdx()))
        #If one atom is in a ring and the other has a bond order greater than 2, split there.
        elif (a1.IsInRing() and a2.GetDegree() > 1) or (a2.IsInRing() and a1.GetDegree() > 1):
            sep_sets.append((a1.GetIdx(), a2.GetIdx()))
    sep_idx = 1
    atommap_dict = defaultdict(list) #key->AtomIdx, value->sep_idx (In the whole compound before decomposition)
    # Break every joint bond, recording which joint touches which atom
    # and re-saturating the freed valences with hydrogens.
    for bond in mol.GetBonds():
        a1 = bond.GetBeginAtom()
        a2 = bond.GetEndAtom()
        if ((a1.GetIdx(),a2.GetIdx()) in sep_sets) or ((a2.GetIdx(),a1.GetIdx()) in sep_sets):
            a1map = new_mol.GetAtomWithIdx(a1.GetIdx()).GetAtomMapNum()
            a2map = new_mol.GetAtomWithIdx(a2.GetIdx()).GetAtomMapNum()
            atommap_dict[a1map].append(sep_idx)
            atommap_dict[a2map].append(sep_idx)
            new_mol = add_Hs(new_mol, a1, a2, bond)
            new_mol.RemoveBond(a1.GetIdx(), a2.GetIdx())
            sep_idx += 1
    for i in range(len(atommap_dict)):
        atommap_dict[i] = sorted(atommap_dict[i])
    # Drop atoms that have no joints attached.
    for i in list(atommap_dict.keys()):
        if atommap_dict[i] == []:
            atommap_dict.pop(i)
    new_mol = new_mol.GetMol()
    new_mol = sanitize(new_mol, kekulize = False)
    # The broken molecule now splits into '.'-separated fragments.
    new_smiles = Chem.MolToSmiles(new_mol)
    fragments = [Chem.MolFromSmiles(fragment) for fragment in new_smiles.split('.')]
    fragments = [sanitize(fragment, kekulize = False) for fragment in fragments]
    count_labels = []
    for i, fragment in enumerate(fragments):
        order_list = [] #Stores join orders in the substructures
        count_label = []
        frag_mol = copy.deepcopy(fragment)
        # Clear atom maps on the copy to get a canonical fragment SMILES.
        for atom in frag_mol.GetAtoms():
            frag_mol.GetAtomWithIdx(atom.GetIdx()).SetAtomMapNum(0)
        frag_smi = Chem.MolToSmiles(sanitize(frag_mol, kekulize = False))
        #Fix AtomIdx as order changes when AtomMap is deleted.
        atom_order = list(map(int, frag_mol.GetProp("_smilesAtomOutputOrder")[1:-2].split(",")))
        for atom in fragment.GetAtoms():
            amap = atom.GetAtomMapNum()
            if amap in list(atommap_dict.keys()):
                order_list.append(atommap_dict[amap])
        order_list = sorted(order_list)
        count_label.append(frag_smi)
        # Append (position in canonical SMILES, joint rank) per joint atom.
        for atom in fragment.GetAtoms():
            amap = atom.GetAtomMapNum()
            if amap in list(atommap_dict.keys()):
                count_label.append(atom_order.index(atom.GetIdx()))
                count_label.append(order_list.index(atommap_dict[amap]) + 1)
        count_labels.append(tuple(count_label))
    return count_labels, fragments
#Create a decomposed list
def find_fragments(mol, count_labels, count_thres):
    """Decompose *mol* into fragments plus junction/adjacency metadata.

    First pass: cut acyclic bonds at ring junctions (same rule as
    ``count_fragments``).  Any resulting ring-free fragment whose label
    frequency ``count_labels[label]`` falls below *count_thres* is cut
    further at functional groups, in priority order:
    amide (C(=O)N) > ester/carboxy (C(=O)O) > ketone/aldehyde (C(=O))
    > ether/hydroxy (CO).  A bumped atom-map number (>= 10000) marks
    atoms already claimed by a higher-priority cut.

    Returns:
        mapidx_list: per-fragment (canonical SMILES, variant index) pairs.
        labelmap_dict: SMILES -> list of junction maps
            (atom position in canonical order -> cut indices).
        bondtype_list: str(BondType) of each cut bond, in cut order.
        max_mapnum: number of cuts made.
        fragments: the RDKit fragment mols.
    """
    mol = Chem.rdmolops.RemoveHs(mol)
    for atom in mol.GetAtoms():
        atom.SetAtomMapNum(atom.GetIdx())
    new_mol = Chem.RWMol(mol)
    new_mol2 = copy.deepcopy(new_mol)
    sep_sets = []
    # --- pass 1: same ring-junction cut rule as count_fragments ---
    for bond in mol.GetBonds():
        if bond.IsInRing(): continue
        a1 = bond.GetBeginAtom()
        a2 = bond.GetEndAtom()
        if a1.IsInRing() and a2.IsInRing():
            sep_sets.append((a1.GetIdx(), a2.GetIdx()))
        elif (a1.IsInRing() and a2.GetDegree() > 1) or (a2.IsInRing() and a1.GetDegree() > 1):
            sep_sets.append((a1.GetIdx(), a2.GetIdx()))
    sep_idx = 1
    atommap_dict = defaultdict(list)
    for bond in mol.GetBonds():
        a1 = bond.GetBeginAtom()
        a2 = bond.GetEndAtom()
        if ((a1.GetIdx(),a2.GetIdx()) in sep_sets) or ((a2.GetIdx(),a1.GetIdx()) in sep_sets):
            a1map = new_mol.GetAtomWithIdx(a1.GetIdx()).GetAtomMapNum()
            a2map = new_mol.GetAtomWithIdx(a2.GetIdx()).GetAtomMapNum()
            atommap_dict[a1map].append(sep_idx)
            atommap_dict[a2map].append(sep_idx)
            new_mol = add_Hs(new_mol, a1, a2, bond)
            new_mol.RemoveBond(a1.GetIdx(), a2.GetIdx())
            sep_idx += 1
    # Clean up the empty defaultdict entries created by the range() scan.
    for i in range(len(atommap_dict)):
        atommap_dict[i] = sorted(atommap_dict[i])
    for i in list(atommap_dict.keys()):
        if atommap_dict[i] == []:
            atommap_dict.pop(i)
    new_mol = new_mol.GetMol()
    new_mol = sanitize(new_mol, kekulize = False)
    new_smiles = Chem.MolToSmiles(new_mol)
    fragments = [Chem.MolFromSmiles(fragment) for fragment in new_smiles.split('.')]
    fragments = [sanitize(fragment, kekulize = False) for fragment in fragments]
    # --- pass 2: split rare, ring-free fragments at functional groups ---
    for i, fragment in enumerate(fragments):
        have_ring = False
        order_list = []
        count_label = []
        frag_mol = copy.deepcopy(fragment)
        for atom in frag_mol.GetAtoms():
            frag_mol.GetAtomWithIdx(atom.GetIdx()).SetAtomMapNum(0)
        frag_smi = Chem.MolToSmiles(sanitize(frag_mol, kekulize = False))
        for atom in fragment.GetAtoms():
            if atom.IsInRing():
                have_ring = True
            amap = atom.GetAtomMapNum()
            if amap in list(atommap_dict.keys()):
                order_list.append(atommap_dict[amap])
        order_list = sorted(order_list)
        count_label.append(frag_smi)
        for atom in fragment.GetAtoms():
            amap = atom.GetAtomMapNum()
            if amap in list(atommap_dict.keys()):
                count_label.append(atom.GetIdx())
                count_label.append(order_list.index(atommap_dict[amap]) + 1)
        # count_labels acts as a mapping label-tuple -> frequency here
        # (built from count_fragments output — TODO confirm in caller).
        count = count_labels[tuple(count_label)]
        if count < count_thres and have_ring == False:
            set_idx=10000
            #Query for substructure search
            query = Chem.MolFromSmiles('C(=O)N')
            m_list = list(new_mol2.GetSubstructMatches(query))
            q_list = [] #Index of query match
            for i in range(len(m_list)):
                for j in range(len(m_list[i])):
                    if m_list[i][j] not in q_list:
                        q_list.append(m_list[i][j])
            query2 = Chem.MolFromSmiles('C(=O)O')
            m_list2 = list(new_mol2.GetSubstructMatches(query2))
            q_list2 = [] #Index of query match
            for i in range(len(m_list2)):
                for j in range(len(m_list2[i])):
                    if m_list2[i][j] not in q_list2:
                        q_list2.append(m_list2[i][j])
            query3 = Chem.MolFromSmiles('C(=O)')
            m_list3 = list(new_mol2.GetSubstructMatches(query3))
            q_list3 = [] #Index of query match
            for i in range(len(m_list3)):
                for j in range(len(m_list3[i])):
                    if m_list3[i][j] not in q_list3:
                        q_list3.append(m_list3[i][j])
            query4 = Chem.MolFromSmiles('CO')
            m_list4 = list(new_mol2.GetSubstructMatches(query4))
            q_list4 = [] #Index of query match
            for i in range(len(m_list4)):
                for j in range(len(m_list4[i])):
                    if m_list4[i][j] not in q_list4:
                        q_list4.append(m_list4[i][j])
            for bond in fragment.GetBonds():
                if bond.IsInRing(): continue
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                ###Amide bond or amide group###
                #C side
                if ((a1.GetAtomMapNum() in q_list) and (a1.GetSymbol() == 'C') and (a1.GetDegree() == 3)) \
                    and (a2.GetSymbol() != 'H')and(a2.GetAtomMapNum() not in q_list):
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a1.GetIdx()).SetAtomMapNum(a1.GetAtomMapNum() + set_idx)
                elif ((a2.GetAtomMapNum() in q_list) and (a2.GetSymbol() == 'C') and (a2.GetDegree() == 3)) \
                    and (a1.GetSymbol() != 'H')and(a1.GetAtomMapNum() not in q_list):
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a2.GetIdx()).SetAtomMapNum(a2.GetAtomMapNum()+set_idx)
                #N side
                elif ((a1.GetAtomMapNum() in q_list) and (a1.GetSymbol() == 'N') and (a1.GetDegree() == 2)) \
                    and (a2.GetSymbol() != 'H')and(a2.GetAtomMapNum() not in q_list):
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a1.GetIdx()).SetAtomMapNum(a1.GetAtomMapNum()+set_idx)
                elif ((a2.GetAtomMapNum() in q_list) and (a2.GetSymbol() == 'N') and (a2.GetDegree() == 2)) \
                    and (a1.GetSymbol() != 'H')and(a1.GetAtomMapNum() not in q_list):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a2.GetIdx()).SetAtomMapNum(a2.GetAtomMapNum() + set_idx)
            for bond in fragment.GetBonds():
                if bond.IsInRing(): continue
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                ###Ester bond or carboxy group###
                #C side
                if ((a1.GetAtomMapNum() in q_list2) and (a1.GetSymbol() == 'C') and (a1.GetDegree() == 3)) \
                    and (a2.GetSymbol() != 'H')and(a2.GetAtomMapNum() not in q_list2):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a1.GetIdx()).SetAtomMapNum(a1.GetAtomMapNum() + set_idx)
                elif ((a2.GetAtomMapNum() in q_list2) and (a2.GetSymbol() == 'C') and (a2.GetDegree() == 3)) \
                    and (a1.GetSymbol() != 'H')and(a1.GetAtomMapNum() not in q_list2):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a2.GetIdx()).SetAtomMapNum(a2.GetAtomMapNum() + set_idx)
                #O side
                elif ((a1.GetAtomMapNum() in q_list2) and (a1.GetSymbol() == 'O') and (a1.GetDegree() == 2)) \
                    and (a2.GetSymbol() != 'H')and(a2.GetAtomMapNum() not in q_list2):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a1.GetIdx()).SetAtomMapNum(a1.GetAtomMapNum() + set_idx)
                elif ((a2.GetAtomMapNum() in q_list2) and (a2.GetSymbol() == 'O') and (a2.GetDegree() == 2)) \
                    and (a1.GetSymbol() != 'H')and(a1.GetAtomMapNum() not in q_list2):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
                    fragment.GetAtomWithIdx(a2.GetIdx()).SetAtomMapNum(a2.GetAtomMapNum() + set_idx)
            for bond in fragment.GetBonds():
                if bond.IsInRing(): continue
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                ###Ketone group or Aldehyde group###
                if (((a1.GetAtomMapNum() in q_list3) and (a1.GetSymbol() == 'C') and (a1.GetDegree() > 1)) \
                    and (a2.GetSymbol() != 'H')and(a2.GetSymbol() != 'N')and(a2.GetSymbol() != 'O')and(a2.GetAtomMapNum() not in q_list3)) \
                    or (((a2.GetAtomMapNum() in q_list3) and (a2.GetSymbol() == 'C') and (a2.GetDegree() > 1)) \
                    and (a1.GetSymbol() != 'H')and(a1.GetSymbol() != 'N')and(a1.GetSymbol() != 'O')and(a1.GetAtomMapNum() not in q_list3)):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
            for bond in fragment.GetBonds():
                if bond.IsInRing(): continue
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                ###Ether bond or hydroxy group###
                if (((a1.GetAtomMapNum() in q_list4) and (a1.GetSymbol() == 'C')) \
                    and (a2.GetSymbol() == 'O')) \
                    or (((a2.GetAtomMapNum() in q_list4) and (a2.GetSymbol() == 'C')) \
                    and (a1.GetSymbol() == 'O')):
                    #If it's already decomposed by a higher priority functional group, then nothing.
                    if (a1.GetAtomMapNum() >= set_idx or a2.GetAtomMapNum() >= set_idx):
                        continue
                    sep_sets.append((a1.GetAtomMapNum(), a2.GetAtomMapNum()))
    # --- pass 3: re-cut the pristine copy (new_mol2) with the full sep_sets ---
    sep_idx = 1
    bondtype_list = []
    atommap_dict = defaultdict(list)
    for bond in mol.GetBonds():
        a1 = bond.GetBeginAtom()
        a2 = bond.GetEndAtom()
        # NOTE(review): pass-2 entries store atom-map numbers (possibly
        # bumped by set_idx) while this test uses raw indices — confirm
        # they are meant to coincide for the atoms that matter here.
        if ((a1.GetIdx(),a2.GetIdx()) in sep_sets) or ((a2.GetIdx(),a1.GetIdx()) in sep_sets):
            a1map = new_mol2.GetAtomWithIdx(a1.GetIdx()).GetAtomMapNum()
            a2map = new_mol2.GetAtomWithIdx(a2.GetIdx()).GetAtomMapNum()
            atommap_dict[a1map].append(sep_idx)
            atommap_dict[a2map].append(sep_idx)
            bondtype_list.append(str(bond.GetBondType()))
            new_mol2 = add_Hs(new_mol2, a1, a2, bond)
            new_mol2.RemoveBond(a1.GetIdx(), a2.GetIdx())
            sep_idx += 1
    for i in range(len(atommap_dict)):
        atommap_dict[i] = sorted(atommap_dict[i])
    for i in list(atommap_dict.keys()):
        if atommap_dict[i] == []:
            atommap_dict.pop(i)
    max_mapnum = sep_idx - 1
    new_mol2 = new_mol2.GetMol()
    new_mol2 = sanitize(new_mol2, kekulize = False)
    new_smiles = Chem.MolToSmiles(new_mol2)
    fragments = [Chem.MolFromSmiles(fragment) for fragment in new_smiles.split('.')]
    fragments = [sanitize(fragment, kekulize = False) for fragment in fragments]
    mapidx_list = [] #Set:(substructureSMILES, junctionAtomIdx) for graph and adjacency matrix creation
    labelmap_dict = defaultdict(list) #key->label_smiles, value->fragmap_dict
    for i, fragment in enumerate(fragments):
        fragmap_dict = defaultdict(list) #key->AtomIdx, value->sep_idx(In each compound after decomposition)
        fragmap_dict2 = defaultdict(list)
        for atom in fragment.GetAtoms():
            amap = atom.GetAtomMapNum()
            if amap in list(atommap_dict.keys()):
                fragmap_dict[atom.GetIdx()].append(atommap_dict[amap])
            fragment.GetAtomWithIdx(atom.GetIdx()).SetAtomMapNum(0)
        frag_smi = Chem.MolToSmiles(fragment)
        # Re-key the junction map by canonical SMILES output order.
        atom_order = list(map(int, fragment.GetProp("_smilesAtomOutputOrder")[1:-2].split(",")))
        for fragmap_v in list(fragmap_dict.keys()):
            val = fragmap_dict.pop(fragmap_v)
            fragmap_dict2[atom_order.index(fragmap_v)] = val
        fragmap_dict = fragmap_dict2
        for j in range(len(fragmap_dict)):
            fragmap_dict[j] = sorted(fragmap_dict[j])
        if frag_smi in labelmap_dict.keys():
            if labelmap_dict[frag_smi] not in list(fragmap_dict.values()):
                labelmap_dict[frag_smi].append(fragmap_dict)
        else:
            labelmap_dict[frag_smi].append(fragmap_dict)
        midx = labelmap_dict[frag_smi].index(fragmap_dict)
        mapidx_list.append((frag_smi, midx))
    # Flatten singleton value lists and drop empty junction entries.
    for values in labelmap_dict.values():
        for v in values:
            for i in list(v.keys()):
                if v[i] == []:
                    v.pop(i)
                else:
                    v[i] = v[i][0]
    return mapidx_list, labelmap_dict, bondtype_list, max_mapnum, fragments
def revise_maps(labelmap_dict):
    """Renumber each junction map to compact, 1-based local indices.

    For every variant mapping (atom position -> list of global cut
    indices), the cut indices are replaced by their 1-based rank within
    that mapping's sorted union.  The input is left untouched; a revised
    deep copy is returned together with the largest per-atom junction
    degree observed across all mappings.
    """
    revised = copy.deepcopy(labelmap_dict)
    highest_degree = 0
    for variants in revised.values():
        for mapping in variants:
            # Sorted union of every cut index appearing in this mapping.
            ranked = sorted(x for ids in mapping.values() for x in ids)
            for ids in mapping.values():
                highest_degree = max(highest_degree, len(ids))
            for ids in mapping.values():
                for pos, val in enumerate(ids):
                    ids[pos] = ranked.index(val) + 1
    return revised, highest_degree
def make_ecfp2D(smiles, n_bit = 2048, r = 2):
    """Morgan/ECFP bit-vector fingerprint of *smiles*, ignoring chirality."""
    molecule = Chem.MolFromSmiles(smiles)
    return AllChem.GetMorganFingerprintAsBitVect(molecule, r, n_bit, useChirality = False)
def make_ecfp3D(smiles, n_bit = 2048, r = 2):
    """Morgan/ECFP bit-vector fingerprint of *smiles*, chirality-aware."""
    molecule = Chem.MolFromSmiles(smiles)
    return AllChem.GetMorganFingerprintAsBitVect(molecule, r, n_bit, useChirality = True)
#Partial tree creation with unnecessary nodes removed
def make_subtree(tree):
    """Return *tree* (a DGL graph) with all isolated nodes removed.

    A node is removed when its in-degree plus out-degree is 0.  The
    scan restarts after each removal — presumably because node IDs
    shift after dgl.remove_nodes (TODO confirm against DGL docs).
    """
    flag = 1
    while(flag == 1):
        flag = 0
        for node in range(tree.number_of_nodes()):
            deg = tree.in_degrees(node) + tree.out_degrees(node)
            if deg == 0:
                tree = dgl.remove_nodes(tree,node)
                flag = 1
                break
    return tree
def set_bondlabel(bondtype):
    """Map an RDKit bond-type name to an integer class-label tensor.

    Args:
        bondtype: str(bond.GetBondType()), e.g. 'SINGLE'.

    Returns:
        A 1-element torch tensor: 0=SINGLE, 1=DOUBLE, 2=TRIPLE.

    Raises:
        ValueError: for any other bond type.  (BUG FIX: the original
        used a bare ``raise`` with no active exception, which raises an
        opaque RuntimeError instead of a meaningful error.)
    """
    labels = {'SINGLE': 0, 'DOUBLE': 1, 'TRIPLE': 2}
    if bondtype not in labels:
        raise ValueError("unsupported bond type: %r" % (bondtype,))
    return torch.tensor([labels[bondtype]])
#Creating Graphs
def make_graph(mapidx_list, labelmap_dict, rev_labelmap_dict, labels, bondtype_list):
    """Assemble the fragment-junction DGL tree plus training answers.

    Connects fragment nodes in the order of their shared cut indices,
    attaching each node's ECFP fingerprint as ``ndata['ecfp']`` and
    snapshotting the partial graph into ``sub_tree`` after every node.

    Returns:
        (mg, sub_tree, root_answer, l_ans_list, b_ans_list):
        the full graph, per-step partial graphs, the root label index,
        the per-step label answers and per-step bond answers.
    """
    map_dict = defaultdict(list) #Stores which part (key) has which Index (value)
    mg = dgl.DGLGraph()
    sub_tree = []
    l_ans_list = []
    b_ans_list = []
    for i, (smi, fragidx) in enumerate(mapidx_list):
        for l in labelmap_dict[smi][fragidx].values():
            for idx in l:
                map_dict[i].append(idx)
    if len(map_dict) == 0:
        # Single-fragment molecule: one node, no edges, no child answers.
        mg.add_nodes(1)
        fp = make_ecfp2D(mapidx_list[0][0])
        feat = torch.from_numpy(np.array(fp)).float()
        feat = feat.unsqueeze(0)
        mg.ndata['ecfp'] = feat
        sub_tree.append(mg)
        label = []
        label.append(mapidx_list[0][0])
        label.append(rev_labelmap_dict[mapidx_list[0][0]][mapidx_list[0][1]])
        root_answer = torch.tensor([labels.index(label)])
        return mg, sub_tree, root_answer, l_ans_list, b_ans_list
    else:
        # Highest cut index = number of junctions to connect.
        max_idx = 0
        for l in map_dict.values():
            for v in l:
                if v > max_idx:
                    max_idx = v
        cidx = 1 #map to connect
        pair_idx = []
        track = dict() # key: get index in part, value: node number in graph
        nid = 0
        # Find the two fragments sharing cut index 1.
        for n in range(i + 1):
            if cidx in map_dict[n]:
                pair_idx.append(n)
                track[n] = nid
                nid += 1
                if len(pair_idx) == 2:
                    break
        # The fragment with the larger cut index becomes the root.
        if max(map_dict[pair_idx[1]]) > max(map_dict[pair_idx[0]]):
            pair_idx[0], pair_idx[1] = pair_idx[1], pair_idx[0]
            track[pair_idx[0]], track[pair_idx[1]] = track[pair_idx[1]], track[pair_idx[0]]
        mg.add_nodes(1)
        fp = np.array(make_ecfp2D(mapidx_list[pair_idx[0]][0]))
        feat1 = torch.from_numpy(np.array(fp)).float()
        feat1 = feat1.unsqueeze(0)
        mg.ndata['ecfp'] = feat1
        sub_tree.append(copy.deepcopy(mg))
        label = []
        label.append(mapidx_list[pair_idx[0]][0])
        label.append(rev_labelmap_dict[mapidx_list[pair_idx[0]][0]][mapidx_list[pair_idx[0]][1]])
        root_answer = torch.tensor([labels.index(label)])
        mg.add_nodes(1)
        mg.add_edges(0,1)
        fp = np.array(make_ecfp2D(mapidx_list[pair_idx[1]][0]))
        feat2 = torch.from_numpy(np.array(fp)).float()
        feat2 = feat2.unsqueeze(0)
        feat = torch.cat((feat1, feat2), 0)
        mg.ndata['ecfp'] = feat
        sub_tree.append(copy.deepcopy(mg))
        label = []
        label.append(mapidx_list[pair_idx[1]][0])
        label.append(rev_labelmap_dict[mapidx_list[pair_idx[1]][0]][mapidx_list[pair_idx[1]][1]])
        l_ans_list.append(torch.tensor([labels.index(label)]))
        b_ans_list.append(set_bondlabel(bondtype_list[0]))
        if max_idx > 1:
            # Attach the remaining fragments, one per cut index.
            for cidx in range(2, max_idx + 1):
                pairs = []
                pair_idx = []
                for n in range(i + 1):
                    if cidx in map_dict[n]:
                        pairs.append(Chem.MolFromSmiles(mapidx_list[n][0]))
                        pair_idx.append(n)
                        if n not in list(track.keys()):
                            new_idx = n
                            track[n] = cidx
                # NOTE(review): bare raise below produces RuntimeError.
                if len(pair_idx) != 2:
                    raise
                mg.add_nodes(1)
                # Edge direction: point at whichever endpoint is still isolated.
                if mg.in_degrees(track[pair_idx[1]]) + mg.out_degrees(track[pair_idx[1]]) == 0:
                    mg.add_edges(track[pair_idx[0]], track[pair_idx[1]])
                else:
                    mg.add_edges(track[pair_idx[1]], track[pair_idx[0]])
                fp_n = np.array(make_ecfp2D(mapidx_list[new_idx][0]))
                feat_n = torch.from_numpy(np.array(fp_n)).float()
                feat_n = feat_n.unsqueeze(0)
                feat = torch.cat((feat, feat_n), 0)
                mg.ndata['ecfp'] = feat
                sub_tree.append(copy.deepcopy(mg))
                label = []
                label.append(mapidx_list[new_idx][0])
                label.append(rev_labelmap_dict[mapidx_list[new_idx][0]][mapidx_list[new_idx][1]])
                l_ans_list.append(torch.tensor([labels.index(label)]))
                b_ans_list.append(set_bondlabel(bondtype_list[cidx - 1]))
        mg = make_subtree(mg)
        assert mg.number_of_nodes() == len(mapidx_list)
        return mg, sub_tree, root_answer, l_ans_list, b_ans_list
def demon_decoder(g, sub_tree, root_ans, label_ans_l, bond_ans_l, \
    l_1_counter, l_counter, b_counter, t_counter, labels, MAX_ITER = 500):
    """Replay a teacher-forced depth-first decode of graph *g*.

    Walks the tree using the ground-truth answers (root/label/bond
    lists), reconstructing the molecule fragment by fragment with
    ``connect_smiles`` while tallying label/bond/topology frequencies
    into the supplied counters.

    Returns:
        (dec_smi, bg_node_l, target_id_l, topo_ans_l,
         l_1_counter, l_counter, b_counter, t_counter)
    """
    target_id_l = []
    numnd = 0
    kaisa = 1
    bg_node_l = [] #Tuple of (node ID when graph batching, backtrack or not)
    topo_ans_l = []
    numatom = 0
    numbond = 0
    track = []
    map_track = []
    ITER = 0
    while(ITER < (MAX_ITER + 1)):
        if ITER == 0:
            # Root step: decode the first fragment from the root answer.
            label_ans = root_ans
            l_1_counter[label_ans] += 1
            target_id = 0
            numatom += 1
            dec_smi = labels[label_ans][0]
            dec_mol = setmap_to_mol(Chem.MolFromSmiles(dec_smi), target_id)
            track.append(target_id)
            target_id_l.append(target_id)
            map_track.append(labels[label_ans][1])
            bg_node_l.append((numnd,0))
            numnd += kaisa
            kaisa += 1
        elif ITER > 0:
            # Topology answer: 1 = stop/backtrack, 0 = expand a child.
            if g.out_degrees(target_id) - (track.count(target_id) - 1) == 0:
                topo_ans = 1
                topo_ans_l.append(torch.tensor([1]))
            else:
                topo_ans = 0
                topo_ans_l.append(torch.tensor([0]))
            t_counter[topo_ans] += 1
            if topo_ans == 1: #STOP->Backtrack
                if ITER == 1:
                    break
                else:
                    try:
                        target_id = tree.predecessors(target_id).cpu()
                        target_id = int(target_id)
                        track.append(target_id)
                        target_id_l.append(target_id)
                        map_track.pop(-1)
                        bg_node_l.append((numnd + target_id - kaisa + 1, 1))
                    except: #no parents
                        break
            elif topo_ans == 0: #Create a child_node
                tree = sub_tree[numatom]
                #Bond Prediction
                bond_ans = bond_ans_l[numbond]
                b_counter[bond_ans] += 1
                #label Prediction
                new_target_id = numatom
                label_ans = label_ans_l[new_target_id - 1]
                l_counter[label_ans] += 1
                #Connect
                suc_smi = labels[label_ans][0]
                suc_mol = setmap_to_mol(Chem.MolFromSmiles(suc_smi), new_target_id)
                # Pick the junction atom on the current fragment; the root
                # counts visits differently from interior nodes.
                if target_id == 0:
                    for amap in map_track[-1].keys():
                        if track.count(target_id) in map_track[-1][amap]:
                            dec_conidx = 1000 * target_id + amap
                else:
                    for amap in map_track[-1].keys():
                        if track.count(target_id) + 1 in map_track[-1][amap]:
                            dec_conidx = 1000 * target_id + amap
                for amap in labels[label_ans][1].keys():
                    if 1 in labels[label_ans][1][amap]:
                        suc_conidx = 1000 * new_target_id + amap
                dec_mol, Connecting = connect_smiles(dec_mol, dec_conidx, suc_mol, suc_conidx, bond_ans)
                # NOTE(review): bare raise below produces RuntimeError.
                if Connecting == 0:
                    raise
                target_id = new_target_id
                numbond += 1
                numatom += 1
                track.append(target_id)
                target_id_l.append(target_id)
                map_track.append(labels[label_ans][1])
                bg_node_l.append((numnd + target_id,0))
                numnd += kaisa
                kaisa += 1
        ITER += 1
    # Strip the atom maps before emitting the final SMILES.
    for atom in dec_mol.GetAtoms():
        dec_mol.GetAtomWithIdx(atom.GetIdx()).SetAtomMapNum(0)
    dec_smi = Chem.MolToSmiles(sanitize(dec_mol, kekulize = False))
    return dec_smi, bg_node_l, target_id_l, topo_ans_l,\
    l_1_counter, l_counter, b_counter, t_counter
#Add AtomMap to substructure corresponding to NodeID
def setmap_to_mol(mol, node_id):
    """Tag each atom's map number with ``node_id * 1000 + atom index``."""
    base = node_id * 1000
    for atom in mol.GetAtoms():
        atom.SetAtomMapNum(base + atom.GetIdx())
    return mol
def connect_smiles(dec_mol, dec_conidx, suc_mol, suc_conidx, bond_label):
    """Join two atom-mapped fragments with a new bond.

    Args:
        dec_mol / suc_mol: RDKit mols whose atoms carry unique map numbers.
        dec_conidx / suc_conidx: map numbers of the two junction atoms.
        bond_label: 0=SINGLE, 1=DOUBLE, 2=TRIPLE (int or 1-element tensor).

    Returns:
        (mol, 1) on success, or (dec_mol, 0) when bonding/sanitization
        fails (best-effort, matching the original behaviour).

    Raises:
        ValueError: on an unsupported bond label or when the junction
        atoms cannot be found.  (BUG FIX: the original used bare
        ``raise`` statements and had dataset metadata fused onto its
        final return line.)
    """
    # Kept as an ==-chain: bond_label may be a 1-element tensor, which
    # compares equal to an int but would not hash into a dict lookup.
    if bond_label == 0:
        bond_type = Chem.BondType.SINGLE
    elif bond_label == 1:
        bond_type = Chem.BondType.DOUBLE
    elif bond_label == 2:
        bond_type = Chem.BondType.TRIPLE
    else:
        raise ValueError("unsupported bond label: %r" % (bond_label,))
    # Round-trip through SMILES so both fragments live in one mol.
    con_smi = Chem.MolToSmiles(dec_mol) + '.' + Chem.MolToSmiles(suc_mol)
    rw_mol = Chem.RWMol(Chem.MolFromSmiles(con_smi))
    con_atom = [atom for atom in rw_mol.GetAtoms()
                if atom.GetAtomMapNum() == dec_conidx or atom.GetAtomMapNum() == suc_conidx]
    if len(con_atom) != 2:
        raise ValueError("expected 2 junction atoms, found %d" % len(con_atom))
    try:
        rw_mol.AddBond(con_atom[0].GetIdx(), con_atom[1].GetIdx(), bond_type)
        rw_mol = remove_Hs(rw_mol, con_atom[0], con_atom[1], bond_label)
        mol = rw_mol.GetMol()
        Chem.SanitizeMol(mol)
        return mol, 1  # success
    except Exception:
        # Best-effort: signal failure and hand back the untouched left fragment.
        return dec_mol, 0
import random
class RSA():
    """Toy textbook RSA over lists of integer blocks.

    ``m`` is a list of lists of integers (e.g. character codes);
    encrypt/decrypt apply modular exponentiation element-wise.  This is
    an educational implementation — small keys, no padding — and must
    not be used for real security.
    """

    def __init__(self, p=None, q=None, m=None) -> None:
        # Two primes p, q and the plaintext m awaiting encryption.
        self.p = p
        self.q = q
        self.m = m
        if p is not None and q is not None:
            self.generate_key()  # initialise the public/private key pair

    def generate_key(self):
        """Pick a random public exponent e coprime to phi and derive d."""
        self.n = self.p * self.q
        phi = (self.p - 1) * (self.q - 1)
        while True:
            # Randomly pick an e in 1..phi-1.
            self.e = random.randrange(1, phi)
            if self.gcd(self.e, phi) != 1:
                continue  # e must be coprime to phi to be invertible
            # Derive the private exponent d as the modular inverse of e.
            self.d = self.mod_inverse(self.e, phi)
            if self.e != self.d:
                break
        print('Finished generating key...')

    # RSA encryption
    def encrypt(self, e=None, n=None):
        """Encrypt self.m; optionally override with another public key (e, n)."""
        if e is not None and n is not None:
            self.e = e
            self.n = n
        self.c = [[pow(block, self.e, self.n) for block in row] for row in self.m]
        print('Finished generating ciphertext...')
        return self.c

    # RSA decryption
    def decrypt(self, c=None):
        """Decrypt self.c (or the given ciphertext) into self.m_dcrpt."""
        if c is not None:
            self.c = c
        self.m_dcrpt = [[pow(block, self.d, self.n) for block in row] for row in self.c]
        return self.m_dcrpt

    def gcd(self, a, b):
        """Euclid's algorithm (iterative, avoids recursion-depth limits)."""
        while b:
            a, b = b, a % b
        return a

    def mod_inverse(self, a, m):
        """Return x in [1, m) with a*x == 1 (mod m), or -1 if none exists.

        BUG FIX (performance): the original scanned all residues in
        O(m); pow(a, -1, m) (Python 3.8+) computes the inverse via the
        extended Euclidean algorithm and raises ValueError when a and m
        are not coprime.
        """
        if m <= 1:
            return -1
        try:
            return pow(a, -1, m)
        except ValueError:
            return -1
import cv2
import numpy as np
import argparse
# Command-line interface.  Per create_rect_mask below: --shape is the mask
# height/width; --box is [top, left, bottom, right] of the region kept at 0
# (everything outside it is set to 1); --mask_path is the output image path.
parse = argparse.ArgumentParser()
parse.add_argument('--shape', type=int, nargs='+', default=[720, 1280])
parse.add_argument('--box', type=int, nargs='+', default=[0, 0, 720, 1280])
parse.add_argument('--mask_path', type=str, default='assets/mask/mask.jpg')
def create_rect_mask(args):
    """Write a rectangular mask image to ``args.mask_path``.

    Pixels inside the [top, left, bottom, right] box of ``args.box``
    stay 0 (black); everything outside is 255 (white).  The grayscale
    mask is converted to RGBA before saving.
    """
    top, left, bottom, right = args.box
    mask = np.zeros(args.shape, dtype=np.uint8)
    mask[:top, :] = 1
    mask[:, :left] = 1
    mask[bottom:, :] = 1
    mask[:, right:] = 1
    rgba = cv2.cvtColor(mask * 255, cv2.COLOR_GRAY2RGBA)
    cv2.imwrite(args.mask_path, rgba)
if __name__ == '__main__':
    # BUG FIX: dataset metadata had been fused onto this call line,
    # corrupting the script's entry point.
    create_rect_mask(parse.parse_args())
import logging as log, json, sys, time, socket
from threading import Thread, Lock, Event as TreadEvent
from telnetlib import Telnet
from homecontrol.event import Event
class Listener(Thread):
    """Background thread that streams events from a telnet event server.

    Connects to ``host:port``, reads newline-delimited JSON events, and
    dispatches each event to the registered callbacks whose filters
    match.  Per callback, the most recent events are retained up to
    ``event_limit``.
    """

    def __init__(self, host, port, event_limit):
        """Create the listener thread (does not connect or start it).

        Args:
            host: Event server hostname.
            port: Event server TCP port.
            event_limit: Max number of events kept per callback.
        """
        self.host = host
        self.port = port
        self.event_limit = event_limit
        self.conn = None
        self.events = {}
        self.callbacks = []
        self.timeout = 2 #s
        super(Listener, self).__init__()
        self._stop = TreadEvent()

    def stop(self):
        """Ask the listener loop to terminate."""
        self._stop.set()

    def is_stopped(self):
        """Return True once stop() has been called."""
        # isSet() is a deprecated alias; is_set() is the modern spelling.
        return self._stop.is_set()

    def is_connected(self):
        """Return True while a telnet connection object is held."""
        return self.conn is not None

    def connect(self):
        """Try to (re)connect to the event server; returns success as bool."""
        if self.is_connected():
            return True
        if self.is_stopped():
            return False
        try:
            log.debug("Connecting to event server %s:%i ..." % (self.host, self.port))
            self.conn = Telnet(self.host, self.port, self.timeout)
            log.debug("Established connection to event server.")
            return True
        except Exception:
            # Connection failures are expected; the run loop retries.
            if self.is_stopped():
                return False
            log.warning("Could not connect to event server %s:%i, "
                        "reason: \"%s\". Will retry in a few seconds ..." %
                        (self.host, self.port, sys.exc_info()[0]))
            return False

    def run(self):
        """Main loop: read events and dispatch them until stopped."""
        while not self.is_stopped():
            try:
                if not self.connect():
                    time.sleep(0.5)
                    continue
                data = self.conn.read_until("\n", self.timeout)
                if not data:
                    continue
                event = Event.from_json(data)
                if event is None:
                    continue
                for (callback, filters) in self.callbacks:
                    if not event.include(filters):
                        continue
                    # Append event to the callback's event stack.
                    events = self.append_event(str(callback), event)
                    # Call callback function.
                    callback(event, events)
            except socket.timeout:
                continue

    def append_event(self, key, event):
        """Append *event* to the callback's bounded list and return the list."""
        events = self.events[key]
        # BUG FIX: the original compared the list itself to the integer
        # limit (always False) and sliced self.events (a dict), so the
        # limit was never enforced.  Drop the oldest event when full.
        if len(events) == self.event_limit:
            events = events[1:]
        events.append(event)
        self.events[key] = events
        return events

    def register(self, callback, filters=None):
        """ Registers a callback function for new events

        Registered a method that will be called if a new event was received, while
        the first parameter contains the current event and the second parameter
        contains a list of events limited to "event_limit" defined in the configuration.

        Args:
            callback: The method to call for each new event.
            filters: A list of name, value tuples to include events that provides
                the given name, value pair. If no filter is specified, all event
                will be accepted. See Listener::include() for more information about
                filters.
        """
        # BUG FIX: mutable default argument ([]) replaced by None sentinel.
        if filters is None:
            filters = []
        self.callbacks.append((callback, filters))
        self.events[str(callback)] = []

    def unregister(self, callback):
        """ Unregisters a callback function from the listener

        Args:
            callback: The previously registered callback method.
        """
        self.callbacks = [entry for entry in self.callbacks if entry[0] != callback]
        self.events[str(callback)] = []
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class UserDetails(object):
    """Value object for user details, serialisable to/from alipay dicts.

    Holds user_change_mobile, user_mobile, user_name and user_relation;
    empty/None attributes are omitted from the serialised dict.
    """

    # Serialisable attribute names, in the order they are emitted.
    _FIELDS = ('user_change_mobile', 'user_mobile', 'user_name', 'user_relation')

    def __init__(self):
        self._user_change_mobile = None
        self._user_mobile = None
        self._user_name = None
        self._user_relation = None

    @property
    def user_change_mobile(self):
        return self._user_change_mobile

    @user_change_mobile.setter
    def user_change_mobile(self, value):
        self._user_change_mobile = value

    @property
    def user_mobile(self):
        return self._user_mobile

    @user_mobile.setter
    def user_mobile(self, value):
        self._user_mobile = value

    @property
    def user_name(self):
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        self._user_name = value

    @property
    def user_relation(self):
        return self._user_relation

    @user_relation.setter
    def user_relation(self, value):
        self._user_relation = value

    def to_alipay_dict(self):
        """Serialise the truthy attributes into a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue  # skip None / empty values
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a UserDetails from a dict; returns None for falsy input."""
        if not d:
            return None
        o = UserDetails()
        for name in UserDetails._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
import random
import time
class TicTacToe:
    """Console tic-tac-toe supporting human, random and minimax opponents."""
    # Game-mode codes: __init__ maps the user's choice onto one of these,
    # and take_turn dispatches on self.type accordingly.
    random_game = 1
    user_game = 0
    minimax_game = 2
    ai_only_game = 3
    fair_game = 4
    alpha_beta = 5
    def __init__(self, type = None):
        """Set up the board and game mode.

        Args:
            type: One of the class mode codes (0..5), or None to prompt
                the user on stdin for a mode name.

        NOTE(review): `type` shadows the builtin, and an invalid value
        hits the bare `return`, leaving the instance without A1..C3 /
        indices / won attributes — callers must pass a valid mode.
        """
        # Board template: header row then rows A-C of '-' placeholders.
        self.board = "\t1\t2\t3\nA\t-\t-\t-\nB\t-\t-\t-\nC\t-\t-\t-\n"
        # self.user_game = 0
        # self.random_game = 1
        if type is None:
            # Interactive mode selection (loops until a known name is typed).
            playerIn = ""
            while playerIn != "minimax" and playerIn != "random" and playerIn != "user" and playerIn != "ai" and playerIn != "fair" and playerIn != "ab":
                print("What type of game would you like to play? Options:\nMinimax\nRandom\nUser\nAI\nFair\nAB")
                playerIn = input().lower()
            if playerIn == "minimax":
                type = TicTacToe.minimax_game
            elif playerIn == "random":
                type = TicTacToe.random_game
            elif playerIn == "user":
                type = TicTacToe.user_game
            elif playerIn == "ai":
                type = TicTacToe.ai_only_game
            elif playerIn == "fair":
                type = TicTacToe.fair_game
            elif playerIn == "ab":
                type = TicTacToe.alpha_beta
        elif isinstance(type, int) and (type == 0 or type == 1 or type ==2 or type == 3 or type == 4 or type == 5):
            type = type
        else:
            return
        self.type = type
        # String offsets of each cell inside the board template above.
        self.A1 = 9
        self.A2 = 11
        self.A3 = 13
        self.B1 = 17
        self.B2 = 19
        self.B3 = 21
        self.C1 = 25
        self.C2 = 27
        self.C3 = 29
        self.indices = [self.A1,self.A2,self.A3,self.B1,self.B2,self.B3,self.C1,self.C2,self.C3]
        self.won = False
def print_instructions(self):
print("Each player gets a letter, either X or O.")
print("When you are prompted, place your letter in one of the spots on the board that is occupied by a dash.")
print("To do this, type in the location you want to play it byt writing the letter, then number.")
print("For instance, typing A1 would put your letter in the top left spot.")
print()
return
def print_board(self):
print(self.board)
return
def is_valid_move(self, index):
if index is None or len(index) != 2:
return False
first = index[0]
second = index[1]
if first != "A" and first != "B" and first != "C":
return False
if second != "1" and second != "2" and second != "3":
return False
loc = self.getIndex(index)
return self.board[loc] == "-"
def is_valid_AI_move(self, index, board):
return board[index] == "-"
def place_player(self, player, index):
self.board = self.board[0:index] + player + self.board[index + 1:]
    def take_manual_turn(self, player):
        """Prompt on stdin until a legal cell is entered, then place *player* there."""
        index = None  # NOTE(review): unused; the input is read into `move` below
        while True:
            print("Enter a valid move as a letter then a number not separated by a space. For example, 'A1'")
            move = input()
            if self.is_valid_move(move):
                break
        self.place_player(player, self.getIndex(move))
        return
def getIndex(self, index):
first = index[0]
second = index[1]
if first == "A":
if second == "1":
return self.A1
if second == "2":
return self.A2
if second == "3":
return self.A3
if first == "B":
if second == "1":
return self.B1
if second == "2":
return self.B2
if second == "3":
return self.B3
if first == "C":
if second == "1":
return self.C1
if second == "2":
return self.C2
if second == "3":
return self.C3
def take_turn(self, player):
if self.type == self.ai_only_game:
self.take_minimax_turn(player)
elif self.type == self.user_game or player == "X":
print(player + ": it is your turn to move")
self.take_manual_turn(player)
elif self.type == self.random_game:
self.take_random_turn(player)
elif self.type == self.minimax_game:
self.take_minimax_turn(player, 0)
elif self.type == self.fair_game:
self.take_minimax_turn(player, 1)
elif self.type == self.alpha_beta:
self.take_ab_turn(player)
return
def check_col_win(self, player, board):
if board[self.A1] == player and board[self.A2] == player and board[self.A3] == player:
return True
if board[self.B1] == player and board[self.B2] == player and board[self.B3] == player:
return True
if board[self.C1] == player and board[self.C2] == player and board[self.C3] == player:
return True
return False
def check_row_win(self, player, board):
if board[self.A1] == player and board[self.B1] == player and board[self.C1] == player:
return True
if board[self.A2] == player and board[self.B2] == player and board[self.C2] == player:
return True
if board[self.A3] == player and board[self.B3] == player and board[self.C3] == player:
return True
return False
def check_diag_win(self, player, board):
if board[self.A1] == player and board[self.B2] == player and board[self.C3] == player:
return True
if board[self.C1] == player and board[self.B2] == player and board[self.A3] == player:
return True
return False
def check_win(self, player, board):
return self.check_col_win(player, board) or self.check_row_win(player, board) or self.check_diag_win(player, board)
def check_tie(self, board):
if board.count("-") == 0:
return True
return False
def take_random_turn(self, player):
valid = False
while not valid:
move = random.randrange(8)
if self.is_valid_AI_move(self.indices[move], self.board):
self.place_player(player,self.indices[move])
return
print(move)
def reset(self):
self.board = "\t1\t2\t3\nA\t-\t-\t-\nB\t-\t-\t-\nC\t-\t-\t-\n"
self.won = False
self.print_board()
def opposite_player(self, player):
if player == "X":
return "O"
return "X"
    def minimax(self, player, max, depth):
        """Minimax value of the current board with 'O' as the maximizer.

        Args:
            player: The letter to move at this node.
            max: Truthy when maximizing for 'O'.  NOTE(review): shadows
                the builtin `max` inside this method.
            depth: Remaining search depth; 0 scores unknown positions 0.

        Returns:
            +10 for an 'O' win, -10 for an 'X' win, 0 for a tie or
            depth cutoff, otherwise the best child's score.  The board
            is mutated during the search and restored before returning.
        """
        if self.check_win("O", self.board):
            return 10
        elif self.check_win(self.opposite_player("O"), self.board):
            return -10
        if self.check_tie(self.board):
            return 0
        if depth != 0:
            present_board = self.board
            keep = 0
            if max:
                keep_success = -11
                for move in self.indices:
                    self.board = present_board
                    if self.is_valid_AI_move(move, self.board):
                        self.place_player(player, move)
                        success = self.minimax(self.opposite_player(player), False, depth - 1)
                        if success > keep_success:
                            keep = move
                            keep_success = success
            else:
                keep_success = 11
                for move in self.indices:
                    self.board = present_board
                    if self.is_valid_AI_move(move, self.board):
                        self.place_player(player, move)
                        success = self.minimax(self.opposite_player(player), True, depth - 1)
                        if success < keep_success:
                            keep = move
                            keep_success = success
            # Restore the board before handing the score back up.
            self.board = present_board
            return keep_success
        return 0
    def ab_minimax(self, player, max, depth, alpha, beta):
        """Minimax with alpha-beta pruning, scored from O's point of view.

        Returns +10 / -10 / 0 for an O win / X win / tie, otherwise the
        tightened alpha (maximizing) or beta (minimizing) bound.  NOTE: the
        parameter name `max` shadows the builtin.  Temporarily mutates
        self.board during the search, restoring it before returning.
        """
        if self.check_win("O", self.board):
            return 10
        elif self.check_win(self.opposite_player("O"), self.board):
            return -10
        if self.check_tie(self.board):
            return 0
        if depth != 0:
            present_board = self.board
            # `keep` tracks the best move but is never returned to callers.
            keep = 0
            if max:
                for move in self.indices:
                    self.board = present_board
                    if self.is_valid_AI_move(move, self.board):
                        self.place_player(player, move)
                        success = self.ab_minimax(self.opposite_player(player), False, depth - 1, alpha, beta)
                        if success > alpha:
                            keep = move
                            alpha = success
                        if alpha >= beta:
                            # Beta cutoff: the minimizer will avoid this branch.
                            break
                self.board = present_board
                return alpha
            else:
                for move in self.indices:
                    self.board = present_board
                    if self.is_valid_AI_move(move, self.board):
                        self.place_player(player, move)
                        success = self.ab_minimax(self.opposite_player(player), True, depth - 1, alpha, beta)
                        if success < beta:
                            keep = move
                            beta = success
                        if beta <= alpha:
                            # Alpha cutoff: the maximizer will avoid this branch.
                            break
                self.board = present_board
                return beta
        # Depth budget exhausted on a non-terminal position: score as neutral.
        return 0
    def take_minimax_turn(self, player, version):
        """Play the best move found by plain minimax.

        version 1 searches 3 plies; any other version searches with depth
        100, i.e. effectively the full game tree for tic-tac-toe.
        """
        if version == 1:
            depth = 3
        else:
            depth = 100
        # best holds [move, score]; -11 is below the minimum possible score.
        best = [0, -11]
        present_board = self.board
        for move in self.indices:
            # Restore the board before trying each candidate move.
            self.board = present_board
            if self.is_valid_AI_move(move, self.board):
                self.place_player(player, move)
                current = [move, self.minimax(self.opposite_player(player), False, depth)]
                if current[1] > best[1]:
                    best = current
        self.board = present_board
        self.place_player(player, best[0])
    def take_ab_turn(self, player):
        """Play the best move found by a 3-ply alpha-beta search."""
        # best holds [move, score]; -110 / 110 act as -inf / +inf bounds.
        best = [0, -110]
        present_board = self.board
        for move in self.indices:
            # Restore the board before trying each candidate move.
            self.board = present_board
            if self.is_valid_AI_move(move, self.board):
                self.place_player(player, move)
                current = [move, self.ab_minimax(self.opposite_player(player), False, 3, best[1], 110)]
                if current[1] > best[1]:
                    best = current
        self.board = present_board
        self.place_player(player, best[0])
    def play_game(self):
        """Run one full game loop, then offer a rematch (via recursion)."""
        self.reset()
        self.print_instructions()
        # Starts as "O" so the flip at the top of the loop makes "X" move first.
        player = "O"
        while not self.won:
            if player == "X":
                player = "O"
            else:
                player = "X"
            # Time each turn so AI search cost is visible to the user.
            start = time.time()
            self.take_turn(player)
            end = time.time()
            print("This turn took: ", end-start, " seconds")
            self.won = self.check_win(player, self.board)
            self.print_board()
            if self.check_tie(self.board):
                break
        if self.won:
            print(player + " wins!\n")
        else:
            print("Tie")
        again = input("Would you like to play again?\n")
        if again == "Yes" or again == "yes" or again == "y" or again == "Y":
            # Rematch by recursion; fine for interactive use.
            self.play_game()
        return
| dmuhlner/ATCS-2021 | Semester 2/TicTacToe/tictactoe.py | tictactoe.py | py | 11,488 | python | en | code | 0 | github-code | 13 |
from code import interact
import os
import sys
import re
import argparse
import datetime as dt
import webbrowser as wb
def intro():
    """Print the RPHAX ASCII-art banner and tool version."""
    print("*****************************************************************")
    print(
        '''
         ____   ____   __  __ ___   _  __
        / __ \ / __ \ / / / //   | | |/ /
       / /_/ // /_/ // /_/ // /| | |   / 
      / _, _// ____// __  // ___ | /    |
     /_/ |_|/_/    /_/ /_//_/  |_|/_/|_| 
     Rapid Prototyping of Hardware Accelerators on Xilinx FPGAs - v0.1
     '''
    )
    print("*****************************************************************")
def tlv(filename, rundir):
    """Compile a TL-Verilog source file to Verilog with sandpiper-saas.

    Args:
        filename: path of the input .tlv file.
        rundir: run directory name under runs/; output is written to
            runs/<rundir>/tlv_out.

    Exits the program if the compiler reports failure.
    """
    print("\n************Interpreting TL-V with Sandpiper****************\n")
    out_file = filename[0:len(filename)-4]  # strip the ".tlv" extension
    print("Compiling "+filename+" with Sandpiper-Saas")
    sp = "sandpiper-saas -i "+filename+" -o "+out_file+".v --iArgs --default_includes --outdir=runs/"+rundir+"/tlv_out"
    # Bug fix: os.system does not raise on command failure, so the original
    # try/except could never detect a failed compile -- check the exit status.
    if os.system(sp) == 0:
        print("Sandpiper has generated the verilog/systemverilog files")
        print("\n*******************************************************\n")
    else:
        print("Error - Verilog file not generated")
        exit()
def bsc():
    # Placeholder for a future Bluespec compiler (bsc) flow.
    # TO DO
    pass
def test1(filename):
print("Filename is ",filename)
print("Extension checker=",filename[len(filename)-4:len(filename)])
def var_share(var, fsuffix):
    """Share a value with TCL/shell scripts by writing it to tmp_<fsuffix>.txt.

    Args:
        var: string content to write (a trailing newline is appended).
        fsuffix: suffix used to build the file name tmp_<fsuffix>.txt.
    """
    print("\n Content to be written in tmp_"+fsuffix+".txt: ", var)
    filename = "tmp_"+fsuffix+".txt"
    # Remove any stale file first.  Bug fixes vs. the original: os.remove
    # replaces a non-portable `os.system("rm -rf ...")`, and the write now
    # uses `with` -- the old `finally: f.close()` raised NameError whenever
    # open() itself failed (f was never bound).
    try:
        os.remove(filename)
    except OSError:
        pass
    try:
        with open(filename, "w") as f:
            f.write(var + "\n")
    except OSError:
        print("Couldnt create temporary file")
    else:
        print("\n Variable written in tmp_"+fsuffix+".txt: ", var)
def pwd_write(dirname):
    """Store *dirname* in tmp.txt so the Vivado TCL flow can read it later."""
    print("\n****************Setting Paths**********************\n")
    out = open("tmp.txt", "w")
    try:
        try:
            out.write(dirname)
        except:
            print("Error - Writing path to temporary file")
        else:
            print("Path stored to read from TCL in temporary file")
    finally:
        # Always release the handle, success or not.
        out.close()
def merge_files(files, out_file):
    """Concatenate every file in *files* into *out_file*.

    A newline is appended after each source file's contents so the next
    file starts on a fresh line.
    """
    with open(out_file, 'w') as sink:
        for path in files:
            with open(path) as source:
                sink.write(source.read())
            sink.write("\n")
def automate_axi():
    """Generate the AXI-Lite glue from the harness port list.

    Parses harness_axi.v for input/output declarations, then emits three
    fragments -- register wires, the read-address decoder, and the harness
    instantiation -- and splices them into the AXI-Lite slave template in
    ../../src/axi_lite/.  At most eight data ports (beyond clock and reset)
    are supported.
    """
    fname = "harness_axi.v"
    with open(fname, "r") as f:
        ports = []
        for line in f:
            if(len(line.split()) > 0):
                if(line.split()[0] == "input" or line.split()[0] == "output"):
                    # A bracketed token like [31:0] right after the direction
                    # keyword gives the port width; otherwise width 1.
                    if (re.findall('\[.*?\]', line.split()[1:][0]) != []):
                        width = int(line.split()[1:][0][1:-1].split(":")[0]) + 1
                    else:
                        width = 1
                    # Collect every non-bracket token on the line as a port
                    # name (commas stripped), tagged "in" or "out".
                    if(line.split()[0] == "input"):
                        for text in (line.split()[1:]):
                            if (re.findall('\[.*?\]', text) == []):
                                ports.append([text.replace(",", ""), "in", width])
                    elif(line.split()[0] == "output"):
                        for text in (line.split()[1:]):
                            if (re.findall('\[.*?\]', text) == []):
                                ports.append([text.replace(",", ""), "out", width])
    f_wires = open("wires.txt", "w")
    f_addr_dec = open("addr_dec.txt", "w")
    f_inst = open("inst.txt", "w")
    inputs = 0
    curr_port = 0
    f_addr_dec.write(
        "	  always @(*)\n	  begin\n	      case ( axi_araddr[ADDR_LSB+OPT_MEM_ADDR_BITS:ADDR_LSB] )\n")
    f_inst.write("	harness_axi harness_axi_inst(\n")
    # ports[0]/ports[1] are assumed to be clock and reset in that order.
    f_inst.write("	."+ports[0][0]+"(S_AXI_ACLK),\n")
    f_inst.write("	."+ports[1][0]+"(S_AXI_ARESETN),\n")
    for port in ports[2:]:
        if curr_port >= 8:
            sys.exit("Error: Maximum allowed ports is eight.")
        if port[1] == "in":
            # Inputs are driven by AXI slave registers slv_reg<N>.
            f_inst.write("	."+port[0]+"(slv_reg"+str(inputs)+"),\n")
            f_addr_dec.write("	        3'h"+str(curr_port) +
                             " : reg_data_out <= "+"slv_reg"+str(inputs)+";\n")
            inputs += 1
        else:
            # Outputs get a named wire plus a read-back decoder entry.
            f_inst.write("	."+port[0]+"("+port[0]+"),\n")
            f_addr_dec.write("	        3'h"+str(curr_port) +
                             " : reg_data_out <= "+port[0]+";\n")
            f_wires.write("	wire [C_S_AXI_DATA_WIDTH-1:0]	"+port[0]+";\n")
        curr_port += 1
    f_addr_dec.write("	        default : reg_data_out <= 0;\n")
    f_addr_dec.write("	      endcase\n")
    f_addr_dec.write("	  end\n")
    # Rewind two bytes to drop the trailing ",\n" of the last port line.
    f_inst.seek(f_inst.tell() - 2, os.SEEK_SET)
    f_inst.write("\n	);\n\n")
    f_inst.write("endmodule\n")
    f_wires.close()
    f_addr_dec.close()
    f_inst.close()
    # Splice the fragments into the AXI-Lite slave template, then clean up.
    merge_files(["wires.txt", "addr_dec.txt", "inst.txt"],
                "../../src/axi_lite/harness_axi_ip_v1_0_S00_AXI_part2.v")
    merge_files(["../../src/axi_lite/harness_axi_ip_v1_0_S00_AXI_part1.v", "../../src/axi_lite/harness_axi_ip_v1_0_S00_AXI_part2.v"],
                "../../src/axi_lite/harness_axi_ip_v1_0_S00_AXI.v")
    os.system("rm wires.txt addr_dec.txt inst.txt")
def ipgen(dirname, interface):
    """Package the generated RTL as a Vivado IP.

    Args:
        dirname: rphax root directory containing src/.
        interface: "axi_s" (AXI-Stream) or "axi_l" (AXI-Lite); anything
            else aborts via sys.exit().

    NOTE(review): os.system does not raise on a non-zero exit status, so
    the except branch only catches Python-level errors -- a failed Vivado
    run is not detected here.  Confirm whether exit codes should be checked.
    """
    print("\n**************Starting IP Packaging******************\n")
    try:
        if(interface == "axi_s"):
            os.system("vivado -mode batch -source "+dirname+"/src/ip_create.tcl")
        elif(interface == "axi_l"):
            # AXI-Lite needs the generated wrapper glue before packaging.
            automate_axi()
            os.system("vivado -mode batch -source "+dirname+"/src/axi_lite/ip_create.tcl")
        else:
            print("Error: Invalid --interface argument. Available values are 'axi_s' and 'axi_l'")
            sys.exit()
    except:
        print("Error - IP Generation")
        exit()
    else:
        print("\n****************Vivado IP Created**********************\n")
    finally:
        # Runs in every case; currently a no-op placeholder.
        ip_set_params()
def bdgen(dirname, interface):
    """Create the Vivado block design (without a bitstream).

    Args:
        dirname: rphax root directory containing src/.
        interface: accepted for call symmetry with bdgen_bitstream but
            currently unused -- the same bd_create.tcl runs either way.
    """
    print("\n****************Starting Block Design**********************\n")
    try:
        os.system("vivado -mode batch -source "+dirname+"/src/bd_create.tcl")
    except:
        print("Error generating Block Design")
        exit()
    else:
        print("\n****************Vivado Block Design Created**********************\n")
def bdgen_bitstream(dirname, interface):
    """Create the Vivado block design and run through bitstream generation.

    Args:
        dirname: rphax root directory containing src/.
        interface: "axi_s" or "axi_l"; selects the matching TCL flow.
    """
    print("\n****************Starting Block Design and Bitstream**********************\n")
    try:
        if(interface == "axi_s"):
            os.system("vivado -mode batch -source "+dirname+"/src/bd_bitstream_create.tcl")
        elif(interface == "axi_l"):
            os.system("vivado -mode batch -source "+dirname+"/src/axi_lite/bd_bitstream.tcl")
        else:
            print("Error: Invalid --interface argument. Available values are 'axi_s' and 'axi_l'")
            sys.exit()
    except:
        print("Error generating block design and bitstream. Try generating upto block design and use gui for bitstream ")
        exit()
    else:
        print("\n****Vivado Block Design and Bitstream Created**********\n")
def projgen(dirname):
    """Create a Vivado project from the packaged IP via src/project.tcl.

    Args:
        dirname: rphax root directory containing src/.
    """
    print("\n****************Creating Vivado Project from IP**********************\n")
    try:
        # Bug fix: the original concatenated "-source" directly onto the
        # path ("-source/.../project.tcl"), which Vivado rejects; a space
        # is required after the flag.
        os.system("vivado -mode batch -source " + dirname + "/src/project.tcl")
    except:
        print("Error generating project")
        exit()
    else:
        print("\n****************Block Design Generated*****************\n")
def ip_set_params():
    # Placeholder: intended to push parameter overrides into the packaged IP.
    pass
def makerchip_create(design, fromURL=None):
    """Open *design* in the Makerchip IDE, optionally seeding it from a URL.

    Args:
        design: .tlv file name passed to the makerchip launcher.
        fromURL: optional template URL; when given, makerchip is started
            with --from-url so the design is initialized from it.
    """
    # `is None` / plain `else` replace the original `== None` test and the
    # redundant `elif fromURL != None` (the two branches are exhaustive).
    if fromURL is None:
        os.system("makerchip " + design)
    else:
        os.system("makerchip --from-url " + fromURL + " " + design)
def create_rundir():
    """Create the top-level runs/ directory; no-op if it already exists."""
    try:
        os.mkdir("runs", 0o777)
    except FileExistsError:
        # Narrowed from a bare except: only "already exists" is expected
        # here; real failures (permissions, bad cwd) should not be
        # silently mislabeled as the directory existing.
        print("** Run directory already exists")
def setup_runs(project_name):
    """Create runs/run_<project>_<YYYYMMDD_HHMM> and return its name.

    Side effect: temporarily chdirs into runs/ (and back out) to create
    the directory; the caller chdirs into the returned directory later.
    Exits the program if the directory cannot be created.
    """
    try:
        create_rundir()
        os.chdir("runs")
    except:
        pass
    # Build a YYYYMMDD_HHMM stamp from str(now) = "YYYY-MM-DD HH:MM:SS.ffffff".
    a=dt.datetime.now()
    b=str(a).split(" ")
    c=b[1].split(":")
    e=b[0].split("-")
    f="".join(e)
    d=f+"_"+c[0]+c[1]
    try:
        run_dirname = "run_"+project_name+"_"+d
        print("Run Folder :",run_dirname)
        os.mkdir(run_dirname)
        os.chdir("../")
        print(os.getcwd())
    except:
        print("** Error configuring runs")
        exit()
    # try:
    #     os.chdir(run_dirname)
    # except:
    #     print(" Error changing to run_dir")
    return run_dirname
def clean(rphax_dir_path):
    """Delete all run directories under <rphax_dir_path>/runs.

    Args:
        rphax_dir_path: root of the rphax checkout.

    Bug fixes: sys.platform reports lowercase values such as "win32",
    "linux" and "darwin" -- never the capitalized platform.system()-style
    names the original compared against, so no branch ever matched; and
    the rm command previously passed the literal string "run_dirs"
    instead of the computed glob.
    """
    run_dirs = rphax_dir_path + "/runs/*"
    if sys.platform == "win32":
        os.system("powershell.exe rm -f tmp.txt")
    elif sys.platform in ("linux", "darwin"):
        os.system("rm -rf " + run_dirs)
    else:
        print("Error cleaning temporary files")
def check_extension(filename):
    """Abort the program unless *filename* ends with the .tlv extension."""
    print("\n****************Validating file extensions**********************\n")
    print("Design file = ", filename)
    # endswith is the idiomatic form of the original 4-char slice compare
    # and behaves identically (names shorter than 4 chars fail either way).
    if not filename.endswith(".tlv"):
        print("Only .tlv files are supported")
        exit()
def output_files(project_name, rundir):
    """Collect the PYNQ deliverables (hwh, bd tcl, bitstream) into pynq_out/.

    Args:
        project_name: Vivado project name used to build the source paths.
        rundir: run directory name (currently unused; kept for call
            compatibility).
    """
    hwh_file_path = "./run_bd/"+project_name+".srcs/sources_1/bd/design_1/hw_handoff/design_1.hwh"
    tcl_file_path = "./run_bd/"+project_name+".srcs/sources_1/bd/design_1/hw_handoff/design_1_bd.tcl"
    bit_file_path = "./run_bd/"+project_name+".runs/impl_1/design_1_wrapper.bit"
    try:
        os.system("mkdir pynq_out")
        print(hwh_file_path)
        print(tcl_file_path)
        print(bit_file_path)
        # Bug fix: the copy commands were plain strings, so cp received the
        # literal text "{hwh_file_path}" -- they must be f-strings.  The
        # bitstream itself was also never copied despite being printed.
        os.system(f"cp -f {hwh_file_path} pynq_out/")
        os.system(f"cp -f {tcl_file_path} pynq_out/")
        os.system(f"cp -f {bit_file_path} pynq_out/")
    except:
        print("Error copying output files")
def main():
    """CLI entry point: parse arguments and dispatch to the selected mode."""
    intro()
    parser = argparse.ArgumentParser(description = "RPHAX")
    parser.add_argument("--clean",action="store_true",help="Clean all previous runs")
    subparsers = parser.add_subparsers(dest = "mode",help="commands")
    # Generate mode: TLV -> Verilog -> IP -> block design (-> bitstream).
    generate_parser = subparsers.add_parser('generate',help="Generate mode: IP-> Block Design -> Bitstream")
    generate_parser.add_argument('-b','--bitstream', action="store_true", help = "Generate upto Bitstream")
    generate_parser.add_argument('-c','--connect',action="store_true",help = "Connect Local/Remote FPGA")
    generate_parser.add_argument('-py','--pynq',action="store_true",help = "Open PYNQ Jupyter Notebook")
    generate_parser.add_argument('-u','--url',type=str,help = "PYNQ URL Format = http://url:port", default="http://pynq:9090")
    generate_parser.add_argument("input_file", help = "Input .tlv file", type=str)
    generate_parser.add_argument('-if',"--interface", help = "AXI Interface: axi_l for axi lite and axi_s for axi stream", type=str,default="axi_s")
    #parser.add_argument()
    #Connect Mode
    connect_parser = subparsers.add_parser('connect',help="Connect mode: Connect (Local/Remote) Program &| probe designs on FPGA")
    connect_parser.add_argument('bit_file',help="Bitstream Path",type=str)
    connect_parser.add_argument('-ip',help="IP address of FPGA. Defaults to localhost",default="localhost",type=str)
    connect_parser.add_argument('-p',help="Port number. Defaults to 3121", default=3121, type=int)
    connect_parser.add_argument('-probe',help="Probe File Path",type=str)
    #makerchip create mode
    create_parser = subparsers.add_parser('makerchip',help="Develop RTL Design in Makerchip App")
    create_parser.add_argument('design', help="Name of the .tlv file", type=str,nargs=1)
    create_parser.add_argument('--from_url', help="Template URL", type=str,default=" ")
    create_parser.add_argument('--server',help="Specify a different makerchip server", type=str,default="https://app.makerchip.com")
    create_parser.add_argument('--makerchip_args',help="Add other makerchip arguments", type=str,default=" ")
    args = parser.parse_args()
    if(args.mode == "generate"):
        # Full flow: validate the input, set up a timestamped run directory,
        # compile TLV, package the IP, and build the block design/bitstream.
        filename = args.input_file
        check_extension(filename)
        rphax_dir_path = os.getcwd()
        l_filename = filename.split(".")
        project_name = l_filename[0]
        rundir = setup_runs(project_name)
        run_dir_path = "runs/"+rundir
        run_dir_abs_path = rphax_dir_path+"/runs/"+rundir
        tlv(filename,rundir)
        os.chdir(run_dir_path)
        # Hand paths/names to the Vivado TCL scripts via tmp_*.txt files.
        var_share(run_dir_abs_path,"bd")
        var_share(project_name,"projectname")
        ipgen(rphax_dir_path, args.interface)
        if(args.pynq):
            wb.open(args.url,new=2)
        if(args.bitstream):
            bdgen_bitstream(rphax_dir_path, args.interface)
        else:
            bdgen(rphax_dir_path, args.interface)
        output_files(project_name,rundir)
        #clean()
    if(args.mode == "makerchip"):
        print("Opening design in Makerchip to edit...")
        # Build the makerchip command from whichever optional flags differ
        # from their sentinel defaults (" " / default server URL).
        if(args.from_url != " " and args.server !="https://app.makerchip.com" and args.makerchip_args != " "):
            command = "makerchip --from_url "+args.from_url+" --server "+args.server+" "+args.makerchip_args+" "+args.design[0]
        elif(args.from_url != " " and args.server !="https://app.makerchip.com"):
            command = "makerchip --from_url "+args.from_url+" --server "+args.server+" "+args.design[0]
        elif(args.from_url != " "):
            command = "makerchip --from_url "+args.from_url+" "+args.design[0]
        elif(args.server !="https://app.makerchip.com"):
            command = "makerchip --server "+args.server+" "+args.design[0]
        else:
            command = "makerchip "+args.design[0]
        print(command)
        os.system(command)
    if(args.mode == "connect"):
        # Connect mode is not implemented yet.
        pass
    if(args.clean):
        # NOTE(review): "buns" below looks like a typo for the "runs"
        # directory, and sys.platform values are lowercase
        # ("win32"/"linux"/"darwin"), never "Windows"/"Linux"/"Darwin"
        # (those come from platform.system()) -- so every branch falls
        # through to the raised Exception.  Confirm intended behaviour.
        try:
            if(sys.platform == "Windows"):
                os.system("rmdir /f buns")
            elif(sys.platform in ["Linux","Darwin"]):
                os.system('rm -rf buns')
            else:
                raise Exception
        except:
            print("Error Cleaning Files")
        else:
            print("Succesfully cleaned the files")
if __name__ == '__main__':
    # CLI entry point when executed as a script.
    main()
| shariethernet/RPHAX | rphax.py | rphax.py | py | 14,636 | python | en | code | 12 | github-code | 13 |
import requests
from bs4 import BeautifulSoup
from collections import Counter, defaultdict
import re
from nltk import bigrams, trigrams
import nltk
import datetime
nltk.download('stopwords')
def get_post_titles(url):
    """Scrape (title, href) pairs from the forum's recent-posts table.

    Args:
        url: base forum URL to fetch (a browser User-Agent is sent so the
            request is not rejected as a bot).

    Returns:
        List of (title, href) tuples, one per linked row of #posts_table.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'}
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    titles_with_urls = []
    # Post rows carry this exact inline style; the third cell holds the link.
    for tr in soup.find('table', {'id': 'posts_table'}).find_all('tr', {'style': 'vertical-align:baseline;'}):
        td = tr.find_all('td')[2]
        a = td.find('a')
        if a is not None:
            title = a.get_text()
            url = a['href']
            titles_with_urls.append((title, url))
    return titles_with_urls
def get_keywords(titles_with_urls):
    """Extract the 100 most common uni/bi/trigram keywords from post titles.

    Args:
        titles_with_urls: list of (title, url) tuples.

    Returns:
        List of (keyword, count, urls) tuples ordered by descending count
        and then alphabetically; count is the number of titles that
        contained the keyword.
    """
    stopwords = nltk.corpus.stopwords.words('english')
    keyword_dict = defaultdict(list)
    for title, url in titles_with_urls:
        # Remove all non-letter characters from the title
        title = re.sub(r'[^a-zA-Z\s]', '', title)
        # Convert the title to lowercase
        title = title.lower()
        # Split the title into a list of words
        words = title.split()
        # Remove stopwords and numbers from the list of words
        words = [word for word in words if word not in stopwords and not word.isnumeric()]
        # Create trigrams from the list of words
        trigram_list = list(trigrams(words))
        # Add the trigrams and their associated URLs to the keyword dictionary
        for trigram in trigram_list:
            keyword_dict[' '.join(trigram)].append(url)
        # Create bigrams from the list of words
        bigram_list = list(bigrams(words))
        # Add the bigrams and their associated URLs to the keyword dictionary
        for bigram in bigram_list:
            keyword_dict[' '.join(bigram)].append(url)
        # Add the unigrams (words) and their associated URLs to the keyword dictionary
        for word in words:
            keyword_dict[word].append(url)
    # Count the frequency of each keyword
    keyword_counts = {k: len(v) for k, v in keyword_dict.items()}
    # Filter trigrams and bigrams with frequency >= 1 and combine with unigrams
    # NOTE(review): every keyword has count >= 1, so this filter currently
    # keeps all entries -- confirm whether a higher threshold was intended.
    filtered_keywords = {k: v for k, v in keyword_dict.items() if keyword_counts[k] >= 1 or len(k.split()) == 1}
    # Sort the keywords by frequency and return the top 100 most common keywords with their count and associated URLs
    sorted_keywords = sorted(filtered_keywords.items(), key=lambda x: (-keyword_counts[x[0]], x[0]))
    return [(k, keyword_counts[k], v) for k, v in sorted_keywords[:100]]
if __name__ == '__main__':
    # Scrape the forum front page and report the top keywords.
    base_url = 'https://www.bogleheads.org'
    titles_with_urls = get_post_titles(base_url)
    keywords = get_keywords(titles_with_urls)
    # Date parts are computed but currently unused below.
    current_date = datetime.date.today()
    current_year = current_date.year
    current_month = current_date.month
    # You can print or display the keywords if needed
    print(keywords)
| primaryobjects/bogleheads-keywords | bogleheads_scraper.py | bogleheads_scraper.py | py | 3,053 | python | en | code | 1 | github-code | 13 |
import json
import paho.mqtt.client as mqtt
import random
import time
import threading
from dataclasses import dataclass
from typing import Dict
from mqtt import FakeMQTTDevice
class FakeSensor(FakeMQTTDevice):
    """Defines a fake sensor.

    Objects of this class periodically publish a random value to the MQTT
    topic `state_topic`.  To add some real-world feeling to it, random
    values are drawn from a normal distribution with mean `value_mean` and
    standard deviation `value_stddev`.
    (Defect fixed: dataset metadata had been fused onto the final code
    line of this class; the clean line is restored here.)
    """

    def __init__(
        self,
        id: str,
        name: str,
        state_topic: str,
        value_mean: float,
        value_stddev: float,
        unit: str,
        transmission_interval_seconds: float,
    ):
        """Store the sensor's identity, value distribution and publish rate."""
        super(FakeSensor, self).__init__()
        self.id = id
        self.name = name
        self.state_topic = state_topic
        self.value_mean = value_mean
        self.value_stddev = value_stddev
        self.unit = unit
        self.transmission_interval_seconds = transmission_interval_seconds

    def run(self):
        """Publish one random reading per transmission interval, forever."""
        self.mqtt_client.loop_start()
        while True:
            # Draw a plausible reading around the configured mean.
            value = random.gauss(self.value_mean, self.value_stddev)
            # Publish to its MQTT topic, formatted to two decimals.
            self.mqtt_client.publish(self.state_topic, f"{value:.2f}")
            # Sleep until the next transmission.
            time.sleep(self.transmission_interval_seconds)
from __future__ import annotations
from typing import Union
from gi.repository import Gio, GObject
import turtlico.lib as lib
import turtlico.lib.legacy as legacy
from turtlico.locale import _
FILE_VERSION_FORMAT = 2
DEFAULT_PROJECT = [('fver', FILE_VERSION_FORMAT), ('plugin', 'turtle')]
class CorruptedFileException(Exception):
    """Raised when a project file cannot be parsed (invalid .tcp syntax)."""
    def __init__(self):
        super().__init__(_('File syntax is corrupted'))
class RemovedUsedPluginException(Exception):
    """Raised when disabling a plugin whose commands still occur in the code."""
    # The plugin the user attempted to disable.
    plugin: lib.Plugin
    # Commands from that plugin that are still present in the program.
    missing_commands: set[lib.Command]
    def __init__(self, missing_commands: set[lib.Command], plugin: lib.Plugin):
        super().__init__()
        self.missing_commands = missing_commands
        self.plugin = plugin
    def __str__(self) -> str:
        # List at most five offending commands, then append "and others".
        if len(self.missing_commands) > 5:
            commands = (', '.join(
                [f'"{c.definition.help}"'
                 for c in list(self.missing_commands)[:5]
                 ])
                + _(' and others'))
        else:
            commands = ', '.join(
                [f'"{c.definition.help}"' for c in self.missing_commands])
        return _('Command(s) {} from plugin "{}" are present in the program. Please remove them before disabling the plugin.').format(  # noqa: E501
            commands,
            self.plugin.name
        )
class ProjectBuffer(GObject.Object):
    """Contains information about a project"""
    __gtype_name__ = "ProjectBuffer"
    # GFile of the currently opened project (None until opened/saved).
    _project_file: Gio.File
    # Backing store for the run_in_console GObject property.
    _run_in_console: bool
    # command-id -> prototype Command from the currently enabled plugins.
    available_commands: dict[str, lib.Command]
    # The program code buffer (undo history enabled).
    code: lib.CodeBuffer
    # plugin-id -> loaded Plugin.
    enabled_plugins: dict[str, lib.Plugin]
    # True while the buffer holds unsaved modifications.
    changed = GObject.Property(type=bool, default=False)
    @GObject.Property(type=bool, default=False)
    def run_in_console(self):
        # Whether the program should be executed inside a console window.
        return self._run_in_console
    @run_in_console.setter
    def run_in_console(self, value):
        # Only mark the buffer dirty on an actual value change.
        if value != self._run_in_console:
            self._run_in_console = value
            self.props.changed = True
    @GObject.Property(type=Gio.File)
    def project_file(self):
        """The Gio.File of currently opened project"""
        return self._project_file
    @project_file.setter
    def project_file(self, value):
        self._project_file = value
    @GObject.Signal
    def available_commands_changed(self):
        # Emitted whenever the set of available commands is rebuilt.
        pass
    def __init__(self):
        super().__init__()
        self._project_file = None
        self._run_in_console = False
        self.enabled_plugins = {}
        self.available_commands = {}
        self.code = lib.CodeBuffer(
            project=self, record_history=True, code=None)
        self.code.connect('code-changed', self._on_code_changed)
        # Start from the default empty project.
        self.load_from_file(file=None)
    def load_from_file(self, file: Gio.File):
        """Load a project from *file*, or the default project when None.

        Raises CorruptedFileException on malformed entries (unless the file
        turns out to be a legacy v0/v1 project, which is converted instead).
        """
        if file is not None:
            # Reads the file
            file_dis = Gio.DataInputStream.new(file.read())
            source = file_dis.read_upto('\0', 1)[0]
            file_dis.close()
            # Parses the file
            project = lib.parse_tcp(source)
        else:
            project = DEFAULT_PROJECT.copy()
        # Reset variables
        self.props.run_in_console = False
        self.props.project_file = file
        # Load project
        # project_code contains only commands (without meta-info like plugin)
        error = None
        project_code = []
        # IDs of plugins that are requested to load
        plugin_ids = set(['base'])
        file_version = 0
        for cmd in project:
            if len(cmd) != 2:
                error = CorruptedFileException()
                continue
            id = cmd[0]
            data = cmd[1]
            if id == 'plugin':
                plugin_id = lib.Plugin.get_id_from_path(data)
                plugin_ids.add(plugin_id)
            elif id == 'fver':
                file_version = int(data)
                # Skips to conversion from Turtlico 0.x projects
                if file_version <= 1:
                    break
            elif id == 'fconsole':
                self.props.run_in_console = data == 'True'
            else:
                project_code.append(cmd)
        if file_version <= 1:
            # Legacy project: the converter supersedes any parse errors.
            error = None
            project_code, plugins, self.props.run_in_console = (
                legacy.tcp_1_to_2(
                    source, file_version)
            )
            plugin_ids.clear()
            plugin_ids.update(plugins)
        if error is not None:
            raise error
        enabled_plugin_paths = lib.Plugin.resolve_paths_from_ids(plugin_ids)
        # Clear the buffer before swapping plugins, then load the new code.
        self.code.load(None)
        self._reload_plugins(enabled_plugin_paths)
        self.code.load(project_code)
        self.props.changed = False
    def save(self) -> bool:
        """Save to the current project_file; requires a prior save_as/open."""
        if self.props.project_file is None:
            raise Exception(
                'Project has not been saved yet. Please use save_as instead.')
        return self.save_as(self.props.project_file)
    def save_as(self, file: Gio.File) -> bool:
        """Saves buffer to the file. The file will become current project_file.

        Args:
            file (Gio.File): The file

        Returns:
            bool: True if the project was saved successfully
        """
        output = []
        # Meta-info
        output.append(('fver', str(FILE_VERSION_FORMAT)))
        for p in self.enabled_plugins:
            # Base is enabled by default
            if p == 'base':
                continue
            output.append(('plugin', p))
        output.append((('fconsole'), str(self.props.run_in_console)))
        # Commands
        output.extend(self.code.save())
        self.props.project_file = file
        outs = file.replace(None, False, Gio.FileCreateFlags.NONE)
        content = lib.save_tcp(output)
        ok, bytes_written = outs.write_all(content.encode('utf-8'))
        if ok:
            self.props.changed = False
        return ok
    def _update_available_commands(self):
        # Rebuild the id -> Command map from every enabled plugin's
        # categories and notify listeners.
        self.available_commands.clear()
        for p in self.enabled_plugins.values():
            for c in p.categories:
                for cdefin in c.command_definitions:
                    self.available_commands[cdefin.id] = lib.Command(
                        None, cdefin)
        self.emit('available_commands_changed')
    def _reload_plugins(self, plugin_paths: list[str]):
        """Loads plugin from paths and updates available commands.

        Args:
            enabled_plugins (list[str]): Plugin paths to load.
            Last plugin in the collection has the highest priority
            If there are more commands with the same id then it's used
            the one from the last plugin.
        """
        self.enabled_plugins = lib.Plugin.get_from_paths(
            plugin_paths)
        self._update_available_commands()
    def set_plugin_enabled(self, plugin_id: str, enabled: bool):
        """Enable or disable one plugin and reload the plugin set.

        Raises RemovedUsedPluginException when disabling a plugin whose
        commands are still used by the current program.
        """
        assert isinstance(plugin_id, str)
        if enabled:
            plugin_ids = set(self.enabled_plugins.keys()).union({plugin_id})
        else:
            # Check for usage of commands contained in the plugin
            if plugin_id in self.enabled_plugins.keys():
                plugin = self.enabled_plugins[plugin_id]
                plugin_commands = []
                for category in plugin.categories:
                    for c in category.command_definitions:
                        plugin_commands.append(c)
                missing = set()
                for line in self.code.lines:
                    for c in line:
                        if c.definition in plugin_commands:
                            missing.update([c])
                if len(missing) > 0:
                    raise RemovedUsedPluginException(
                        missing, plugin
                    )
            plugin_ids = set(self.enabled_plugins.keys()) - {plugin_id}
        plugin_paths = lib.Plugin.resolve_paths_from_ids(plugin_ids)
        self._reload_plugins(plugin_paths)
    def get_command(self, id, data) -> tuple[lib.Command, bool]:
        # Returns (command, found); when data is set, a data-carrying copy
        # of the prototype command is returned.
        command = self.available_commands.get(id, None)
        if command is None:
            return (None, False)
        if not data:
            return (command, True)
        return (self.set_command_data(command, data), True)
    def get_definition_plugin(self,
                              command: lib.CommandDefinition
                              ) -> Union[lib.Plugin, None]:
        """Return the enabled plugin that provides *command*, or None."""
        for plugin in self.enabled_plugins.values():
            for c in plugin.categories:
                if command in c.command_definitions:
                    return plugin
        return None
    def set_command_data(self, command, data) -> lib.Command:
        # Empty data maps back to the shared prototype command.
        if not data:
            return self.available_commands.get(command.definition.id, None)
        return lib.Command(data, command.definition)
    def _on_code_changed(self, buffer):
        # Any code edit marks the project as having unsaved changes.
        self.props.changed = True
| saytamkenorh/turtlico | turtlico/lib/projectbuffer.py | projectbuffer.py | py | 8,869 | python | en | code | 3 | github-code | 13 |
"""
Given an array arr[] of length N and an integer X, the task is to find the
number of subsets with a sum equal to X.
Examples:
Input: arr[] = {1, 2, 3, 3}, X = 6
Output: 3
All the possible subsets are {1, 2, 3},
{1, 2, 3} and {3, 3}
Input: arr[] = {1, 1, 1, 1}, X = 1
Output: 4
"""
def count_subsets_with_sum(arr, sum):
    """Count the subsets of *arr* whose elements add up to *sum*.

    Classic 0/1-knapsack counting DP: dp[i][j] is the number of subsets of
    the first i elements summing to j.  Runs in O(len(arr) * sum) time.

    Args:
        arr: sequence of non-negative integers (duplicates allowed and
            counted as distinct elements).
        sum: target subset sum.  (The name shadows the builtin; kept for
            backward compatibility with existing callers.)

    Returns:
        The number of subsets; the empty subset counts for target 0.
    """
    # Fix: the table was previously seeded with False and relied on bool/int
    # coercion; seed it with proper integer zeros instead.
    dp = [[0] * (sum + 1) for _ in range(len(arr) + 1)]
    # Exactly one subset (the empty one) reaches sum 0 at every prefix.
    for i in range(len(arr) + 1):
        dp[i][0] = 1
    for i in range(1, len(arr) + 1):
        for j in range(1, sum + 1):
            if arr[i - 1] <= j:
                # Either include element i-1 or leave it out.
                dp[i][j] = dp[i - 1][j - arr[i - 1]] + dp[i - 1][j]
            else:
                dp[i][j] = dp[i - 1][j]
    return dp[len(arr)][sum]
def test():
    """Smoke-test count_subsets_with_sum against the documented examples."""
    cases = [
        (([1, 2, 3, 3], 6), 3),
        (([1, 1, 1, 1], 1), 4),
    ]
    for idx, (args, expected) in enumerate(cases, start=1):
        assert count_subsets_with_sum(*args) == expected, \
            f"Testcase {idx} failed."
if __name__ == "__main__":
test()
| sunank200/DSA | dynamicProgramming/0-1_knapsack/count_of_subset_with_sum_equal_to_sum.py | count_of_subset_with_sum_equal_to_sum.py | py | 1,023 | python | en | code | 0 | github-code | 13 |
from CDD2.iface.iWriter import writer
from CDD2.driver.driver import config
class bqWriter(writer):
    """Writer implementation that saves a Spark DataFrame to BigQuery.

    (Defect fixed: dataset metadata had been fused onto the trailing
    `.save()` line; the clean statement is restored here.)
    """

    def write(self, df):
        """Write *df* to the configured BigQuery table.

        Uses the spark-bigquery connector with a temporary GCS bucket for
        staging; connection settings come from the [DEFAULT] section of the
        driver config.  NOTE(review): the literal string "credentials"
        passed as the credentials option looks like a placeholder --
        verify it against the connector's expected value.
        """
        df.write.format("bigquery") \
            .option("temporaryGcsBucket", config.get("DEFAULT", "tempBucketPath")) \
            .option("table", config.get("DEFAULT", "targetTableName")) \
            .option("credentials", "credentials") \
            .option("project", config.get("DEFAULT", "gcpProjectId")) \
            .save()
"""
This module only contains functions that others modules call.
I moved them to a separate file because all modules use these functions,
and they can't call each other in a circle.
"""
import itertools
import math
import os
import numpy as np
def get_subclip_soundarray(wavio_oblect, start, end):
    """Return the slice of the sound array between *start* and *end* seconds."""
    rate = wavio_oblect.rate
    first, last = int(start * rate), int(end * rate)
    return wavio_oblect.data[first:last]
def str2error_message(msg):
    """Collapse *msg* onto one line: newlines removed, space runs squeezed."""
    flattened = msg.replace("\n", " ")
    return " ".join(flattened.split())
def read_bytes_from_wave(waveread_obj, start_sec, end_sec):
    """Return raw frame bytes of [start_sec, end_sec) from a wave reader.

    Frame positions are clamped to the stream length, and the reader's
    current position is saved and restored, so the call leaves
    *waveread_obj* exactly as it found it.
    """
    previous_pos, framerate = waveread_obj.tell(), waveread_obj.getframerate()
    start_pos = min(waveread_obj.getnframes(), math.ceil(framerate * start_sec))
    end_pos = min(waveread_obj.getnframes(), math.ceil(framerate * end_sec))
    waveread_obj.setpos(start_pos)
    rt_bytes = waveread_obj.readframes(end_pos - start_pos)
    # Restore the caller's read position before returning.
    waveread_obj.setpos(previous_pos)
    return rt_bytes
def input_answer(quetsion, answers_list, quit_options=["q", "Q"], attempts=10**10):
def list2str(option_list):
if not option_list:
return ""
if len(option_list) == 1:
return option_list[1]
return f"{', '.join(option_list[:-1])} or {option_list[-1]}"
addition = f" (options: {list2str(answers_list)}; {list2str(quit_options)} to quit)"
for i in range(attempts):
if i:
print(f"Cannot understand input '{answer}'. Available values is {addition}")
answer = input(quetsion + addition)
if answer in answers_list:
return answer
if answer in quit_options:
print("Quiting")
exit(0)
def v1timecodes_to_v2timecodes(v1timecodes, video_fps, length_of_video, default_output_fps=9 ** 9):
    """Convert v1 timecode ranges into per-frame v2 timestamps.

    :param v1timecodes: timecodes in v1 format
        [[start0, end0, fps0], [start1, end1, fps1], ...]
        (same as save_v1_timecodes_to_file), where start/end are in
        seconds and fps in frames per second.
    :param video_fps: frame rate of the source video.
    :param length_of_video: total number of frames in the video.
    :param default_output_fps: fps assumed for frames not covered by any
        range (huge default => near-zero inter-frame time).
    :return: v2 timecodes
        [timecode_of_0_frame, timecode_of_1st_frame, ...]; note that
        cumsum prepends a 0, so the result has length_of_video + 1 entries.
    """
    default_freq = 1 / default_output_fps / video_fps
    time_between_neighbour_frames = default_freq * np.ones(length_of_video, dtype=np.float64)
    for elem in v1timecodes:
        start_t, end_t = elem[0] * video_fps, elem[1] * video_fps
        # Clamp hack ("kostil"): keep range ends inside the video.
        start_t = min(start_t, length_of_video - 1)
        end_t = min(end_t, length_of_video - 1)
        # Frames inside the range tick at 1/fps of that range.
        time_between_neighbour_frames[round(start_t): round(end_t)] = 1 / elem[2]
        """
        tc[math.floor(start_t)] += (1 - start_t % 1) * (1 / elem[2] - default_freq)
        tc[math.floor(end_t)] += (end_t % 1) * (1 / elem[2] - default_freq)
        tc[math.floor(start_t) + 1: math.floor(end_t)] = 1 / elem[2]
        """
    timecodes = cumsum(time_between_neighbour_frames)  # local cumsum, not np.nancumsum
    # with open('v1timecodes.npy', 'wb') as f:
    #     np.save(f, v1timecodes)
    # print(f"rt[-1] = {rt[-1]}")
    return timecodes
def save_v2_timecodes_to_file(filepath, timecodes):
    """Write per-frame timestamps to *filepath* in mkvmerge v2 format.

    :param filepath: path to file for saving
    :param timecodes: per-frame timestamps in seconds (written in ms)
    :return: the (closed) file object
    """
    with open(filepath, "w") as file:
        file.write("# timestamp format v2\n")
        file.write("\n".join(format(stamp * 1000, "f") for stamp in timecodes))
    return file
def save_v1_timecodes_to_file(filepath, timecodes, videos_fps, default_fps=10 ** 10):
    """Write [[start, end, fps], ...] ranges to *filepath* in mkvmerge v1 format.

    :param filepath: path of the file for saving
    :param timecodes: ranges with start/end in seconds and an fps value
    :param videos_fps: float fps used to convert seconds to frame numbers
    :param default_fps: fps of frames outside every range
    :return: the (closed) file object
    """
    with open(filepath, "w") as file:
        file.write("# timecode format v1\n")
        file.write(f"assume {default_fps}\n")
        for start, end, fps in timecodes:
            frame_range = (int(start * videos_fps), int(end * videos_fps), fps)
            file.write(",".join(str(value) for value in frame_range) + "\n")
    return file
def cumsum(n1array):
    """Cumulative sums of *n1array*, prefixed with a leading 0.

    Note: unlike np.nancumsum, the result has len(n1array) + 1 entries
    (starting at 0); the timecode generation relies on this shape.
    """
    running = itertools.accumulate(n1array.tolist())
    return np.array([0, *running])
def ffmpeg_atempo_filter(speed):
    """Build an ffmpeg "-af" atempo filter argument for the given speed.

    ffmpeg's atempo filter historically only accepts factors in [0.5, 2.0],
    so larger/smaller speeds are decomposed into a chain, e.g. speed 4 ->
    "-af atempo=2.0,atempo=2.0" -- which is what the original docstring
    promised but the implementation (a single unbounded factor) did not do.

    :param speed: positive playback-speed multiplier
    :return: string argument for ffmpeg, e.g. "-af atempo=1.25"
    :raises ValueError: if speed is not positive
    """
    if speed <= 0:
        raise ValueError(f"ffmpeg speed {speed} must be positive")
    factors = []
    remaining = float(speed)
    # Peel off 2.0 (or 0.5) factors until the remainder fits atempo's range.
    while remaining > 2.0:
        factors.append(2.0)
        remaining /= 2.0
    while remaining < 0.5:
        factors.append(0.5)
        remaining *= 2.0
    factors.append(remaining)
    return "-af " + ",".join(f"atempo={factor}" for factor in factors)
| mishadobrits/SVA4 | some_functions.py | some_functions.py | py | 5,229 | python | en | code | 3 | github-code | 13 |
from dbac_lib import dbac_util, dbac_data, dbac_primitives, dbac_feature_ext
import numpy as np
import logging
from sklearn.metrics import average_precision_score, precision_recall_fscore_support
logger = logging.getLogger(__name__)
def _learn_primitives(db_name, db_dir, split_file, prim_rpr_file, ex_size=10, num_ex=10, subset_prim_ids=None,
                      kwargs_str=None):
    """Train exemplar-SVM representations for primitives and save them to disk.

    :param db_name: dataset name (one of dbac_data.DB_NAMES)
    :param db_dir: dataset root directory
    :param split_file: path to the train/val/test split json file
    :param prim_rpr_file: output path for the learned representations (.npy)
    :param ex_size: number of positive samples per exemplar
    :param num_ex: number of exemplars per primitive
    :param subset_prim_ids: optional subset of primitive ids; defaults to all
        valid primitives of the dataset
    :param kwargs_str: extra options formatted as "k1=v1; k2=v2; ..."
    """
    # processing kwargs
    kwargs_dic = dbac_util.get_kwargs_dic(kwargs_str)
    logger.info("Kwargs dictionary: {}".format(kwargs_dic))
    # read dataset and partitions
    logger.info("Reading dataset and split")
    db = dbac_data.IDataset.factory(db_name, db_dir)
    db.load_split(split_file)
    # Only the train partition is used for learning.
    train_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    train_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    # select subset of primitives
    if subset_prim_ids is None:
        subset_prim_ids = np.where(db.valid_primitives)[0].tolist()
    logger.info("Selected Primitives: {}".format(subset_prim_ids))
    # set up feature extractor function
    logger.info("Configuring Features Extractor")
    feat_extractor = dbac_feature_ext.IFeatureExtractor.factory(dbac_feature_ext.FEAT_TYPE[1], **kwargs_dic)
    feat_extractor.load()
    # Learning exemplar SVMS for primitives
    prims = dbac_primitives.IPrimitiveCollection.factory(dbac_primitives.PRIMITIVE_TYPES[0], **kwargs_dic)
    logger.info("Learning Primitives...")
    prims.learn(train_imgs_path, train_labels, feat_extractor,
                num_ex=num_ex, ex_size=ex_size, prim_ids=subset_prim_ids, **kwargs_dic)
    prims.save(prim_rpr_file)
    logger.info("Primitives saved to {}.".format(prim_rpr_file))
def _test_primitives(db_name, db_dir, split_file, prim_rpr_file, subset_prim_ids=None, kwargs_str=None):
    """Evaluate learned primitive classifiers on the train/val/test splits.

    Saves a report dictionary (primitive ids, image paths, ground truth and
    calibrated scores per split) next to `prim_rpr_file` as '<stem>.results.npy'.

    :param db_name: dataset name (one of dbac_data.DB_NAMES)
    :param db_dir: dataset root directory
    :param split_file: path to the train/val/test split json file
    :param prim_rpr_file: .npy file with learned primitive representations
    :param subset_prim_ids: optional subset of primitive ids to evaluate;
        intersected with the ids actually present in the collection
    :param kwargs_str: extra options formatted as "k1=v1; k2=v2; ..."
    """
    # The module never imports os at top level (it is only imported inside the
    # __main__ block), so import it here to make this function usable when the
    # module is imported rather than run as a script.
    import os
    # processing kwargs
    kwargs_dic = dbac_util.get_kwargs_dic(kwargs_str)
    logger.info("Kwargs dictionary: {}".format(kwargs_dic))
    # read dataset and partitions
    logger.info("Reading dataset and split")
    db = dbac_data.IDataset.factory(db_name, db_dir)
    db.load_split(split_file)
    train_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    train_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    test_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('test')]
    test_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('test')]
    val_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('val')]
    val_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('val')]
    # set up feature extractor function
    logger.info("Configuring Features Extractor")
    feat_extractor = dbac_feature_ext.IFeatureExtractor.factory(dbac_feature_ext.FEAT_TYPE[1], **kwargs_dic)
    feat_extractor.load()
    # Load the previously learned exemplar SVMs for primitives.
    prims = dbac_primitives.IPrimitiveCollection.factory(dbac_primitives.PRIMITIVE_TYPES[0], **kwargs_dic)
    logger.info("Loading Primitive collection")
    prims.load(prim_rpr_file)
    # select subset of primitives
    if subset_prim_ids is None:
        subset_prim_ids = prims.get_ids()
    else:
        subset_prim_ids = list(set(subset_prim_ids).intersection(set(prims.get_ids())))
    logger.info("Selected Primitives: {}".format(subset_prim_ids))
    # test primitives on every partition
    report_dic = dict()
    for key, images, labels in zip(['train', 'val', 'test'], [train_imgs_path, val_imgs_path, test_imgs_path],
                                   [train_labels, val_labels, test_labels]):
        logger.info("Testing partition: {}".format(key))
        images_feats = feat_extractor.compute(images)
        # considering uncalibrated scores
        #rprs = np.vstack([prims.get_rpr(pid)[0] for pid in subset_prim_ids])
        #scores = rprs[:, 0].reshape((-1, 1)) + np.dot(rprs[:, 1:], images_feats.T)
        # considering calibrated scores
        scores = np.vstack([prims.get_cls(pid)[0].predict_proba(images_feats)[:, 1] for pid in subset_prim_ids])
        # fill report dictionary
        assert scores.shape == labels[:, subset_prim_ids].T.shape
        report_dic['_'.join([key, 'exps'])] = subset_prim_ids
        report_dic['_'.join([key, 'imgs'])] = images
        report_dic['_'.join([key, 'gt'])] = labels[:, subset_prim_ids].T
        report_dic['_'.join([key, 'pred'])] = scores
    result_file = "{}.results.npy".format(os.path.splitext(prim_rpr_file)[0])
    np.save(result_file, report_dic)
    logger.info("Results file saved to {}.".format(result_file))
if __name__ == '__main__':
    import argparse
    from datetime import datetime
    import os
    # Command-line front end: `learn` trains primitive representations,
    # `test` evaluates a previously saved collection on all splits.
    parser = argparse.ArgumentParser(description="Script to Learn Primitives Representation.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers(title='commands', dest='cmd_name', help='additional help')
    # parser for learning
    parser_learn = subparsers.add_parser('learn', help='Learn primitives representation',
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_learn.add_argument('db_name', type=str, help='Name of the dataset.', choices=dbac_data.DB_NAMES)
    parser_learn.add_argument('db_dir', type=str, help='Path to the dataset main directory.')
    parser_learn.add_argument('split_file', type=str, help='Path to the split json file.')
    parser_learn.add_argument('file_name', type=str, help='Path to the output file .npy .')
    parser_learn.add_argument('-ex_size', default=10, type=int, help='Number of positive samples per exemplar.')
    parser_learn.add_argument('-num_ex', default=10, type=int, help='Number of exemplars per primitive.')
    parser_learn.add_argument('-gpu_str', default='0', type=str, help='CUDA_VISIBLE_DEVICES')
    parser_learn.add_argument('-prim_subset_ids', nargs='*', default=None, type=int, help='Subset of primitives.')
    parser_learn.add_argument('-kwargs', type=str, default=None, help="Kwargs for the feature extractor k1=v1; k2=v2; ...")
    # parser for test
    parser_test = subparsers.add_parser('test', help='Test primitives representation',
                                        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_test.add_argument('db_name', type=str, help='Name of the dataset.', choices=dbac_data.DB_NAMES)
    parser_test.add_argument('db_dir', type=str, help='Path to the dataset main directory.')
    parser_test.add_argument('split_file', type=str, help='Path to the split json file.')
    parser_test.add_argument('file_name', type=str, help='Path to the output file .npy .')
    parser_test.add_argument('-gpu_str', default='0', type=str, help='CUDA_VISIBLE_DEVICES')
    parser_test.add_argument('-prim_subset_ids', nargs='*', default=None, type=int, help='Subset of primitives.')
    parser_test.add_argument('-kwargs', type=str, default=None, help="Kwargs for the feature extractor k1=v1; k2=v2; ...")
    args = parser.parse_args()
    # Restrict visible GPUs before any CUDA-backed library initializes.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_str
    if args.cmd_name == 'learn':
        # Timestamped log file next to the output .npy.
        log_file = "{}_{}.learn.log".format(os.path.splitext(args.file_name)[0], datetime.now().strftime("%Y%m%d-%H%M%S"))
        dbac_util.init_logging(log_file)
        logger.info(args)
        _learn_primitives(args.db_name, args.db_dir, args.split_file, args.file_name, args.ex_size, args.num_ex,
                          args.prim_subset_ids, args.kwargs)
    elif args.cmd_name == 'test':
        log_file = "{}_{}.test.log".format(os.path.splitext(args.file_name)[0],
                                           datetime.now().strftime("%Y%m%d-%H%M%S"))
        dbac_util.init_logging(log_file)
        logger.info(args)
        _test_primitives(args.db_name, args.db_dir, args.split_file, args.file_name, args.prim_subset_ids, args.kwargs)
    else:
        raise ValueError('Not well formatted command line arguments. Parsed arguments {}'.format(args))
| rfsantacruz/neural-algebra-classifiers | src/dbac_learn_primitives.py | dbac_learn_primitives.py | py | 8,062 | python | en | code | 3 | github-code | 13 |
# Count the digits of a number and raise it to a power.
num = int(input('Enter the number : '))
exponent = int(input('Enter exponent value : '))
count = 0
power = 1
copy = abs(num)  # work on the magnitude so a minus sign is not counted
if copy == 0:
    count = 1  # 0 is a one-digit number
while copy:
    # The original did `copy % 10` and discarded the result, so `copy`
    # never changed and the loop never terminated. Dropping the last
    # digit each pass counts the digits correctly.
    copy //= 10
    count += 1
for i in range(1, exponent + 1):
    # Multiply `num` in once per step: after the loop, power == num ** exponent.
    # (The original assigned `copy * exponent` every pass, which is not a power.)
    power = power * num
print(f'{num} has {count} number of digits')
print(f'{num} power {exponent} = {power}')
print(f'{num} power {exponent} = {power}') | Jayabhaskarreddy98/python_practice | while_loops/count_and_power_of_number.py | count_and_power_of_number.py | py | 313 | python | en | code | 1 | github-code | 13 |
22526990773 | import asyncio
import os
from pprint import pprint
import nest_asyncio
from pyppeteer import launch
from pyppeteer_stealth import stealth
nest_asyncio.apply()
# Names of the environment variables holding the proxy-API credentials.
API_KEY = "API_KEY"
API_USER = "API_USER"
API_URL = "API_URL"


def get_proxy_auth() -> dict:
    """Read the proxy-API credentials from the environment.

    :return: dict mapping each credential name to its environment value,
        or None for any variable that is not set
    """
    credential_names = (API_KEY, API_USER, API_URL)
    return {name: os.environ.get(name) for name in credential_names}
class Scraper:
    """Thin wrapper around a stealth pyppeteer page with scraping helpers."""
    def __init__(self, launch_options: dict) -> None:
        # Browser/page are created lazily in goto().
        self.page = None
        self.browser = None
        self.options = launch_options.get("options")
        self.viewPort = launch_options.get("viewPort")
        self.proxy_auth = get_proxy_auth()
    async def goto(self, url: str) -> None:
        """Launch the browser, prepare a stealth page and navigate to `url`."""
        self.browser = await launch(options=self.options)
        self.page = await self.browser.newPage()
        # add proxy auth
        # await self.page.authenticate(
        #     {
        #         'username': self.proxy_auth.get(PROXY_USER),
        #         'password': self.proxy_auth.get(PROXY_API_KEY)
        #     }
        # )
        # Spoof a desktop Safari user agent.
        await self.page.setUserAgent(
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15",
        )
        # make scraper stealth
        await stealth(self.page)
        await self.page.setViewport(
            self.viewPort) if self.viewPort is not None else print(
            "[i] using default viewport")
        await self.page.goto(url)
        # wait for specific time (fixed 6s settle period after navigation)
        await self.page.waitFor(6000)
        # wait for element to appear
        # await self.page.waitForSelector('h1', {'visible': True})
        # click a button
        # link = await self.page.querySelector("h1")
        # await link.click()
        # Scroll To Bottom
        # await self.page.evaluate(
        #     """{window.scrollBy(0, document.body.scrollHeight);}"""
        # )
        # take a screenshot
        # await self.page.screenshot({'path': 'screenshot.png'})
    async def get_full_content(self) -> str:
        """Return the page's full rendered HTML."""
        content = await self.page.content()
        return content
    async def type_value(self, selector: str, value: str) -> None:
        """
        Write value to input field
        :param selector: CSS selector of the input element
        :param value: text to type into it
        :return: None
        """
        element = await self.page.querySelector(selector)
        await element.type(value)
    async def extract_many(self, selector: str, attr: str) -> list:
        """
        Select and return a list of elements using queryAll
        :param selector: CSS selector matching the elements
        :param attr: element property to read (e.g. "textContent")
        :return: list of the property's JSON values, one per element
        """
        result = []
        elements = await self.page.querySelectorAll(selector)
        for element in elements:
            text = await element.getProperty(attr)
            result.append(await text.jsonValue())
        return result
    async def extract_one(self, selector: str, attr: str) -> str:
        """
        Locate a single element using querySelector
        :param selector: CSS selector of the element
        :param attr: element property to read (e.g. "textContent")
        :return: the property's JSON value
        """
        element = await self.page.querySelector(selector)
        text = await element.getProperty(attr)
        return await text.jsonValue()
async def run(proxy: str = None, port: int = None) -> None:
    """Demo entry point: scrape a target URL and print selected elements.

    :param proxy: optional proxy host (only used by the commented-out args)
    :param port: optional proxy port (same)
    """
    # define launch option
    launch_options = {
        "options": {
            "headless": False,
            "autoClose": False,
            "args": [
                "--no-sandbox",
                # "--disable-setuid-sandbox",  # security issue
                "--disable-notifications",
                "--start-maximized",
                # f"--proxy-server={p.get('ip')}:{p.get('port')}"
                # f"--proxy-server={proxy}:{port}"
                # set a proxy server
                # have to add
                # await page.authenticate({'username': 'user', 'password': 'password'})
                # after await browser.newPage()
            ],
            "ignoreDefaultArgs": ["--disable-extensions", "--enable-automation"]
        },
        "viewPort": {
            "width": 1600,
            "height": 900
        }
    }
    # Initialize the new scraper
    scraper = Scraper(launch_options)
    # Navigate to the target
    target_url = "https://hotels.com/ho237271/simba-run-condos-2bed-2bath-vail-united-states-of-america/"
    # target_url = "https://quotes.toscrape.com/"
    # If a scraping-API endpoint is configured, route the request through it.
    if scraper.proxy_auth.get(API_URL):
        target_url = f"{scraper.proxy_auth.get(API_URL)}/?api_key={scraper.proxy_auth.get(API_KEY)}&url=" + target_url
        # target_url = f"https://api.webscrapingapi.com/v1/?api_key={scraper.proxy_auth.get(PROXY_API_KEY)}&url=" + target_url
    pprint(f"Navigate to: {target_url}")
    await scraper.goto(target_url)
    # Type "this is me" inside the input box
    # pprint("Type 'this is me' inside the input box")
    # await scraper.type_value("#fish", "this is me")
    # Scrape the entire page
    # pprint("Scrape entire page")
    # content = await scraper.get_full_content()
    # print(content)
    # Scrape one single element
    pprint("Scrape one single element")
    elem = await scraper.extract_one("h1", "textContent")
    print(elem)
    # Scrape multiple elements
    pprint("Scrape multiple elements")
    # NOTE(review): "li[role=listitem" is missing the closing "]" -- confirm
    # the selector works as intended before relying on it.
    elems = await scraper.extract_many("li[role=listitem", "textContent")
    print(elems)
    # Execute javascript
    # content = await page.evaluate(
    #     'document.body.textContent', force_expr=True)
if __name__ == '__main__':
    # Drive the async scraper to completion on the default event loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run())
| zhou-en/pyppeteer-scraper | scraper.py | scraper.py | py | 5,662 | python | en | code | 0 | github-code | 13 |
39556517281 | from imagekit.specs import ImageSpec
from imagekit import processors
from PIL import ImageOps
import Image as PILImage
# Helper functions
def make_linear_ramp(white):
    """Build a 256-entry linear palette ramping from black to `white`.

    putpalette expects a flat [r, g, b, r, g, b, ...] sequence of ints.

    :param white: (r, g, b) tuple for the brightest palette entry
    :return: list of 768 ints
    """
    ramp = []
    r, g, b = white
    # range(256): a palette has 256 entries; the original's range(255) left
    # index 255 zero-padded (black), turning the brightest pixels black.
    # Integer division keeps the values ints as putpalette requires (true
    # division would yield floats on Python 3).
    for i in range(256):
        ramp.extend((r * i // 255, g * i // 255, b * i // 255))
    return ramp
def do_black_and_white(img):
    """Return a grayscale copy of `img`, converted back to its original mode.

    The original blended the grayscale copy with `img` at alpha 0.0, which
    by definition returns the first image unchanged; the no-op blend is
    dropped.
    """
    return img.convert('L').convert(img.mode)
def do_sepia(img):
    """Apply a sepia tone: grayscale, autocontrast, then a sepia palette."""
    sepia = make_linear_ramp((255, 240, 192))
    if img.mode != "L":
        img = img.convert("L")
    # Stretch the tonal range so the full palette is used.
    img = ImageOps.autocontrast(img)
    img.putpalette(sepia)
    # Back to RGB so downstream code gets a normal color image.
    img = img.convert("RGB")
    return img
# Processors
class ResizeThumb(processors.Resize):
    # 150x150 cropped thumbnail.
    width = 150
    height = 150
    crop = True
class ResizeFilterDisplay(processors.Resize):
    # 200x200 preview (no crop) used to display filter choices.
    width = 200
    height = 200
class BAWer(processors.ImageProcessor):
    # Applies the black-and-white conversion.
    @classmethod
    def process(cls, img, fmt, obj):
        img = do_black_and_white(img)
        return img, fmt
class Sepiaer(processors.ImageProcessor):
    # Applies the sepia-tone conversion.
    @classmethod
    def process(cls, img, fmt, obj):
        img = do_sepia(img)
        return img, fmt
# Image Specs: each spec exposes a processed variant of the source image
# under the attribute named by `access_as`.
class OriginalFilter(ImageSpec):
    # Full-size image with no processing applied.
    access_as = 'original_filter'
class BlackAndWhite(ImageSpec):
    access_as = 'black_and_white'
    processors = [BAWer]
class Sepia(ImageSpec):
    access_as = 'sepia'
    processors = [Sepiaer]
class Thumbnail(ImageSpec):
    processors = [ResizeThumb]
class OriginalFilterThumbnail(ImageSpec):
    # Thumbnail of the unfiltered original.
    access_as = 'original_filter_thumbnail'
    processors = [ResizeThumb]
| kevinatienza/CodeSSIU | imageupload/core/ikspecs.py | ikspecs.py | py | 1,473 | python | en | code | 4 | github-code | 13 |
70958962897 | import pygame, sys, random
import model
import view
class EventController:
    """Polls pygame events each frame and applies them to the active player."""
    #Variables that keep track of the model and view class.
    model = ""
    view = ""
    def __init__(self, model, view):
        self.model = model
        self.view = view
    def input(self):
        """Process one frame of input: quit, power-up timers, jump, movement."""
        # NOTE(review): `font` is created but never used -- confirm it can go.
        font = pygame.font.SysFont(None, 20)
        #player = model.Player()
        player = model.Level.actualPlayer
        if player.lives <= 0:
            # NOTE(review): `self` here is the controller, not a View
            # instance; View.lose receives the controller as its first
            # argument -- confirm this is intended.
            view.View.lose(self)
            #pygame.quit()
            #sys.exit()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            # Timer events restore the default sprite and clear the
            # corresponding power-up / hurt state.
            if event.type == model.JumpPowerUp.TIME:
                player.image = pygame.image.load("sprites/player/squidknight.png")
                player.jump_speed = -10
            if event.type == model.GhostPowerUp.TIME:
                player.image = pygame.image.load("sprites/player/squidknight.png")
                player.ghosting = False
            if event.type == model.TileDanger.HURT:
                player.image = pygame.image.load("sprites/player/squidknight.png")
                player.hurt = False
            if event.type == model.LifePowerUp.TIME:
                player.image = pygame.image.load("sprites/player/squidknight.png")
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    # Record when space was pressed; release computes hold time.
                    player.space_start_time = pygame.time.get_ticks()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_SPACE:
                    player.space_hold_time = pygame.time.get_ticks() - player.space_start_time
                    if (not player.jumping):
                        #Set jumping to True
                        player.jumping = True
                        player.jump()
        # Horizontal movement from the currently held keys (d=right, a=left).
        keys = pygame.key.get_pressed()
        if keys[pygame.K_d]:
            player.direction.x = 1
        elif keys[pygame.K_a]:
            player.direction.x = -1
        else:
            player.direction.x = 0
| DonNamTran/Squid-Knight | eventController.py | eventController.py | py | 2,160 | python | en | code | 0 | github-code | 13 |
# Local development configuration for the quote poller.

# Filesystem root where real-time quote data is persisted.
# data_local_storage_filepath = '/home/zem/labs/trading-project/rt-persistence'
data_local_storage_filepath = '/home/zembrzuski/labs/the-trading-project/rt-persistence'
# Elasticsearch endpoint used for indexing.
elasticsearch_address = 'http://localhost:9200'
# Template placeholders: company_code, from_epoch, to_epoch, crumb
yahoo_historical_url = \
    'https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history&crumb={}'
# Quote-polling endpoint; the ticker list is substituted into the "{}".
# Fixed: the original contained "lang=en-US(R)ion=US" where the "&reg" of
# "&region" had been HTML-entity-decoded into a registered-trademark sign,
# corrupting the region query parameter.
yahoo_url_for_polling = "https://query1.finance.yahoo.com/v7/finance/quote?formatted=true&crumb=U4e8eDQi%2FyI&" \
                        "lang=en-US&region=US&symbols=" \
                        "{}" \
                        "&fields=messageBoardId%2ClongName%2CshortName%2CmarketCap%2CunderlyingSymbol%2CunderlyingExchangeSymbol%2CheadSymbolAsString%2CregularMarketPrice%2CregularMarketChange%2CregularMarketChangePercent%2CregularMarketVolume%2Cuuid%2CregularMarketOpen%2CfiftyTwoWeekLow%2CfiftyTwoWeekHigh&corsDomain=finance.yahoo.com"
# How many tickers are requested per polling call.
chunk_size = 10
# B3 (Bovespa) tickers polled for quotes; ".SA" is Yahoo Finance's
# Sao Paulo exchange suffix.
companies = ["ABEV3.SA",
             "B3SA3.SA",
             "BBAS3.SA",
             "BBDC3.SA",
             "BBDC4.SA",
             "BBSE3.SA",
             "BRAP4.SA",
             "BRDT3.SA",
             "BRFS3.SA",
             "BRKM5.SA",
             "BRML3.SA",
             "BTOW3.SA",
             "CCRO3.SA",
             "CIEL3.SA",
             "CMIG4.SA",
             "CSAN3.SA",
             "CSNA3.SA",
             "CVCB3.SA",
             "CYRE3.SA",
             "ECOR3.SA",
             "EGIE3.SA",
             "ELET3.SA",
             "ELET6.SA",
             "EMBR3.SA",
             "ENBR3.SA",
             "EQTL3.SA",
             "ESTC3.SA",
             "FLRY3.SA",
             "GGBR4.SA",
             "GOAU4.SA",
             "GOLL4.SA",
             "HYPE3.SA",
             "IGTA3.SA",
             "ITSA4.SA",
             "ITUB4.SA",
             "JBSS3.SA",
             "KLBN11.SA",
             "KROT3.SA",
             "LAME4.SA",
             "LOGG3.SA",
             "LREN3.SA",
             "MGLU3.SA",
             "MRFG3.SA",
             "MRVE3.SA",
             "MULT3.SA",
             "NATU3.SA",
             "PCAR4.SA",
             "PETR3.SA",
             "PETR4.SA",
             "QUAL3.SA",
             "RADL3.SA",
             "RAIL3.SA",
             "RENT3.SA",
             "SANB11.SA",
             "SBSP3.SA",
             "SMLS3.SA",
             "SUZB3.SA",
             "TAEE11.SA",
             "TIMP3.SA",
             "UGPA3.SA",
             "USIM5.SA",
             "VALE3.SA",
             "VIVT4.SA",
             "VVAR3.SA",
             "WEGE3.SA"]
| zembrzuski/finance_poller | src/config/local.py | local.py | py | 2,598 | python | en | code | 1 | github-code | 13 |
import arcpy

# Export each city's name and coordinates from Cities.shp to a CSV-style
# text file, one "name,x,y" line per feature.
arcpy.env.overwriteOutput = True
arcpy.env.workspace = "D:/Lesson6_Data"

fc = "D:/Lesson6_Data/Cities.shp"
fieldList = ["NAME", "SHAPE@XY"]
cipath = 'D:/Lesson6_Data/cities.txt'

# `with` guarantees the output file and the cursor's locks are released
# even if processing a row fails (the original left both open on error).
with open(cipath, "w") as ciFile, arcpy.da.SearchCursor(fc, fieldList) as cursor:
    for row in cursor:
        Name = row[0]
        X, Y = row[1]
        ciFile.write(str(Name) + "," + str(X) + "," + str(Y) + "\n")

print('completed')
| Daviey52/GIS-Python-programming | Geometries02/geometries.py | geometries.py | py | 417 | python | en | code | 0 | github-code | 13 |
40726715894 | from app.server import server
from flask import jsonify
from app.server.check_service import check_database
from datetime import datetime, timedelta, timezone
from flask import current_app, request
@server.route('/info')
def server_status():
    """Get DB and email status
    Returns:
        json: {
            update_time:UTC+8 ISO 8601
            data:service,isAlive,description
        }
    """
    database = check_database()
    # datetime.utcnow() is naive, so .astimezone() would interpret it in the
    # server's *local* timezone before converting -- wrong on non-UTC hosts.
    # Build an aware UTC+8 timestamp directly instead.
    update_time = datetime.now(
        timezone(offset=timedelta(hours=8))).isoformat()
    info = {"update_time": update_time,
            "data": [database]
            }
    log = {"ip": request.remote_addr, "data": info, "api": request.path}
    current_app.logger.info(log)
    return jsonify(info), 200
@server.route('/echo', methods=['GET', 'POST'])
def echo():
    """Echo back the request's query parameters and JSON body."""
    query_params = request.args
    body = request.get_json()
    return jsonify(echo=(query_params, body))
| RainMeoCat/CipherAirSig | backend/app/server/routes.py | routes.py | py | 953 | python | en | code | 0 | github-code | 13 |
15799838965 | #!/usr/bin/python3
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
class turtlesim:
    """Drives the turtlesim turtle through full circles via /turtle1/cmd_vel."""
    #Initialization
    def __init__(self):
        rospy.init_node('node_turtle_revolve', anonymous=True)
        self.velocity_publisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber('/turtle1/pose', Pose, self.poseCallback)
        self.pose = Pose()
    #Subscriber Callback
    def poseCallback(self, data):
        # Round to one decimal so the equality tests in move_circle can match.
        self.pose.theta = round(data.theta, 1)
        rospy.loginfo("Theta = %f\n", self.pose.theta)
    def move_circle(self, direction):
        """
        @brief publishes velocity to make turtle move in circle
        @param direction -1 to move backward +1 to move forward
        """
        velocity_msg = Twist()
        velocity_msg.linear.x = 2
        velocity_msg.angular.z = abs(2/2)*direction
        rate = rospy.Rate(10)
        count = 0
        angle = 3.10
        # NOTE(review): exact float comparison against the rounded pose angle
        # relies on poseCallback's round(..., 1) producing exactly 3.1 / -0.0
        # at a sampled instant -- fragile; confirm on the target setup.
        while not rospy.is_shutdown():
            self.velocity_publisher.publish(velocity_msg)
            rate.sleep()
            if self.pose.theta == angle:
                # Passed the half/full-circle mark; arm the next checkpoint.
                angle = -0.0
                count = count + 1
                continue
            elif count == 2:
                break
        # Stop the turtle once the revolution completes.
        velocity_msg.linear.x = 0
        velocity_msg.angular.z = 0
        self.velocity_publisher.publish(velocity_msg)
        rospy.loginfo("Complete")
        rate.sleep()
if __name__ == "__main__":
    try:
        # One backward revolution, then one forward revolution.
        x = turtlesim()
        x.move_circle(-1)
        x.move_circle(1)
    except rospy.ROSInterruptException:
        # Node was shut down (e.g. Ctrl-C) -- exit quietly.
        pass
| RoopanJK/Eyantra-AgriBot | src/pkg_task0/scripts/node_turtle_revolve.py | node_turtle_revolve.py | py | 1,646 | python | en | code | 0 | github-code | 13 |
15520805075 | from kafka import KafkaConsumer
def listen():
    """Consume SF police-department call events from Kafka and print each one."""
    consumer = KafkaConsumer("sf.police.department.calls",
                             bootstrap_servers=["localhost:9092"],
                             client_id="sf-crime-consumer"
                             )
    # Blocks indefinitely, yielding messages as they arrive.
    for message in consumer:
        print(f"{message.topic}:{message.offset}:\nkey={message.key} value={message.value}")
if __name__ == "__main__":
    listen()
| maribowman/data-streaming | sf_crime_statistics/consumer_server.py | consumer_server.py | py | 428 | python | en | code | 0 | github-code | 13 |
28987233010 | # coding:utf-8
from PyQt5.QtWidgets import QApplication,QMainWindow,QWidget
from untitled_1 import Ui_MainWindow
from untitled_2 import Ui_Form
import sys
class Example(QMainWindow,Ui_MainWindow):
    """Main window that embeds a child page when the add-window action fires."""
    def __init__(self):
        super(Example,self).__init__()
        self.setupUi(self)
        self.children = Children()
        # Show the child widget inside this window's grid layout on trigger.
        self.addWinAction.triggered.connect(self.childrenShow)
    def childrenShow(self):
        self.gridLayout.addWidget(self.children)
        self.children.show()
class Children(QWidget,Ui_Form):
    """Child page widget built from the Ui_Form design."""
    def __init__(self):
        super(Children,self).__init__()
        self.setupUi(self)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the window, run the loop.
    app = QApplication(sys.argv)
    win = Example()
    win.show()
    sys.exit(app.exec())
39657779742 | import wx.lib.wxcairo as wxcairo
from .. import _api
from .backend_cairo import cairo, FigureCanvasCairo
from .backend_wx import _BackendWx, _FigureCanvasWxBase, FigureFrameWx
from .backend_wx import ( # noqa: F401 # pylint: disable=W0611
NavigationToolbar2Wx as NavigationToolbar2WxCairo)
@_api.deprecated(
    "3.6", alternative="FigureFrameWx(..., canvas_class=FigureCanvasWxCairo)")
class FigureFrameWxCairo(FigureFrameWx):
    # Deprecated shim: frames should be built with canvas_class instead.
    def get_canvas(self, fig):
        return FigureCanvasWxCairo(self, -1, fig)
class FigureCanvasWxCairo(FigureCanvasCairo, _FigureCanvasWxBase):
    def draw(self, drawDC=None):
        """Render the figure with cairo and blit the result into wx."""
        size = self.figure.bbox.size.astype(int)
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)
        self._renderer.set_context(cairo.Context(surface))
        self._renderer.dpi = self.figure.dpi
        self.figure.draw(self._renderer)
        # Convert the finished cairo surface into a wx bitmap for display.
        self.bitmap = wxcairo.BitmapFromImageSurface(surface)
        self._isDrawn = True
        self.gui_repaint(drawDC=drawDC)
@_BackendWx.export
class _BackendWxCairo(_BackendWx):
    # Registers this canvas as the "wxcairo" backend.
    FigureCanvas = FigureCanvasWxCairo
| cautionlite32/data-science | lib/matplotlib/backends/backend_wxcairo.py | backend_wxcairo.py | py | 1,104 | python | en | code | 0 | github-code | 13 |
20065697368 | from flask import Flask, jsonify, request, send_from_directory, render_template
import requests
requests.packages.urllib3.disable_warnings()
from pytrends.request import TrendReq
pytrends = TrendReq(hl='en-US', tz=360)
app = Flask(__name__, static_url_path='')
@app.route('/')
def root():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/<path:path>')
def static_proxy(path):
    """Serve static assets from the app's static folder."""
    # send_static_file will guess the correct MIME type
    return app.send_static_file(path)
@app.route('/get_trends', methods=['GET'])
def get_trends():
    """Return Google-Trends interest-over-time for ';'-separated keywords.

    Query string: ?q=kw1;kw2;...
    Response JSON: {"dates": [...], "interests": {keyword: [values]}}
    """
    keywords = request.args.get('q')
    print(keywords)
    keywords = keywords.strip().split(";")
    # build_payload mutates the shared pytrends session and returns None,
    # so (unlike the original) its result is not captured.
    pytrends.build_payload(keywords, cat=0, timeframe='all', geo='', gprop='')
    df = pytrends.interest_over_time()
    # Keep only the date part of each timestamp index entry.
    dates = [str(stamp).split()[0] for stamp in df.index.tolist()]
    interests = {keyword: df[keyword].tolist() for keyword in keywords}
    return jsonify({"dates": dates, "interests": interests})
if __name__ == '__main__':
    # Debug server for local development only.
    app.run(debug=True)
| spMohanty/SoniTrends | app.py | app.py | py | 1,052 | python | en | code | 0 | github-code | 13 |
34894300929 | import ipdt.player
class Player(ipdt.player.Player):
    """Tit-for-Tat: cooperate first, then mirror the opponent's last move."""

    name = "Tit-for-tat"

    def play(self, last_move):
        # First round: no history yet, so cooperate.
        if last_move is None:
            return True
        # Afterwards: repay cooperation with cooperation, defection with defection.
        return bool(last_move)
| geeklhem/ipdt | ipdt/players/tft.py | tft.py | py | 354 | python | en | code | 2 | github-code | 13 |
# Find the factorial value of a user-supplied integer.
# Read the input value from the user.
n = int(input("Enter a number: "))

def fact(n):
    """Print n! for a non-negative integer n (prints 1 for n <= 1)."""
    result = 1
    # Multiply in every factor from 2 up to n; an empty range leaves 1.
    for factor in range(2, n + 1):
        result *= factor
    print(result)
# Print the factorial of the entered number.
fact(n)
| satz2000/Python-practiced-notes | Factorial.py | Factorial.py | py | 670 | python | en | code | 0 | github-code | 13 |
948725473 | import sys, re, operator, string, time
## Constraints
# - larger problem decomposed into entities using some form of abstraction
# - entities are never called on directly for actions
# - existence of an infrastructure for publishing and subscribing to events,
# AKA the `bulletin board`
# - entities post event subscriptions and publish event. Bulletin board infra
# does all the event management and distribution
#
# The event management substrate
#
class EventManager:
    """Bulletin board: routes published events to subscribed handlers."""

    def __init__(self):
        self._subscriptions = {}

    def subscribe(self, event_type, handler):
        """Register `handler` to be called for events of `event_type`."""
        self._subscriptions.setdefault(event_type, []).append(handler)

    def publish(self, event):
        """Deliver `event` (a tuple whose first item is the type) to handlers."""
        for handler in self._subscriptions.get(event[0], ()):
            handler(event)
#
# The application entities
#
class DataStorage:
    """Models the contents of the file"""
    def __init__(self, event_manager):
        self._event_manager = event_manager
        self._event_manager.subscribe("load", self.load)
        self._event_manager.subscribe("start", self.produce_words)
    def load(self, event):
        """Read the file named in `event`, lowercase it, strip non-word chars."""
        path_to_file = event[1]
        with open(path_to_file) as f:
            self._data = f.read()
        # Replace every run of non-word characters (and "_") with a space.
        pattern = re.compile("[\W_]+")
        self._data = pattern.sub(" ", self._data).lower()
    def produce_words(self, event):
        """Publish one "word" event per token, then a final "eof" event."""
        data_str = "".join(self._data)
        for w in data_str.split():
            self._event_manager.publish(("word", w))
        self._event_manager.publish(("eof", None))
class StopWordFilter:
    """Models to stop word filter"""
    def __init__(self, event_manager):
        self._stop_words = []
        self._event_manager = event_manager
        self._event_manager.subscribe("load", self.load)
        self._event_manager.subscribe("word", self.is_stop_word)
    def load(self, event):
        """Load the stop-word list plus all single letters.

        NOTE(review): the path is relative to the working directory --
        running from another directory will fail; confirm the expected cwd.
        """
        with open("../static/stop_words.txt") as f:
            self._stop_words = f.read().split(",")
        self._stop_words.extend(list(string.ascii_lowercase))
    def is_stop_word(self, event):
        """Re-publish the word as "valid_word" unless it is a stop word."""
        word = event[1]
        if word not in self._stop_words:
            self._event_manager.publish(("valid_word", word))
class WordFrequencyCounter:
    """Keeps the word frequency data"""

    def __init__(self, event_manager):
        self._word_freqs = {}
        self._event_manager = event_manager
        self._event_manager.subscribe("valid_word", self.increment_count)
        self._event_manager.subscribe("print", self.print_freqs)

    def increment_count(self, event):
        """Add one occurrence of the word carried by `event`."""
        word = event[1]
        self._word_freqs[word] = self._word_freqs.get(word, 0) + 1

    def print_freqs(self, event):
        """Print the 25 most frequent words, highest count first."""
        ranked = sorted(self._word_freqs.items(), key=lambda pair: pair[1], reverse=True)
        for word, count in ranked[:25]:
            print(word, "-", count)
class WordFrequencyApplication:
    """Top-level entity wiring the run/eof lifecycle events."""

    def __init__(self, event_manager):
        self._event_manager = event_manager
        self._event_manager.subscribe("run", self.run)
        self._event_manager.subscribe("eof", self.stop)

    def run(self, event):
        """Kick off processing of the file named in `event`."""
        self._event_manager.publish(("load", event[1]))
        self._event_manager.publish(("start", None))

    def stop(self, event):
        """All words consumed; request the final frequency report."""
        self._event_manager.publish(("print", None))
# runtime calc
start_time = time.time()
#
# The main function
#
em = EventManager()
# Instantiating each entity registers its subscriptions on the board.
DataStorage(em), StopWordFilter(em), WordFrequencyCounter(em)
WordFrequencyApplication(em)
# Publishing "run" triggers the whole load/start/word/eof/print cascade.
em.publish(("run", sys.argv[1]))
# final runtime calc
print("--- %s seconds ---" % (time.time() - start_time))
| DEGoodman/EiPS | python/16_bulletinboard.py | 16_bulletinboard.py | py | 3,855 | python | en | code | 0 | github-code | 13 |
34150090829 | # model.py
import torch
from torchvision import models
from torchvision.models.resnet import ResNet50_Weights
from typing import Optional
from utils import load_weights
from config import *
def load_model(snn_type: str,
               plant_type: Optional[str] = None
               ) -> tuple[Optional[torch.nn.Module], Optional[int]]:
    """
    This function either returns a 1snn or a 2snn model based on the snn_type argument.
    For a 2snn, it also requires the plant_type argument.

    Returns a (model, last_epoch) pair, or (None, None) when the plant type
    has fewer than two disease classes and no classifier can be trained.
    (The original return annotation used `or`, which is not a valid union;
    it is expressed here with Optional instead.)

    Raises:
        ValueError: for an unknown snn_type, or a missing/unknown plant_type
            when snn_type is '2snn'.
    """
    # Validate input values and handle errors
    if snn_type not in ['1snn', '2snn']:
        raise ValueError(f"Invalid SNN type. Expected '1snn' or '2snn', got {snn_type}")
    if snn_type == '2snn':
        if plant_type is None:
            raise ValueError("Plant type must be specified when loading a 2snn model.")
        elif plant_type not in PLANT_CLASSES:
            raise ValueError(f"Invalid plant type: {plant_type}")
    # Define the number of output nodes based on snn type
    if snn_type == '1snn':
        num_classes = TOTAL_CLASSES_NUMBER  # len(PLANT_CLASSES)
    else:
        num_classes = len(PLANT_CLASSES[plant_type])
        if num_classes < 2:
            print(
                f"Warning: Insufficient disease classes found for the plant type: {plant_type}.\n"
                f"A model cannot be trained with less than two classes.\n\n")
            return None, None
    # Load pre-trained ResNet50
    model = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
    # Replace the final layer so the head matches num_classes outputs.
    num_features = model.fc.in_features
    model.fc = torch.nn.Linear(num_features, num_classes)
    # Load model weights if a training on Plant-Village has already occurred
    model, last_epoch = load_weights(model, snn_type, plant_type)
    return model, last_epoch
| shaharelys/plant_disease_classification | model.py | model.py | py | 1,814 | python | en | code | 0 | github-code | 13 |
28367303569 | """
Core idea of value-iterations is to compute all values of Q(s, a) and for each
state calculate the max action of Q(s, a) given the state. We then know that
V(s) = the action that maximized Q(s, a)
"""
import numpy as np
import gym
def compute_q(P, s, nA, gamma, prev_v):
    """Return Q(s, a) for every action under the value estimate `prev_v`.

    P[s][a] is a list of (probability, next_state, reward, done) tuples.
    """
    q = np.zeros(nA)
    for action in range(nA):
        # Expected discounted return, summed over all possible next states.
        q[action] = sum(prob * (reward + gamma * prev_v[next_state])
                        for prob, next_state, reward, _ in P[s][action])
    return q
def run_policy(env, policy, gamma, render):
    """Run one episode following `policy`; return the discounted return.

    :param env: gym environment
    :param policy: array mapping state index -> action
    :param gamma: discount factor
    :param render: whether to render each step
    """
    obs = env.reset()
    total_reward = 0
    step_idx = 0
    done = False
    while not done:
        if render:
            env.render()
        obs, reward, done, _ = env.step(int(policy[obs]))
        # Discount by the number of steps taken so far.
        total_reward += (gamma ** step_idx * reward)
        step_idx += 1
    return total_reward
def evaluate_policy(env, policy, gamma, n=100):
    """Average discounted return of `policy` over `n` episodes (no rendering)."""
    episode_scores = np.fromiter(
        (run_policy(env, policy, gamma, render=False) for _ in range(n)),
        dtype=float)
    return np.mean(episode_scores)
gamma = 1.0
eps = 1e-20  # convergence threshold on the total value change per sweep
env = gym.make('FrozenLake8x8-v0')
nS = env.observation_space.n
nA = env.action_space.n
# Probability of reward given state and action.
P = env.env.P
v = np.zeros(nS)
# Value-iteration sweeps: V(s) <- max_a Q(s, a) until values stop changing.
for i in range(10000):
    prev_v = np.copy(v)
    for s in range(nS):
        q = compute_q(P, s, nA, gamma, prev_v)
        v[s] = max(q)
    if np.sum(np.fabs(prev_v - v)) <= eps:
        print('Converged at iteration %.2f' % i)
        break
# Greedy policy extraction: pick the action maximizing Q under the final V.
policy = np.zeros(nS)
for s in range(nS):
    q = compute_q(P, s, nA, gamma, v)
    policy[s] = np.argmax(q)
total_reward = evaluate_policy(env, policy, gamma)
print('Average policy score %.2f' % (total_reward))
print(policy)
| ASzot/random-implementations | reinforcement-learning/value_iteration.py | value_iteration.py | py | 1,770 | python | en | code | 0 | github-code | 13 |
6168189054 | import numpy as np
from flask import Flask, render_template, request, jsonify
from wl_model import wl_model
import ttide as ttide
import json
app = Flask(__name__)
@app.after_request
def cors(response):
    """Attach permissive CORS headers to every outgoing response.

    Note: despite the original parameter name `environ`, Flask passes the
    outgoing *response* object to after_request hooks.
    """
    response.headers['Access-Control-Allow-Origin'] = '*'
    # 'Access-Control-Allow-Methods' (plural) is the header browsers honor;
    # the original 'Access-Control-Allow-Method' was silently ignored.
    response.headers['Access-Control-Allow-Methods'] = '*'
    response.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'
    return response
@app.route('/waterLevel',methods = ['POST', 'GET'])
def mean_water_level():
    """Fit the water-level regression and return its coefficients as JSON.

    Expects a POST body like {"head_q": ..., "foot_r": ..., "wl_day": ...}.
    """
    if request.method == 'POST':
        data = request.get_data()
        json_data = json.loads(data)
        head_q = json_data.get("head_q")
        foot_r = json_data.get("foot_r")
        wl_day = json_data.get("wl_day")
        # NOTE(review): assumes wl_model.t_q_res returns an array of (at
        # least) four regression coefficients -- confirm against wl_model.
        param = wl_model.t_q_res(head_q,wl_day,foot_r)
        dic_t = {'org':'org'}
        dic_t['Costant'] = param.tolist()[0]
        dic_t['Q'] = param.tolist()[1]
        dic_t['Q2'] = param.tolist()[2]
        dic_t['R'] = param.tolist()[3]
        print(dic_t)
        return jsonify(dic_t)
    else:
        return 'error'
@app.route('/fitting',methods = ['POST', 'GET'])
def tide_fit():
    """Fit a tidal model to hourly water levels posted as JSON.

    Expects a POST body like {"water": [...hourly levels...]} and returns
    the fitted series under the "water" key.
    """
    if request.method == 'POST':
        # Read the JSON payload from the frontend:
        # request.get_data() returns raw bytes, json.loads() parses them.
        data = request.get_data()
        json_data = json.loads(data)
        wl_hour = json_data.get("water")
        wl_hour = np.array(wl_hour)
        tfit_e = ttide.t_tide(wl_hour)
        tide_out = tfit_e['xout'].tolist()
        # Bug fix: the original compared against the *string* 'nan', which
        # can never equal a float NaN, so this diagnostic never fired.
        if tide_out and isinstance(tide_out[0], list) and np.isnan(tide_out[0][0]):
            print(tide_out)
        # Send the JSON response back to the frontend.
        dic = {'org':'org'}
        dic['water'] = tide_out
        return jsonify(dic)
    else:
        return 'error'
@app.route('/login', methods=['POST'])
def login():
    """Log a user in; echoes the credentials and always reports success."""
    # Read the JSON payload from the frontend:
    # request.get_data() returns raw bytes, json.loads() parses them.
    data = request.get_data()
    print(data)
    json_data = json.loads(data)
    print(json_data)
    Id = json_data.get("userId")
    password = json_data.get("password")
    print("userId is " + Id)
    print("password is " + password)
    # Send a JSON response back to the frontend.
    info = dict()  # response payload
    info['status'] = 'success'
    return jsonify(info)
if __name__ == '__main__':
#默认为5000端口
# app.run()
app.run(port=8000)
| ggonekim9/flask_harmonic | web_back/app.py | app.py | py | 2,509 | python | en | code | 1 | github-code | 13 |
21984985365 |
# Train four XGBoost models, one per random fold of the training days, and
# accumulate their predictions on the hold-out day.
nn = all_data.shape[0]
np.random.seed(999)
# NOTE(review): np.random.random_integers is deprecated (removed in newer
# NumPy); np.random.randint(0, 4, nn) is the modern equivalent.
sample_idx = np.random.random_integers(0, 3, nn)
n_trees = 4100
predv_xgb = 0
batch = 0
day_test = 31  # hold-out day used for validation
output_logloss = {}
pred_dict = {}
for idx in [0, 1, 2, 3]:
    # Train on days [17, day_test) restricted to this fold's rows.
    filter1 = np.logical_and(np.logical_and(day_values >= 17, day_values < day_test),
                             np.logical_and(sample_idx == idx, True))
    filter_v1 = day_values == day_test
    # NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc is the
    # replacement for this boolean/label indexing.
    xt1 = all_data.ix[filter1, xgb_feature]
    yt1 = cvrt_value[filter1]
    xv1 = all_data.ix[filter_v1, xgb_feature]
    yv1 = cvrt_value[filter_v1]
    if xt1.shape[0] <= 0 or xt1.shape[0] != yt1.shape[0]:
        print(xt1.shape, yt1.shape)
        raise ValueError('wrong shape!')
    dtrain = xgb.DMatrix(xt1, label=yt1)
    dvalid = xgb.DMatrix(xv1)
    watchlist = [(dtrain, 'train')]
    print(xt1.shape, yt1.shape)
    plst = list(xgb_param.items()) + [('eval_metric', 'logloss')]
    xgb1 = xgb.train(plst, dtrain, n_trees, watchlist, early_stopping_rounds=50)
    batch += 1
    current_pred = xgb1.predict(dvalid)
    yt_hat = xgb1.predict(dtrain)
    pred_dict[idx] = current_pred
    predv_xgb += current_pred
    output_logloss[idx] = logloss(yt_hat, yt1)
    print(logloss(yt_hat, yt1))
# print('-' * 30, batch, logloss(predv_xgb / batch, yv1))
| zxlmufc/penguin_click | script/generate_gbdt_feature_for_fm.py | generate_gbdt_feature_for_fm.py | py | 1,280 | python | en | code | 0 | github-code | 13 |
15812274833 | """
Implementation of alternating least squares with regularization.
The alternating least squares with regularization algorithm ALS-WR was first
demonstrated in the paper Large-scale Parallel Collaborative Filtering for
the Netflix Prize. The authors discuss the method as well as how they
parallelized the algorithm in Matlab. This module implements the algorithm in
parallel in python with the built in concurrent.futures module.
"""
import os
import subprocess
from joblib import Parallel, delayed
import numpy as np
import scipy.sparse as sps
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_random_state
from .utils import _check_x, _check_y, root_mean_squared_error
# pylint: disable=E1101,W0212
class ALS(BaseEstimator):
    """Implementation of Alternative Least Squares for Matrix Factorization.
    Parameters
    ----------
    rank : integer (default=10)
        The number of latent features (rank) to include in the matrix
        factorization.
    alpha : float, optional (default=0.1)
        Float representing the regularization penalty.
    tol : float, optional (default=0.1)
        Float representing the difference in RMSE between iterations at which
        to stop factorization.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    verbose : int, optional (default=0)
        Controls the verbosity of the ALS fitting process.
    Attributes
    ----------
    data : {array-like, sparse matrix} shape (n_samples, m_samples)
        Constant matrix representing the data to be modeled.
    item_feats : array-like, shape (k_features, m_samples)
        Array of shape (rank, m_samples) where m represents the number of items
        contained in the data. Contains the latent features of items extracted
        by the factorization process.
    user_feats : array-like, shape (k_features, n_samples)
        Array of shape (rank, n_samples) where n represents the number of users
        contained in the data. Contains the latent features of users extracted
        by the factorization process.
    reconstruction_err_ : float
        The value returned by `score` on the training data (the negated root
        mean squared error; higher is better).
    """
    def __init__(self, rank=10, alpha=0.1, tol=0.001, random_state=None,
                 n_jobs=1, verbose=0):
        """Initialize instance of ALS."""
        self.rank = rank
        self.alpha = alpha
        self.tol = tol
        self.random_state = random_state
        if n_jobs == -1:
            # -1 means "use every available core".
            n_jobs = os.cpu_count()
        self.n_jobs = n_jobs
        self.verbose = verbose
    def fit(self, X, y, shape=None):
        """Fit the model to the given data.
        Parameters
        ----------
        X : tuple, DataHolder
            Structure containing arrays of user indices and item indices.
        y : {array-like, sparse matrix}
            1-D array or sparse matrix representing the data to be modeled.
        shape : tuple or None, (default=None)
            If y is a 1-D array shape must be the shape of the real data.
        Returns
        -------
        self
        """
        _, _ = self.fit_transform(X, y, shape=shape)
        return self
    def fit_transform(self, X, y, shape=None):
        """Fit the model to the given data.
        Parameters
        ----------
        X : tuple, DataHolder
            Structure containing arrays of user indices and item indices.
        y : {array-like, sparse matrix}
            1-D array or sparse matrix representing the data to be modeled.
        shape : tuple or None, (default=None)
            If y is a 1-D array shape must be the shape of the real data.
        Returns
        -------
        user_feats : array, shape (k_components, n_samples)
            The array of latent user features.
        item_feats : array, shape (k_components, m_samples)
            The array of latent item features.
        """
        if (y.ndim < 2 or y.shape[0] == 1) and not shape:
            # NOTE(review): the concatenation below is missing a space and
            # renders as "...must beprovided.".
            raise ValueError('When y is a scalar or 1-D array shape must be' +
                             'provided.')
        users, items = _check_x(X)
        if not sps.issparse(y):
            # Build a sparse ratings matrix from the (user, item, value)
            # triples; lil_matrix is efficient for incremental assignment.
            data = sps.lil_matrix(shape)
            for idx, (i, j) in enumerate(zip(users, items)):
                data[i, j] = y[idx]
            data = data.tocsr()
        else:
            data = y.tocsr()
        random_state = check_random_state(self.random_state)
        rmse = float('inf')
        diff = rmse
        # Seed the first item-feature row with the per-item average rating.
        item_avg = data.sum(0) / (data != 0).sum(0)
        item_avg[np.isnan(item_avg)] = 0
        self.item_feats = random_state.rand(self.rank, data.shape[1])
        self.item_feats[0] = item_avg
        self.user_feats = np.zeros((self.rank, data.shape[0]))
        self.data = data
        # Alternate between solving for user and item features until the
        # RMSE improvement falls below tol.
        while diff > self.tol:
            user_arrays = np.array_split(np.arange(self.data.shape[0]),
                                         self.n_jobs)
            self._update_parallel(user_arrays)
            item_arrays = np.array_split(np.arange(self.data.shape[1]),
                                         self.n_jobs)
            self._update_parallel(item_arrays, user=False)
            users, items = data.nonzero()
            U = self.user_feats.T[users]
            V = self.item_feats.T[items]
            pred = (U * V).sum(-1)
            new_rmse = root_mean_squared_error(data.data, pred)
            diff = rmse - new_rmse
            rmse = new_rmse
        # NOTE(review): this nonzero() result is unused (score recomputes it).
        users, items = data.nonzero()
        self.reconstruction_err_ = self.score(X, y)
        return self.user_feats, self.item_feats
    def _update_parallel(self, arrays, user=True):
        """Update the given features in parallel.
        Parameters
        ----------
        arrays : ndarray
            Array of indices that represent which column of the features is
            being updated.
        user : bool
            Boolean indicating wheter or not user features are being updated.
        """
        params = {'rank': self.rank, 'alpha': self.alpha, 'user': user}
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose)(
                delayed(self._thread_update_features)(array, params)
                for array in arrays)
        # Each worker returns {index: feature-column}; write them back.
        for result in out:
            for index, value in result.items():
                if user:
                    self.user_feats[:, index] = value
                else:
                    self.item_feats[:, index] = value
    def _thread_update_features(self, indices, params):
        """Split updates of feature matrices to multiple threads.
        Args:
            indices (np.ndarray): Array of integers representing the index of
                the user or item that is to be updated.
            params (dict): Parameters for the ALS algorithm.
        Returns:
            data (dict): Dictionary of data with the user or item to be updated
                as key and the array of features as the values.
        """
        data = {}
        out = Parallel(
            n_jobs=self.n_jobs, backend='threading')(
                delayed(self._update_one)(index, **params)
                for index in indices)
        # `indices` is a contiguous range, so enumerate from its first value.
        for i, val in enumerate(out, start=indices[0]):
            data[i] = val
        return data
    def _update_one(self, index, **params):
        """Update a single column for one of the feature matrices.
        Parameters
        ----------
        index : int
            Integer representing the index of the user/item that is to be
            updated.
        params : dict
            Parameters for the ALS algorithm.
        Returns
        -------
        col : ndarray
            An array that represents a column from the feature matrix that is
            to be updated.
        """
        rank, alpha, user = params['rank'], params['alpha'], params['user']
        if user:
            submat = self.make_item_submats(index)
            row = self.data[index].data
        else:
            submat = self.make_user_submats(index)
            row = self.data[:, index].data
        num_ratings = row.size
        # Regularized normal equations: (S S^T + a*n*I) col = S r
        reg_sums = submat.dot(submat.T) + alpha * num_ratings * np.eye(rank)
        feature_sums = submat.dot(row[np.newaxis].T)
        try:
            col = np.linalg.inv(reg_sums).dot(feature_sums)
        except np.linalg.LinAlgError:
            # Singular system (e.g. no ratings): fall back to zeros.
            col = np.zeros((1, rank))
        return col.ravel()
    def make_user_submats(self, item):
        """Get the user submatrix from a single item in the ratings matrix.
        Parameters
        ----------
        item : int
            Index of the item to construct the user submatrix for.
        Returns
        -------
        submat : np.ndarray
            Array containing the submatrix constructed by selecting the columns
            from the user features for the ratings that exist for the given
            column in the ratings matrix.
        """
        # Manual CSR->CSC conversion via scipy internals so the row indices
        # of the item's raters can be sliced without materializing a copy.
        idx_dtype = sps.sputils.get_index_dtype(
            (self.data.indptr, self.data.indices),
            maxval=max(self.data.nnz, self.data.shape[0]))
        indptr = np.empty(self.data.shape[1] + 1, dtype=idx_dtype)
        indices = np.empty(self.data.nnz, dtype=idx_dtype)
        data = np.empty(self.data.nnz,
                        dtype=sps.sputils.upcast(self.data.dtype))
        sps._sparsetools.csr_tocsc(
            self.data.shape[0], self.data.shape[1],
            self.data.indptr.astype(idx_dtype),
            self.data.indices.astype(idx_dtype), self.data.data, indptr,
            indices, data)
        submat = self.user_feats[:, indices[indptr[item]:indptr[item + 1]]]
        return submat
    def make_item_submats(self, user):
        """Get the item submatrix from a single user in the ratings matrix.
        Parameters
        ----------
        user : int
            Index of the user to construct the user submatrix for.
        Returns
        -------
        submat : np.ndarray
            Array containing the submatrix constructed by selecting the columns
            from the item features for the ratings that exist for the given row
            in the ratings matrix.
        """
        submat = self.item_feats[:, self.data[user].indices]
        return submat
    def _predict(self, X):
        """Make predictions for the given arrays.
        Parameters
        ----------
        X : tuple, DataHolder
            Structure containing arrays of user indices and item indices.
        Returns
        -------
        predictions : array, shape (n_samples,)
            Array of all predicted values for the given user/item pairs.
        """
        check_is_fitted(self, ['item_feats', 'user_feats'])
        users, items = _check_x(X)
        U = self.user_feats.T[users]
        V = self.item_feats.T[items]
        # Row-wise dot product of the selected user and item features.
        predictions = (U * V).sum(-1)
        return predictions
    def predict_one(self, user, item):
        """Given a user and item provide the predicted rating.
        Predicted values for a single user, item pair can be provided by the
        fitted model by taking the dot product of the user column from the
        user_feats and the item column from the item_feats.
        Parameters
        ----------
        user : integer
            Index for the user.
        item : integer
            Index for the item.
        Returns
        -------
        prediction : ndarray, shape (1,)
            Predicted value at index user, item in original data.
        """
        prediction = self._predict((np.array([user]), np.array([item])))
        return prediction
    def predict_all(self, user):
        """Given a user provide all of the predicted values.
        Parameters
        ----------
        user : integer
            Index for the user.
        Returns
        -------
        predictions : array-like, shape (1, m_samples)
            Array containing predicted values of all items for the given user.
        """
        users = np.repeat(user, self.data.shape[1])
        items = np.arange(self.data.shape[1])
        predictions = self._predict((users, items))
        return predictions
    def score(self, X, y):
        """Return the negated root mean squared error for the predicted values.
        The sign is flipped so that higher scores are better, following the
        scikit-learn scorer convention.
        Parameters
        ----------
        X : tuple, DataHolder
            Structure containing row and column values for predictions.
        y : {array-like, sparse matrix}
            The true values as a 1-D array or stored in a sparse matrix.
        Returns
        -------
        rmse : float
            The negated root mean squared error for the test set given the
            values predicted by the model.
        """
        check_is_fitted(self, ['item_feats', 'user_feats'])
        users, items = _check_x(X)
        r_ = _check_y(y, users, items)
        pred = (self.user_feats.T[users] * self.item_feats.T[items]).sum(-1)
        rmse = -root_mean_squared_error(r_, pred)
        return rmse
    def update_user(self, user, item, value):
        """Update a single user's feature vector.
        When an existing user rates an item the feature vector for that user
        can be updated without having to rebuild the entire model. Eventually,
        the entire model should be rebuilt, but this is as close to a real-time
        update as is possible.
        Parameters
        ----------
        user : integer
            Index for the user.
        item : integer
            Index for the item
        value : integer
            The value assigned to item by user.
        """
        check_is_fitted(self, ['item_feats', 'user_feats'])
        self.data[user, item] = value
        sps.save_npz('data', self.data)
        np.savez('features', user=self.user_feats, item=self.item_feats)
        # Delegate the least-squares solve to the external fit_als.py script.
        subprocess.run(
            ['fit_als.py', '-r', str(self.rank), '-a', str(self.alpha),
             'One', str(user), 'data.npz', 'features.npz'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
        # NOTE(review): this writes 'features.npz' above but loads/removes
        # 'feature.npz' below; unless fit_als.py itself writes 'feature.npz',
        # the load raises FileNotFoundError and 'features.npz' leaks on disk.
        # Confirm against fit_als.py.
        with np.load('feature.npz') as loader:
            user_feats = loader['user']
        self.user_feats[:, user] = user_feats
        for _file in ['data.npz', 'feature.npz']:
            os.remove(_file)
    def add_user(self):
        """Add a user to the model.
        When a new user is added append a new row to the data matrix and
        create a new column in user_feats. When the new user rates an item,
        the model will be ready insert the rating and use the update_user
        method to calculate the least squares approximation of the user
        features.
        """
        check_is_fitted(self, ['item_feats', 'user_feats'])
        shape = self.data._shape
        self.data = sps.vstack([self.data, sps.csr_matrix((1, shape[1]))],
                               format='csr')
        new_col = np.zeros((self.rank, 1))
        self.user_feats = np.hstack((self.user_feats, new_col))
| GrierPhillips/Recommendation-Models | src/als.py | als.py | py | 15,434 | python | en | code | 0 | github-code | 13 |
8805203936 | """
people을 내림차순으로 정렬한 후에 무거운 사람부터 새 보트에 집어넣습니다. limit/2보다 초과하는 사람은 다 넣어요.(어차피 이들끼리는 같이 보트를 탈 수 없기때문)
그리고나서 남은 사람들 중 가장 무거운 사람과 마지막 보트만 체크합니다. 왜냐하면 현재 있는 타고 있는 보트 중에서 마지막에 집어넣은 보트가 가장 여유가 클 것이기 때문에 거기에 못들어가면 어차피 다른 보트에도 못 들어가요. limit보다 작다면 그 보트에 넣어주면 됩니다.
이렇게 구현하니까 반복문 딱 2번만 돌고 효율성 통과했습니다!
"""
def solution(people, limit):
    """Count the boats needed when each boat greedily takes people (sorted
    ascending) until the next person would exceed `limit` total weight."""
    people.sort()
    boats = 1
    load = 0
    for weight in people:
        if load + weight > limit:
            # Current boat is full: start a new one with this person.
            boats += 1
            load = weight
        else:
            load += weight
    return boats
print(solution([70, 80, 50],100)) | Chung-SungWoong/Practice_Python | Python_Test55.py | Python_Test55.py | py | 974 | python | ko | code | 0 | github-code | 13 |
71083990099 | import math
# Cooking Masterclass
# Each student package contains:
#   1 package of flour, 10 eggs, 1 apron.
budget = float(input())
students = int(input())
flour_pack_price = float(input())
an_egg_price = float(input())
apron_price = float(input())

# Base cost: one flour package, ten eggs and one apron per student.
total_cost = students * (
        flour_pack_price + 10 * an_egg_price + apron_price
)
# 20% extra aprons are needed because they get dirty
# (ceil(students / 5) == ceil(0.2 * students)).
total_cost += math.ceil(students / 5) * apron_price
# Every fifth package of flour is free.
total_cost -= (students // 5) * flour_pack_price

diff = (total_cost - budget)
if total_cost > budget:
    print(f'{diff:.2f}$ more needed.')
else:
    print(f'Items purchased for {total_cost:.2f}$.')
| bobsan42/SoftUni-Learning-42 | ProgrammingFunadamentals/20RegularMidExam/01.py | 01.py | py | 817 | python | en | code | 0 | github-code | 13 |
19481859765 | """
Fetch test lists from https://github.com/citizenlab/test-lists
Populate citizenlab table from the tests lists git repository and the
url_priorities table
The tables have few constraints on the database side: most of the validation
is done here and it is meant to be strict.
Local test run:
PYTHONPATH=analysis ./run_analysis --update-citizenlab --dry-run --stdout
"""
from argparse import Namespace
from pathlib import Path
from subprocess import check_call
from tempfile import TemporaryDirectory
from typing import List, Optional
import csv
import logging
import re
from clickhouse_driver import Client as Clickhouse
from analysis.metrics import setup_metrics
HTTPS_GIT_URL = "https://github.com/citizenlab/test-lists.git"
log = logging.getLogger("analysis.citizenlab_test_lists_updater")
metrics = setup_metrics(name="citizenlab_test_lists_updater")
VALID_URL = re.compile(
r"(^(?:http)s?://)?" # http:// or https://
r"((?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))" # ...or ipaddr
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
URL_BAD_CHARS = {"\r", "\n", "\t", "\\"}
def _extract_domain(url: str) -> Optional[str]:
if any(c in URL_BAD_CHARS for c in url):
return None
m = VALID_URL.match(url)
if m:
return m.group(2)
return None
@metrics.timer("fetch_citizen_lab_lists")
def fetch_citizen_lab_lists() -> List[dict]:
"""Clone repository in a temporary directory and extract files"""
out = [] # (cc or "ZZ", domain, url, category_code)
with TemporaryDirectory() as tmpdir:
cmd = ("git", "clone", "--depth", "1", HTTPS_GIT_URL, tmpdir)
check_call(cmd, timeout=120)
p = Path(tmpdir) / "lists"
for i in sorted(p.glob("*.csv")):
cc = i.stem
if cc == "global":
cc = "ZZ"
if len(cc) != 2:
continue
log.info("Processing %s", i.name)
with i.open() as f:
for item in csv.DictReader(f):
url = item["url"]
domain = _extract_domain(url)
if not domain:
log.debug("Ignoring", url)
continue
category_code = item["category_code"]
d = dict(
domain=domain,
url=url,
cc=cc,
category_code=category_code,
)
out.append(d)
assert len(out) > 20000
assert len(out) < 1000000
metrics.gauge("citizenlab_test_list_len", len(out))
return out
def query_c(click, query: str, qparams: dict):
    """Run `query` with `qparams` on the Clickhouse client, type-checking values."""
    click.execute(query, qparams, types_check=True)
@metrics.timer("update_citizenlab_table")
def update_citizenlab_table(conf: Namespace, citizenlab: list) -> None:
"""Overwrite citizenlab_flip and swap tables atomically"""
if conf.dry_run:
return
click = Clickhouse("localhost", user="citizenlab")
log.info("Emptying Clickhouse citizenlab_flip table")
q = "TRUNCATE TABLE citizenlab_flip"
click.execute(q)
log.info("Inserting %d citizenlab table entries", len(citizenlab))
q = "INSERT INTO citizenlab_flip (domain, url, cc, category_code) VALUES"
click.execute(q, citizenlab, types_check=True)
log.info("Swapping Clickhouse citizenlab tables")
q = "EXCHANGE TABLES citizenlab_flip AND citizenlab"
click.execute(q)
def update_citizenlab_test_lists(conf: Namespace) -> None:
    """Entry point: fetch the citizenlab test lists and load them into the DB."""
    log.info("update_citizenlab_test_lists")
    citizenlab = fetch_citizen_lab_lists()
    update_citizenlab_table(conf, citizenlab)
| ooni/backend | analysis/analysis/citizenlab_test_lists_updater.py | citizenlab_test_lists_updater.py | py | 3,782 | python | en | code | 43 | github-code | 13 |
12224635093 | import logging
import os
import re
import uuid
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import CallbackContext
from .constants import *
CALLBACK_SESSION = "callback_session"
logger = logging.getLogger(__name__)
def make_keyboard(buttons: list, context: CallbackContext = None, user_data: dict = None):
    """Build an inline keyboard whose callbacks are stamped with a session id.

    `buttons` is either a list of rows (each row a list of (text, data)
    pairs) or a single (text, data) tuple. A fresh session uuid is stored in
    the caller's user_data so that the @callback decorator can reject stale
    keyboards.

    Raises:
        TypeError: if `buttons` is neither a list nor a tuple.
    """
    keyboard = []
    session = str(uuid.uuid4())
    if context:
        context.user_data[CALLBACK_SESSION] = session
    else:
        user_data[CALLBACK_SESSION] = session
    if isinstance(buttons, list):
        for row in buttons:
            keyboard.append(
                [
                    InlineKeyboardButton(
                        text=button[0], callback_data=f"{button[1]}#{session}"
                    )
                    for button in row
                ]
            )
    elif isinstance(buttons, tuple):
        keyboard = [
            [
                InlineKeyboardButton(
                    text=buttons[0], callback_data=f"{buttons[1]}#{session}"
                )
            ]
        ]
    else:
        # TypeError is the idiomatic exception for a wrong argument type and
        # is still caught by any existing `except Exception` handler.
        raise TypeError("Invalid buttons type")
    return InlineKeyboardMarkup(keyboard)
def logged_user(func):
    """Decorator: run the handler only for logged-in users, else prompt a login."""
    from functools import wraps

    @wraps(func)  # preserve the handler's name/docstring for debugging
    def wrapper(*args, **kwargs):
        update, context = args[0], args[1]
        if context.user_data.get(LOGGED):
            func(*args, **kwargs)
        else:
            message = "Devi prima loggarti per utilizzare questo comando! ⛔"
            keyboard = make_keyboard(("Login", LOGIN_CALLBACK), context)
            # Callback queries must be answered and edited in place; plain
            # messages get a normal reply.
            if update.callback_query:
                update.callback_query.answer()
                update.callback_query.edit_message_text(
                    text=message, reply_markup=keyboard
                )
            else:
                update.message.reply_text(text=message, reply_markup=keyboard)
    return wrapper
def admin_user(func):
    """Decorator: run the handler only when the sender's id is listed in the
    ADMIN_USERS environment variable (comma- or whitespace-separated ids)."""
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        update = args[0]
        admins = os.getenv("ADMIN_USERS")
        if not admins:
            return
        # Bug fix: the original used a substring test on the raw env string,
        # so user id 12 matched an admin id like 123. Compare whole tokens.
        allowed = {a for a in re.split(r"[,\s]+", admins) if a}
        if str(update.message.from_user.id) not in allowed:
            return
        func(*args, **kwargs)
    return wrapper
def callback(func):
    """Decorator for callback-query handlers: validate the session stamp.

    Deletes the message (and stops) when the callback data is malformed or
    carries a session id different from the one most recently issued by
    make_keyboard; this invalidates stale keyboards.
    """
    def wrapper(*args, **kwargs):
        update, context = args[0], args[1]
        if not update.callback_query:
            # Plain messages pass straight through.
            func(*args, **kwargs)
            return
        update.callback_query.answer()
        # Callback data has the form "<payload>#<session-uuid>".
        match = re.match(r"^([\w]+)#([\w-]+)$", update.callback_query.data)
        if not match:
            update.callback_query.delete_message()
            return
        if match[2] != context.user_data.get(CALLBACK_SESSION):
            update.callback_query.delete_message()
            return
        func(*args, **kwargs)
    return wrapper
def callback_pattern(key):
    """Return the regex matching callback data produced by make_keyboard for `key`."""
    # Raw strings avoid the invalid "\w" escape warning the original emitted.
    return r"^" + key + r"#[\w-]+$"
def command(func):
    """Decorator: clear any pending input state before running a command handler."""
    def wrapper(*args, **kwargs):
        context = args[1]
        # Reset the expected-input marker so a stale prompt does not
        # capture this command's input.
        context.user_data[INPUT_KIND] = None
        func(*args, **kwargs)
    return wrapper
| eciavatta/merdetti-bot | merdetti/helpers.py | helpers.py | py | 2,982 | python | en | code | 9 | github-code | 13 |
32656791020 | from dataclasses import asdict, dataclass
from typing import ClassVar, Dict
from undictify import type_checked_constructor
from .checksum_algorithm import ChecksumAlgorithm
@type_checked_constructor()
@dataclass
class Checksum:
    """A checksum value paired with the algorithm that produced it.

    Mirrors the org.cedar.schemas.avro.psi.Checksum Avro record; the
    undictify decorator type-checks keyword construction.
    """
    algorithm: ChecksumAlgorithm
    value: str
    #: The Avro Schema associated to this class
    _schema: ClassVar[str] = """{
      "name": "Checksum",
      "namespace": "org.cedar.schemas.avro.psi",
      "type": "record",
      "fields": [
        {
          "name": "algorithm",
          "type": "org.cedar.schemas.avro.psi.ChecksumAlgorithm"
        },
        {
          "name": "value",
          "type": "string"
        }
      ]
    }"""
    def to_dict(self) -> Dict:
        """
        Returns a dictionary version of this instance.
        """
        return asdict(self)
    @classmethod
    def from_dict(
            cls,
            the_dict: Dict
    ) -> 'Checksum':
        """
        Returns an instance of this class from a dictionary.
        :param the_dict: The dictionary from which to create an instance of this class.
        """
        return cls(**the_dict)
| cedardevs/onestop-clients | onestop-python-client/onestop/schemas/psiSchemaClasses/org/cedar/schemas/avro/psi/checksum.py | checksum.py | py | 1,206 | python | en | code | 1 | github-code | 13 |
22002120764 | import random
value = 0
while value < 1:
# Initialize the throw count and the dice values
throw_count = 0
dice1 = 0
dice2 = 0
# Keep rolling the dice until they match
while dice1 != dice2:
dice1 = random.randint(1, 6)
dice2 = random.randint(1, 6)
throw_count += 1
print("Throw", throw_count, ":", dice1, dice2)
# Print the result
print("Matching pair found after", throw_count, "throws:", dice1, dice2)
| YumiVR/SDAM | Sem1/week5/dice_match.py | dice_match.py | py | 471 | python | en | code | 0 | github-code | 13 |
74648744017 | from googleapiclient.discovery import build
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
import os
from google.oauth2.credentials import Credentials
def move_email_to_trash(email_id):
    """Move the Gmail message with the given id to the trash.

    Authenticates via OAuth (token cached in token.json, created from
    credentials.json on first run), then swaps the INBOX label for TRASH.
    """
    # If modifying these scopes, delete the token.json file.
    SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
    # Set up the Gmail API credentials
    creds = None
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            # First run: interactive browser-based OAuth flow.
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('token.json', 'w') as token:
            token.write(creds.to_json())
    # Create the Gmail API service
    service = build('gmail', 'v1', credentials=creds, cache_discovery=False)
    try:
        # Modify the labels of the email to move it to the trash
        modify_labels = {'removeLabelIds': ['INBOX'], 'addLabelIds': ['TRASH']}
        service.users().messages().modify(userId='me', id=email_id, body=modify_labels).execute()
        print(f"Email with ID {email_id} moved to trash successfully.")
    except Exception as e:
        # NOTE(review): broad catch; narrowing to
        # googleapiclient.errors.HttpError would avoid masking real bugs.
        print(f"An error occurred while moving the email to trash: {e}")
| aryankhatana01/real-time-email-spam-detection | delete_spam/delete_emails_api.py | delete_emails_api.py | py | 1,581 | python | en | code | 0 | github-code | 13 |
10012564808 | import random
# Module-level quiz state; the question functions mutate these via `global`.
# (`global` statements at module scope are no-ops, so the originals were
# removed.)
used_question = []  # question numbers already asked this session
count = 0           # how many questions have been asked so far
##Gets answer and adds the questions that have been used to a list
def get_answer():
    """Prompt until the user enters an integer from 1 to 4; return it as a string.

    Fixes two defects in the original: a second invalid entry fell through
    and returned None, and non-numeric input crashed with ValueError.
    """
    answer = input("Please enter an answer: ")
    while True:
        try:
            value = int(answer)
        except ValueError:
            value = None
        if value is not None and 1 <= value <= 4:
            return str(value)
        answer = input("Please enter a valid number: ")
def exclude_question(num):
    # Record num so get_question() will not ask this question again.
    used_question.append(num)
#####All the questions####
# Each entry: (question text, answer options, correct choice as a string).
# The ten question_* wrappers below keep the public names that
# get_question() and use_quiz() call; the duplicated bodies of the original
# ten functions are consolidated into one data-driven helper.
_QUESTIONS = {
    1: ("When you pass another vehicle, before you return to the right lane, you must:",
        "1. Make sure you can see the front bumper of the vehicle you passed. \n2. Look at your interior rear-view mirror. \n3. Signal. \n4. All of the above ",
        "4"),
    2: ("Speed limit signs are:",
        "1. Destination (guide) signs. \n2. Service Signs. \n3. Warning signs. \n4. Regulatory signs. ",
        "4"),
    3: ("Where should your hands be positioned on the steering wheel?",
        "1. 10 and 2 o'clock. \n 2. 9 and 3 o'clock. \n3. 8 and 4 o'clock. \n4. Anywhere comfortable.",
        "1"),
    4: ("Vehicle inspection is required:",
        "1. Every six months. \n2. Only for vehicles over five years old. \n3. Every two years. \n4. Every year.",
        "4"),
    5: ("The minimum drinking age is:",
        "1. 21 \n2. 9 \n 3. 18 \n4. 20",
        "1"),
    6: ("A road is likely to be most slippery when:",
        "1. it is icy and the temperature is near freezing. \n2. in cold, dry weather. \n3. when tire marks have been left by other vehicles. \n4. in spring.",
        "1"),
    7: ("A solid white line indicates:",
        "1. Two lanes travelling in different directions; passing is permitted \n2. Two lanes travelling in different directions; passing is not permitted \n3. Two lanes travelling in the same direction; passing is permitted \n4. Two lanes travelling in the same direction; passing is not permitted",
        "4"),
    8: ("If you are sitting in the passenger seat and are at least 18, you are allowed to not wear a seatbelt:",
        "1. True \n2. False",
        "2"),
    9: ("When should you use your turn signal?",
        "1. Before changing lanes \n2. To turn at an intersection \n3. To pull over on the shoulder of the road \n4. All of these",
        "4"),
    10: ("What should you do if a traffic light is flashing red.",
         "1. Slow down before proceeding \n2. Stop only if there are other cars coming \nC. Stop and proceed when it’s safe to do so by following the right-of-way rules \n4. Stop, but only if the car in front has stopped ",
         "3"),
}
def _ask(num):
    """Ask question `num`, grade the answer, then jump to another random question."""
    prompt, options, correct = _QUESTIONS[num]
    exclude_question(num)
    print(prompt)
    print(options)
    answer = get_answer()
    if answer == correct:
        print("Correct!")
    else:
        print("Incorrect.")
    get_question(random.randint(1, 10))
def question_one():
    _ask(1)
def question_two():
    _ask(2)
def question_three():
    _ask(3)
def question_four():
    _ask(4)
def question_five():
    _ask(5)
def question_six():
    _ask(6)
def question_seven():
    _ask(7)
def question_eight():
    _ask(8)
def question_nine():
    _ask(9)
def question_ten():
    _ask(10)
##Retrieves randomized questions
def get_question(num):
global used_question
global count
while count != 10:
if num == 1 and num not in used_question:
count = count + 1
question_one()
elif num == 2 and num not in used_question:
count = count + 1
question_two()
elif num == 3 and num not in used_question:
count = count + 1
question_three()
elif num == 4 and num not in used_question:
count = count + 1
question_four()
elif num == 5 and num not in used_question:
count = count + 1
question_five()
elif num == 6 and num not in used_question:
count = count + 1
question_six()
elif num == 7 and num not in used_question:
count = count + 1
question_seven()
elif num == 8 and num not in used_question:
count = count + 1
question_eight()
elif num == 9 and num not in used_question:
count = count + 1
question_nine()
elif num == 10 and num not in used_question:
count = count + 1
question_ten()
else:
num = random.randint(1,10)
def use_quiz():
    # Quiz entry point: question_ten() chains into further random questions.
    question_ten()
| tsega200/Driving-Tutor | Actual Program/Quiz_Redone.py | Quiz_Redone.py | py | 7,342 | python | en | code | 0 | github-code | 13 |
25867771712 | from django_filters import FilterSet, DateFilter
from django.forms import DateInput
from .models import Advert, AdvertReply
class AdvertsFilter(FilterSet):
    """Filter set for advert lists: title/content substring match, exact
    category and author, and a lower bound on the posting date."""
    datetime = DateFilter(field_name='datetime',
                          widget=DateInput(attrs={'type': 'date'}),
                          lookup_expr='gt',
                          label='Позже выбранной даты')

    class Meta:
        model = Advert
        fields = {
            'title': ['icontains'],
            'content': ['icontains'],
            'category': ['exact'],
            'author': ['exact']
        }
class RepliesFilter(FilterSet):
    """Filter set for advert replies: exact match on advert/author plus a
    'created after' date filter."""
    # Date picker widget; lookup_expr='gt' keeps replies strictly after the chosen date.
    datetime = DateFilter(field_name='datetime',
                          widget=DateInput(attrs={'type': 'date'}),
                          lookup_expr='gt',
                          label='Позже выбранной даты')
    class Meta:
        model = AdvertReply
        fields = {
            'advert': ['exact'],
            'author': ['exact']
        }
| egoranisimov/bboard | bboard/boardapp/filters.py | filters.py | py | 1,012 | python | en | code | 0 | github-code | 13 |
38073407298 | import sys
import traceback
def exc2string2():
    """Provide traceback when an exception has been raised.

    Returns one string: exception type, exception value and the formatted
    traceback frames joined with single spaces.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    frames = ' '.join(traceback.format_tb(exc_tb))
    return str(exc_type) + str(exc_value) + frames
| rushioda/PIXELVALID_athena | athena/Trigger/TriggerCommon/TriggerMenu/python/jet/exc2string.py | exc2string.py | py | 265 | python | en | code | 1 | github-code | 13 |
import pandas as pd
from itertools import islice
from collections import Counter

# Training corpus (bigram statistics) and evaluation corpus.
file_path = "./Harry Potter.txt"
test_file_path = "./originText.txt"
# Probability mass reserved for the Backspace key (0 disables typo modelling).
bksp_rate = 0
# When True, __main__ runs the accuracy evaluation over test_file_path.
evaluation_switch = True

# FIX: removed a stray list literal of trace codes that sat here as a dead
# expression statement (evaluated and immediately discarded).

# Falling-edge trace code -> set of keys that can produce that trace.
# NOTE(review): 'y' appears under both 21121121111 and 21212112111 — confirm
# which entry is correct; the duplicate makes falling_trace_gen ambiguous.
trace_dict = {
    21111111111: {'<non-US-1>'},
    21111111121: {'<Release key>'},
    21111111211: {'F11','KP','KP0','SL'},# scroll lock key pad
    21111112111: {'8','u'},
    21111121111: {'2','a'},
    21111121211: {'Caps_Lock'},
    21111211111: {'F4',"'"},
    21111211211: {'-',';','KP7'},
    21111212111: {'5','t'},
    21112111111: {'F12','F2','F3'},
    21112111121: {'Alt+SysRq'},
    21112111211: {'9','Bksp','Esc','KP6','NL','o'},#number lock
    21112112111: {'3','6','e','g'},
    21112121111: {'1','CTRL_L'},
    21112121211: {'['},
    21121111111: {'F5','F7'},
    21121111211: {'KP-','KP2','KP3','KP5','i','k'},
    21121112111: {'b','d','h','j','m','x'},
    21121121111: {'Shift','s','y'},
    21121121211: {'’',' ',']'},
    21121211111: {'F6','F8'},
    21121211211: {'/','KP4','l'},
    21121212111: {'f','v'},
    21211111111: {'F9'},
    21211111211: {',','KP+','KP.','KP9'},
    21211112111: {'7','c','n'},
    21211121111: {'Alt_L','w'},
    21211121211: {'SHIFT_R','\\'},
    21211211111: {'F10','Tab'},
    21211211211: {'.','KP1','p'},
    21211212111: {'Space','r'},
    21212111111: {'F1'},
    21212111211: {'0','KP8'},
    21212112111: {'4','y'},
    21212121111: {'q'},
    21212121211: {'='}}

# Keys excluded from the transition-matrix state space (function keys etc.).
removed_items ={'F1','F2','F3','F4','F5','F6','F7','F8','F9','F10','F11','F12','NL','Alt+SysRq','Tab',
                '<Release key>','<non-US-1>','Alt_L','SHIFT_R','CTRL_L','SL','KP+','KP.','KP-','’'}
# States that occur in text but have no entry in trace_dict.
not_in_table = {'z','\n'}
def get_key(dictionary, value):
    """Return, as a string, the first key whose value-set contains *value*.

    Raises IndexError when no key matches (same as the original behaviour).
    """
    matches = [key for key, members in dictionary.items() if value in members]
    return str(matches[0])
def split_every(n: int, iterable: str):
    """Yield pairs of n-character chunks taken in lockstep from *iterable* and
    from *iterable* shifted by one (so n=2 yields overlapping bigram pairs).

    Stops as soon as either stream runs out of a full (truthy) chunk.
    """
    main_stream = iter(iterable)
    shifted_stream = iter(iterable[1:])
    while True:
        chunk_a = ''.join(islice(main_stream, n))
        chunk_b = ''.join(islice(shifted_stream, n))
        if not (chunk_a and chunk_b):
            break
        yield chunk_a, chunk_b
def get_bigram_freq(text: str):
    """Return a Counter of how often each overlapping bigram occurs in *text*."""
    counts = Counter()
    for left, right in split_every(2, text):  # n=2 -> bigrams
        counts.update((left, right))
    return counts
def get_count_matrix(bigram_dict:dict,count_mat:dict):
    """Accumulate keyboard-transition counts from bigram frequencies.

    Maps each text bigram onto the physical key-press sequence it implies
    (lower/upper case via Caps_Lock, shifted punctuation via Shift, digits via
    both the number row and the 'KP*' keypad keys) and adds the bigram's
    frequency into a transition count matrix.

    :param bigram_dict: {bigram string: frequency}, e.g. from get_bigram_freq().
    :param count_mat: 2D dict template of zeros; converted to a DataFrame here.
    :return: pandas DataFrame of transition counts (rows = from-key, cols = to-key).

    Weighting convention (from the original author's note): transitions whose
    physical key is certain add 2 * freq; digits add 1 * freq to each of the
    two possible keys (number row and keypad).
    """
    # set that need shift
    dfcount_mat=pd.DataFrame(count_mat)
    # Punctuation reachable without Shift; shifted punctuation maps back to its
    # base key through shift_dict.
    no_shift_set = {',','.','/',';',"'",'[',']','\\',' ',"\n"}
    shift_set = {'!',"@",'#','$','%','^','&','*',"(",')','_','+','{','}','|',':','"','<','>','?'}
    # NOTE(review): '"' maps to ';' here, but on a US layout '"' is Shift+"'" —
    # confirm whether ';' is intentional for the target keyboard.
    shift_dict = {'!':'1',"@":'2','#':'3','$':'4','%':'5','^':'6','&':'7','*':'8',"(":'9',')':'0','_':'-','+':'=','{':'[','}':']','|':'\\',':':';','"':';','<':',','>':'.','?':'/'}
    #note if sure add 2, number and kp num add 1
    for i in bigram_dict:
        firstToken = i[0]
        secondToken = i[1]
        #sec1 checked — both tokens alphanumeric
        if firstToken.isalnum() and secondToken.isalnum():
            if firstToken.islower() and secondToken.islower() or firstToken.isupper() and secondToken.isupper():#ALAL alal
                dfcount_mat.loc[firstToken.lower(),secondToken.lower()] += 2 * bigram_dict[i]
            elif firstToken.islower() and secondToken.isupper() or firstToken.isupper() and secondToken.islower():#ALal alAL
                dfcount_mat.loc[firstToken.lower(),'Caps_Lock'] += 2 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken.lower()] += 2 * bigram_dict[i]
            elif (firstToken.isdigit() and secondToken.isdigit()):#numnum
                dfcount_mat.loc[('KP'+str(firstToken)),('KP'+str(secondToken))] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,secondToken] += 1 * bigram_dict[i]
            elif (firstToken.isdigit() and secondToken.islower()):#numal
                dfcount_mat.loc[('KP'+str(firstToken)),secondToken] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,secondToken] += 1 * bigram_dict[i]
            elif (firstToken.islower() and secondToken.isdigit()):#alnum
                dfcount_mat.loc[firstToken,('KP'+str(secondToken))] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,secondToken] += 1 * bigram_dict[i]
            elif (firstToken.isdigit() and secondToken.isupper()):#numAL problematic
                dfcount_mat.loc[firstToken,'Caps_Lock'] += 1 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken.lower()] += 1 * bigram_dict[i]
                dfcount_mat.loc[('KP'+str(firstToken)),'Caps_Lock'] += 1 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken.lower()] += 1 * bigram_dict[i]
            elif (firstToken.isupper() and secondToken.isdigit()):#ALnum problematic
                # NOTE(review): the firstToken->Caps_Lock cell is incremented
                # twice here (once per keypad/number-row alternative) — confirm
                # intended, since the numAL branch above splits it differently.
                dfcount_mat.loc[firstToken.lower(),'Caps_Lock'] += 1 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken.lower(),'Caps_Lock'] += 1 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',('KP'+str(secondToken))] += 1 * bigram_dict[i]
            # print("alnm alnm")
            # print(dfcount_mat)
        #sec2 checked — lowercase letter and/or unshifted punctuation: direct transition
        elif (firstToken.islower() and secondToken in no_shift_set) or (secondToken.islower() and firstToken in no_shift_set) or(secondToken in no_shift_set and firstToken in no_shift_set):
            #alpun punal punpun
            dfcount_mat.loc[firstToken,secondToken] += 2 * bigram_dict[i]
            # print("no change alpun punal punpun")
            # print(dfcount_mat)
        #sec3 checked — uppercase first token (Caps_Lock toggle) followed by punctuation
        elif firstToken.isupper():
            if secondToken in shift_set:#AL shpun
                dfcount_mat.loc[firstToken.lower(),'Caps_Lock'] += 2 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',"Shift"] += 2 * bigram_dict[i]
                dfcount_mat.loc["Shift",shift_dict[secondToken]] += 2 * bigram_dict[i]
            elif secondToken in no_shift_set:#AL pun
                dfcount_mat.loc[firstToken.lower(),'Caps_Lock'] += 2 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken] += 2 * bigram_dict[i]
        elif firstToken.islower() and (secondToken in shift_set):#al shpun
            dfcount_mat.loc[firstToken,"Shift"] += 2 * bigram_dict[i]
            dfcount_mat.loc["Shift",shift_dict[secondToken]] += 2 * bigram_dict[i]
            # print("AL shpun AL pun al shpun")
            # print(dfcount_mat)
        #sec4 checked — digit first token followed by punctuation
        elif firstToken.isdigit():
            if secondToken in shift_set:#num shpun
                dfcount_mat.loc[('KP'+str(firstToken)),"Shift"] += 1 * bigram_dict[i]
                dfcount_mat.loc["Shift",shift_dict[secondToken]] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,"Shift"] += 1 * bigram_dict[i]
                dfcount_mat.loc["Shift",shift_dict[secondToken]] += 1 * bigram_dict[i]
            elif secondToken in no_shift_set:#num pun
                dfcount_mat.loc[('KP'+str(firstToken)),secondToken] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,secondToken] += 1 * bigram_dict[i]
            # print("num shpun num pun")
            # print(dfcount_mat)
        #sec5 checked — shifted punctuation first token: map back to its base key
        elif firstToken in shift_set:# maybe assert "release??????"
            if secondToken.isupper():#shpun AL
                dfcount_mat.loc[shift_dict[firstToken],'Caps_Lock'] += 2 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken.lower()] += 2 * bigram_dict[i]
            elif secondToken.islower():#shpun al
                dfcount_mat.loc[shift_dict[firstToken],secondToken] += 2 * bigram_dict[i]
            elif secondToken.isdigit():#shpun num
                dfcount_mat.loc[shift_dict[firstToken],('KP'+str(secondToken))] += 1 * bigram_dict[i]
                dfcount_mat.loc[shift_dict[firstToken],secondToken] += 1 * bigram_dict[i]
            elif secondToken in shift_set:#shpun shpun
                dfcount_mat.loc[shift_dict[firstToken],shift_dict[secondToken]] += 2 * bigram_dict[i]
            elif secondToken in no_shift_set:#shpun pun
                dfcount_mat.loc[shift_dict[firstToken],secondToken] += 2 * bigram_dict[i]
            # print("shpun AL shpun al shpun num shpun shpun shpun pun")
            # print(dfcount_mat)
        #sec 6 checked — unshifted punctuation first token
        elif firstToken in no_shift_set:
            if secondToken.isupper():#pun AL
                dfcount_mat.loc[firstToken,'Caps_Lock'] += 2 * bigram_dict[i]
                dfcount_mat.loc['Caps_Lock',secondToken.lower()] += 2 * bigram_dict[i]
            elif secondToken.isdigit():#pun num
                dfcount_mat.loc[firstToken,('KP'+str(secondToken))] += 1 * bigram_dict[i]
                dfcount_mat.loc[firstToken,secondToken] += 1 * bigram_dict[i]
            elif secondToken in shift_set:#pun shpun
                dfcount_mat.loc[firstToken,"Shift"] += 2 * bigram_dict[i]
                dfcount_mat.loc["Shift",shift_dict[secondToken]] += 2 * bigram_dict[i]
            # print("pun AL pun num pun shpun")
            # print(dfcount_mat)
    return dfcount_mat
def get_trans_mat_and_obs(trace_dict: dict, not_in_table: set, removed_items: set):
    """Build the empty count/transition templates plus the state and observation lists.

    :param trace_dict: {trace code: set of key names} observation table.
    :param not_in_table: key names that occur in text but have no trace entry.
    :param removed_items: key names to exclude from the transition state space
                          (each must be present in the trace table, else KeyError).
    :return: (states list, observations list, empty transition 2D dict,
              empty count 2D dict)
    """
    observations = [observation for observation in trace_dict]
    # Count-matrix state space: every key named in the trace table, plus the
    # extra characters that have no trace entry.
    states_count = set()
    for key in trace_dict:
        states_count |= trace_dict[key]
    states_count |= not_in_table
    # BUG FIX: the original used dict.fromkeys(keys, inner_dict), which makes
    # every row share ONE inner dict.  Build an independent row per key so a
    # write to one row can never leak into the others.
    count_matrix = {state: dict.fromkeys(states_count, 0) for state in states_count}
    # Transition state space: trace-table keys minus the excluded ones.
    states_trans = set()
    for key in trace_dict:
        states_trans |= trace_dict[key]
    for key in removed_items:
        states_trans.remove(key)
    transition_empty_mat = {state: dict.fromkeys(states_trans, 0) for state in states_trans}
    states = list(states_trans)
    return states, observations, transition_empty_mat, count_matrix
def fill_trans_mat(count_mat, trans_mat: dict, bksp, states):
    """Turn raw transition counts into row-normalised transition probabilities.

    :param count_mat: pandas DataFrame of transition counts (rows/cols = keys).
                      Side effect: a 'Col_sum' column is added to it in place.
    :param trans_mat: empty {state: {state: 0}} template (get_trans_mat_and_obs).
    :param bksp: probability mass reserved for the Backspace key (typo rate, 0..1).
    :param states: hidden-state list; must include ' ' because the start
                   distribution is read from the Space-character row.
    :return: (start_probability dict, transition-probability DataFrame)
    """
    # Per-row totals, used as normalisation denominators below.
    count_mat['Col_sum'] = count_mat.apply(lambda x: x.sum(), axis=1)
    num_tran_state = len(trans_mat)
    dftrans_mat = pd.DataFrame(trans_mat)  # convert template to DataFrame
    rest_prob = 1 - bksp
    if bksp > 1 or bksp < 0:
        raise RuntimeError("Probability should be between 1 and 0")
    else:
        print("bksp richtig")
    for i, row in dftrans_mat.iterrows():
        # FIX: Series.iteritems() was removed in pandas 2.0 — use items().
        for j, _value in row.items():
            if count_mat.loc[i, 'Col_sum'] != 0:
                dftrans_mat.loc[i, j] = count_mat.loc[i, j] / count_mat.loc[i, 'Col_sum'] * rest_prob
            else:
                dftrans_mat.loc[i, j] = 0
            # The 'Space' row/column mirrors the plain ' ' entries of count_mat.
            if i == 'Space':
                dftrans_mat.loc['Space', j] = count_mat.loc[' ', j] / count_mat.loc[' ', 'Col_sum'] * rest_prob
            elif j == 'Space':
                dftrans_mat.loc[i, 'Space'] = count_mat.loc[i, ' '] / count_mat.loc[i, 'Col_sum'] * rest_prob
    # Reserve a slice of probability for transitions via the Backspace key.
    for l in dftrans_mat:
        dftrans_mat.loc['Bksp', l] = bksp / num_tran_state
        # NOTE(review): bksp*100 exceeds 1 for any bksp > 0.01 — confirm intended.
        dftrans_mat.loc[l, 'Bksp'] = bksp * 100
    dftrans_mat.fillna(0, inplace=True)
    # Start distribution = the transition probabilities out of the Space key.
    # (Removed a dead `set(states)` expression and a redundant dict() rebind.)
    start_probability = dict.fromkeys(states, 0)
    for i in start_probability:
        start_probability[i] = dftrans_mat.loc[' ', i]
    return start_probability, dftrans_mat
def get_ave_start_prob(states: list):
    """Return a uniform start distribution over *states*.

    fill_trans_mat() derives a better (corpus-based) start distribution; this
    is the fallback used when that one is unavailable.
    """
    uniform = 1 / len(states)
    return {state: uniform for state in states}
def get_emission_prob(states, observation, trace_dict):
    """Build the deterministic emission matrix.

    Rows are key states, columns are falling-edge trace codes; a cell is 1
    exactly when the state's key produces that trace code (per trace_dict).
    """
    zero_column = dict.fromkeys(states, 0)
    dfem_mat = pd.DataFrame(dict.fromkeys(observation, zero_column))
    for state in states:
        trace_code = int(get_key(trace_dict, state))
        # NOTE: chained indexing (column then row), as in the original — the
        # assignment is relied upon to propagate back into the frame.
        dfem_mat[trace_code][state] = 1
    return dfem_mat
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Standard Viterbi decoding of the most likely hidden-state sequence.

    Adapted from the Wikipedia example implementation
    (https://en.wikipedia.org/wiki/Viterbi_algorithm).

    :param obs: sequence of observation codes.
    :param states: sequence of hidden-state names.
    :param start_p: start_p[state] -> prior probability of starting in state.
    :param trans_p: trans_p[to_state][from_state] -> transition probability
                    (works for both dict-of-dicts and a DataFrame, where
                    trans_p[col][row] selects column `to_state`, row `from_state`).
    :param emit_p: emit_p[observation][state] -> emission probability.
    :return: list of state names, one per observation.
    """
    # V[t][state] = {"prob": best path probability ending in state at time t,
    #                "prev": predecessor state on that best path}
    V = [{}]
    for st in states:
        V[0][st] = {"prob": start_p[st] * emit_p[obs[0]][st], "prev": None}
    # Run Viterbi when t > 0
    for t in range(1, len(obs)):
        V.append({})
        for st in states:
            # Seed the max with the first state, then scan the rest.
            max_tr_prob = V[t - 1][states[0]]["prob"] * trans_p[st][states[0]]
            prev_st_selected = states[0]
            for prev_st in states[1:]:
                tr_prob = V[t - 1][prev_st]["prob"] * trans_p[st][prev_st]
                if tr_prob > max_tr_prob:
                    max_tr_prob = tr_prob
                    prev_st_selected = prev_st
            max_prob = max_tr_prob * emit_p[obs[t]][st]
            V[t][st] = {"prob": max_prob, "prev": prev_st_selected}
    # for line in dptable(V):
    #     print(line)
    opt = []
    max_prob = 0.0
    best_st = None
    # Get most probable state and its backtrack
    # NOTE(review): if every final probability is 0 (numeric underflow on long
    # sequences), best_st stays None and the join below raises — confirm inputs
    # are short enough, or consider log probabilities.
    for st, data in V[-1].items():
        if data["prob"] > max_prob:
            max_prob = data["prob"]
            best_st = st
    opt.append(best_st)
    previous = best_st
    # Follow the backtrack till the first observation
    for t in range(len(V) - 2, -1, -1):
        opt.insert(0, V[t + 1][previous]["prev"])
        previous = V[t + 1][previous]["prev"]
    print ("The inference hidden states are:\n" + " ".join(opt))
    return list(opt)
def dptable(V):
    """Yield the rows of a printable dynamic-programming table for Viterbi.

    Adapted from the Wikipedia example implementation
    (https://en.wikipedia.org/wiki/Viterbi_algorithm).
    """
    # Header row: one 12-wide column per time step.
    yield " ".join(("%12d" % step) for step in range(len(V)))
    # One row per state, probabilities truncated to 7 characters.
    for state in V[0]:
        cells = " ".join("%.7s" % ("%f" % entry[state]["prob"]) for entry in V)
        yield "%.7s: " % state + cells
def falling_trace_gen(string: str):
    """Generate the falling-edge trace-code list for *string* (for evaluation).

    Newlines are dropped and ' ' is mapped to the 'Space' key before looking
    each character up in the module-level trace_dict.

    :param string: plain text to encode.
    :return: list of trace codes, one per encodable character.
    """
    resultlist = []
    for c in string.lower():
        if c != "\n":
            if c == ' ':
                tmp = 'Space'
            else:
                tmp = c
            for key, values in trace_dict.items():
                if tmp in values:
                    resultlist.append(key)
                    # BUG FIX: was `continue`, which kept scanning and appended
                    # one code per matching trace set — characters listed under
                    # two codes (e.g. 'y') produced duplicate entries, making
                    # the trace longer than the keystroke list.  Stop at the
                    # first match instead.
                    break
    return resultlist
def keystroke_trace_gen(string: str):
    """Translate text into the expected keystroke names (for evaluation).

    Characters are lowercased, newlines are dropped, and ' ' becomes 'Space'.
    """
    keys = []
    for ch in string.lower():
        if ch == "\n":
            continue
        keys.append('Space' if ch == ' ' else ch)
    return keys
def compare_list(list1: list, list2: list):
    """Count position-wise matches between the reference and inferred keystrokes.

    'Shift' and 'Caps_Lock' entries are stripped from the inference first,
    since the reference trace never contains them.

    :param list1: reference keystroke list (from keystroke_trace_gen).
    :param list2: inferred keystroke list (from viterbi).
    :return: (matching-position count, len(list1)); the count stays 0 when the
             two lists end up with different lengths.
    """
    # BUG FIX: the original removed items from list2 while iterating over it,
    # which skips the element right after each removal, so consecutive 'Shift'
    # (or 'Caps_Lock') entries survived and forced a length mismatch.  Filter
    # into a fresh list instead (list2 is no longer mutated in place).
    filtered = [key for key in list2 if key not in ("Shift", "Caps_Lock")]
    same_count = 0
    if len(list1) == len(filtered):
        for i, item in enumerate(list1):
            if item == filtered[i]:
                same_count += 1
    return same_count, len(list1)
if __name__ == '__main__':
    # Build the empty matrices and the state/observation spaces.
    states, observations, transition_empty, count_matrix = get_trans_mat_and_obs(trace_dict, not_in_table, removed_items)
    # Read the training corpus; force an odd length so bigram pairing lines up.
    with open(file_path,'r', encoding='UTF-8') as handle:
        f = handle.read()
        ungrade_str = f if len(f)%2 == 1 else f[:-1]
        freqs = get_bigram_freq(ungrade_str)# if handle.read()%2 ==0 else handle.read()[:-1])
    #get count matrix
    count_matrix = get_count_matrix(freqs,count_matrix)
    # print(count_matrix)
    #fill transiton matrix based on count matrix & bksp prob
    start_probability, transition_probability = fill_trans_mat(count_matrix,transition_empty,bksp_rate,states)
    print()
    ave_start_probability = get_ave_start_prob(states)
    emission_probability = get_emission_prob(states,observations,trace_dict)
    states = tuple(states)
    observations = tuple(observations)
    ############## * * * make inference here * * * ################################################
    # Hard-coded demo traces, decoded with the corpus-based start distribution.
    obs = [21121121111,21121112111,21111121111,21121111211,21112112111,21121121111,21211211211,21112112111,21111121111,21211212111,21112112111]
    #shakspeare
    viterbi(obs,
            states,
            start_probability,
            transition_probability,
            emission_probability)
    obs = [21121121111, 21121112111, 21111121111, 21121111211, 21112112111, 21121121111, 21211211211, 21112112111, 21111121111, 21211212111, 21112112111, 21211212111, 21211121111, 21111121111, 21121121111, 21211212111, 21121212111, 21112112111, 21211212111, 21121121111, 21212112111, 21211212111, 21121111211, 21211112111, 21121121111, 21112112111, 21211112111, 21111112111, 21211212111, 21112112111]
    viterbi(obs,
            states,
            start_probability,
            transition_probability,
            emission_probability)
    # Two more demos using the uniform (average) start distribution.
    obs =[21211211211, 21111121111, 21121121111, 21121121111, 21211121111, 21112111211, 21211212111, 21121112111]
    viterbi(obs,
            states,
            ave_start_probability,
            transition_probability,
            emission_probability)
    obs = [21121112111, 21112112111, 21121211211, 21121211211, 21112111211]
    viterbi(obs,
            states,
            ave_start_probability,
            transition_probability,
            emission_probability)
    ####################################################### evaluation ##########################################################################################
    if evaluation_switch == True:
        accuracy = {}
        # Measure inference accuracy for several snippet lengths.
        for length in [3,5,10,15,20,30,40,50,70,90]:
            with open(test_file_path,'r', encoding='UTF-8') as handle:
                f = handle.read(130)
                sentence_list = []
                # Split off the (possibly partial) first word, keep `length` chars.
                a = f.split(' ',1)
                sentence_list.append(a[1][0:length])
                while len(f)!= 0:
                    f = handle.read(120)
                    a = f.split(' ',1)
                    try:
                        sentence_list.append(a[1][0:length])
                    except:
                        # NOTE(review): bare except silently swallows chunks with
                        # no space (IndexError) — confirm intended.
                        print("\n")
            correct_count_sum = 0
            count_sum = 0
            i = 0
            for sent in sentence_list:
                obs = falling_trace_gen(sent)
                comp_list = keystroke_trace_gen(sent)
                try :
                    inference_list = viterbi(obs,states,ave_start_probability,transition_probability,emission_probability)
                    a , b = compare_list(comp_list, inference_list)
                    print(comp_list)
                    print(inference_list)
                    if a != 0:
                        i +=1
                        correct_count_sum += a
                        count_sum += b
                        print(f"corret inferenz: {a} total length: {b} accuracy:{a/b}")
                except:
                    # NOTE(review): bare except also hides real Viterbi failures.
                    print("")
                # Stop after 20 successfully scored sentences per length.
                if i ==20:
                    break
            # NOTE(review): raises ZeroDivisionError if no sentence scored (count_sum == 0).
            print(f"corret inferenz: {correct_count_sum} total length: {count_sum} accuracy:{correct_count_sum/count_sum}")
            accuracy[length] = {'inferenz': correct_count_sum,'length':count_sum,'accuracy':correct_count_sum/count_sum}
        for i in accuracy:
            print(f"input length {i}:{accuracy[i]}")
| Klareliebe7/EMEmanationSEEMOO | hmm.py | hmm.py | py | 26,198 | python | en | code | 0 | github-code | 13 |
def unique_in_order(iterable):
    """Return the elements of *iterable* with consecutive duplicates collapsed.

    Works for strings, sequences and arbitrary iterables; the order of first
    appearances is preserved.

    >>> unique_in_order('AAAABBBCCDAABBB')
    ['A', 'B', 'C', 'D', 'A', 'B']
    """
    result = []
    # BUG FIX: the original primed `prev` with None, so a leading None element
    # compared equal to the sentinel and was silently dropped.  A private
    # object() cannot collide with any real element.  (Also removed the
    # pointless `iterable[0:]` copy, which additionally lets iterators work.)
    prev = object()
    for item in iterable:
        if item != prev:
            result.append(item)
        prev = item
    return result
def main():
    """Demo: print the collapsed-duplicates version of a sample string."""
    print(unique_in_order('AAAABBBCCDAABBB'))


if __name__ == "__main__":
    main()
# ---------------------------------------------------------------------------- #
# Title: Assignment 7
# Description: Description of a pickle
# ChangeLog (Who,When,What):
# MCLARK, 02.29.2021, created script
# ---------------------------------------------------------------------------- #
import pickle

# Static data: salmon and bear weights.  Only dicSalmon is pickled below;
# dicBear is kept for reference.
dicSalmon = {"King": "30", "Chinook": "67", "Coho": "18", "Pink": "8.2", "Chum": "19", "Sockeye": "9.3"}
dicBear = {"Brown Bear": "250", "Black Bear": "150", "Polar Bear": "500"}

# Pickle the dictionary into binary format.
# FIX: use context managers so the file handles are closed even if an error
# occurs mid-write/read (the original relied on explicit close() calls).
with open("pickledWeights.txt", "wb") as file_obj:
    pickle.dump(dicSalmon, file_obj)

# Unpickle the dictionary from binary back into a Python object.
with open("pickledWeights.txt", "rb") as file_obj:
    dicSalmonP = pickle.load(file_obj)

# Print the round-tripped dictionary.
print(dicSalmonP)
def flames(name1, name2):
    """Play the FLAMES game: strike common letters, then eliminate outcomes.

    Both names are lists of characters; common letters are removed pairwise,
    and the leftover-letter count repeatedly eliminates entries from the
    outcome list until one remains, which is printed.
    """
    outcomes = ['Just "Friends"', 'Uh-huh, "LOVE!"', 'Hmm, "Affection."',
                'Congrats, "Marriage.."', 'Whooops.."Enemy!"', 'Huh.."Sister."']
    total_letters = len(name1) + len(name2)
    matched = 0
    # Strike each shared letter once from name2 and count the pairs.
    for letter in name1:
        if letter in name2:
            name2.remove(letter)
            matched += 1
    step = total_letters - 2 * matched
    # Circular elimination: drop one outcome per round until one survives.
    while len(outcomes) > 1:
        del outcomes[step % len(outcomes) - 1]
    print('\n', *outcomes, '\n')
if __name__ == "__main__":
print("<----- FLAMES ----->\n")
name1=list(input("Unga name sollunga.. --> ").lower().strip())
name2=list(input("Avanga name enna.. --> ").lower().strip())
flames(name1,name2)
| PrinceofChum/100-Days-Of-Code | Day 22/flames.py | flames.py | py | 680 | python | en | code | 6 | github-code | 13 |
import matplotlib.pyplot as plt
import datetime
import numpy as np  # NOTE(review): numpy appears unused in this script — confirm.
import re

# Log file produced by the BC300 battery logger.
LOG_FILE_NAME = '20231002_13.04.14.log'

# Parses one "Battery Status" log line, e.g.
#   2023-10-02 13:04:14,123 [ INFO] a:b:c:d:e:f: BC300 A1234567890 - Battery
#   Status: Voltage: ..., Current: ..., Charge: ..., Shunt Temperature: ...,
#   Battery Temperature: ..., Uptime: ...
# Named groups: timestamp, serial, voltage, current, charge, shunt_temp,
# battery_temp, uptime.
batt_status_re = re.compile(
    r'(?P<timestamp>\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2},\d{3})\s'
    r'\[\s+INFO\]\s(?:\w+:){6}\sBC300\s(?P<serial>A\d{10})\s-\sBattery\sStatus:'
    r'\sVoltage:\s(?P<voltage>\d+),\sCurrent:\s(?P<current>[-]?\d+),\sCharge:\s(?P<charge>[-]?\d+),'
    r'\sShunt\sTemperature:\s(?P<shunt_temp>[-]?\d+),\sBattery\sTemperature:\s(?P<battery_temp>[-]?\d+),'
    r'\sUptime:\s(?P<uptime>\d+)')
if __name__ == '__main__':
    # Per-device time/current series extracted from the log.
    dev1_timestamps = []
    dev1_currents = []
    dev2_timestamps = []
    dev2_currents = []
    with open(LOG_FILE_NAME, 'r') as logfile:
        for line in logfile:
            m = batt_status_re.match(line)
            if m:
                # NOTE(review): the timestamp uses ',' before the milliseconds;
                # datetime.fromisoformat only accepts that separator on
                # Python 3.11+ — confirm the runtime version.
                ts = datetime.datetime.fromisoformat(m['timestamp'])
                crnt = int(m['current'])
                # Hard-coded serial distinguishes the two test devices.
                if m['serial'] == 'A2232370046':
                    print(f'timestamp => {ts} - {crnt}')
                    dev1_timestamps.append(ts)
                    dev1_currents.append(crnt)
                else:
                    dev2_timestamps.append(ts)
                    dev2_currents.append(crnt)
    # Plot both current traces on one chart for visual comparison.
    plt.plot(dev1_timestamps, dev1_currents, label='Device 1 (naked PCB)')
    plt.plot(dev2_timestamps, dev2_currents, label='Device 2 (in Enclosure)')
    plt.legend(loc="upper right")
    plt.show()
| rene-becker-setec/BC300_WiringTest | analyze.py | analyze.py | py | 1,432 | python | en | code | 0 | github-code | 13 |
30172046093 | #!/usr/bin/env python
import barobo
from barobo import Linkbot, Dongle
import time
import sys
if __name__ == "__main__":
if len(sys.argv) < 2:
print ("Usage: {0} <Com_Port> [Linkbot Serial ID]".format(sys.argv[0]))
quit()
if len(sys.argv) == 3:
serialID = sys.argv[2]
else:
serialID = None
dongle = Dongle()
dongle.connectDongleSFP(sys.argv[1])
linkbot = dongle.getLinkbot(serialID)
linkbot.setMotorPowers(255, 255, 255)
while True:
results = linkbot.getJointAnglesTime()
print(results)
time.sleep(0.2)
| davidko/PyBarobo | demo/with_BaroboCtx_sfp/checkEncoders.py | checkEncoders.py | py | 594 | python | en | code | 0 | github-code | 13 |
70875592979 | # -*- coding: utf-8 -*-
"""
@author: Gabriel Maccari
"""
import pandas
import docx
from datetime import datetime
# These are the columns the field-notebook (caderneta) spreadsheet is expected
# to contain (structure columns excluded — their names vary per structure).
# For each column: expected pandas dtype, whether nulls are allowed, and an
# optional controlled-value domain (None = free values).
COLUNAS_TABELA_CADERNETA = {
    "Ponto": {
        "dtype": "object", "nulo_ok": False, "dominio": None
    },
    "Disciplina": {
        "dtype": "object", "nulo_ok": False,
        "dominio": ["Mapeamento Geológico I", "Mapeamento Geológico II"]
    },
    "SRC": {
        "dtype": "object", "nulo_ok": False, "dominio": None
    },
    "Easting": {
        "dtype": "float64", "nulo_ok": False, "dominio": None
    },
    "Northing": {
        "dtype": "float64", "nulo_ok": False, "dominio": None
    },
    "Altitude": {
        "dtype": "float64", "nulo_ok": True, "dominio": None
    },
    "Toponimia": {
        "dtype": "object", "nulo_ok": True, "dominio": None
    },
    "Data": {
        "dtype": "datetime64[ns]", "nulo_ok": False, "dominio": None
    },
    "Equipe": {
        "dtype": "object", "nulo_ok": False, "dominio": None
    },
    "Ponto_de_controle": {
        "dtype": "object", "nulo_ok": False, "dominio": ["Sim", "Não"]
    },
    "Numero_de_amostras": {
        "dtype": "int64", "nulo_ok": False, "dominio": None
    },
    "Possui_croquis": {
        "dtype": "object", "nulo_ok": False, "dominio": ["Sim", "Não"]
    },
    "Possui_fotos": {
        "dtype": "object", "nulo_ok": False, "dominio": ["Sim", "Não"]
    },
    "Tipo_de_afloramento": {
        "dtype": "object", "nulo_ok": True, "dominio": None
    },
    "In_situ": {
        "dtype": "object", "nulo_ok": True, "dominio": ["Sim", "Não"]
    },
    "Grau_de_intemperismo": {
        "dtype": "object", "nulo_ok": True, "dominio": ["Baixo", "Médio", "Alto"]
    },
    "Unidade": {
        "dtype": "object", "nulo_ok": True, "dominio": None
    },
    "Unidade_litoestratigrafica": {
        "dtype": "object", "nulo_ok": True, "dominio": None
    }
}
class ControladorPrincipal:
    def __init__(self, caminho_template: str, df: pandas.DataFrame = None):
        """Hold the app state: the .docx style template, the spreadsheet data
        and the generated notebook document.

        :param caminho_template: Path to the .docx file that defines the styles.
        :param df: Optional DataFrame with the notebook table (set later via abrir_tabela).
        """
        self.caminho_template = caminho_template
        self.template = docx.Document(caminho_template)
        self.df = df
        self.caderneta = None  # generated document; built elsewhere
        # Paragraph and table styles defined in the style template document.
        self.estilos = {
            "normal": self.template.styles['Normal'],
            "titulo": self.template.styles['Title'],
            "titulo1": self.template.styles['Heading 1'],
            "titulo2": self.template.styles['Heading 2'],
            "subtitulo": self.template.styles['Subtitle'],
            "titulo_informacao": self.template.styles['Título de informação'],
            "texto_informacao": self.template.styles['Texto de informação'],
            "legenda": self.template.styles['Caption'],
            "tabela_esquerda": self.template.styles['Tabela - Coluna esquerda'],
            "tabela_direita": self.template.styles['Tabela - Coluna direita'],
            "tabela_cabecalho": self.template.styles['Tabela de cabeçalho'],
        }
    def recarregar_template(self):
        """Reload the .docx style template from disk, discarding the in-memory copy."""
        # Drop the old document first so it can be reclaimed before reloading.
        self.template = None
        self.template = docx.Document(self.caminho_template)
    def abrir_tabela(self, caminho: str) -> object:
        """Open an Excel table and store the DataFrame in the controller's "df" attribute.

        :param caminho: Path to an .xlsx or .xlsm file (first sheet is read).
        :return: (bool: DataFrame created successfully, int: number of data rows)
        :raises Exception: when the table is empty or contains only headers.
        """
        # Read the first sheet of the workbook into a DataFrame.
        df = pandas.read_excel(caminho, engine='openpyxl')
        # Normalise column names to strings.
        df.columns = df.columns.astype(str)
        # Drop unnamed columns (pandas labels them 'Unnamed: N').
        colunas_remocao = [col for col in df.columns if 'Unnamed' in col]
        df.drop(colunas_remocao, axis='columns', inplace=True)
        # Drop rows that are entirely empty.
        df.dropna(how='all', axis='index', inplace=True)
        # Make sure the file has at least one filled row.
        linhas = len(df.index)
        if linhas <= 0:
            raise Exception('A tabela selecionada está vazia ou contém apenas cabeçalhos.')
        # Store the DataFrame and reset the generated notebook.
        if isinstance(df, pandas.DataFrame):
            self.df = df
            self.caderneta = None
            return True, linhas
        else:
            return False, linhas
    def checar_colunas(self) -> list[str]:
        """Check that every expected column exists, converts to the right dtype,
        allows nulls only where permitted, and contains only domain values.

        The DataFrame is taken from the controller's "df" attribute; columns
        that convert successfully are cast in place as a side effect.

        :return: One status string per expected column, in COLUNAS_TABELA_CADERNETA
                 order: "ok", "missing_column", "nan_not_allowed", "wrong_dtype"
                 or "outside_domain".
        """
        df = self.df
        colunas_df = df.columns.to_list()
        status_colunas = []
        for c in COLUNAS_TABELA_CADERNETA:
            dtype = COLUNAS_TABELA_CADERNETA[c]["dtype"]
            nulo_ok = COLUNAS_TABELA_CADERNETA[c]["nulo_ok"]
            dominio = COLUNAS_TABELA_CADERNETA[c]["dominio"]
            # Does the column exist in the table?
            if c not in colunas_df:
                status_colunas.append("missing_column")
                continue
            # Are there nulls in a column that forbids them?
            if not nulo_ok and df[c].isnull().values.any():
                status_colunas.append("nan_not_allowed")
                continue
            # Try to cast the column to the expected dtype (in place on success).
            try:
                df[c] = df[c].astype(dtype, errors="raise")
            except ValueError:
                status_colunas.append("wrong_dtype")
                continue
            # For controlled columns, verify every value is inside the domain.
            if dominio is not None:
                valores_coluna = df[c]
                if nulo_ok:
                    # NOTE(review): inplace dropna on df[c] mutates the column
                    # Series and may emit SettingWithCopyWarning — confirm the
                    # intent is only to skip nulls in the domain check.
                    valores_coluna.dropna(inplace=True)
                if not valores_coluna.isin(dominio).all():
                    status_colunas.append("outside_domain")
                    continue
            status_colunas.append("ok")
        return status_colunas
def localizar_problemas_formato(self, coluna: str) -> list[int]:
"""Localiza as linhas da tabela com problemas que impedem a conversão para o tipo de dado esperado.
:param coluna: O nome da coluna a ser verificada.
:return: Lista contendo os indexes das linhas com problema.
"""
valores = self.df[coluna].dropna()
tipo_alvo = COLUNAS_TABELA_CADERNETA[coluna]["dtype"]
funcoes_conversao = {
"datetime64[ns]": pandas.to_datetime(valores, errors="coerce", format="%d/%m/%Y").isna(),
"float64": pandas.to_numeric(valores, errors="coerce", downcast="float").isna(),
"int64": pandas.to_numeric(valores, errors="coerce", downcast="integer").isna()
}
if tipo_alvo not in funcoes_conversao:
raise Exception(f"Checagem não implementada para o tipo de dado ({tipo_alvo}).")
# Valores que não podem ser convertidos tornam-se NaN devido ao "coerce"
convertido = funcoes_conversao[tipo_alvo]
indices_problemas = [i for i, is_nan in zip(convertido.index, convertido.values) if is_nan]
return indices_problemas
def localizar_celulas_vazias(self, coluna: str) -> list[int]:
"""Localiza as linhas da coluna especificada que contêm valores nulos.
:param coluna: O nome da coluna a ser verificada.
:return: Lista contendo os indexes das linhas com problema.
"""
valores_coluna = self.df.loc[:, coluna]
indices_problemas = self.df[valores_coluna.isnull()].index.tolist()
return indices_problemas
def localizar_problemas_dominio(self, coluna: str) -> list[int]:
"""Localiza células em uma coluna com valores fora de domínio.
:param coluna: O nome da coluna a ser verificada.
:return: Lista contendo os indexes das linhas com problema.
"""
valores_coluna = self.df.loc[:, coluna]
dominio = COLUNAS_TABELA_CADERNETA[coluna]["dominio"]
indices_problemas = valores_coluna.index[~valores_coluna.isin(dominio)].tolist()
return indices_problemas
def montar_msg_problemas(self, tipo_problema: str, coluna: str, indices: list[int]) -> str:
"""Monta a mensagem especificando quais linhas da tabela estão com problemas.
:param tipo_problema: "missing_column", "wrong_dtype", "nan_not_allowed" ou "outside_domain"
:param coluna: O nome da coluna.
:param indices: Os índices das linhas com problemas no DataFrame.
:return: String descrevendo o problema e as linhas que devem ser corrigidas.
"""
dtype_coluna = str(COLUNAS_TABELA_CADERNETA[coluna]["dtype"])
tipos_problemas = {
"missing_column": (
f"A coluna \"{coluna}\" não foi encontrada na tabela. "
f"Verifique se ela foi excluída ou se você selecionou a tabela errada. "
f"Restaure a coluna ou tente novamente com a tabela correta."
),
"wrong_dtype": (
f"A coluna \"{coluna}\" possui dados fora do formato aceito ({dtype_coluna}) "
f"nas linhas especificadas abaixo. Corrija-os e tente novamente.\n"
),
"nan_not_allowed": (
f"Existem células vazias nas seguintes linhas da coluna \"{coluna}\". "
f"Preencha apropriadamente as células em questão e tente novamente.\n"
),
"outside_domain": (
f"A coluna \"{coluna}\" possui valores fora da lista de valores permitidos "
f"nas seguintes linhas. Corrija-os e tente novamente.\n"
)
}
mensagem = [tipos_problemas.get(tipo_problema)]
for i in indices:
linha = i + 2
ponto = self.df.loc[i, ["Ponto"]].values[0]
mensagem.append(f"Linha {linha} (ponto {ponto})")
return "\n".join(mensagem)
def gerar_caderneta(self, montar_folha_de_rosto: bool = True):
"""Gera a caderneta pré-preenchida.
:param montar_folha_de_rosto: Opção para gerar ou não uma folha de rosto.
:return: Nada.
"""
# Limpa todos os objetos da classe docx.Document para evitar bugs comuns
self.recarregar_template()
self.caderneta = None
documento = None
documento = self.template
df = self.df
colunas_tabela = df.columns.to_list()
# Na tabela da caderneta, as colunas 19-33 são potenciais colunas de medidas estruturais
colunas_estrutura = (colunas_tabela[18:] if len(colunas_tabela) < 33
else colunas_tabela[18:33])
try:
df['Data'] = df['Data'].dt.strftime('%d/%m/%Y')
except:
pass
df["Possui_croquis"] = df["Possui_croquis"].map({"Sim": True, "Não": False})
df["Possui_fotos"] = df["Possui_fotos"].map({"Sim": True, "Não": False})
# Deleta o primeiro parágrafo do template (aviso para não excluir o arquivo)
paragraph = documento.paragraphs[0]
p = paragraph._element
p.getparent().remove(p)
paragraph._p = paragraph._element = None
# Monta a folha de rosto da caderneta
if montar_folha_de_rosto:
documento = self.montar_folha_rosto(documento)
d = 1 # Número sequencial do semestre/disciplina. Ex: Map1 = 1
disciplinas = COLUNAS_TABELA_CADERNETA["Disciplina"]["dominio"]
for linha in df.itertuples():
# Adiciona uma página de título antes do primeiro ponto de cada semestre/disciplina
if d <= 2 and linha.Disciplina == disciplinas[d-1]:
documento = self.montar_pagina_semestre(documento, linha.Disciplina)
d += 1
# Quebra a página antes do título do ponto
documento.paragraphs[-1].add_run().add_break(docx.enum.text.WD_BREAK.PAGE)
# Adiciona a página do ponto
documento = self.montar_pagina_ponto(documento, linha, colunas_estrutura)
self.caderneta = documento
def montar_folha_rosto(self, documento: docx.Document) -> docx.Document:
"""Adiciona uma folha de rosto à caderneta.
:param documento: O documento.
:return: O documento com a folha de rosto.
"""
for i in range(0, 15):
if i == 10:
documento.add_paragraph(text='CADERNETA DE CAMPO COMPILADA',
style=self.estilos["titulo"])
elif i == 13:
documento.add_paragraph(text='MAPEAMENTO GEOLÓGICO UFSC',
style=self.estilos["titulo_informacao"])
else:
documento.add_paragraph(text='', style=self.estilos['normal'])
lista_infos = ['PROJETO:', 'ANO:', 'PROFESSORES RESPONSÁVEIS:',
'NÚMERO DA ÁREA/FAIXA:', 'INTEGRANTES DO GRUPO:']
for info in lista_infos:
documento.add_paragraph(text=info, style=self.estilos["titulo_informacao"])
documento.add_paragraph(text='<PREENCHA AQUI>', style=self.estilos["texto_informacao"])
return documento
def montar_pagina_semestre(self, documento: docx.Document, disciplina: str) -> docx.Document:
"""Adiciona uma página de título à caderneta para dividir os semestres do mapeamento geológico.
:param documento: O documento.
:param disciplina: "Mapeamento Geológico I" ou "Mapeamento Geológico II".
:return: O documento com a página de título do semestre.
"""
try: # Quando não há folha de rosto, o documento está inicialmente vazio, e isso causa um IndexError
documento.paragraphs[-1].add_run().add_break(docx.enum.text.WD_BREAK.PAGE)
except IndexError:
pass
for i in range(0, 18):
documento.add_paragraph(text='', style=self.estilos["normal"])
documento.add_heading(text=disciplina, level=1)
return documento
    def montar_pagina_ponto(self, documento: docx.Document, linha: tuple,
                            colunas_estrutura: list[str]) -> docx.Document:
        """Append one outcrop point's information page to the notebook.

        :param documento: The document being built.
        :param linha: Row of the DataFrame as the namedtuple yielded by
            DataFrame.itertuples() (annotated as ``tuple``; the previous
            annotation ``pandas.core.frame.pandas`` was not a real type).
        :param colunas_estrutura: Names of the structural-measurement columns
            present in the table.
        :return: The document with the point's page appended.
        """
        # Column values for this row
        ponto = linha.Ponto
        src = linha.SRC
        easting = linha.Easting
        northing = linha.Northing
        altitude = linha.Altitude
        toponimia = linha.Toponimia
        data = linha.Data
        equipe = linha.Equipe
        ponto_controle = linha.Ponto_de_controle
        num_amostras = linha.Numero_de_amostras
        possui_croquis = linha.Possui_croquis
        possui_fotos = linha.Possui_fotos
        tipo_afloramento = linha.Tipo_de_afloramento
        in_situ = linha.In_situ
        intemperismo = linha.Grau_de_intemperismo
        unidade = linha.Unidade
        unidade_lito = linha.Unidade_litoestratigrafica
        # Point title
        documento.add_heading(text=ponto, level=2)
        # Mapping of the information that goes into the header table;
        # optional fields fall back to "-" when the cell is NaN
        dados_tabela = {
            'DATA:': f"{data}",
            'COORDENADAS:': f"{easting:.0f}E {northing:.0f}N {src}",
            'ALTITUDE:': f"{altitude:.0f} m" if not pandas.isna(altitude) else "-",
            'TOPONÍMIA:': f"{toponimia}" if not pandas.isna(toponimia) else "-",
            'EQUIPE:': f"{equipe}",
            'PONTO DE CONTROLE:': f"{ponto_controle}",
            'TIPO DE AFLORAMENTO:': f"{tipo_afloramento}" if not pandas.isna(tipo_afloramento) else "-",
            'IN SITU:': f"{in_situ}" if not pandas.isna(in_situ) else "-",
            'GRAU DE INTEMPERISMO:': f"{intemperismo}" if not pandas.isna(intemperismo) else "-",
            'AMOSTRAS:': f"{num_amostras}" if num_amostras > 0 else "-",
            'UNIDADE:': f"{unidade} - {unidade_lito}" if not pandas.isna(unidade) else "-"
        }
        # Fill the header table (left column: label, right column: value)
        table = documento.add_table(rows=0, cols=2)
        table.style = self.estilos["tabela_cabecalho"]
        for key in dados_tabela.keys():
            lin = table.add_row().cells
            # Left column
            lin[0].text = key
            lin[0].paragraphs[0].style = self.estilos["tabela_esquerda"]
            # Right column
            lin[1].text = dados_tabela[key]
            lin[1].paragraphs[0].style = self.estilos["tabela_direita"]
        # Adjust the table column widths
        for celula in table.columns[0].cells:
            celula.width = docx.shared.Inches(2.1)
        for celula in table.columns[1].cells:
            celula.width = docx.shared.Inches(3.8)
        # Point description section
        documento.add_paragraph(text='DESCRIÇÃO', style=self.estilos["subtitulo"])
        documento.add_paragraph(text="<Descrição do afloramento aqui>", style=self.estilos["normal"])
        # Control points get no further sections
        if ponto_controle == "Sim":
            return documento
        # Samples section, if any; samples are labeled PontoA, PontoB, ...
        if num_amostras > 0:
            documento.add_paragraph(text='AMOSTRAS', style=self.estilos["subtitulo"])
            abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            for i in range(0, num_amostras):
                letra = abc[i]
                documento.add_paragraph(text=f"• {ponto}{letra}: <Descrição da amostra aqui>", style=self.estilos["normal"])
        # Look for structural measurements in the table
        medidas_estruturais = []
        for i, coluna in enumerate(colunas_estrutura):
            # Skip essential columns.
            # Note: if this happens, the user inserted an extra column into the
            # table before the measurement block.
            if coluna in COLUNAS_TABELA_CADERNETA.keys():
                continue
            # Cell content — i + 19 offsets past itertuples' Index plus the 18
            # fixed columns that precede the measurement block
            medida = linha[i + 19]
            # Only non-empty cells become measurements
            if not pandas.isna(medida):
                # Prefer an abbreviation given between parentheses
                if '(' in coluna and ')' in coluna:
                    sigla = coluna[coluna.find("(") + 1:coluna.find(")")]
                # Otherwise fall back to the column name itself
                else:
                    sigla = coluna.replace('_', ' ')
                # Collect the formatted measurements
                medidas_estruturais.append(f"• {sigla} = {medida}")
        # Structural measurements section, if any were found
        if len(medidas_estruturais) > 0:
            documento.add_paragraph(text='MEDIDAS ESTRUTURAIS', style=self.estilos["subtitulo"])
            for m in medidas_estruturais:
                documento.add_paragraph(text=m, style=self.estilos["normal"])
        # Sketches section, if the row says there are any
        if possui_croquis:
            documento.add_paragraph(text='CROQUIS', style=self.estilos["subtitulo"])
            documento.add_paragraph(
                text="<Insira aqui os croquis elaborados para o afloramento e suas "
                     "respectivas legendas. Remova esta seção caso não haja croquis>",
                style=self.estilos["normal"]
            )
        # Photos section, if the row says there are any
        if possui_fotos:
            documento.add_paragraph(text='FOTOS', style=self.estilos["subtitulo"])
            documento.add_paragraph(
                text="<Insira aqui os painéis de fotos tiradas no afloramento e suas "
                     "respectivas legendas. Remova esta seção caso não haja fotos>",
                style=self.estilos["normal"]
            )
        return documento
def salvar_caderneta(self, caminho: str):
"""Salva a caderneta como um arquivo .docx.
:param caminho: O caminho do arquivo.
:return: Nada.
"""
self.caderneta.core_properties.author = "Geologia UFSC"
self.caderneta.core_properties.category = "Relatório Técnico"
self.caderneta.core_properties.comments = "Caderneta de campo compilada elaborada na disciplina de Mapeamento " \
"Geológico do curso de graduação em Geologia da UFSC"
self.caderneta.core_properties.content_status = "Modelo"
self.caderneta.core_properties.created = datetime.now()
self.caderneta.core_properties.identifier = None
self.caderneta.core_properties.keywords = "Geologia, Mapeamento Geológico"
self.caderneta.core_properties.language = "Português (Brasil)"
self.caderneta.core_properties.last_modified_by = "Geologia UFSC"
#self.caderneta.core_properties.last_printed = None
self.caderneta.core_properties.modified = datetime.now()
self.caderneta.core_properties.revision = 1
self.caderneta.core_properties.subject = "Geologia"
self.caderneta.core_properties.title = "Caderneta de Campo Compilada"
self.caderneta.core_properties.version = "v1"
if not caminho.endswith(".docx"):
caminho += ".docx"
self.caderneta.save(caminho)
| FrostPredator/template-builder | Controller.py | Controller.py | py | 21,845 | python | pt | code | 1 | github-code | 13 |
2449670057 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def getMode(self, root, ans):
        """Tally the frequency of every node value into ``ans`` (preorder walk)."""
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            ans[node.val] += 1
            # Push right before left so the left subtree is visited first,
            # matching the original recursive preorder insertion order.
            stack.append(node.right)
            stack.append(node.left)

    def findMode(self, root: Optional[TreeNode]) -> List[int]:
        """Return every value that occurs with the highest frequency in the tree."""
        counts = defaultdict(int)
        self.getMode(root, counts)
        highest = max(counts.values())
        return [value for value, freq in counts.items() if freq == highest]
11032612897 | import os
from typing import Optional
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from Data_Analysis import Data_Analyse
class Data_Load_Old(object):
    """Legacy loader: reads a CSV, drops known useless columns and filters
    rows without a target value. Prints progress to stdout as it goes."""

    # Columns discarded by drop_columns()
    column_drop = ['Duplicate_Check',
                   'PdI Width (d.nm)',
                   'PdI',
                   'Z-Average (d.nm)']
    # Class-level defaults; note target_check() stores its result on the
    # CLASS attribute datafile_cleaned, not on the instance
    datafile = None
    datafile_cleaned = None
    def __init__(self):
        self.datafile = None
        self.datafile_clean = None
    def read_file(self, file) -> pd.DataFrame:
        """Read *file* as CSV into self.datafile and return it (prints head)."""
        datafile = pd.read_csv(str(file))
        print(datafile.head())
        self.datafile = datafile
        return self.datafile
    def datafile_info(self, datafile):
        """Print the column names and the number of unique columns."""
        print(list(datafile.columns))
        print("Number of Column: ", len(datafile.columns.unique()))
    def drop_columns(self):
        """Drop column_drop from self.datafile into self.datafile_clean."""
        datafile_clean = self.datafile.drop(self.column_drop, axis=1).reset_index(drop=True)
        print(list(datafile_clean.columns))
        print("Number of Column: ", len(datafile_clean.columns.unique()))
        self.datafile_clean = datafile_clean
        return self.datafile_clean
    def target_check(self):
        """Drop rows whose 'ES_Aggregation' target is NaN and report counts.

        Stores the result on the class attribute Data_Load_Old.datafile_cleaned
        (shared across instances) and returns it.
        """
        # First let's see what values exist in the target column ('ES_Aggregation') and return a count of NaN values
        print(self.datafile_clean['ES_Aggregation'].unique())
        print(self.datafile_clean['ES_Aggregation'].isnull().sum(axis=0))
        datafile_cleaned = self.datafile_clean[self.datafile_clean['ES_Aggregation'].notna()].reset_index(drop=True)
        # print(datafile_cleaned)
        print(datafile_cleaned['ES_Aggregation'].unique())
        # ax = sns.countplot(x='ES_Aggregation', data=datafile_cleaned)
        # plt.show()
        print(datafile_cleaned['ES_Aggregation'].value_counts())
        # print(datafile_cleaned)
        Data_Load_Old.datafile_cleaned = datafile_cleaned
        return Data_Load_Old.datafile_cleaned
class Data_Load_Split(object):
    """Load a tabular dataset and prepare X/y arrays for Regression,
    Classification or MultiOutput Regression experiments.

    The constructor runs the whole pipeline automatically: read file, label
    encode, dummy-encode components, optionally hold out one component
    (hide_component), drop task-irrelevant columns and build self.X/self.y.
    """
    def __init__(self, file, hide_component: str = None, alg_categ: str = None,
                 split_ratio: float = 0.2,
                 shuffle_data: bool = True, drop_useless_columns: bool = True,
                 filter_target: bool=False, target: str='Z-Average (d.nm)', smaller_than: float=1000.0,
                 column_removal_experiment: list=None
                 ):
        # alg_categ must be one of the recognized task names (incl. aliases)
        assert alg_categ in ['Regression', 'Classification', 'Regression and Classification', 'Reg&Class',
                             'MultiOutput Regression', 'MO Regression']
        self.file = file
        self.hide_component = hide_component
        self.alg_categ = alg_categ
        self.split_ratio = split_ratio
        self.shuffle_data = shuffle_data
        self.drop_useless_columns = drop_useless_columns
        # Per-task lists of columns to drop before building X/y
        self.regression_table_drop = ['ES_Aggregation',
                                      'PdI Width (d.nm)',
                                      'PdI',
                                      'Duplicate_Check']
        self.classification_table_drop = ['PdI Width (d.nm)',
                                          'PdI',
                                          'Z-Average (d.nm)',
                                          'Duplicate_Check']
        self.multi_regression_table_drop = ['ES_Aggregation',
                                            'Duplicate_Check']
        self.datafile = None
        self.train_table = None
        self.dum = None
        self.X = None
        self.y = None
        self.hide = None
        self.columns_converted = []
        self.filter_target = filter_target
        self.target = target
        self.smaller_than = smaller_than
        self.dummation_occured = 0
        self.column_removal_experiment = column_removal_experiment
        ###Functions to be run Automatically
        self.initial_read_file()
        self.label_encode()
        self.dummation_groupby()
        self.filter_table()
        self.alg_category()
        self.initial_x_array()
        self.inital_y_array()
        # NOTE(review): label_encode() sets class_names_str for Classification,
        # but this line unconditionally resets it to None afterwards — likely
        # it should be initialized BEFORE the pipeline calls above.
        self.class_names_str = None
    def initial_read_file(self):
        """Read self.file as CSV (falling back to Excel) into self.datafile."""
        try:
            self.datafile = pd.read_csv(str(self.file))
        except Exception:
            self.datafile = pd.read_excel(str(self.file))
        #if self.drop_useless_columns == True:
            #self.datafile = self._useless_column_drop(self.datafile)
        if self.filter_target == True:
            self.datafile = self._target_filter(dataframe=self.datafile,target=self.target, smaller_than=self.smaller_than)
        return self.datafile
    def _target_filter(self, dataframe: pd.DataFrame, target: str, smaller_than: float):
        """Keep only rows where *target* is <= *smaller_than*."""
        self.datafile = dataframe[dataframe[str(target)] <= float(smaller_than)]
        return self.datafile
    #def _useless_column_drop(self, dataframe: pd.DataFrame):
        #self.datafile = dataframe.drop(columns=self.drop_columns_useless)
        #return self.datafile
    def label_encode(self):
        """Drop rows with missing targets and, for Classification, label-encode
        the 'ES_Aggregation' target into 'ES_Aggregation_encoded'."""
        if self.alg_categ in {'Classification'}:
            lb = LabelEncoder()
            self.datafile = self.datafile[self.datafile['ES_Aggregation'].notna()].reset_index(drop=True)
            self.datafile['ES_Aggregation_encoded'] = lb.fit_transform((self.datafile['ES_Aggregation']))
            print(self.datafile['ES_Aggregation_encoded'].value_counts())
            self.class_names_str = lb.classes_
        elif self.alg_categ in {'Regression'}:
            self.datafile = self.datafile[self.datafile['Z-Average (d.nm)'].notna()].reset_index(drop=True)
        elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
            # NOTE(review): df['a', 'b', 'c'] indexes with a single TUPLE key
            # and raises KeyError — this branch almost certainly intends
            # df[['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)']].notna().all(axis=1).
            self.datafile = self.datafile[self.datafile['PdI Width (d.nm)',
                                                        'PdI',
                                                        'Z-Average (d.nm)'].notna()].reset_index(drop=True)
    def dummation_groupby(self) -> pd.DataFrame:
        """One-hot encode the component columns (if present) into self.dum,
        summing duplicate dummy columns that share a component name."""
        if "Component_1" and "Component_2" and "Component_3" in self.datafile.columns:
            self.dum = pd.get_dummies(self.datafile, columns=['Component_1', 'Component_2', 'Component_3'],
                                      prefix="", prefix_sep="")
            # TODO Add in Component 4 into 'columns = ' when it becomes relevant. Currently not relevant due to PEG
            # NOTE(review): groupby(axis=1) is deprecated in recent pandas —
            # TODO confirm the pinned pandas version still supports it.
            self.dum = self.dum.groupby(level=0, axis=1, sort=False).sum()
            self.dummation_occured = 1
        else:
            self.dum = self.datafile.copy()
    def filter_table(self):
        """Split self.dum into train_table (hide_component absent) and hide
        (hide_component present), when a component is being held out."""
        if self.hide_component is not None:
            self.hide = self.dum[self.dum[str(self.hide_component)] == 1]
            self.train_table = self.dum[self.dum[str(self.hide_component)] == 0]
            return self.train_table, self.hide
        else:
            pass
    def alg_category(self):
        """Drop the columns irrelevant to the selected task from the working
        tables (train_table/dum, or dum/datafile when nothing is held out)."""
        # Need to think of a better way to deal
        if self.alg_categ in {'Regression'}:
            if self.dum is not None and self.train_table is not None:
                self.train_table.drop(self.regression_table_drop, axis=1, inplace=True)
                self.dum.drop(self.regression_table_drop, axis=1, inplace=True)
            else:
                self.dum.drop(self.regression_table_drop, axis=1, inplace=True)
                self.datafile.drop(self.regression_table_drop, axis=1, inplace=True)
        elif self.alg_categ in {'Classification'}:
            if self.dum is not None and self.train_table is not None:
                self.train_table.drop(self.classification_table_drop, axis=1, inplace=True)
                self.dum.drop(self.classification_table_drop, axis=1, inplace=True)
            else:
                self.dum.drop(self.classification_table_drop, axis=1, inplace=True)
                self.datafile.drop(self.classification_table_drop, axis=1, inplace=True)
        elif self.alg_categ in {'Regression and Classification', 'Reg&Class'}:
            print('Needs to be implemented...')
        elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
            if self.dum is not None and self.train_table is not None:
                self.train_table.drop(self.multi_regression_table_drop, axis=1, inplace=True)
                self.dum.drop(self.multi_regression_table_drop, axis=1, inplace=True)
            else:
                self.dum.drop(self.multi_regression_table_drop, axis=1, inplace=True)
                self.datafile.drop(self.multi_regression_table_drop, axis=1, inplace=True)
        else:
            print('What did you write that got you past the assertion check...')
    def _column_removal_(self, column_removal_experiment: list=None):
        """Drop the experiment-specific columns from the working tables."""
        if self.dum is not None and self.train_table is not None:
            # NOTE(review): drop() without inplace=True (or reassignment)
            # returns a new DataFrame that is discarded — these two calls are
            # no-ops, unlike the elif branch below.
            self.train_table.drop(columns=column_removal_experiment)
            self.dum.drop(columns=column_removal_experiment)
        elif self.train_table is None:
            self.dum.drop(columns=column_removal_experiment, inplace=True)
    def initial_x_array(self):
        """Build self.X (feature matrix) from the working table, and record
        which columns already lie in [0, 1] vs need min-max scaling."""
        if self.column_removal_experiment is not None:
            self._column_removal_(self.column_removal_experiment)
        # NOTE(review): for alg_categ 'Regression and Classification'/'Reg&Class'
        # none of the branches below assigns x_table, which would raise
        # NameError at x_table.values — TODO confirm those modes are unused.
        if self.dum is not None and self.train_table is not None:
            if self.alg_categ in {'Classification'}:
                x_table = self.train_table.drop(['ES_Aggregation_encoded', 'ES_Aggregation'], axis=1).reset_index(
                    drop=True)
            elif self.alg_categ in {'Regression'}:
                x_table = self.train_table.drop(['Z-Average (d.nm)'], axis=1).reset_index(drop=True)
            elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
                x_table = self.train_table.drop(['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'],
                                                axis=1).reset_index(drop=True)
            self.X = x_table.values
        elif self.train_table is None:
            if self.alg_categ in {'Classification'}:
                x_table = self.dum.drop(['ES_Aggregation_encoded', 'ES_Aggregation'], axis=1).reset_index(drop=True)
            elif self.alg_categ in {'Regression'}:
                x_table = self.dum.drop(['Z-Average (d.nm)'], axis=1).reset_index(drop=True)
            elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
                x_table = self.dum.drop(['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'],
                                        axis=1).reset_index(drop=True)
            self.X = x_table.values
        # #TODO Need to fix this at some point when I do dummy grouping again
        # if self.dummation_occured == 1:
        #     for i in x_table.columns:
        #         if (x_table[str(i)].isin([0, 1]).all()) == True:
        #             self.columns_converted.append(True)
        #         else:
        #             self.columns_converted.append(False)
        # else:
        #     for i in range(len(x_table.columns)):
        #         self.columns_converted.append(False)
        # columns_converted[i] is True when column i is already within [0, 1]
        for i in x_table.columns:
            if (x_table[str(i)].between(0,1).all()) == True:
                self.columns_converted.append(True)
            else:
                self.columns_converted.append(False)
        # Same information, but as explicit column-name lists
        self.zero_one_columns = []
        self.min_max_scale_columns = []
        for i in x_table.columns:
            if (x_table[str(i)].between(0, 1).all()) == True:
                self.zero_one_columns.append(str(i))
            else:
                self.min_max_scale_columns.append(str(i))
        return self.X
    def inital_y_array(self):
        """Build self.y (target vector/matrix) for the selected task.

        NOTE(review): the method name keeps the original typo ('inital') since
        external callers depend on it.
        """
        if self.dum is not None and self.train_table is not None:
            if self.alg_categ in {'Classification'}:
                self.y = self.train_table['ES_Aggregation_encoded'].values
            elif self.alg_categ in {'Regression'}:
                self.y = self.train_table['Z-Average (d.nm)'].values
            elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
                # NOTE(review): tuple-key indexing bug — should be a list of
                # columns: self.train_table[['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)']]
                self.y = self.train_table['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'].values
        elif self.train_table is None:
            if self.alg_categ in {'Classification'}:
                self.y = self.dum['ES_Aggregation_encoded'].values
            elif self.alg_categ in {'Regression'}:
                self.y = self.dum['Z-Average (d.nm)'].values
            elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
                # NOTE(review): same tuple-key indexing bug as above.
                self.y = self.dum['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'].values
        return self.y
    def analyse_data(self, save_path, column_names, plot):
        """Run the Data_Analyse battery (normality tests, heatmap, VIF,
        profiling reports) on self.X/self.y, saving artifacts to save_path."""
        data_analyse = Data_Analyse()
        data_analyse.histogram(self.y, data_name='y_all', save_path=save_path, plot=plot)
        data_analyse.qqplot_data(self.y, data_name='y_all', save_path=save_path, plot=plot)
        print('----Shapiro Wilk Y Train----')
        self.shapiro_wilk_y_train = data_analyse.shapiro_wilk_test(self.y)
        print(self.shapiro_wilk_y_train)
        print('-----------------------------')
        print('----Dagostino K^2 Y Train----')
        self.dagostino_k2_y, self.dagostino_p_y, self.dagiston_is_gaussian = data_analyse.dagostino_k2(self.y)
        print('----Anderson Y Train----')
        self.anderson_darling_train = data_analyse.anderson_darling(self.y)
        print('----Heatmap X Train----')
        self.heatmap_train = data_analyse.heatmap(self.X, column_names, data_name='x_all', save_path=save_path,
                                                  plot=False)
        print('----Box Plot X Train----')
        self.box_plot_train = data_analyse.box_plot(self.X, column_names, data_name='x_all', save_path=save_path,
                                                    plot=False)
        print('----Variance Inflation Factor_X_train----')
        self.variance_inflation_factor_x_train = data_analyse.variance_inflation_factor(self.X, column_names)
        print(self.variance_inflation_factor_x_train)
        file_name = os.path.join(save_path,"variance_inflation_factor")
        self.variance_inflation_factor_x_train.to_csv(str(file_name) + ".csv", index=False)
        print("------Sweet_Viz---------")
        #self.sweet_viz = data_analyse.sweet_viz(self.X,column_names,save_path=save_path)
        print("------Target Included---------")
        temp_df = pd.DataFrame(self.X,columns=column_names)
        temp_df['Average_Size'] = self.y
        self.sweet_viz_target = data_analyse.sweet_viz(temp_df,feature_names=None,target="Average_Size", save_path=save_path)
        print("------Pandas_Profile---------")
        temp_df=pd.DataFrame(self.X,columns=column_names)
        data_analyse.pandas_profiling(temp_df,save_path=save_path)
        return self.dagiston_is_gaussian
    def split_train_test(self):
        """Split self.X/self.y into train/test; held-out (hide) rows are
        appended to the TEST set so the model never trains on them."""
        # TODO Need to look into the stratify parameter - if function again...
        if self.alg_categ in {'Classification'}:
            (X_train, X_test, y_train, y_test) = \
                train_test_split(self.X, self.y, test_size=self.split_ratio, random_state=42, shuffle=self.shuffle_data,
                                 stratify=self.y)
        else:
            (X_train, X_test, y_train, y_test) = \
                train_test_split(self.X, self.y, test_size=self.split_ratio, random_state=42, shuffle=self.shuffle_data)
        if self.hide is not None:
            if self.alg_categ in {'Classification'}:
                x_temp = self.hide.drop(['ES_Aggregation_encoded', 'ES_Aggregation'], axis=1).reset_index(drop=True)
                x_temp = x_temp.values
                X_test = np.vstack([X_test, x_temp])
                y_temp = self.hide['ES_Aggregation_encoded'].values
                y_test = np.hstack([y_test, y_temp])
            elif self.alg_categ in {'Regression'}:
                x_temp = self.hide.drop(['Z-Average (d.nm)'], axis=1).reset_index(drop=True)
                x_temp = x_temp.values
                X_test = np.vstack([X_test, x_temp])
                y_temp = self.hide['Z-Average (d.nm)'].values
                y_test = np.hstack([y_test, y_temp])
            elif self.alg_categ in {'MultiOutput Regression', 'MO Regression'}:
                x_temp = self.hide.drop(['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'], axis=1).reset_index(drop=True)
                x_temp = x_temp.values
                X_test = np.vstack([X_test, x_temp])
                # NOTE(review): tuple-key indexing bug (see inital_y_array) —
                # should be self.hide[['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)']]
                y_temp = self.hide['PdI Width (d.nm)', 'PdI', 'Z-Average (d.nm)'].values
                y_test = np.hstack([y_test, y_temp])
        return X_train, X_test, y_train, y_test
    def update_x_y_data(self, additional_x, additional_y,
                        prev_x_data, prev_y_data):
        """Stack additional (and optionally previous) samples onto self.X/self.y."""
        if prev_x_data is not None and prev_y_data is not None:
            self.X = np.vstack((self.X,prev_x_data, additional_x))
            self.y = np.hstack((self.y,prev_y_data, additional_y)).astype(float)
        else:
            self.X = np.vstack((self.X, additional_x))
            self.y = np.hstack((self.y, additional_y)).astype(float)
        return self.X, self.y
| calvinp0/AL_Master_ChemEng | DataLoad.py | DataLoad.py | py | 16,990 | python | en | code | 0 | github-code | 13 |
24218901240 | from collections import Counter, defaultdict
# Advent of Code 2018 day 4: parse the (chronologically sorted) guard log and
# tally, per guard, how often each minute is spent asleep.
with open('in.txt') as f:
    lines = f.read().splitlines()

# Timestamps sort lexicographically, so a plain sort gives chronological order.
lines.sort()

# guard id -> Counter of minute -> times asleep at that minute
guard_minutes = defaultdict(Counter)
for line in lines:
    command = line[19:]            # text after the "[YYYY-MM-DD HH:MM] " stamp
    current_minute = int(line[15:17])
    if command == 'falls asleep':
        sleep_start = current_minute
    elif command == 'wakes up':
        guard_minutes[current_guard].update(range(sleep_start, current_minute))
    else:
        # "Guard #<id> begins shift" — second token is "#<id>"
        current_guard = int(command.split()[1][1:])


def total_sleep_key(item):
    """Sort key: total minutes a guard spent asleep."""
    _guard, counter = item
    return sum(counter.values())


# Part 1: the sleepiest guard overall, times his most-slept minute.
# (The two key functions were previously both named `key`, the second
# silently shadowing the first.)
guard, minutes = max(guard_minutes.items(), key=total_sleep_key)
((minute, count),) = minutes.most_common(1)
print(guard * minute)


def peak_minute_key(item):
    """Sort key: a guard's highest single-minute sleep count."""
    _guard, counter = item
    ((_minute, count),) = counter.most_common(1)
    return count


# Part 2: the guard most frequently asleep on the same minute.
guard, minutes = max(guard_minutes.items(), key=peak_minute_key)
((minute, count),) = minutes.most_common(1)
print(guard * minute)
| prplz/aoc-2018-python | 04/04.py | 04.py | py | 911 | python | en | code | 1 | github-code | 13 |
42960432710 | import datetime
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, Union
from boto3 import client
from botocore import UNSIGNED
from botocore.client import ClientError, Config
from loguru import logger
from buckets_hunter.utils import dns, hunter_utils
from buckets_hunter.utils.notify import print_open_bucket, print_service
S3_BUCKET_URL = "{}.s3.amazonaws.com"
AWS_APPS_URL = "{}.awsapps.com"
class S3BucketsScanner:
    """Probes AWS for open S3 buckets and registered awsapps.com endpoints
    using an anonymous (unsigned) S3 client."""

    PLATFORM = "AWS"

    def __init__(self, dns_utils: dns.DNSUtils):
        self._dns_utils = dns_utils
        self.s3_client = self._initialize_s3_client()

    def _initialize_s3_client(self) -> client:
        """Create an anonymous S3 client; exit the process if creation fails."""
        try:
            s3_client = client(
                "s3",  # type of client
                config=Config(signature_version=UNSIGNED),  # without creds
                use_ssl=True,
                verify=True,
            )
        except Exception as err:
            sys.exit(err)
        return s3_client

    def scan_aws_apps(self, bucket_name: str) -> Dict[str, str]:
        """Check whether <bucket_name>.awsapps.com resolves via DNS.

        :return: A result dict for the report, or None when it does not resolve.
        """
        aws_app_url = AWS_APPS_URL.format(bucket_name)
        if not self._dns_utils.dns_lookup(aws_app_url):
            return None
        return {
            "platform": S3BucketsScanner.PLATFORM,
            "service": "AWS apps",
            "bucket": aws_app_url,
        }

    def scan_bucket_permissions(
        self, bucket_name: str
    ) -> Dict[str, Union[str, Dict[str, bool]]]:
        """Probe an S3 bucket's anonymous permissions.

        :return: A result dict (permissions + listed files), or None when the
            bucket does not exist. (A leftover debug ``print(bucket_name)``
            was removed here.)
        """
        if not self._bucket_exists(bucket_name):
            return None
        bucket_url = S3_BUCKET_URL.format(bucket_name)
        return {
            "platform": S3BucketsScanner.PLATFORM,
            "service": "S3",
            "bucket": bucket_url,
            "permissions": {
                "readable": self._check_read_permission(bucket_name),
                "writeable": self._check_write_permission(bucket_name),
                "acp_readable": self._check_read_acl_permission(bucket_name),
                "acp_writeable": self._check_write_acl_permission(bucket_name),
            },
            "files": hunter_utils.get_bucket_files(f"https://{bucket_url}"),
        }

    def _bucket_exists(self, bucket_name) -> bool:
        """Return True when a HEAD request on the bucket succeeds.

        (Return annotation fixed: it was ``-> False``, which is a value,
        not a type.)
        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except ClientError:
            return False
        return True

    def _check_read_permission(self, bucket_name: str) -> bool:
        """Return True when the bucket's object listing is anonymously readable."""
        try:
            self.s3_client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
        except ClientError:
            return False
        return True

    def _check_write_permission(self, bucket_name: str) -> bool:
        """Checks if writing a file to bucket is possible."""
        try:
            temp_write_file = (
                f"BucketHunter_{int(datetime.datetime.now().timestamp())}.txt"
            )
            # try to upload the file:
            self.s3_client.put_object(Bucket=bucket_name, Key=temp_write_file, Body=b"")
        except ClientError:
            return False
        else:
            # successful upload, delete the file:
            self.s3_client.delete_object(Bucket=bucket_name, Key=temp_write_file)
            return True

    def _check_read_acl_permission(self, bucket_name: str) -> bool:
        """Checks if reading Access Control List is possible."""
        try:
            self.s3_client.get_bucket_acl(Bucket=bucket_name)
        except ClientError:
            return False
        return True

    def _check_write_acl_permission(self, bucket_name: str) -> bool:
        """Checks if changing the Access Control List is possible.
        NOTE: This changes permissions to be public-read."""
        try:
            self.s3_client.put_bucket_acl(Bucket=bucket_name, ACL="public-read")
        except ClientError:
            return False
        return True
def run(scan_config):
    """Fan every bucket-name permutation out over a thread pool, probing first
    S3 bucket permissions and then awsapps.com endpoints, and collect the
    non-empty results."""
    s3_bucket_scanner = S3BucketsScanner(scan_config.dns_utils)
    aws_scan_results = []
    with ThreadPoolExecutor(max_workers=scan_config.threads) as executor:
        # First pass: S3 bucket permission probes
        bucket_futures = [
            executor.submit(s3_bucket_scanner.scan_bucket_permissions, name)
            for name in scan_config.buckets_permutations
        ]
        for future in as_completed(bucket_futures):
            try:
                bucket_result = future.result()
            except Exception as err:
                logger.error(err)
            else:
                if bucket_result:
                    print_open_bucket(bucket_result)
                    aws_scan_results.append(bucket_result)
        # Second pass: awsapps.com endpoint probes
        app_futures = [
            executor.submit(s3_bucket_scanner.scan_aws_apps, name)
            for name in scan_config.buckets_permutations
        ]
        for future in as_completed(app_futures):
            try:
                app_result = future.result()
            except Exception as err:
                logger.error(err)
            else:
                if app_result:
                    print_service(app_result)
                    aws_scan_results.append(app_result)
    return aws_scan_results
| DanielAzulayy/BucketsHunter | buckets_hunter/modules/aws/aws_scanner.py | aws_scanner.py | py | 5,287 | python | en | code | 2 | github-code | 13 |
def solve(K, A):
    """Return the 1-indexed town reached after K teleports starting at town 1.

    A[i] is the destination of town i + 1. Walks until some town repeats,
    using a position map for O(1) revisit checks (the original ``nxt in
    history`` / ``history.index`` scans made the walk O(n^2)), then reduces
    the remaining steps modulo the cycle length.
    """
    count = 0
    history = [1]   # history[s] == town reached after s teleports
    pos = {1: 0}    # town -> step at which it was first visited
    index = -1
    while count <= K:
        nxt = A[history[-1] - 1]
        if nxt in pos:
            index = pos[nxt]
            break
        history.append(nxt)
        pos[nxt] = len(history) - 1
        count += 1
    if index == -1:
        # No town repeated within the first K steps: the answer is history[K].
        # (The original printed history[-1], i.e. the town after K + 1 steps —
        # an off-by-one, since the loop appends up to K + 1 towns.)
        return history[K]
    # history[index:] repeats forever with period len(loop); loop[mod - 1]
    # (with Python's negative indexing when mod == 0) lands on step K.
    loop = history[index:]
    _q, mod = divmod(K - count - (len(history) - index), len(loop))
    return loop[mod - 1]


if __name__ == '__main__':
    N, K = map(int, input().split())
    A = list(map(int, input().split()))
    print(solve(K, A))
| uu64/at-coder | 20200510-ABC167/D.py | D.py | py | 473 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python3
# coding: utf-8
'''
Script to format and filter the HMMER table.

Python 3 and the pandas package are required to run this script.

- To install the pandas package use:
    pip3 install pandas
- Usage:
    python3 mtr_00_hmm_table_filtering.py
'''
import pandas as pd
# HMMER input file and output file
hmmer = "HMM_dominios_prot.txt"
hmm_saida = 'hmm_clean.tsv'
# Read the csv, setting the delimiter to any whitespace.
hmm = pd.read_csv(hmmer, delim_whitespace = True)
# Put the columns in the desired order. ID must come first.
colunas = ['ID',
        'target_name',
        'accession',
        'accession.1',
        'E-value',
        'score',
        'bias',
        'E-value.1',
        'score.1',
        'bias.1',
        'exp',
        'reg',
        'clu',
        'ov',
        'env',
        'dom',
        'rep',
        'inc',
        'description_of_target']
# Reorder the columns
hmm = hmm[colunas]
# Drop selected columns that carry no values
hmm.drop(['accession','accession.1','description_of_target'], axis=1, inplace=True)
# Keep only the row with the highest score for each ID
hmm = hmm.loc[hmm.groupby('ID', sort=False)['score'].idxmax()]
# Write the dataframe in tsv format (tab separated values)
hmm.to_csv(hmm_saida,sep='\t', index=False)
10521125344 | # This script uses Python to read in .tif files I downloaded from
# https://croplandcros.scinet.usda.gov/
#import tifffile and pillow to use this script
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from tifffile import imread, TiffFile, memmap
from PIL import Image
def main():
    """Diff the 2022 and 2021 CropScape rasters for Berks and save the result.

    Reads `clipped.tif` from two hard-coded year folders, subtracts the 2021
    crop-code raster from the 2022 one, and writes the difference out as
    `change.png` in the 2021 folder (the last `os.chdir` target).
    """
    directory = r"C:\Users\Neil\Documents\github\crops\Berks\rect\2022"
    os.chdir(directory)
    print("About to read in .tif file")
    image2022 = imread("clipped.tif")
    directory = r"C:\Users\Neil\Documents\github\crops\Berks\rect\2021"
    os.chdir(directory)
    image2021 = imread("clipped.tif")
    tif = TiffFile("clipped.tif") # 1 page, series 1,
    print(len(tif.series[0].axes))
    # NOTE(review): pixel values are CDL crop codes, so this difference is a
    # change *indicator*, not a meaningful magnitude — confirm intent.
    change = image2022 - image2021
    data = Image.fromarray(change)
    data.save("change.png")
    '''
    numcorn22 = len(np.where(image2022 == 1)[0])
    numcorn21 = len(np.where(image2021 == 1)[0])
    print("num corn 22 ", numcorn22)
    print("num corn 21 ", numcorn21)
    numsoy22 = len(np.where(image2022 == 5)[0])
    numsoy21 = len(np.where(image2021 == 5)[0])
    print("num soy 22 ", numsoy22)
    print("num soy 21 ", numsoy21)
    print(image2021.shape)
    print(image2021.dtype)
    #print(change[560:601, 720:741])
    #df_image=pd.DataFrame(image)
    #print(df_image.describe())
    '''


if __name__ == "__main__":
    main()
| 9ngribbenc1/crops | read_tif.py | read_tif.py | py | 1,430 | python | en | code | 0 | github-code | 13 |
import sys


def find_missing(fname):
    """Return the line found six lines before each line containing 'MISSING'.

    Args:
        fname (str): Path of the data file to inspect.

    Returns:
        list[str]: Stripped lines located 6 lines above each match.  A match
        within the first 6 lines wraps around to the end of the file
        (negative indexing) — this preserves the original script's behavior.
    """
    with open(fname, 'r') as datfile:
        data = datfile.readlines()
    return [data[i - 6].strip() for i, line in enumerate(data) if 'MISSING' in line]


if __name__ == '__main__':
    # Keep the original CLI: python find_missing.py <datafile>
    for found in find_missing(sys.argv[1]):
        print(found)
| mobergd/OneDMin | perl/find_missing.py | find_missing.py | py | 180 | python | en | code | 0 | github-code | 13 |
26570515575 | """Websocket server."""
import asyncio
import base64
import hashlib
import json
import time
import websockets
import auth
from typing import Callable
from configs import config_utils
from exceptions import exceptions
from log import LOG
from obs import obs_base_classes, obs_event_manager, obs_events
class OBSConnection(object):
    """OBS websocket connection class.

    NOTE(review): the handshake (GetAuthRequired + salt/challenge SHA-256
    hashing) matches the obs-websocket 4.x protocol — confirm against the
    plugin version in use.
    """

    def __init__(self):
        """Init. Real setup happens in the async ``init()``."""
        super(OBSConnection, self).__init__()

    async def init(self, authorization: auth.Auth = None) -> object:
        """Async init.

        Args:
            authorization (Auth): Authorization object.

        Returns:
            self (OBSConnection): Class instance.
        """
        self.twitch_config = await config_utils.load_config_file('bot_config')
        self.auth = authorization or await auth.Auth().init(self.twitch_config)
        self.host = self.twitch_config['obs_host']
        self.port = self.twitch_config['obs_port']
        self.password = self.auth.obs_password
        self.connected = False
        self.eventmanager = (
            await obs_event_manager.CallbackEventManager().init()
        )
        # Monotonically increasing id used to pair requests with answers.
        self.message_id = 1
        # {message_id: answer dict}, filled by process_message().
        self.answers = {}
        return self

    # NOTE: defined as a plain function in the class body and applied as
    # "@message_wrapper" below; it is consumed at class-definition time,
    # so no @staticmethod is required.
    def message_wrapper(func: Callable) -> None:
        """Decorator that sends a payload and waits for its matching answer.

        Args:
            func (Callable): Decorated function.

        Returns:
            (Any): Result of the decorated function.
        """

        async def wrapper(self, data: dict) -> None:
            """Wrapped function.

            Args:
                data (dict): Data to send.

            Returns:
                result (dict) from the server.
            """
            await func(self, data)
            # Block until the answer for the current id arrives, then bump
            # the id for the next request.
            result = await self.await_response(self.message_id)
            self.message_id += 1
            return result

        return wrapper

    async def connect(self) -> None:
        """Connect to the websocket server, retrying until it succeeds."""
        if self.connected:
            return
        reconnect_time = 5  # seconds between connection attempts
        LOG.info('Connecting...')
        while not self.connected:
            try:
                self.connection = await websockets.connect(
                    f'ws://{self.host}:{self.port}'
                )
                await self.authorize()
                self.connected = True
            except (websockets.WebSocketException, OSError) as e:
                LOG.error(f'An error occured while trying to connect: {e}')
                LOG.error(
                    f'Re-attempting connection in {reconnect_time} seconds.'
                )
                await asyncio.sleep(reconnect_time)
            except Exception as e:
                LOG.error(f'A connection error occured: {e}\nReconnecting...')
                await asyncio.sleep(reconnect_time)
        LOG.info('Connected.')

    async def reconnect(self) -> None:
        """Restart the connection to the websocket server."""
        LOG.info('Attempting reconnection...')
        try:
            await self.disconnect()
        except Exception as e:
            LOG.error(f'An error occured disconnecting: {e}')
        await self.connect()

    async def disconnect(self) -> None:
        """Disconnect from websocket server."""
        LOG.info('Disconnecting...')
        try:
            await self.connection.close()
        except Exception as e:
            LOG.error(f'An error occured; closing the connection: {e}')
        self.connected = False
        LOG.info('Disconnected.')

    async def authorize(self) -> None:
        """Authorize the connection.

        Raises:
            OBSError: If the server rejects either handshake step.
        """
        auth_payload = {
            'request-type': 'GetAuthRequired',
            'message-id': str(self.message_id),
        }
        await self.connection.send(json.dumps(auth_payload))
        result = json.loads(await self.connection.recv())
        if result['status'] != 'ok':
            raise exceptions.OBSError(result['error'])
        if result.get('authRequired'):
            # secret = b64(sha256(password + salt)); auth = b64(sha256(secret + challenge))
            secret = base64.b64encode(
                hashlib.sha256(
                    (self.password + result['salt']).encode('utf-8')
                ).digest()
            )
            auth = base64.b64encode(
                hashlib.sha256(
                    secret + result['challenge'].encode('utf-8')
                ).digest()
            ).decode('utf-8')
            auth_payload = {
                'request-type': 'Authenticate',
                'message-id': str(self.message_id),
                'auth': auth,
            }
            await self.connection.send(json.dumps(auth_payload))
            result = json.loads(await self.connection.recv())
            if result['status'] != 'ok':
                raise exceptions.OBSError(result['error'])

    async def call(self, request: object) -> object:
        """Make a call to the OBS server.

        Args:
            request (object): Request to send to the server.

        Returns:
            request (object): Request with response data.

        Raises:
            OBSError: If ``request`` is not a BaseRequests instance.
        """
        if not isinstance(request, obs_base_classes.BaseRequests):
            raise exceptions.OBSError(f'{request} is not a valid request.')
        payload = await request.data()
        response = await self.send(payload)
        # The request object parses its own response payload.
        await request.input(response)
        return request

    @message_wrapper
    async def send(self, data: dict) -> None:
        """Make a raw json call to the OBS server through the Websocket.

        The wrapper awaits and returns the matching answer dict.

        Args:
            data (dict): Data to send.
        """
        data['message-id'] = str(self.message_id)
        LOG.debug(f'Sending message {self.message_id}: {data}')
        await self.connection.send(json.dumps(data))

    async def await_response(self, message_id: int) -> dict:
        """Poll for the answer matching ``message_id`` (60 second timeout).

        Args:
            message_id (int): Id of the outstanding request.

        Returns:
            dict: The server's answer for that message id.

        Raises:
            OBSError: If no answer arrives within 60 seconds.
        """
        LOG.debug('Waiting for reply...')
        timeout = time.time() + 60  # Timeout = 60s
        while time.time() < timeout:
            LOG.debug(f'{message_id} <-> {self.answers}')
            if message_id in self.answers:
                return self.answers.pop(message_id)
            await asyncio.sleep(0.1)
        raise exceptions.OBSError(f'No answer for message {message_id}')

    async def register(self, func: Callable, event: object = None) -> None:
        """Register a new callback.

        Args:
            func (Callable): Callback function to run on event.
            event (Event): Event to trigger.
                Default is None, which will trigger on all events.
        """
        await self.eventmanager.register(func, event)

    async def unregister(self, func: Callable, event: object = None) -> None:
        """Unregister an existing callback.

        Args:
            func (Callable): Callback function to run on event.
            event (Event): Event to trigger.
                Default is None, which would have triggered on all events.
        """
        await self.eventmanager.unregister(func, event)

    async def process_message(self, message: str) -> None:
        """Process the received message.

        Events ('update-type') are dispatched to the event manager; answers
        ('message-id') are stored for await_response().

        Args:
            message (str): JSON message received from OBS.
        """
        if not message:
            LOG.debug('Blank message; skipping.')
            return
        result = json.loads(message)
        if 'update-type' in result:
            event = await self.build_event(result)
            await self.eventmanager.trigger(event)
        elif 'message-id' in result:
            LOG.debug(f'Got answer for id {result["message-id"]}: {result}')
            self.answers[int(result['message-id'])] = result
        else:
            LOG.warning(f'Unknown message: {result}')

    async def build_event(self, data: dict) -> object:
        """Build an event from a received message.

        Args:
            data (dict): Message data.

        Returns:
            obj (object): Event.

        Raises:
            OBSError: If no event class matches the 'update-type' name.
        """
        name = data['update-type']
        LOG.debug(f'Building event: {name}')
        try:
            # Event classes in obs_events are named after the update-type.
            call = await getattr(obs_events, name)().init()
        except AttributeError:
            raise exceptions.OBSError(f'Invalid event {name}')
        await call.input(data)
        return call

    async def run(self) -> None:  # noqa
        """Run the receiver loop, reconnecting on dropped connections."""
        await self.connect()
        self.running = True
        LOG.debug('Running receiver loop.')
        while self.running:
            try:
                message = await self.connection.recv()
                LOG.debug(f'Received message: {message}')
                await self.process_message(message)
            except (
                websockets.exceptions.ConnectionClosedOK,
                websockets.ConnectionClosedError,
            ):
                if self.running:
                    LOG.warning('OBS server has gone offline.')
                    # self.running = False
                    await self.reconnect()
            except OSError:
                if self.running:
                    LOG.warning('Cannot connect to OBS server.')
                    # self.running = False
                    await self.reconnect()
            except (ValueError, exceptions.OBSError) as e:
                # Reset `message` so the next log line cannot show stale data.
                LOG.warning(f'Invalid message: {message} ({e})')
                message = ''
        LOG.debug('Receiver loop no longer running.')
| amorphousWaste/twitch_bot_public | twitch_bot/obs/obs_connection.py | obs_connection.py | py | 9,268 | python | en | code | 0 | github-code | 13 |
73662946896 | import numpy as np
def load_a9a(data_folder):
    """Load the a9a dataset from ``<data_folder>phpwCsLLW.csv``.

    Each record looks like ``{v0 val,idx val,idx ... label}``: the first
    token fills feature 0, the middle ``value,key`` pairs fill the sparse
    features (1-based keys), and the final token is the class label.

    Args:
        data_folder (str): Folder path; must end with a separator because
            the file name is appended by plain string concatenation.

    Returns:
        tuple: ``(X, y)`` where ``X`` is an ``(n_samples, 123)`` feature
        array and ``y`` is the binary label vector (1 iff raw label == 1).
    """
    file_path = data_folder + 'phpwCsLLW.csv'
    rows = []
    with open(file_path, 'r') as f:
        raw_lines = f.readlines()
    # Skip the header line, then parse every record.
    for raw in raw_lines[1:]:
        body = raw.strip()[1:-1]  # drop the surrounding delimiters
        tokens = body.split(' ')
        row = [0] * 124           # 123 features + trailing label slot
        row[0] = int(tokens[0])
        for pair in tokens[1:-1]:
            value, key = pair.split(',')
            row[int(key) - 1] = float(value)
        row[-1] = int(int(tokens[-1]) == 1)
        rows.append(row)
    data = np.array(rows)
    return data[:, :-1], data[:, -1]
| dingdian110/alpha-ml | alphaml/datasets/cls_dataset/a9a.py | a9a.py | py | 710 | python | en | code | 1 | github-code | 13 |
41130638133 | import pygame, sys
from utilidades import intro_transition, cambiar_musica, dibujar_grid
from configuracion import *
from class_personaje import Personaje
from class_enemigo import Enemigo
from class_proyectil import Proyectil
from levels.class_stage_1 import Stage_1
from levels.class_stage_2 import Stage_2
from levels.class_stage_3 import Stage_3
from levels.class_stage_4 import Stage_4
from modo.modo_dev import get_modo, cambiar_modo
from class_tiempo_stages import TiempoStages
from class_esferas import Esferas
from class_radar import Radar
from class_jacki import Boss
from vid.pyvidplayer import Video
from class_poder_final import PoderFinalVid
from class_kame import Kame
import random
from class_score import ScoreStage
pygame.init()
def game()-> list[str and [list[int]]]:
    '''
    Runs the main game loop until the player wins or loses.

    Receives: None
    Returns: list[str and list[int]]
        "Win" or "Game Over": str
        Scores: list[int]
    '''
    # Screen dimensions
    ancho_pantalla = ANCHO_PANTALLA
    alto_pantalla = ALTO_PANTALLA
    screen = pygame.display.set_mode((ancho_pantalla, alto_pantalla))
    fps = FPS
    relog = pygame.time.Clock()
    # On-screen spawn range for the dragon balls
    ancho_screen_para_esferas = 950
    alto_screen_para_esferas = 555
    # Instantiate every stage; the current one is chosen by index from the list
    stage_1 = Stage_1(screen)
    stage_2 = Stage_2(screen)
    stage_3 = Stage_3(screen)
    stage_4 = Stage_4(screen)
    stage_list = [stage_1, stage_2, stage_3, stage_4]
    poder_final = PoderFinalVid(0,0, screen)
    pygame.mixer.music.play()
    pygame.mixer.music.set_volume(0.4)
    poder_kame = Kame(screen, ANCHO_PANTALLA,50, 1000, 1000, 0, 620)
    score = ScoreStage(screen , 0, 0, 0)
    stage_run = False
    index_stage = 0 # defines the starting stage
    running = True
    stage_actual = None
    radar_on = False
    crono_on = False
    start_time = False
    lista_esferas = []
    lista_esferas_generada = False
    # Boss slide-in animation state for the final-stage intro
    slide_boss = 600
    dx_slide_boss = 20
    balloon_position = (200, 250)
    balloon_color = (255, 255, 255)
    text_color = (0, 0, 0)
    text = ["Has demostrado tu valentia\nllegando hasta aquí muchacho...", "Pero esta ves...\nno te sera tan facíl\npasar la prueba", "Asi que...\nPREPARATE!!", "A ver si puedes\ncontrarestar este ataque!!!"]
    text_goku = ["No te tengo miedo...", "Pero tampoco puedo confiarme...", "Dare todo en este ultimo ataque!!!"]
    # Frames each dialogue balloon stays on screen
    time_text = 84
    time_text_limit = 84
    text_index = 0
    load_musica_battle = False
    load_music_intro = False
    path_jacky = "asset\jacky-pose.png"
    path_krillin = "asset\krillin_intro_game.png"
    path_por_defecto = path_krillin
    parte_final_2 = False
    contador_escena = 0
    flag_video_final = False
    score_game = 0
    game_over_win = False
    game_over_defeat = False
    credits_finished = False
    while running and not game_over_win and not game_over_defeat:
        # Stage setup: (re)build enemy/player/projectiles on stage change
        if not stage_run:
            stage_run = True
            stage_actual = stage_list[index_stage]
            if(index_stage < 3):
                enemigo = Enemigo(screen, 800, 200, stage_actual.tile_list)
            else:
                enemigo = Boss(800, 570)
            personaje = Personaje(150, 600, stage_actual.tile_list, screen, enemigo, 0)
            poder = Proyectil(1, personaje.rect.x, personaje.rect.y)
            poder_list:list[Proyectil] = []
            personaje.score = score_game
            poder_list.append(poder)
        score_game = personaje.score
        score.score = score_game
        if(personaje.vida <= 0):
            # over_game.show_game_over("Game Over")
            game_over_defeat = True
        if(personaje.contador_esferas >= 7): # backup of the character's score
            if(index_stage < len(stage_list) -1):
                index_stage += 1
                intro_transition("vid/stage_{0}.avi".format(index_stage), screen)
                if(index_stage < 3):
                    cambiar_musica(path = "sonido\musica_stage_{0}.mp3".format(index_stage))
                tiempo_stage = None
                stage_run = False
                crono_on = False
                radar_on = False
                start_time = False
                lista_esferas_generada = False
        #------- draw order fix
        pygame.display.flip() # ORDER 1st
        screen.blit(stage_actual.bg, (0, 0))# background ORDER 2nd
        personaje.update(screen, index_stage) # ORDER 3rd
        stage_actual.draw()# floors ORDER 4th
        #---------------
        if(enemigo.vida <= 0 and not radar_on and not enemigo.esta_muerto):
            # Enemy just died: spawn the dragon radar and move the corpse off-screen
            radar = Radar(screen, enemigo.rect.x, enemigo.rect.y, "asset/radar.png", 50, 50, 10)
            radar_on = True
            enemigo.esta_muerto = True
            enemigo.rect.x = 1200
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                running = False
                pygame.quit()
                sys.exit()
            if evento.type == pygame.KEYDOWN :
                if evento.key == pygame.K_SPACE and personaje.control_personaje:
                    personaje.acciones("saltar")
                elif evento.key == pygame.K_w and personaje.control_personaje:
                    personaje.acciones("shot")
                elif evento.key == pygame.K_TAB and personaje.control_personaje:
                    cambiar_modo()
                elif evento.key == pygame.K_e and parte_final_2:
                    # Mash E during the final clash to push the kame back
                    personaje.score += 2
                    poder_kame.contra_poder()
        # Dev mode - press Tab: draw hitboxes and the tile grid
        if get_modo():
            pygame.draw.rect(screen, (255, 255, 255), personaje.get_rect, 2)
            pygame.draw.rect(screen, (255, 255, 255), enemigo.get_rect, 2)
            pygame.draw.rect(screen, (255, 255, 255), personaje.poder.rect, 2)
            dibujar_grid(screen, BLANCO, stage_actual.tile_size, ancho_pantalla, alto_pantalla, 0)
        if(not enemigo.esta_muerto):
            enemigo.update(screen, personaje, final_game_vid ,"vid\proyecto final creditos -v2.avi", credits_finished)
            if enemigo.game_over_win:# the video ended and the enemy reports whether we won
                game_over_win = True
        if(radar_on):# Draw the radar pickup on screen
            radar.update(screen, personaje)
            if(radar.catch_radar):
                crono_on = True
                radar_on = False
                radar = None
        if(crono_on):
            # Radar collected: start the countdown to gather the spheres
            if(not start_time):
                tiempo_stage = TiempoStages(screen,420, 50, time_limit_stages)
                start_time = True
            tiempo_stage.update_time()
            tiempo_stage.draw_time()
            if(tiempo_stage.elapsed_time >= time_limit_stages):# show_game_over_screen(screen, ancho_pantalla, alto_pantalla)
                # over_game.score = score.score
                # over_game.show_game_over("Game Over")
                game_over_defeat = True
        if(start_time):
            if(not lista_esferas_generada):# spawn the spheres
                for i in range(1, 8): # Range must be 1..8 to build the correct asset paths
                    path_esfera = "asset/esferas/{i}.png".format(i=i)
                    x = random.randint(0, ancho_screen_para_esferas)
                    y = random.randint(0, alto_screen_para_esferas)
                    esfera = Esferas(screen, x, y, path_esfera, ancho=50, alto=50, id_propia = i)
                    lista_esferas.append(esfera)
                lista_esferas_generada = True
            for esfera in lista_esferas:
                esfera.update(screen, personaje)
                if(esfera.return_ID):
                    # Sphere collected: remove it from the list and count it
                    lista_esferas = filter_es(esfera.return_ID, lista_esferas)
                    esfera.return_ID = None
                    personaje.contador_esferas += 1
        ####################### Opening intro ##########################
        # handled in main
        ####################### Final intro ###########################
        if(index_stage == 3 and contador_escena < 2):
            # Scripted Goku/Jacky conversation before the final fight
            personaje.control_personaje = False
            if(not load_music_intro):
                load_music_intro = True
                cambiar_musica("sonido\intro_music.wav")
                path_por_defecto = path_jacky
            # load the font for the interaction
            font = pygame.font.Font(None, 36)
            # load the interaction image - goku, jacky
            image = pygame.image.load(path_por_defecto)
            # darken the screen - gives it a matte effect
            oscurecer_pantalla(screen)
            if(slide_boss > 200):
                slide_boss -= dx_slide_boss
            draw_text_and_image(screen, image, slide_boss)# conversation between goku and jacky
            if(slide_boss == 200):
                if(time_text > 0 ):
                    if(text_index < len(text) ):
                        draw_text2(screen, text[text_index], font, text_color, balloon_position, balloon_color, max_width = 350 )
                    time_text -= 1
                else:
                    time_text = time_text_limit
                    text_index += 1
                    if(text_index >= len(text)):# switch to goku's lines
                        path_por_defecto = "asset\goku_chico.png" # default was jacky before
                        slide_boss = 600
                        text_index = 0
                        text = text_goku
                        contador_escena += 1
        if contador_escena == 2 and not flag_video_final :# the goku/jacky conversation is over
            flag_video_final = True
            correr_video("vid/video final goku vs roshi-coratodo-parte-1.avi", ancho_pantalla, alto_pantalla)
            if(not load_musica_battle):# prepare the final fight on the last stage
                load_musica_battle = True
                pygame.mixer.music.load("sonido\musica_resto_pelea.wav")
                pygame.mixer.music.play(-1)
                pygame.mixer.music.set_volume(0.5)
            parte_final_2 = True
            tiempo_stage_final_stage = TiempoStages(screen,420, 50, 40)
        if(parte_final_2):# Kame clash: the boss power grows stronger over time
            poder_final.update()
            poder_kame.update()
            tiempo_stage_final_stage.update_time(final=True)
            if(tiempo_stage_final_stage.elapsed_time > 5 and tiempo_stage_final_stage.elapsed_time < 10):
                poder_kame.caida_kame = 7
            elif(tiempo_stage_final_stage.elapsed_time > 10 and tiempo_stage_final_stage.elapsed_time < 15):
                poder_kame.caida_kame = 9
            elif(tiempo_stage_final_stage.elapsed_time > 15 and tiempo_stage_final_stage.elapsed_time < 20):
                poder_kame.caida_kame = 15
            if(poder_kame.image_1.get_width() <= 15):
                # Player's beam fully pushed back: defeat
                # over_game.score = score.score
                # over_game.show_game_over("Game Over")
                pygame.mixer.music.stop()
                correr_video("vid\goku resultado_explosion.avi", ancho_pantalla, alto_pantalla)
                game_over_defeat = True
            elif(poder_kame.image_1.get_width() >= poder_kame.limit_power_screen):
                # Player's beam overpowered the boss: resume normal play
                pygame.mixer.music.stop()
                correr_video("vid\jacki resultado_explosion.avi", ancho_pantalla, alto_pantalla)
                parte_final_2 = False
                # cambiar_musica("sonido/final_game.mp3")
                personaje.control_personaje = True
                enemigo.cambiar_imagen(screen)
        # if final_game_vid(screen, "vid\proyecto final creditos -v2.avi"):# makes sure all 7 spheres were gathered at the end
        #     game_over_win = True
        # check whether it works
        score.update_score()
        delta_ms = relog.tick(fps)
        personaje.delta_ms = delta_ms
        enemigo.delta_ms = delta_ms
        poder.delta_ms = delta_ms
    # return to the main menu (carrying the score and the game-over result)
    lista_game_over_score = []
    lista_scores = []
    if game_over_defeat:
        lista_game_over_score.append("Game Over")
        lista_scores.append(score_game)
        lista_game_over_score.append(lista_scores)
        return lista_game_over_score
    elif game_over_win:
        lista_game_over_score.append("Win")
        lista_scores.append(score_game)
        lista_game_over_score.append(lista_scores)
        return lista_game_over_score
def draw_text_and_image(screen, image, slide_boss, pos_y=0) -> None:
    """Blit *image* onto *screen* at the given horizontal offset.

    Args:
        screen: Pygame surface to draw on.
        image: Pygame surface (the character portrait) to draw.
        slide_boss (int): Horizontal (x) position for the image.
        pos_y (int, optional): Vertical (y) position. Defaults to 0.

    Returns:
        None
    """
    placement = image.get_rect()
    placement.x = slide_boss
    placement.y = pos_y
    screen.blit(image, placement)
def oscurecer_pantalla(screen) -> None:
    """Darken the whole screen by blitting a translucent black overlay.

    Args:
        screen: Pygame surface to dim.

    Returns:
        None
    """
    overlay = pygame.Surface(screen.get_size(), pygame.SRCALPHA)
    overlay.fill((0, 0, 0, 200))  # alpha 200 of 255 -> mostly opaque black
    screen.blit(overlay, (0, 0))
def draw_text2(screen, text, text_font, text_color, balloon_position, balloon_color, max_width)-> None:
    '''
    Draws a speech balloon containing wrapped text on the screen.

    Args:
        screen (Surface): Pygame screen surface where the balloon is drawn.
        text (str): The text to display inside the balloon.
            NOTE(review): text.split() also splits on the embedded '\\n'
            characters used by the dialogue strings, so explicit newlines
            are flattened into the word-wrap — confirm this is intended.
        text_font (Font): Font used for the text.
        text_color (Tuple[int]): Text color as (R, G, B).
        balloon_position (Tuple[int]): Balloon midtop position as (x, y).
        balloon_color (Tuple[int]): Balloon color as (R, G, B).
        max_width (int): Maximum width of the balloon.

    Returns:
        None
    '''
    balloon_padding_top = 20  # top padding inside the balloon
    balloon_padding_sides = 10  # padding on the balloon sides
    balloon_margin = 10  # horizontal size of the balloon "tail" triangle
    # Split the text into lines according to the maximum width
    lines = []
    words = text.split()
    current_line = words[0]
    for word in words[1:]:
        if text_font.size(current_line + ' ' + word)[0] <= max_width - balloon_padding_sides * 2:
            current_line += ' ' + word
        else:
            lines.append(current_line)
            current_line = word
    lines.append(current_line)
    # Compute the balloon height from the number of wrapped lines
    balloon_height = len(lines) * text_font.get_height() + balloon_padding_top + balloon_padding_sides
    balloon_rect = pygame.Rect(0, 0, max_width, balloon_height)
    balloon_rect.midtop = balloon_position
    balloon_radius = 10
    pygame.draw.rect(screen, balloon_color, balloon_rect, border_radius=balloon_radius)
    # Tail triangle pointing out of the balloon's bottom-right corner
    pygame.draw.polygon(screen, balloon_color, [(balloon_rect.bottomright[0], balloon_rect.bottomright[1] - balloon_padding_sides),
                                                (balloon_rect.bottomright[0] + balloon_margin, balloon_rect.bottomright[1]),
                                                (balloon_rect.bottomright[0], balloon_rect.bottomright[1] + balloon_padding_sides)])
    # Render each wrapped line centered inside the balloon
    line_height = text_font.get_height()
    y = balloon_rect.y + balloon_padding_top // 2
    for line in lines:
        text_surface = text_font.render(line, True, text_color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (balloon_rect.centerx, y)
        screen.blit(text_surface, text_rect)
        y += line_height
def filter_es(id, lista_esferas: "list[Esferas]") -> list:
    '''
    Returns a new list with every sphere whose ``id`` differs from the given one.

    Args:
        id (int): The ID of the collected sphere to remove.
        lista_esferas (list[Esferas]): The list of sphere objects to filter.

    Returns:
        list[Esferas]: New list containing the remaining spheres.
    '''
    # List comprehension replaces the manual append loop.  The annotation is
    # a string (forward reference) so `Esferas` is not evaluated at def time.
    return [esf for esf in lista_esferas if esf.id != id]
#------------------------------------------------ vid
def correr_video(path, ancho, alto)-> None:
    '''
    Plays a video on the Pygame screen at a specific size and volume.
    Blocks until the video finishes or the user clicks / quits.

    Args:
        path (str): Path of the video file.
        ancho (int): Desired video width.
        alto (int): Desired video height.

    Returns:
        None
    '''
    pygame.init()
    screen = pygame.display.set_mode((ancho, alto))
    pygame.display.set_caption("Dragon Ball Sprite")
    vid_1 = Video(path)# final video
    vid_1.set_size((ancho, alto))
    vid_1.set_volume(0.3)
    runnig = True
    while runnig:
        pygame.display.update()
        if vid_1.active == True: # while True keep drawing; close when playback ends
            vid_1.draw(screen, (0, 0))
            vid_1.set_volume(0.5)
        else:
            vid_1.close()
            runnig = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                # sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Any mouse click skips the video
                vid_1.close()
                runnig = False
#------------------------------------------------
# vid Creditos
def final_game_vid(SCREEN, path)-> bool:
    '''
    Plays the credits video on the Pygame screen at the configured size.
    Blocks until the video ends or the user clicks / quits.

    Args:
        SCREEN (Surface): Pygame screen surface where the video is played.
        path (str): Path of the video file.

    Returns:
        bool: True once the credits video has finished (or was skipped).
    '''
    pygame.mixer.music.stop()
    vid = Video(path)
    vid.set_size((ANCHO_PANTALLA, ALTO_PANTALLA))
    vid.set_volume(0.5)
    while True:
        if vid.active == True:
            vid.draw(SCREEN, (0, 0))
        else:
            vid.close()
            credits_finished = True
            return credits_finished # updates the flag variable
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Any mouse click skips the credits
                vid.close()
                credits_finished = True
                return credits_finished
        pygame.display.update()
| HoracioxBarrios/mi_juego_final_limpio | game.py | game.py | py | 19,127 | python | es | code | 2 | github-code | 13 |
38363900596 | """
Data Persistent Loader
Utilities
"""
from simpledbf import Dbf5
import pandas as pd
import pyarrow.parquet as pq
import pyarrow as pa
import os
from tqdm import tqdm
import re
from datetime import datetime
from database_settings import hdfs_utilities as hdfs
import numpy as np
def exports_ingestion(files_folder, log_context):
    """Ingest the exports DBF files in HDFS

    Ingest the files in DBF format into HDFS as Parquet files. Additionally, creates or updates a log to register
    the ingestion.

    Args:
        files_folder (str): path to the exports folder in the temporal landing zone
        log_context (string): context to add in the log file
    """
    # Get all the paths of the files to upload
    dbf_files = [os.path.join(files_folder, f) for f in os.listdir(files_folder) if os.path.isfile(os.path.join(files_folder, f)) if
                 f.endswith('.DBF')]
    if len(dbf_files)>0:
        # Create a parquet file for every DBF file:
        print('Converting {} DBF files to Parquet...'.format(len(dbf_files)))
        for file in tqdm(dbf_files):
            try:
                # Parse the DBF file into a dataframe
                batch = Dbf5(file, codec='latin-1')
                batch = batch.to_dataframe()
                # Add the batch week column
                # NOTE(review): takes the first digit run in the file name as
                # the batch week — confirm the naming convention guarantees it.
                batch['BATCH_WEEK'] = re.search(r'\d+', os.path.basename(file)).group()
                # Add the loading date column
                batch['LOAD_DATE'] = datetime.today().strftime('%Y%m%d')
                # Create the row groups
                # Get all the available boarding dates' years
                years = sorted(pd.to_datetime(batch['FEMB'], format='%Y%m%d').dt.year.unique().tolist(), reverse=True)
                # Convert the dataframe into a pyarrow table
                batch = pa.Table.from_pandas(batch)
                # For every year create a row group
                # In this case, we will include all the columns in the row group
                my_row_groups = []
                for year in years:
                    string_column = [str(i) for i in batch.column('FEMB').to_pylist()]
                    mask = [s.startswith(str(year)) for s in string_column]
                    filtered_table = batch.filter(mask)
                    # Get all the rows from that year
                    my_row_groups.append(filtered_table)
                # Create the Parquet file
                parquet_writer = pq.ParquetWriter(files_folder + os.path.basename(file).split('.')[0] + '.parquet',
                                                  my_row_groups[0].schema)
                # Add every row group
                for rg in my_row_groups:
                    parquet_writer.write_table(rg)
                parquet_writer.close()
            except Exception as e:
                print(f"Error generating parquet file for '{file}':{type(e).__name__}: {str(e)}")
            else:
                # Delete the DBF file from the temporal landing zone
                os.remove(os.path.abspath(os.path.join(files_folder, file)))
    else:
        print('No DBF files in the temporal landing zone')
    # Get all the parquet files paths
    parquet_files = [os.path.abspath(os.path.join(files_folder, file_name)) for file_name in os.listdir(files_folder) if
                     file_name.endswith('.parquet')]
    if len(parquet_files)>0:
        print('Ingesting {} parquet files into HDFS...'.format(len(parquet_files)))
        # Define the directory in HDFS to store the files
        hdfs_directory = '/thesis/peru/exports/'
        # Ingest the files in HDFS
        # NOTE(review): assumes add_file_to_hdfs returns 0 on success and a
        # truthy value on failure (it is summed below) — confirm.
        failed_count =[]
        for file in tqdm(parquet_files):
            result = hdfs.add_file_to_hdfs(file, hdfs_directory, log_context)
            # Delete the parquet file from the temporal landing zone if the transfer to HDFS was successfull
            if result == 0 :
                os.remove(file)
            failed_count.append(result)
        print('Ingestion finished! {} files ingested'.format(len(parquet_files)-np.sum(failed_count)))
    else:
        print('No parquet files in the temporal landing zone')
def headings_ingestion(file_path, log_context):
    """Ingest the headings file into HDFS

    Ingest the headings file in .txt format into HDFS as a Parquet file. Additionally, creates or updates a log to register
    the ingestion.

    Args:
        file_path (str): path to headings file in the temporal landing zone
        log_context (string): context to add in the log file
    """
    if os.path.exists(file_path):
        # Parse the file and convert it into a Dataframe
        with open(file_path, 'r') as f:
            file_lines = f.readlines()
        # Strip trailing tabs/newlines and drop the header line
        file_lines = [string.rstrip('\t\n') for string in file_lines][1:]
        file_lines = [string.split('\t') for string in file_lines]
        # Remove blank fields within each record
        file_lines = [[element for element in inner_list if element.strip()] for inner_list in file_lines]
        # Convert to dataframe
        headings = pd.DataFrame(file_lines)
        # Convert the column names into strings
        headings.columns = headings.columns.astype(str)
        # Add the loading date column
        headings['LOAD_DATE'] = datetime.today().strftime('%Y%m%d')
        # Create a parquet file
        # Convert the dataframe into a pyarrow table
        headings = pa.Table.from_pandas(headings)
        # Generate the parquet file in the same folder than the original headings file
        parquet_writer = pq.ParquetWriter(os.path.dirname(file_path) + '/headings' + '.parquet', headings.schema)
        parquet_writer.write_table(headings)
        parquet_writer.close()
        # Define the directory in HDFS to store the files
        hdfs_directory = '/thesis/peru/headings/'
        # Add the files
        # NOTE(review): assumes add_file_to_hdfs returns 0 on success — confirm.
        result = hdfs.add_file_to_hdfs(os.path.dirname(file_path) + '/headings.parquet', hdfs_directory,
                                       log_context=log_context)
        if result == 0:
            print('Ingestion finished! headings file added to HDFS')
            # Delete the parquet file
            os.remove(os.path.dirname(file_path) + '/headings.parquet')
            # Delete the original file
            os.remove(file_path)
        else:
            print('Ingestion of headings in HDFS failed!')
    else:
        print('No headings file in the temporal landing zone')
22125973749 | from base_classes.article import Article
from base_classes.ArticleMetadata import ArticleMetadata
"""
An Article contains enough information for the article to be rendered anywhere.
"""
class DefaultArticle(Article):
    """A fully self-contained article, ready to be rendered anywhere.

    Wraps an ArticleMetadata with optional display overrides, a pointer to
    the article that follows it, and the reader's words-per-minute speed.
    """

    def __init__(self, meta: ArticleMetadata, display_title: str = "", display_content: str = "", next_id: str = "", wpm: int = 200):
        """
        Parameters
        ----------
        meta: ArticleMetadata
            Metadata for the Article.
        display_title: str, optional
            Title shown when rendering; falls back to meta.title.
        display_content: str, optional
            Content shown when rendering; falls back to meta.content.
        next_id: str, optional
            Id of the Article that comes after this one. Defaults to "".
        wpm: int, optional
            Reader's words-per-minute speed (default 200). Pass -1 to
            exclude reading time from rendering.
        """
        super().__init__(meta, display_title, display_content)
        self.id = meta.id
        self.next_id = next_id
        self._wpm = wpm

    def time_to_read_in_minutes(self) -> int:
        """Estimated reading time in whole minutes (0 when wpm is disabled)."""
        return self.word_count // self._wpm if self._wpm > 0 else 0

    def time_to_read_str(self) -> str:
        """Human-readable reading time, e.g. '5 min' or '1 hr 20 min'."""
        minutes = self.time_to_read_in_minutes()
        if minutes < 60:
            return str(minutes) + ' min'
        hours, leftover = divmod(minutes, 60)
        return str(hours) + ' hr ' + str(leftover) + ' min'

    def to_html_string(self) -> str:
        """Render the article as an HTML fragment with top/skip nav links."""
        if self._wpm > 0:
            reading_time_str = f': est. {self.time_to_read_str()})'
        else:
            reading_time_str = ')'
        return (f""" <a href="#top">[← top]</a>
        <h1 id="{self.meta.id}"><a href="{self.meta.url}">{self.display_title}</a></h1>
        <a href="#{self.next_id}">[skip →]</a>
        <h2>{self.meta.source_title}</h2>
        <h3>{'Fetched content' if not self.used_meta_content else ''}({self.word_count} words{reading_time_str}</h3>
        {self.display_content}""")
6343926669 | #!/usr/bin/env python
import sys
import unittest
from app.parselog import ParseLog
class TestParseLog(unittest.TestCase):
    """Unit tests for app.parselog.ParseLog.

    Uses the fixture logs data/test_good.log and data/test_bad.log.
    """
    # CONSIDER ADDING PYTEST FIXTURES FOR CONSTANTS

    def setUp(self):
        self.parse = ParseLog()
        self.goodlog = open('data/test_good.log','r')
        self.badlog = open('data/test_bad.log', 'r')
        # Close the fixture files after every test, pass or fail.  This
        # replaces the original __del__ destructor, whose execution timing
        # depends on garbage collection and is not guaranteed by unittest.
        self.addCleanup(self.goodlog.close)
        self.addCleanup(self.badlog.close)

    def test_parse_apache_time_returns_correct_result(self):
        datetime = self.parse.parse_apache_time("30/Aug/2015:05:13:53 +0200")
        exp_datetime = 1440904433
        self.assertEqual(exp_datetime, datetime)

    def test_ip_lookup_method_returns_correct_result(self):
        org, lat, lon, isp = self.parse.ip_lookup('74.125.225.229')
        exp_org = exp_isp = 'Google Inc.'
        exp_lat = 37.419200000000004
        exp_lon = -122.0574
        self.assertEqual(exp_org, org)
        self.assertEqual(exp_isp, isp)
        self.assertEqual(exp_lat, lat)
        self.assertEqual(exp_lon, lon)

    def test_ip_lookup_method_handles_bad_ip(self):
        org, lat, lon, isp = self.parse.ip_lookup('0.0.0.0')
        exp_org = exp_isp = exp_lat = exp_lon = None
        self.assertEqual(exp_org, org)
        self.assertEqual(exp_isp, isp)
        self.assertEqual(exp_lat, lat)
        self.assertEqual(exp_lon, lon)

    def test_ip_lookup_method_handles_really_bad_ip(self):
        # Lookup resolves org/isp but yields no coordinates for this IP.
        org, lat, lon, isp = self.parse.ip_lookup('46.246.49.254')
        exp_org = 'Portlane Network'
        exp_isp = 'PrivActually Ltd'
        exp_lat = exp_lon = None
        self.assertEqual(exp_org, org)
        self.assertEqual(exp_isp, isp)
        self.assertEqual(exp_lat, lat)
        self.assertEqual(exp_lon, lon)

    def test_ip_lookup_method_handles_non_ip(self):
        org, lat, lon, isp = self.parse.ip_lookup('Beetlejuice')
        exp_org = exp_isp = exp_lat = exp_lon = None
        self.assertEqual(exp_org, org)
        self.assertEqual(exp_isp, isp)
        self.assertEqual(exp_lat, lat)
        self.assertEqual(exp_lon, lon)

    def test_parse_line_method_handles_malformed_line(self):
        line = self.badlog.readline()
        result = self.parse.parse_line(line)
        self.assertIsNone(result)

    def test_parse_line_method_returns_correct_result(self):
        line = self.goodlog.readline()
        actual = self.parse.parse_line(line)
        expected = [1389721010,'/svds.com','http://www.svds.com/rockandroll/','198.0.200.105','SILICON VALLEY DATA SCIENC',
                    37.8858, -122.118, 'Comcast Business Communications, LLC']
        self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| jonneff/parselog | test/unit/parselog_test.py | parselog_test.py | py | 2,930 | python | en | code | 0 | github-code | 13 |
14848644011 | from flask.scaffold import F
from backend import UPLOAD_FOLDER, app
from flask.globals import request
from flask.json import jsonify
from backend.models import Notification, Report, Student, StudentSchema, Submission, SubmissionRequest, Teacher, TeacherSchema
from backend import db
from collections import defaultdict
studentSchema = StudentSchema()
teacherSchema = TeacherSchema()
@app.route("/")
def hello():
    """Health-check endpoint: answering on the root URL confirms the
    server process is up and reachable."""
    return "This is server page. This means the server is online and ready for smart classroom"
@app.route("/loginStudent", methods = ["POST"])
def loginStudent():
    """Authenticate a student.

    Expects JSON {"username": ..., "password": ...}.  Returns the
    serialized student on success, or a failure message otherwise.
    NOTE(review): passwords are stored/compared in plaintext -- they
    should be hashed before this goes anywhere near production.
    """
    request_data = request.get_json()
    username = request_data["username"]
    password = request_data["password"]
    user = Student.query.filter_by(username=username).first()
    # `is not None` instead of `not user == None`; the debug print of the
    # user object was removed so credentials/PII do not land in logs.
    if user is not None and user.password == password:
        return jsonify({"message": "auth successful", "user": studentSchema.dump(user)})
    return jsonify({"message": "auth unsuccessful"})
# student routes
@app.route("/getNotification" , methods=["POST"])
def getNotification():
    """List notifications for a class.

    Expects JSON {"class": <classid>}; returns a list of
    {"title", "teacher", "priority"} dicts.
    """
    payload = request.get_json()
    notifications = Notification.query.filter_by(classid=payload["class"]).all()
    print(notifications)  # debug output kept from the original
    res = [
        {
            "title": note.title,
            "teacher": Teacher.query.filter_by(tid=note.tid).first().name,
            "priority": note.priority,
        }
        for note in notifications
    ]
    return jsonify(res)
@app.route("/getSubmissionRequest" , methods=["POST"])
def getSubmissionRequest():
    """List submission requests (hand-in slots) for a class.

    Expects JSON {"class": <classid>}.
    """
    payload = request.get_json()
    requests_for_class = SubmissionRequest.query.filter_by(classid=payload["class"]).all()
    res = []
    for sub_req in requests_for_class:
        teacher = Teacher.query.filter_by(tid=sub_req.tid).first()
        res.append({
            "title": sub_req.title,
            "assignedTeacher": teacher.name,
            "deadline": sub_req.deadline,
            "description": sub_req.desc,
            "submissionID": sub_req.srid,
            "teacherPicture": "https://via.placeholder.com/50",
            "type": sub_req.type,
        })
    print(res)  # debug output kept from the original
    return jsonify(res)
# teacher routes
@app.route("/loginTeacher", methods = ["POST"])
def loginTeacher():
    """Authenticate a teacher.

    Expects JSON {"username": ..., "password": ...}.  Mirrors
    loginStudent; same caveat applies:
    NOTE(review): plaintext password comparison -- hash before production.
    """
    request_data = request.get_json()
    username = request_data["username"]
    password = request_data["password"]
    user = Teacher.query.filter_by(username=username).first()
    # `is not None` instead of `not user == None`; debug print removed.
    if user is not None and user.password == password:
        return jsonify({"message": "auth successful", "user": teacherSchema.dump(user)})
    return jsonify({"message": "auth unsuccessful"})
@app.route("/getTeacherSubmissionDetails" , methods=["POST"])
def getTeacherSubmissionDetails():
    """List every student submission for one submission request.

    Expects JSON {"srid": ...}; returns one entry per uploaded file with
    the submitting student's details and the request deadline.
    """
    request_data = request.get_json()
    srid = request_data["srid"]
    allSubmission = Submission.query.filter_by(srid=srid).all()
    # The deadline belongs to the request itself (every row has the same
    # srid), so fetch it once instead of once per submission.
    sub_request = SubmissionRequest.query.filter_by(srid=srid).first()
    deadline = sub_request.deadline if sub_request is not None else None
    res = []
    for submission in allSubmission:
        # One student lookup per row (previously three identical queries).
        student = Student.query.filter_by(sid=submission.sid).first()
        res.append({
            "studentName": student.name,
            "studentUSN": student.usn,
            "class": student.classid,
            "deadline": deadline,
            "filepath": submission.filepath,
        })
    return jsonify(res)
@app.route("/getTeacherSubmissionRequest" , methods=["POST"])
def getTeacherSubmissionRequest():
    """List the submission requests created by a teacher (JSON {"tid": ...})."""
    payload = request.get_json()
    tid = payload["tid"]
    res = []
    for sub_req in SubmissionRequest.query.filter_by(tid=tid).all():
        res.append({
            "title": sub_req.title,
            "teacher": Teacher.query.filter_by(tid=sub_req.tid).first().name,
            "deadline": sub_req.deadline,
            "desc": sub_req.desc,
            "class": sub_req.classid,
            "srid": sub_req.srid,
        })
    return jsonify(res)
@app.route("/getTeacherNotification" , methods=["POST"])
def getTeacherNotification():
    """List notifications a teacher has posted (JSON {"tid": ...}).

    NOTE(review): "createdAt" is the hard-coded placeholder "10.2.2",
    exactly as in the original -- confirm whether a real timestamp was
    intended here.
    """
    payload = request.get_json()
    notifications = Notification.query.filter_by(tid=payload["tid"]).all()
    print(notifications)  # debug output kept from the original
    res = [
        {
            "class": note.classid,
            "title": note.title,
            "teacher": Teacher.query.filter_by(tid=note.tid).first().name,
            "priority": note.priority,
            "createdAt": "10.2.2",
        }
        for note in notifications
    ]
    return jsonify(res)
@app.route("/createNotification" , methods=["POST"])
def createNotification():
    """Create a class notification.

    Expects JSON with "class", "priority", "title" and "tid"; responds
    with the new notification id and the posting teacher's name.
    """
    payload = request.get_json()
    notification = Notification(
        priority=payload["priority"],
        tid=payload["tid"],
        title=payload["title"],
        classid=payload["class"],
    )
    teacher = Teacher.query.filter_by(tid=payload["tid"]).first()
    db.session.add(notification)
    db.session.commit()  # commit populates notification.nid
    return jsonify({"message": "added successfully" , "statuscode": 200, "insertedNotification": notification.nid, "teacher": teacher.name})
@app.route("/createSubmissionRequest" , methods=["POST"])
def createSubmissionRequest():
    """Create a submission request (assignment/quiz hand-in slot).

    Expects JSON with "class", "tid", "desc", "deadline", "title", "type".
    """
    payload = request.get_json()
    submission_request = SubmissionRequest(
        tid=payload["tid"],
        title=payload["title"],
        deadline=payload["deadline"],
        desc=payload["desc"],
        classid=payload["class"],
        type=payload["type"],
    )
    db.session.add(submission_request)
    db.session.commit()
    return jsonify({"message": "added successfully" , "statuscode": 200, })
@app.route("/getAllStudents" , methods=["GET"])
def getAllStudents():
    """Group every student's USN by class: {"students": {classid: [usn, ...]}}."""
    # Single pass with the module-level `defaultdict` import (previously
    # unused); replaces the original two-pass build and debug prints.
    grouped = defaultdict(list)
    for student in Student.query.filter_by().all():
        data = studentSchema.dump(student)
        grouped[data["classid"]].append(data["usn"])
    # Convert back to a plain dict so the payload type matches the
    # original response exactly.
    return jsonify({"students": dict(grouped), })
@app.route("/addEntry" , methods=["POST"])
def addEntry():
    """Record a marks entry (assignment/quiz/CIE) for a student by USN.

    Expects JSON with "usn", "type", "total" and "marksObtained".
    """
    payload = request.get_json()
    student = Student.query.filter_by(usn=payload["usn"]).first()
    report = Report(
        sid=student.sid,
        type=payload["type"],
        total=payload["total"],
        marksObtained=payload["marksObtained"],
    )
    db.session.add(report)
    db.session.commit()
    return jsonify({"message": "added successfully" , "statuscode": 200, })
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
from werkzeug.utils import secure_filename
import os
def allowed_file(filename):
    """True iff *filename* has an extension listed in ALLOWED_EXTENSIONS
    (case-insensitive)."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route("/turnin" , methods=["POST"])
def turnIn():
    """Accept a student's file upload for a submission request.

    Form fields: "sid", "srid", "type"; file part: "file".
    NOTE(review): the upload is not validated with allowed_file() and an
    existing file with the same name is silently overwritten -- confirm
    whether that is intended.
    """
    student_id = request.form["sid"]
    request_id = request.form["srid"]
    submission_type = request.form["type"]
    upload = request.files['file']
    # secure_filename strips path separators from the client-supplied name.
    safe_name = secure_filename(upload.filename)
    upload.save(os.path.join(app.config['UPLOAD_FOLDER'], safe_name))
    record = Submission(sid=student_id, srid=request_id, type=submission_type, filepath=safe_name)
    db.session.add(record)
    db.session.commit()
    return jsonify({"message": "done"})
from flask import send_file
@app.route("/getfile" , methods=["GET"])
def getFile():
    """Serve an uploaded file by name (?file=<name>)."""
    file = request.args.get('file')
    # Security fix: strip any directory components from the user-supplied
    # name so a crafted value like "../../etc/passwd" cannot escape the
    # uploads directory (the raw value was previously joined directly).
    filename = os.path.basename(file) if file else ''
    return send_file(os.path.join("../uploads", filename))
@app.route("/getScores" , methods=["POST"])
def getScores():
    """Return a student's marks grouped into assignments / cie / quiz.

    Expects JSON {"sid": ...}.  For each known assessment type the first
    matching Report row (if any) is emitted as
    {"name", "total", "marks"}.
    """
    # Map each response bucket to its report types; iterating this table
    # replaces seven copy-pasted query/if pairs while preserving the
    # original per-bucket ordering.
    categories = [
        ("assignments", ("Assignment1", "Assignment2")),
        ("quiz", ("Quiz1", "Quiz2")),
        ("cie", ("CIE1", "CIE2", "CIE3")),
    ]
    request_data = request.get_json()
    sid = request_data["sid"]
    d = {
        "assignments": [],
        "cie": [],
        "quiz": []
    }
    for bucket, report_types in categories:
        for report_type in report_types:
            report = Report.query.filter_by(sid=sid, type=report_type).first()
            if report is not None:
                d[bucket].append({"name": report.type, "total": report.total, "marks": report.marksObtained})
    return jsonify(d)
| yajatvishwak/smartclassroom-backend | backend/routes.py | routes.py | py | 9,477 | python | en | code | 1 | github-code | 13 |
# %% Setup
from sklearn.model_selection import learning_curve
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier as RandForClassy
import numpy as np
import matplotlib.pyplot as plt  # bug fix: was "mratplotlib", which raised ModuleNotFoundError
import seaborn as sns
sns.set()
# %% Getting Data
# 500 samples, 2 features, 10 blob centers; fixed seed for reproducibility.
X, y = make_blobs(500, 2, centers=10, cluster_std=1, random_state=1892)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='jet',
            clim=(y.min(), y.max()), zorder=3)
plt.axis('off')
# NOTE(review): the backslashes in '..\images\...' happen not to form
# escape sequences here; a raw string or forward slashes would be safer.
plt.savefig('..\images\RAND_FOREST-CLASS-DATA.jpg')
# %% Helper Function From Text
def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
    """Fit *model* on (X, y) and draw its decision regions.

    Scatters the training points, fits the classifier, evaluates it on a
    200x200 grid over the current axis limits, and overlays the predicted
    classes as filled contours.
    """
    ax = ax or plt.gca() # Set the Plot axis
    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=cmap,
               clim=(y.min(), y.max()), zorder=3) # Create a scatter plot of the data
    ax.axis('tight') # Set the axis Range to tight
    ax.axis('off') # Turn Off Axis Display
    xlim = ax.get_xlim() # Get The X/Y LIMITS
    ylim = ax.get_ylim()
    # fit the estimator
    model.fit(X, y) # Fit the model
    xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
                         np.linspace(*ylim, num=200)) # Make grid of datapoints
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(
        xx.shape) # Use model to predict data
    # Create a color plot with the results
    n_classes = len(np.unique(y))
    contours = ax.contourf(xx, yy, Z, alpha=0.3,
                           levels=np.arange(n_classes + 1) - 0.5,
                           cmap=cmap, clim=(y.min(), y.max()),
                           zorder=1)
    ax.set(xlim=xlim, ylim=ylim)
# %% Setting UP and Running Module
# Fit and plot random forests with 200 trees, then 500, on the same data.
model = RandForClassy(n_estimators=200)
visualize_classifier(model, X, y, cmap='seismic')
# plt.savefig('..\images\RAND_FOREST-CLASS-MODEL_200.jpg')
model = RandForClassy(n_estimators=500)
visualize_classifier(model, X, y, cmap='rainbow')
# plt.savefig('..\images\RNAD_FOREST-CLASS-MODEL_500.jpg')
# %% BEST MODEL
def plot_learning_curve(estimator, X, y, axes=None, cv=5, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """Draw learning-curve, scalability and performance panels into *axes*.

    Bug fix: the first parameter was spelled "Esitmator" while the body
    referenced "estimator", so the function ignored its argument and
    silently used the module-level global of that name.  Positional
    callers are unaffected by the rename.

    Parameters: estimator (sklearn-style), data X/y, a 3-axes array (a
    fresh 1x3 figure is created when None), cv folds, n_jobs workers, and
    the fractions of the training set to evaluate.  Returns plt.
    """
    if axes is None:
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
    axes[0].set_xlabel("Training examples")
    axes[0].set_ylabel("Score")
    train_sizes, train_scores, test_scores, fit_times, _ = \
        learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
                       train_sizes=train_sizes,
                       return_times=True)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fit_times_mean = np.mean(fit_times, axis=1)
    fit_times_std = np.std(fit_times, axis=1)

    # Plot learning curve (mean +/- one std band)
    axes[0].grid()
    axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
                         train_scores_mean + train_scores_std, alpha=0.1,
                         color="r")
    axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1,
                         color="g")
    axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
                 label="Training score")
    axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
                 label="Cross-validation score")
    axes[0].legend(loc="best")

    # Plot n_samples vs fit_times
    axes[1].grid()
    axes[1].plot(train_sizes, fit_times_mean, 'o-')
    axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
                         fit_times_mean + fit_times_std, alpha=0.1)
    axes[1].set_xlabel("Training examples")
    axes[1].set_ylabel("fit_times")
    axes[1].set_title("Scalability of the model")

    # Plot fit_time vs score
    axes[2].grid()
    axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
    axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
                         test_scores_mean + test_scores_std, alpha=0.1)
    axes[2].set_xlabel("fit_times")
    axes[2].set_ylabel("Score")
    axes[2].set_title("Performance of the model")
    return plt
fig, axes = plt.subplots(3, 1, figsize=(10, 15))
# 50-fold cross-validation for smoother mean train/test score curves
# (the previous comment said 100 iterations, but cv=50 is passed below).
axes[0].set_title('LEARNING CURVES')
estimator = RandForClassy()
plot_learning_curve(estimator, X, y, axes=axes,
                    cv=50, n_jobs=4)
plt.show()
| Negative-light/EGR491-PYMLDS | PROJECT 5/CODE/randForClass.py | randForClass.py | py | 4,570 | python | en | code | 0 | github-code | 13 |
class LRUCache:
    """Least-recently-used cache with O(1) get/set.

    Bug fix: the original stored the cache, capacity and item count as
    *class* attributes (LRUCache.Dict etc.), so every LRUCache instance
    shared a single cache and the last-constructed capacity.  State is
    now per-instance.  It also referenced collections.OrderedDict without
    importing collections anywhere in the file.
    """

    # @param capacity, an integer
    def __init__(self, capacity):
        # Local import keeps the class self-contained; the original file
        # never imported collections at module level.
        from collections import OrderedDict
        self.cache = OrderedDict()  # insertion order == recency order (oldest first)
        self.capacity = capacity

    # @return an integer
    def get(self, key):
        """Return key's value and mark it most-recently-used; -1 if absent."""
        if key not in self.cache:
            return -1
        # Re-inserting moves the entry to the most-recently-used end.
        value = self.cache.pop(key)
        self.cache[key] = value
        return value

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        """Insert/update *key*; evict the least-recently-used entry when full."""
        if key in self.cache:
            # Updating counts as a use: drop and re-insert at the MRU end.
            del self.cache[key]
        elif len(self.cache) >= self.capacity:
            # Evict the oldest (least-recently-used) entry.
            self.cache.popitem(last=False)
        self.cache[key] = value
23896881445 | from .d_exceptions import *
def get_logger(file_name: str, level: int) -> logging.Logger:
    """Return the logger named *file_name*, set to *level*, with one
    timestamped StreamHandler attached.

    Bug fix: the handler is now attached only if the logger has none, so
    repeated calls with the same name no longer stack duplicate handlers
    (which made every record print multiple times).
    """
    logger = logging.getLogger(file_name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger
logger = get_logger(__file__, LOGGING_LEVEL)
def run_cmd(
    cmd: Command,
    stdin=bytes(),
    raise_on_error=True,
) -> Tuple[StdOut, StdErr]:
    """Run *cmd* through the shell, feeding it *stdin* (bytes).

    Returns (stdout, stderr) decoded as text.  When raise_on_error is
    true and the command exits non-zero, raises AdocMathException carrying
    the combined output instead of returning.

    NOTE(review): shell=True executes the string via the shell, so *cmd*
    must never contain untrusted input.
    """
    logger.info(f"Running {cmd}...")
    process = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        shell=True,
    )
    # communicate() writes stdin, reads both pipes to EOF and closes them.
    stdout, stderr = process.communicate(input=stdin)
    process.wait()
    if raise_on_error and process.returncode != 0:
        raise AdocMathException(stdout.decode() + stderr.decode())
    else:
        return StdOut(stdout.decode()), StdErr(stderr.decode())
def for_each_apply_method(
    ps: Iterable[Union[str, plib.Path]], # path strs
    method: Callable[[plib.Path], None],
):
    """Calls a method with an argument of all of ps.
    If a p is a directory, it searches it recursively.
    The idea is that the method is bound to some object (such as set), and this function allows easy updating of that set over a tree of files.
    """
    for p_path_or_str in ps:
        # strict=True: raise immediately if the path does not exist.
        p = plib.Path(p_path_or_str).resolve(strict=True)
        if p.is_dir():
            # Recurse into the directory's children in sorted order.
            for_each_apply_method(
                ps=sorted(p.glob("*")),
                method=method,
            )
        elif p.is_file():
            method(p)
        else:
            # A successfully resolved path that is neither file nor
            # directory (e.g. a special device) is treated as dead code.
            raise AdocMathException(DEAD_CODE_MSG)
def log(*args):
    """Debug helper: log each argument next to the source text that
    produced it, e.g. ``log(x + 1)`` logs " x + 1 = <value>".

    Recovers the caller's source line via inspect, so it only works when
    the log(...) call fits on one line.
    (Technique from https://stackoverflow.com/a/2749857/4204961)"""
    frame = inspect.currentframe()
    frame = inspect.getouterframes(frame)[1]
    # The caller's source line, e.g. "log(x, y)".
    string = inspect.getframeinfo(frame[0]).code_context[0].strip() # type: ignore
    # Text between the outer parentheses, naively split on commas.
    # NOTE(review): this mis-parses nested commas, e.g. log(f(a, b)).
    params = string[string.find("(") + 1 : -1].split(",")
    names = []
    for i in params:
        if i.find("=") != -1:
            # For "kw=expr" arguments keep the expression side as the label.
            names.append(i.split("=")[1].strip())
        else:
            names.append(i)
    for name, val in zip(names, args):
        logger.debug(f"\n {name} =\n{' ' * 14}{pprint.pformat(val)}")
def join_with(it: Iterable[str], joiner: str) -> str:
    """str.join with its arguments flipped, for pipeline-friendly calls."""
    return joiner.join(it)
@contextlib.contextmanager
def change_cwd(path: Union[plib.Path, str]):
    """Temporarily switch the working directory to *path*.

    The previous working directory is restored on exit, even when the
    body (or the chdir itself) raises.
    """
    previous = plib.Path.cwd().resolve()
    try:
        os.chdir(plib.Path(path).resolve())
        yield
    finally:
        # Always return to where we started.
        os.chdir(previous)
def lshave(string: str, sub: str) -> str:
    """Return *string* with one leading occurrence of *sub* removed, if present."""
    return string[len(sub):] if string.startswith(sub) else string
def rshave(string, sub: str) -> str:
    """Return *string* with one trailing occurrence of *sub* removed.

    Unlike lshave, a non-str *string* is coerced with str() first
    (behaviour preserved from the original).
    """
    text = string if isinstance(string, str) else str(string)
    return text[:len(text) - len(sub)] if text.endswith(sub) else text
| hacker-DOM/adoc-math | adoc_math/_common/e_utils.py | e_utils.py | py | 3,163 | python | en | code | 4 | github-code | 13 |
15803989636 | import tkinter as tk
# Define the conversion fxn
def convert():
    """Read the amount and unit selections from the UI and convert.

    Reads module-level tkinter widgets (input_entry, from_unit_var,
    to_unit_var) -- assumed to be created where the UI is built.
    Returns the converted value.
    """
    input_value = float(input_entry.get())
    from_unit = from_unit_var.get()
    to_unit = to_unit_var.get()
    # Conversion factors keyed by (from, to) unit pair.
    conversion_rates = {
        ("Miles", "Kilometers"): 1.60934,
        ("Kilometers", "Miles"): 0.621371,
        ("Pounds", "Kilograms"): 0.453592,
        ("Kilograms", "Pounds"): 2.20462,   # fixed stray space in " Pounds"
        ("Inches", "Centimeters"): 2.54,
        ("Centimeters", "Inches"): 0.393701
    }
    # Fixed: the dict name was misspelled ("conversidon_rates") and was
    # indexed with an empty tuple instead of the selected unit pair.
    result = input_value * conversion_rates[(from_unit, to_unit)]
    return result
| Jensen416/UnitConverter | unitconv.py | unitconv.py | py | 577 | python | en | code | 0 | github-code | 13 |
19283676709 | from object_checker.base_object_checker import AbacChecker
from apps.core.models import User, Image
class ImageChecker(AbacChecker):
    """Attribute-based access rules for Image objects."""

    @staticmethod
    def check_delete(request_user: User, image: Image):
        """A user may delete an image if they are a superuser or they own
        the offer the image belongs to."""
        return bool(request_user.is_superuser) or bool(request_user == image.offer.user)
| Philliip/MTAA_SELLIT_BACKEND | apps/core/checkers/image.py | image.py | py | 364 | python | en | code | 0 | github-code | 13 |
# This Python file uses the following encoding: utf-8
# Question 1
# Author: Kelvin Zhang
# Date Created: 2015-10-15

# Prompt for initial user input
initialCost = float(input("What is the initial cost of the flight? £"))
suitcaseWeight = float(input("Enter the weight of your suitcase (kg): "))
totalCost = initialCost

# Suitcase surcharge: flat £60 over 20 kg plus £1 per whole 0.5 kg over.
if suitcaseWeight > 20:
    totalCost += 60
    weightOverage = (suitcaseWeight - 20) // 0.5
    totalCost += weightOverage

# Prompt for and calculate gift cost
while True:
    hasGift = input("Will you buy your partner a gift at the airport? [Y/N] ").upper()
    if hasGift == 'Y':
        while True:
            maintenanceLevel = input("What is the maintenance level of the gift? [low/med/high] ").lower()
            if maintenanceLevel == 'low':
                totalCost += 10
            # Bug fix: the prompt offers "med" but only "medium" was
            # accepted, so following the prompt always failed validation.
            # Both spellings are now accepted.
            elif maintenanceLevel in ('med', 'medium'):
                totalCost += 20
            elif maintenanceLevel == 'high':
                totalCost += 50
            else:
                print("Invalid input. Please try again.")
                continue
            break
        break
    elif hasGift == 'N':
        break
    else:
        print("Invalid input. Please try again.")
        continue

# Prompt for and calculate drink price: £6 per drink up to 6 drinks;
# more than 6 means the flight is missed (£300 fee instead).
drinkNum = int(input("How many drinks will you have at the bar? "))
if drinkNum <= 6:
    totalCost += 6 * drinkNum
else:
    totalCost += 300
    print("A missed flight fee of £300 has been added.")

# Output the total cost of the flight
print("The total cost of the flight is £{:2.2f}".format(totalCost))
71702512659 | import socket
from dataclasses import dataclass, field
from os import getpid
from typing import List, Callable, Optional
from icmplib import ICMPRequest, ICMPv6Socket, ICMPv4Socket, is_ipv4_address, is_ipv6_address
from icmplib.exceptions import *
from icmplib.sockets import ICMPSocket
@dataclass
class Hop:
    """One hop along a traceroute: the responding address plus the
    per-probe round-trip times (None marks a probe that timed out)."""
    successful: bool
    final: bool
    address: str
    times: List[Optional[int]] = field(default_factory=list)

    @property
    def failed_requests(self) -> int:
        """Number of probes that got no reply."""
        return sum(1 for t in self.times if t is None)

    @property
    def successful_requests(self) -> int:
        """Number of probes that were answered."""
        return sum(1 for t in self.times if t is not None)
class TraceRoute:
    """ICMP traceroute to *dest*: probes with increasing TTL, collecting
    one Hop per router until the destination answers or max_hops is hit.

    Call start(); results land in self.hops / self.ended_successfully /
    self.error.  *on_hop* (optional) is invoked with each Hop as it is
    discovered.
    """

    def __init__(self, dest: str, timeout: int = 2,
                 max_hops: int = 30, req_per_hop: int = 3,
                 on_hop: Callable[[Hop], None] = None):
        self.dest = dest
        # Resolve once up front; raises socket.gaierror for bad hosts.
        self.ip_address = socket.gethostbyname(dest)
        self.timeout = timeout
        self.max_hops = max_hops
        self.req_per_hop = req_per_hop
        self.on_hop = on_hop
        self.hops: List[Hop] = []
        self.ended_successfully = False
        self.error: Optional[Exception] = None
        # Seed ICMP ids from the PID so concurrent processes don't collide.
        self.unique_id = getpid()

    def start(self):
        """Run the trace, recording the route or the ICMP error."""
        try:
            self.hops = self.trace()
            self.ended_successfully = True
        except ICMPError as err:
            self.error = err
            self.ended_successfully = False

    def trace(self) -> List[Hop]:
        """Probe hop by hop until the final hop replies or max_hops is reached."""
        ttl = 1
        with self.initialize_socket() as sock:
            route = [self.try_reach(sock, ttl)]
            # NOTE(review): the first hop is not passed to on_hop -- this
            # mirrors the original behaviour; confirm it is intentional.
            while not route[-1].final:
                if len(route) >= self.max_hops:
                    break
                ttl += 1
                hop = self.try_reach(sock, ttl)
                # Bug fix: on_hop defaults to None, so the original
                # unconditional call crashed every trace created without
                # a callback.
                if self.on_hop is not None:
                    self.on_hop(hop)
                route.append(hop)
            return route

    def initialize_socket(self):
        """Pick the ICMP socket flavour matching the resolved address."""
        if is_ipv4_address(self.ip_address):
            return ICMPv4Socket()
        elif is_ipv6_address(self.ip_address):
            return ICMPv6Socket()
        else:
            raise SocketAddressError

    def try_reach(self, sock: ICMPSocket, ttl: int) -> Hop:
        """Send req_per_hop probes at this TTL and fold the replies into one Hop."""
        hop = Hop(False, False, "", [])
        for i in range(self.req_per_hop):
            icmp_id = self.generate_icmp_id()
            request = ICMPRequest(
                destination=self.ip_address,
                sequence=i,
                id=icmp_id,
                ttl=ttl
            )
            try:
                sock.send(request)
                reply = sock.receive(request, self.timeout)
                # Round-trip time in milliseconds.
                hop.times.append((reply.time - request.time) * 1000)
                hop.address = reply.source
                hop.successful = True
                # Type 0 is an IPv4 Echo Reply, i.e. the destination
                # itself answered.  TODO(review): confirm IPv6 (type 129)
                # is handled elsewhere by icmplib.
                if reply.type == 0:
                    hop.final = True
            except TimeoutExceeded:
                hop.times.append(None)  # probe went unanswered
        return hop

    def generate_icmp_id(self):
        """Next ICMP id, wrapped to the 16-bit id field."""
        self.unique_id += 1
        self.unique_id &= 0xffff
        return self.unique_id
| illided/PyTrace | trace.py | trace.py | py | 2,989 | python | en | code | 0 | github-code | 13 |
21781131883 | #!/usr/bin/env python
import rospy
from std_srvs.srv import Empty
class clearService:
    """Thin wrapper around the move_base /clear_costmaps service."""

    def __init__(self):
        rospy.init_node('service_node_1')
        # Block until move_base is up so the proxy call cannot race startup.
        rospy.wait_for_service('/move_base/clear_costmaps')
        self.client = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)

    def request(self):
        """Call the service once, logging success or failure."""
        try:
            self.client()
            rospy.loginfo('service call granted')
        except rospy.ServiceException as e:
            # Bug fix: 'str' + exception raised TypeError inside the
            # handler; format the exception into the message instead.
            rospy.loginfo('service call failed %s' % e)
def run():
    """Clear the move_base costmaps every 5 seconds (0.2 Hz) until shutdown."""
    a = clearService()
    rate = rospy.Rate(0.2)
    while not rospy.is_shutdown():
        a.request()
        rate.sleep()
if __name__ == '__main__':
run() | ssahn0806/ROSLA | skeleton/clear_costmap.py | clear_costmap.py | py | 680 | python | en | code | 1 | github-code | 13 |
1339880961 | import statsmodels.api as sm
from gauge import tester
class detector():
    """Heuristic check that a gauge captured on video is behaving normally.

    Runs an Augmented Dickey-Fuller test on the time series returned by
    gauge.tester.pixel_count for the cropped gauge region (assumed to be
    per-frame pixel counts -- confirm against gauge.tester).
    """
    def __init__(self, video, cropX1, cropY1, cropX2, cropY2):
        # Video source plus the crop rectangle (two corner points).
        self.video= video
        self.cropX1 = cropX1
        self.cropY1= cropY1
        self.cropX2 = cropX2
        self.cropY2=cropY2
    def detect(self):
        """Return 1 if the gauge looks fine, 0 otherwise (also prints a message)."""
        series = tester().pixel_count(self.video, self.cropX1, self.cropY1, self.cropX2, self.cropY2)
        # adfuller() returns a tuple whose element [1] is the p-value.
        p_value = sm.tsa.stattools.adfuller(series)[1]
        # A large p-value means the unit-root null cannot be rejected,
        # i.e. the series still drifts/varies -- treated here as a live,
        # working gauge.  NOTE(review): confirm this interpretation (a
        # stuck gauge is assumed to give a stationary series).
        if p_value >0.001:
            print('Gauge is Working Fine')
            return 1
        else:
            print('Warning: Gauge is not Working as Expected')
            return 0
| pranav168/Fauty-Gauge-Detector | detector.py | detector.py | py | 682 | python | en | code | 0 | github-code | 13 |
22626023122 | import mysql.connector
from random import randint, choice
import datetime
# Устанавливаем соединение с базой данных
connection = mysql.connector.connect(
host='localhost',
user='roanvl',
password='!And487052!',
database='co_crm'
)
# Создаем объект для выполнения SQL-запросов
cursor = connection.cursor()
# Генерируем случайную дату и время
def created_at():
    """Random order-creation date in Sep-Oct 2023, formatted 'YYYY-MM-DD'."""
    year = randint(2023, 2023)   # single-value range kept so the span is easy to widen
    month = randint(9, 10)
    day = randint(1, 30)         # both months have at least 30 days, so always valid
    return f"{year}-{month:02d}-{day:02d}"
# Генерируем случайную дату в пределах последних года
def shipping_date():
    """Random shipping date in Oct-Dec 2023, formatted 'YYYY-MM-DD'."""
    year = randint(2023, 2023)
    month = randint(10, 12)
    day = randint(1, 30)  # capped at 30, so every generated date is valid
    return f"{year}-{month:02d}-{day:02d}"
# Генерируем случайное описание заказа
def order_description():
    """Placeholder description for generated orders.

    (Original note: replace this with real description logic.)
    """
    return "Random description"
# SQL statement for inserting rows into the website_orders table.
insert_query = "INSERT INTO co_crm.website_orders (order_status_id, company_id, product_id, quantity, manager_id, shipping_date, order_amount, created_at, order_description) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"

# 50 randomly generated rows.  NOTE(review): order_amount is hard-coded
# to 0 for every row -- confirm whether that is intended.
data_to_insert = [
    (randint(1, 2), randint(1, 35), randint(2, 20), randint(1, 50), randint(2, 13), shipping_date(), 0, created_at(), order_description()) for _ in range(50)
]

# Insert the rows in one batch.
cursor.executemany(insert_query, data_to_insert)

# Commit the transaction.
connection.commit()

# Release the cursor and the connection.
cursor.close()
connection.close()
| ROANVL/python-django-crm-graduation | fill_scripts/fill_db_orders.py | fill_db_orders.py | py | 2,094 | python | ru | code | 0 | github-code | 13 |
# Standard library.  Bug fix: `os` and `pickle` are used throughout the
# script (os.path.exists, pickle.dump) but were never imported, so the
# first loop failed with NameError.
import json
import os
import pickle

# Third-party
import numpy
import pandas

# Project-local
from repofish.utils import save_json
folder = "/home/vanessa/Documents/Dropbox/Code/Python/repofish/analysis/pypi"
packages = pandas.read_csv("%s/pypi_filtered.tsv" %folder,sep="\t",index_col=0)
meta_folder = "%s/packages" %(folder)
# Making a dataframe will take too much memory, let's make nodes and links
# Let's try sigma js export
# {"nodes": [
# {
# "id": "chr1",
# "x": 0,
# "y": 0,
# "label": "Bob",
# "size": 8.75
# },
# {
# "id": "chr10",
# "label": "Alice",
# "x": 3,
# "y": 1,
# "size": 14.75
# }
#],
#"edges": [{
# "id": "1",
# "source": "chr1",
# "target": "chr10"
#}]
# ONLY INCLUDE PACKAGES WITH DEPENDENCIES #########################################################
nodes = []
single_nodes = [] # nodes in graph without dependencies (that will need to be added)
node_lookup = dict()
# NODES #####################################################################
def make_node(package_name, meta, node_count):
    """Build a sigma.js-style node dict for one PyPI package.

    *meta* is the "info" section of the package's PyPI metadata; the node
    size is the number of declared dependencies.
    """
    return {
        "name": package_name,
        "size": len(meta["requires_dist"]),
        "color": "#999",
        "id": node_count,
        "description": meta["description"],
        "downloads": meta["downloads"],
        "keywords": meta["keywords"],
        "license": meta["license"],
        "maintainer": meta["maintainer_email"],
        "author": meta["author_email"],
    }
count=0
for row in packages.iterrows():
package_name = row[1].package
meta_file = "%s/%s.json" %(meta_folder,package_name)
if os.path.exists(meta_file):
meta = json.load(open(meta_file,"r"))
if "requires_dist" in meta["info"]:
if package_name not in node_lookup:
node = make_node(package_name,meta["info"],count)
nodes.append(node)
node_lookup[package_name] = count
count+=1
dependencies = meta["info"]["requires_dist"]
dependencies = [x.split(" ")[0].strip() for x in dependencies]
for dep in dependencies:
if dep not in node_lookup:
single_nodes.append(dep)
# Generate nodes for single_nodes list
# Note: did not wind up doing this to not clutter visualization
#for package_name in single_nodes:
# meta_file = "%s/%s.json" %(meta_folder,package_name)
# if os.path.exists(meta_file):
# meta = json.load(open(meta_file,"r"))
# node = make_node(package_name,meta["info"],count)
# nodes.append(node)
# node_lookup[package_name] = count
# count+=1
# LINKS ##############################################################################
links = []
seen_links = []
def make_link(source, target):
    """Directed edge dict from node id *source* to node id *target*."""
    return {"id": "{0}_{1}".format(source, target), "source": source, "target": target}
for row in packages.iterrows():
package_name = row[1].package
meta_file = "%s/%s.json" %(meta_folder,package_name)
if os.path.exists(meta_file):
meta = json.load(open(meta_file,"r"))
if "requires_dist" in meta["info"] and package_name in node_lookup:
dependencies = meta["info"]["requires_dist"]
dependencies = [x.split(" ")[0].strip() for x in dependencies]
package_id = node_lookup[package_name]
for dep in dependencies:
if dep in node_lookup:
dep_id = node_lookup[dep]
link_id = "%s_%s" %(dep_id,package_id)
if link_id not in seen_links:
link = make_link(dep_id,package_id)
links.append(link)
seen_links.append(link_id)
# Save to file
res = {"nodes":nodes,"links":links}
os.mkdir("web")
save_json(res,"web/pypi.json")
# REPOFISH FLASK ####################################################################
nodes = dict()
def do_encode(param):
    """UTF-8 encode *param*; None becomes the empty string.

    NOTE(review): returns bytes for strings but str for None -- callers
    apparently tolerate the mixed types, so that is preserved.
    """
    # `is not None` instead of `!= None` (identity test for the singleton).
    if param is not None:
        return param.encode("utf-8")
    return ""
# Data preparation for repofish flask application
def make_node(package_name, meta, node_count):
    """Build a node dict for the flask app from a package's PyPI metadata.

    :param package_name: name of the package
    :param meta: the "info" section of the package's PyPI JSON metadata
    :param node_count: integer id to assign to this node
    """
    dl = dict()
    # dict.iteritems() exists only on Python 2; items() works on both.
    for dl_key, dl_val in meta["downloads"].items():
        dl[do_encode(dl_key)] = dl_val
    return {"name": do_encode(package_name),
            "id": node_count,
            #"description":do_encode(meta["description"]),
            "downloads": dl,
            "keywords": do_encode(meta["keywords"]),
            "license": do_encode(meta["license"]),
            "maintainer": do_encode(meta["maintainer_email"]),
            "author": do_encode(meta["author_email"]),
            "package_url": do_encode(meta["package_url"]),
            "release_url": do_encode(meta["release_url"]),
            "docs": do_encode(meta["docs_url"]),
            "url": do_encode(meta["home_page"]),
            "summary": do_encode(meta["summary"]),
            "version": do_encode(meta["version"])}
count = 0
for row in packages.iterrows():
    package_name = row[1].package
    meta_file = "%s/%s.json" % (meta_folder, package_name)
    if os.path.exists(meta_file):
        # json.load() no longer accepts an ``encoding`` kwarg on Python 3;
        # decode at the file layer instead.
        with open(meta_file, "r", encoding="utf-8") as fh:
            meta = json.load(fh)
        if package_name not in nodes:
            node = make_node(package_name, meta["info"], count)
            nodes[package_name] = node
            count += 1
# Pickle is a binary format: the file must be opened in "wb", not "w".
with open("web/packages.nodes.pkl", "wb") as fh:
    pickle.dump(nodes, fh)
# We also need a links lookup: for each package, the list of links to keep.
links = dict()

def make_link(source, target):
    """Return a d3-style link dict keyed by '<source>_<target>'."""
    return {"id": "%s_%s" % (source, target), "source": source, "target": target}

for row in packages.iterrows():
    package_name = row[1].package
    meta_file = "%s/%s.json" % (meta_folder, package_name)
    if os.path.exists(meta_file):
        with open(meta_file, "r") as fh:
            meta = json.load(fh)
        if "requires_dist" in meta["info"] and package_name in node_lookup:
            dependencies = meta["info"]["requires_dist"]
            dependencies = [x.split(" ")[0].strip() for x in dependencies]
            package_id = nodes[package_name]["id"]
            link_list = []
            for dep in dependencies:
                if dep in nodes:
                    # (an unused ``link_id`` was computed here; dropped)
                    link_list.append(make_link(nodes[dep]["id"], package_id))
            links[package_name] = link_list

# Pickle is a binary format: the file must be opened in "wb", not "w".
with open("web/packages.links.pkl", "wb") as fh:
    pickle.dump(links, fh)
| vsoch/repofish | analysis/pypi/3.map_dependencies.py | 3.map_dependencies.py | py | 6,398 | python | en | code | 3 | github-code | 13 |
x = int(input())
y = int(input())
z = int(input())
n = int(input())

# All coordinate triples [i, j, k] with 0 <= i <= x, 0 <= j <= y,
# 0 <= k <= z whose component sum differs from n.  The range bounds
# already guarantee i <= x etc., so the original redundant checks are
# dropped, and the builtin ``sum`` is no longer shadowed by a local.
permutations = [
    [i, j, k]
    for i in range(x + 1)
    for j in range(y + 1)
    for k in range(z + 1)
    if i + j + k != n
]
print(permutations)
| 1realjoeford/learning-python | HackerRankanswers/list_que.py | list_que.py | py | 457 | python | en | code | 1 | github-code | 13 |
72245973139 | import random
class Question:
    """A single quiz question: prompt text, the correct answer, and all choices."""

    def __init__(self, q_text, q_right_answer, q_all_answers):
        # Store prompt, the right answer, and the full choice list verbatim.
        self.text, self.right_answer, self.all_answers = (
            q_text,
            q_right_answer,
            q_all_answers,
        )
class Quiz:
    """Drives a quiz over a list of Question objects, tracking the score."""

    def __init__(self, q_list):
        self.question_list = q_list   # questions to ask, in order
        self.score = 0                # number of correct answers so far
        self.question_number = 0      # index of the next question to ask

    #Check if there are any remaining questions
    def checkRemaining(self):
        return self.question_number < len(self.question_list)

    #Print the currect question
    def printQuestion(self):
        """Ask the next question on stdin/stdout and score the answer."""
        current_question = self.question_list[self.question_number]
        self.question_number += 1
        # Label the choices 1..N (the original prompt mislabelled the four
        # choices as 1, 1, 2, 3).
        choices = "\n".join(
            f"{idx}.{answer}"
            for idx, answer in enumerate(current_question.all_answers, start=1)
        )
        user_answer = input(
            f"Q.{self.question_number}: {current_question.text}\n{choices}\n"
        )
        self.checkAnswer(user_answer, current_question.right_answer)

    #Check the answer
    def checkAnswer(self, user_answer, correct_answer):
        """Compare answers case-insensitively, update and report the score."""
        if user_answer.lower() == correct_answer.lower():
            self.score += 1
            print("That is correct")
        else:
            print("That is not correct")
            print(f"The correct answer was {correct_answer}")
        print(f"Your current score is: {self.score}/{self.question_number}\n\n")
| kacpergondek/100daysofcode | Day_17_Quiz/objects.py | objects.py | py | 1,362 | python | en | code | 0 | github-code | 13 |
70095624018 | # The file contains internal elements for cards and components
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash_devices.dependencies import Input, Output, State, MATCH, ALL
import dash_table
import plotly.express as px
import pandas
import io
import base64
def button(id, *, text='Empty'):
    """Green, block-level link-style button with the given element id and label."""
    # NOTE(review): the ``id`` parameter shadows the builtin; kept for
    # call-site compatibility.
    return dbc.Button(
        text,
        color='link',
        id=id,
        className='text-body bg-success',
        style={
            'display': 'block',
            'margin': '10px auto',
            'font-size': '1.2em'
        }
    )
def upload(id='', *, multiple=False):
    """Dashed drag-and-drop upload area; ``multiple`` enables multi-file upload."""
    return dcc.Upload(
        id=id,
        children=html.Div([
            'Drag and Drop or ',
            html.A('Select Files',
                className='text-primary')
        ]),
        style={
            'height': '60px',
            'lineHeight': '60px',
            'borderWidth': '1px',
            'borderStyle': 'dashed',
            'borderRadius': '5px',
            'textAlign': 'center',
            'margin': '10px'
        },
        multiple=multiple
    )
def labelWithInput(id, *, labelText):
    """Label followed by a numeric input (minimum 1) with the given element id."""
    return html.Label(
        children=[
            labelText,
            dcc.Input(
                id=id,
                type='number',
                placeholder='>=1',
                min=1,
                style={
                    'margin-left': '20px',
                    'width': '60px',
                }
            ),
        ],
        style={
            'display': 'block',
            'margin-left': '10px'
        }
    )
def card(id:str, *, header:str, childs:list, app=None):
    """Collapsible card whose header button toggles the body's visibility.

    Registers a toggle callback on *app*, so ``app`` must be a Dash app
    instance despite the ``None`` default — calling with the default would
    raise AttributeError on ``app.callback``; TODO confirm intended usage.
    """
    @app.callback(
        Output(f'collapse-{id}', 'is_open'),
        [Input(f'toggle-{id}', 'n_clicks')],
        [State(f'collapse-{id}', 'is_open')]
    )
    def toggleButtonCallback(n_click, is_open):
        # Flip the collapse state on each click; before any click, keep it as-is.
        return not is_open if n_click else is_open
    return dbc.Card(
        id=id,
        children=[
            dbc.CardHeader(
                html.H6(
                    dbc.Button(
                        header,
                        color="link",
                        id=f'toggle-{id}',
                        className='text-body'
                    )
                )
            ),
            dbc.Collapse(
                id=f'collapse-{id}',
                children=childs,
                style={
                    'padding': '15px',
                }
            ),
        ],
        className='shadow',
        style={
            'margin': '10px 0',
        }
    )
def form(action, location):
    """Form showing a thumbnail of *location* with a Download submit button.

    NOTE(review): the ``action`` parameter is unused — the form's action is
    set to ``location``; confirm whether ``action=action`` was intended.
    """
    return html.Form(
        action=location,
        method="get",
        target='_blank',
        children=[
            html.Img(
                src=location,
                className='img-thumbnail',
                style={
                    'box-sizing': 'border-box',
                }
            ),
            html.Button(
                className="btn btn-success",
                type="submit",
                children=[
                    "Download"
                ],
                style={
                    'margin': '10px auto',
                    'display': 'block'
                }
            )
        ]
    )
def graph(points):
    """Line chart of points['x'] vs points['y'] wrapped in a dcc.Graph."""
    figure = px.line(points, x='x', y='y')
    return dcc.Graph(figure=figure)
def table(tableData):
    """Render an uploaded spreadsheet (dcc.Upload ``contents``) as a DataTable."""
    # ``tableData`` is a data-URL style string: "<content type>,<base64 payload>".
    content_type, content_string = tableData.split(',')
    decoded = base64.b64decode(content_string)
    # assumes the payload is an Excel workbook — TODO confirm accepted upload types
    df = pandas.read_excel(io.BytesIO(decoded))
    return dash_table.DataTable(
        columns=[{"name": i, "id": i} for i in df.columns],
        data=df.to_dict('records')
    )
def video(path):
    """Full-width HTML5 video player (with controls) for the file at *path*."""
    full_width = {'width': '100%'}
    return html.Video(src=path, controls=True, style=full_width)
13523980417 | import numpy as np
import cv2 as cv
import sys
import numpy as np
import imutils
def box_centers(boxes):
    """Return an array of (x, y) center points, one per box.

    Args:
        boxes: array of [x, y, w, h] rows, where (x, y) is the top-left
            corner and w, h are the width and height.
    """
    centers = [[x + w / 2, y + h / 2] for (x, y, w, h) in boxes]
    return np.array(centers)
def cluster_boxes(boxes, distance_threshold, max_width=None):
    '''Performs a simple clustering algorithm based
    on distance between points. Inspired by hierarchical clustering and DBScan.
    Args:
        boxes: array of [x,y,w,h] rows (top-left corner, width, height)
        distance_threshold: float, maximum distance between cluster points
        max_width: integer or None, split clusters if box size larger than this width, or do nothing if None.
    Returns:
        cluster_labels: array of shape (n,) labels each point with a number from 0 to m.
        num_clusters: number of clusters
        cluster_centers: (m, 2) per-cluster mean of box centers
        cluster_boxes: (m, 4) bounding box enclosing each cluster'''
    centers = box_centers(boxes)
    n = centers.shape[0] # number of points
    m = 0 # number of clusters so far
    cluster_labels = np.full(n,-1,dtype=np.int16)
    for i in range(n):
        # If not already in a cluster, make a new one
        if cluster_labels[i] == -1:
            cluster_labels[i] = m
            m += 1
        # Add nearby points to this points cluster
        for j in range(i+1, n):
            if np.linalg.norm(centers[i]-centers[j]) <= distance_threshold:
                cluster_labels[j] = cluster_labels[i]
            # alternatively, if the boxes overlap:
            # NOTE(review): both branches compare boxes[i,0]-boxes[j,0] against
            # boxes[j,2]; the second branch looks like it intended boxes[i,2] —
            # confirm the overlap test.
            elif boxes[i,0] >= boxes[j,0] and boxes[i,0]-boxes[j,0] <= boxes[j,2] \
            or boxes[j,0] >= boxes[i,0] >= 0 and boxes[i,0]-boxes[j,0] <= boxes[j,2]:
                cluster_labels[j] = cluster_labels[i]
    #Give boxes of clusters
    # (local name shadows this function; harmless since there is no recursion)
    cluster_boxes = np.zeros((m,4),dtype=np.int16)
    top_left_corner = boxes[:,:2] # x and y
    bot_right_corner = top_left_corner + boxes[:,2:4] # x+w and y+h
    for c in range(m):
        mask = (cluster_labels==c).nonzero() #Choose elements from particular cluster
        cluster_boxes[c,0:2] = np.amin(top_left_corner[mask], axis=0)
        cluster_boxes[c,2:4] = np.amax(bot_right_corner[mask], axis=0) - cluster_boxes[c,0:2]
    if max_width is not None:
        # Break apart large clusters
        for (c, (x,y,w,h)) in enumerate(cluster_boxes):
            if w > max_width:
                to_check = set([c])
                while to_check:
                    c = to_check.pop()
                    # NOTE(review): len(cluster_labels==c) is the length of the
                    # boolean array (always n), not the member count — confirm
                    # whether np.count_nonzero was intended.
                    if cluster_boxes[c, 2] > max_width and len(cluster_labels==c) > 1: #width
                        # Find units in the cluster to the right of the cluster box center
                        (x,y,w,h) = cluster_boxes[c,:]
                        mask = np.logical_and(cluster_labels == c, centers[:,0] > np.average(centers[:,0][cluster_labels==c], axis=0))
                        mask = mask.nonzero()[0]
                        if len(mask) > 0:
                            # Give them a new cluster
                            cluster_labels[mask] = m
                            cluster_boxes = np.vstack((cluster_boxes, [0, 0, 0, 0]))
                            # Redo the boxes for the new left and right clusters
                            for k in (c, m):
                                mask = (cluster_labels==k).nonzero()
                                cluster_boxes[k,0:2] = np.amin(top_left_corner[mask], axis=0)
                                cluster_boxes[k,2:4] = np.amax(bot_right_corner[mask], axis=0) - cluster_boxes[k,0:2]
                            # check that the new clusters aren't also too wide
                            to_check.add(c)
                            to_check.add(m)
                            m = m+1
    cluster_centers = np.zeros((m,2))
    for c in range(m):
        # Take all points in cluster c and average each coordinate
        cluster_centers[c] = np.average(centers[cluster_labels==c], axis=0)
    return cluster_labels, m, cluster_centers, cluster_boxes
if __name__ == "__main__":
    # test the module
    # Detect people in a test image with OpenCV's HOG pedestrian detector,
    # cluster the detections, and draw boxes/centers colour-coded by cluster.
    hog = cv.HOGDescriptor()
    hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
    img = cv.imread("group.jpg")
    img = imutils.resize(img, width=img.shape[0]//4)
    #cv.imshow('pano', pano)
    cv.waitKey(0)
    boxes, weights = hog.detectMultiScale(img, winStride=(8,8))
    clabels, m, ccenters, cboxes = cluster_boxes(boxes, 1000, 20)
    print("Number of clusters:", m)
    # Generate m different colors for testing
    colors_list = np.random.randint(25,255,(m,3))
    for ((x, y, w, h), cluster) in zip(boxes, clabels):
        cv.rectangle(img, (x, y, w, h), colors_list[cluster].tolist(), 2)
    for (x,y,w,h) in cboxes:
        cv.rectangle(img, (x, y, w, h), (0,0,0), 1)
    for c, center in enumerate(ccenters):
        cv.circle(img, center.astype(np.int16), 5, colors_list[c].tolist(), -1)
    #display frame
    cv.imshow('frame', img)
    #cv2.imwrite('test_img.jpg', pano)
    cv.waitKey(0)
cv.destroyAllWindows() | cesargvcompsci/Zephyrus | Tests/clustering_test.py | clustering_test.py | py | 5,071 | python | en | code | 0 | github-code | 13 |
73860127696 | # # Modified Simple Notebook Visualiser from psychemedia at https://gist.github.com/psychemedia/9b7808d81e3ee3461444330f3b0971ac
"""
Script to visualize time series for notebooks
Authors: Jerry Song (jerrysong1324), Doris Lee (dorisjlee)
"""
import glob
import json
import os
import shutil
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import re
import math
import nbformat
import textwrap
def makeNestedDict(categories):
    """Replace each category's function list, in place, with a zero count dict.

    Args:
        categories: dict mapping category name -> list of function names.
            Mutated in place; each value becomes ``{function_name: 0, ...}``.
    """
    for category, functions in categories.items():
        # dict.fromkeys builds the zero-initialised counter in one call.
        categories[category] = dict.fromkeys(functions, 0)
def nb_vis(cell_map, img_file='', linewidth=10, w=20, gap=None, gap_boost=1, gap_colour='black'):
    """Visualise notebook gross cell structure.

    ``cell_map`` is either a list of (length, colour) tuples for one notebook
    or a dict mapping notebook label -> such a list.
    NOTE(review): ``img_file`` and ``gap_colour`` are never used in this body —
    confirm whether saving/gap colouring was meant to be implemented.
    """
    def get_gap(cell_map):
        """Automatically set the gap value based on overall length"""
        def get_overall_length(cell_map):
            """Get overall line length of a notebook."""
            overall_len = 0
            gap = 0
            for i ,(l,t) in enumerate(cell_map):
                #i is number of cells if that's useful too?
                overall_len = overall_len + l
            return overall_len
        max_overall_len = 0
        #If we are generating a plot for multiple notebooks, get the largest overall length
        if isinstance(cell_map,dict):
            for k in cell_map:
                _overall_len = get_overall_length(cell_map[k])
                max_overall_len = _overall_len if _overall_len > max_overall_len else max_overall_len
        else:
            max_overall_len = get_overall_length(cell_map)
        #Set the gap at 0.5% of the overall length
        return math.ceil(max_overall_len * 0.01)
    def plotter(cell_map, x, y, label='', header_gap = 0.2):
        """Plot visualisation of gross cell structure for a single notebook."""
        #Plot notebook path
        plt.text(y, x, label)
        x = x + header_gap
        for _cell_map in cell_map:
            _y = y + _cell_map[0] + 1 #Make tiny cells slightly bigger
            plt.plot([y,_y],[x,x], _cell_map[1], linewidth=linewidth)
            y = _y
    x=0
    y=0
    #If we have a single cell_map for a single notebook
    if isinstance(cell_map,list):
        gap = gap if gap is not None else get_gap(cell_map) * gap_boost
        fig, ax = plt.subplots(figsize=(w, 1))
        plotter(cell_map, x, y)
    #If we are plotting cell_maps for multiple notebooks
    elif isinstance(cell_map,dict):
        gap = gap if gap is not None else get_gap(cell_map) * gap_boost
        fig, ax = plt.subplots(figsize=(w,len(cell_map)))
        for k in cell_map:
            # One horizontal bar row per notebook.
            plotter(cell_map[k], x, y, k)
            x = x + 1
    ax.axis('off')
    plt.gca().invert_yaxis()
# VIS_COLOUR_MAP = {'markdown':'cornflowerblue','code':'pink'}
VIS_COLOUR_MAP = {'create':'#f54949','join':'#ff2121',
'cleaning':'#00ff3f','group':'#99ffb2','preprocessing':'#44fc71',
'model':'#00fbff',
'plot':'#fffc6b','print':'#faed25',
'postprocessing':'#7700ff','stats':'#b77dfa',
'other':'grey'}
def plotNb(df, nbNameList):
    """Render a colour-bar visualisation for each named notebook.

    ``df`` must have 'name' and 'category' columns; cell colours come from
    VIS_COLOUR_MAP, one unit-width segment per cell.
    """
    lineDict = {}
    for nbName in nbNameList:
        lineDict[nbName] = [
            (1, VIS_COLOUR_MAP[category])
            for category in df[df['name'] == nbName]['category']
        ]
    nb_vis(lineDict)
15637362873 | import pyttsx3
import datetime
import os
import smtplib
engine = pyttsx3.init('sapi5') #used for intake api voices from windows
voices = engine.getProperty('voices')
print(voices[0].id)
engine.setProperty('voice',voices[0].id)
def speak(audio):
    """Queue *audio* on the TTS engine and block until it has been spoken."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Speak a greeting matching the current local hour, then offer help."""
    hour = int(datetime.datetime.now().hour)
    if hour >= 0 and hour < 12:
        speak("Good Morning")
    elif hour >= 12 and hour < 17:
        # Fixed spoken-string typo: "Aftenoon" -> "Afternoon".
        speak("Good Afternoon")
    elif hour >= 17 and hour < 18:
        speak("Good Evening")
    else:
        # NOTE(review): hours 18-23 all fall through to "Good Night"; the
        # evening window is only 17:00-17:59 — confirm this is intended.
        speak("Good Night")
    speak("How can I help you ")
if __name__ == " __main__ ":
speak(" Hi I am Nova ")
| Yogishm22/Yogishm22 | nova.py | nova.py | py | 776 | python | en | code | 0 | github-code | 13 |
72922402577 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
unit tests for the add_af.cwl
"""
import os
import sys
from pluto import (
PlutoTestCase,
CWLFile
)
class TestAddAFCWL(PlutoTestCase):
    """End-to-end test for add_af.cwl, which adds a ``t_af`` column to a MAF."""
    # CWL workflow under test, resolved by the pluto test harness.
    cwl_file = CWLFile('add_af.cwl')
    def test_add_af(self):
        """
        Test IMPACT CWL with tiny dataset
        """
        # Two comment lines, a header, and three variant rows.
        maf_lines = [
            ['# comment 1'],
            ['# comment 2'],
            ['Hugo_Symbol', 't_depth', 't_alt_count'],
            ['SUFU', '100', '75'],
            ['GOT1', '100', '1'],
            ['SOX9', '100', '0'],
        ]
        input_maf = self.write_table(tmpdir = self.tmpdir, filename = 'input.maf', lines = maf_lines)
        self.input = {
            "input_file": {
                "class": "File",
                "path": input_maf
            },
            "output_filename": 'output.maf',
        }
        output_json, output_dir = self.run_cwl()
        output_path = os.path.join(output_dir, 'output.maf')
        expected_output = {
            'output_file': {
                'location': 'file://' + output_path,
                'basename': 'output.maf',
                'class': 'File',
                'checksum': 'sha1$39de59ad5d736db692504012ce86d3395685112e',
                'size': 109,
                'path': output_path
            }
        }
        self.assertCWLDictEqual(output_json, expected_output)
        comments, mutations = self.load_mutations(output_path)
        # Comment lines must pass through untouched.
        expected_comments = ['# comment 1', '# comment 2']
        self.assertEqual(comments, expected_comments)
        # t_af is expected to equal t_alt_count / t_depth for each row.
        expected_mutations = [
            {'Hugo_Symbol': 'SUFU', 't_depth': '100', 't_alt_count':'75', 't_af': '0.75'},
            {'Hugo_Symbol': 'GOT1', 't_depth': '100', 't_alt_count':'1', 't_af': '0.01'},
            {'Hugo_Symbol': 'SOX9', 't_depth': '100', 't_alt_count':'0', 't_af': '0.0'}
        ]
        self.assertEqual(mutations, expected_mutations)
| mskcc/pluto-cwl | tests/test_add_af_cwl.py | test_add_af_cwl.py | py | 1,962 | python | en | code | 1 | github-code | 13 |
40016754761 | import numpy as np
import matplotlib.pyplot as plt
from openbox import Optimizer, sp, ParallelOptimizer
import warnings
warnings.filterwarnings("ignore")
# Define Search Space
space = sp.Space()
x1 = sp.Real(name="x1", lower=-5, upper=10, default_value=0)
x2 = sp.Real(name="x2", lower=0, upper=15, default_value=0)
x3 = sp.Int(name="x3", lower=0, upper=100)
x4 = sp.Categorical(name="x4",
choices=["rbf", "poly", "sigmoid"],
default_value="rbf")
space.add_variables([x1, x2])
# Define Objective Function
# OpenBox minimises the objective by default.
def branin(config):
    """Simplified Branin-style objective.

    Returns {'objs': (y,)} — the tuple form OpenBox expects for a
    single-objective minimisation problem.
    """
    x1, x2 = config['x1'], config['x2']
    # Deliberately simplified variant: the pi-based coefficients of the
    # textbook Branin function are dropped here.
    quadratic = x2 - 5.1 * x1 ** 2 + 5 * x1 - 6
    y = quadratic ** 2 + 10 * np.cos(x1) + 10
    return {'objs': (y,)}
# Run
if __name__ == '__main__':
    opt = Optimizer(objective_function=branin,
                    config_space=space,
                    max_runs=10,  # maximum number of iterations
                    num_objs=1,  # single-objective optimisation
                    num_constraints=0,  # no constraints
                    surrogate_type='auto',  # surrogate model; for mathematical problems a Gaussian process ('gp') is recommended, for practical problems such as hyper-parameter optimisation (HPO) a random forest ('prf') is recommended
                    runtime_limit=None,  # overall runtime limit
                    time_limit_per_trial=30,  # max time budget per objective evaluation (seconds); once exceeded, the evaluation returns a failed status
                    task_id='quick_start',  # distinguishes different optimisation experiments
                    logging_dir='openbox_logs',  # where experiment records are saved; log files are named after task_id
                    random_state=123,
                    )
    history = opt.run()

    # Parallel Evaluation on Local Machine
    opt = ParallelOptimizer(branin,
                            space,  # search space
                            parallel_strategy='async',  # whether parallel evaluation is synchronous or asynchronous; 'async' uses resources more fully and reduces idle time
                            batch_size=4,  # number of parallel workers
                            batch_strategy='default',  # strategy for proposing multiple suggestions at once; the default 'default' is recommended for stable performance
                            num_objs=1,
                            num_constraints=0,
                            max_runs=50,
                            # surrogate_type='gp',
                            surrogate_type='auto',
                            time_limit_per_trial=180,
                            task_id='parallel_async',
                            logging_dir='openbox_logs',  # where experiment records are saved; log files are named after task_id
                            random_state=123,
                            )
    history = opt.run()
    print(history)

    print(history.get_importance())  # print parameter importances

    history.plot_convergence(xlabel="Number of iterations $n$",
                             ylabel=r"Min objective value after $n$ iterations",
                             true_minimum=0.397887,
                             )
    plt.show()

    # history.visualize_jupyter()
| HuangHaoyu1997/Parallel-CGP | search_v4.py | search_v4.py | py | 3,438 | python | zh | code | 0 | github-code | 13 |
74076923216 | import base64
import json
import os
import zlib
import numpy as np
import cv2
from pietoolbelt.datasets.common import get_root_by_env, BasicDataset
__all__ = ['SuperviselyPersonDataset']
class SuperviselyPersonDataset(BasicDataset):
    """Supervisely person-segmentation dataset rooted at $SUPERVISELY_DATASET.

    Walks the dataset directory, pairs each image with its JSON annotation,
    and decodes annotation objects into (binary mask, top-left origin) pairs.
    """

    def __init__(self, include_not_marked_people: bool = False, include_neutral_objects: bool = False):
        path = get_root_by_env('SUPERVISELY_DATASET')

        items = {}
        # NOTE(review): the walk variable shadows ``path``; harmless because
        # os.walk captured the root before the loop body runs.
        for root, path, files in os.walk(path):
            for file in files:
                name, ext = os.path.splitext(file)
                if ext == '.json':
                    item_type = 'target'
                    # Annotation files are named "<image>.<imgext>.json".
                    name = os.path.splitext(name)[0]
                elif ext == '.png' or ext == '.jpg':
                    item_type = 'data'
                else:
                    continue

                if name in items:
                    items[name][item_type] = os.path.join(root, file)
                else:
                    items[name] = {item_type: os.path.join(root, file)}

        # Keep only items that have both an image and an annotation.
        final_items = []
        for item, data in items.items():
            if 'data' in data and 'target' in data:
                final_items.append(data)

        final_items = self._filter_items(final_items, include_not_marked_people, include_neutral_objects)

        self._use_border_as_class = False
        self._border_thikness = None

        super().__init__(final_items)

    def _interpret_item(self, item) -> any:
        """Load the image as RGB and decode every annotated object to a mask."""
        return {'data': cv2.cvtColor(cv2.imread(item['data']), cv2.COLOR_BGR2RGB),
                'target': {'masks': [SuperviselyPersonDataset._object_to_mask(obj) for obj in item['target']['objects']],
                           'size': item['target']['size']}}

    @staticmethod
    def _object_to_mask(obj):
        """Decode one annotation object into (binary mask, [y, x] origin)."""
        obj_mask, origin = None, None
        if obj['bitmap'] is not None:
            z = zlib.decompress(base64.b64decode(obj['bitmap']['data']))
            # np.fromstring is deprecated/removed for binary input;
            # np.frombuffer is the supported equivalent.
            n = np.frombuffer(z, np.uint8)
            origin = np.array([obj['bitmap']['origin'][0], obj['bitmap']['origin'][1]], dtype=np.uint16)
            obj_mask = cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(np.uint8)
            obj_mask[obj_mask > 0] = 1
        elif len(obj['points']['interior']) + len(obj['points']['exterior']) > 0:
            # The np.int alias was removed in NumPy 1.24; use the builtin int.
            pts = np.array(obj['points']['exterior'], dtype=int)
            origin = pts.min(axis=0)
            shape = pts.max(axis=0) - origin
            obj_mask = cv2.drawContours(np.zeros((shape[1], shape[0]), dtype=np.uint8), [pts - origin], -1, 1, cv2.FILLED)
            if len(obj['points']['interior']) > 0:
                # Interior rings are holes: paint them back to 0.
                for pts in obj['points']['interior']:
                    pts = np.array(pts, dtype=int)
                    obj_mask = cv2.drawContours(obj_mask, [pts - origin], -1, 0, cv2.FILLED)

            origin = np.array([origin[1], origin[0]], dtype=int)
        return obj_mask, origin

    @staticmethod
    def _filter_items(items, include_not_marked_people: bool, include_neutral_objects: bool) -> list:
        """Load each annotation and drop not-marked-people / neutral objects per flags."""
        res = []
        for item in items:
            with open(item['target'], 'r') as file:
                target = json.load(file)
            if not include_not_marked_people and ('not-marked-people' in [n['name'] for n in target['tags'] if 'value' in n]):
                continue
            if not include_neutral_objects:
                res_objects = []
                for obj in target['objects']:
                    if obj['classTitle'] != 'neutral':
                        res_objects.append(obj)
                target['objects'] = res_objects
            res.append({'data': item['data'], 'target': target})
        return res
| HumanParsingSDK/datasets | human_datasets/supervisely_person.py | supervisely_person.py | py | 3,663 | python | en | code | 2 | github-code | 13 |
72952640658 | __author__ = 'ando'
import numpy as np
from time import time
import logging as log
import random
import networkx as nx
from itertools import zip_longest
from scipy.io import loadmat
from scipy.sparse import issparse
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count
from os import path
from collections import Counter
log.basicConfig(format='%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s', level=log.INFO)
def __random_walk__(G, path_length, start, alpha=0, rand=random.Random()):
    '''
    Returns a truncated random walk.
    :param G: networkx graph
    :param path_length: Length of the random walk.
    :param alpha: probability of restarting at the walk's origin.
    :param rand: random number generator
    :param start: the start node of the random walk.
    :return: list of visited nodes, beginning with ``start``
    '''
    path = [start]

    while len(path) < path_length:
        cur = path[-1]
        # networkx >= 2 returns an iterator from G.neighbors(); materialise it
        # so len() and random.choice() work on any networkx version.
        neighbors = list(G.neighbors(cur))
        if len(neighbors) > 0:
            if rand.random() >= alpha:
                path.append(rand.choice(neighbors))
            else:
                # Restart: jump back to the walk's origin.
                path.append(path[0])
        else:
            break
    return path
def __parse_adjacencylist_unchecked__(f):
    '''
    Parse one chunk of an adjacency-list file.
    :param f: iterable of text lines (may contain None padding from grouper)
    :return: list of integer rows, one per non-empty, non-comment line
    '''
    rows = []
    for line in f:
        # Skip grouper's None padding and comment lines.
        if not line or line[0] == "#":
            continue
        rows.append([int(token) for token in line.strip().split()])
    return rows
def __from_adjlist_unchecked__(adjlist):
    '''
    create graph form the an adjacency list
    :param adjlist: the adjacency matrix (iterable of edge pairs)
    :return: networkx graph
    '''
    G = nx.Graph()
    # add_edges_from deduplicates edges; nodes without edges are absent.
    G.add_edges_from(adjlist)
    return G
def load_adjacencylist(file_, undirected=False, chunksize=10000):
    '''
    multi-threaded function to read the adjacency matrix and build the graph
    :param file_: graph file
    :param undirected: is the graph undirected
    :param chunksize: how many edges for thread
    :return: networkx graph built from the parsed edges
    '''
    parse_func = __parse_adjacencylist_unchecked__
    convert_func = __from_adjlist_unchecked__

    adjlist = []

    #read the matrix file
    t0 = time()
    with open(file_, 'r') as f:
        with ProcessPoolExecutor(max_workers=cpu_count()) as executor:
            total = 0
            for idx, adj_chunk in enumerate(executor.map(parse_func, grouper(int(chunksize), f))): #execute pare_function on the adiacent list of the file in multipe process
                adjlist.extend(adj_chunk) #merge the results of different process
                total += len(adj_chunk)
    t1 = time()
    # NOTE(review): ``idx`` is unbound below if the file is empty, and
    # np.asarray on ragged rows yields an object array — confirm inputs.
    adjlist = np.asarray(adjlist)

    log.info('Parsed {} edges with {} chunks in {}s'.format(total, idx, t1-t0))

    t0 = time()
    G = convert_func(adjlist)
    t1 = time()

    log.debug('Converted edges to graph in {}s'.format(t1-t0))

    if undirected:
        G = G.to_undirected()

    return G
def _write_walks_to_disk(args):
    """Worker: generate walks for one output file and write one walk per line.

    Relies on the module globals __current_graph and __vertex2str being set
    by write_walks_to_disk before the pool is started.
    """
    num_paths, path_length, alpha, rand, f = args
    G = __current_graph
    t_0 = time()
    with open(f, 'w') as fout:
        for walk in build_deepwalk_corpus_iter(G=G, num_paths=num_paths, path_length=path_length, alpha=alpha, rand=rand):
            fout.write(u"{}\n".format(u" ".join(__vertex2str[v] for v in walk)))
    log.info("Generated new file {}, it took {} seconds".format(f, time() - t_0))
    return f
def write_walks_to_disk(G, filebase, num_paths, path_length, alpha=0, rand=random.Random(0), num_workers=cpu_count()):
    '''
    save the random walks on files so is not needed to perform the walks at each execution
    :param G: graph to walks on
    :param filebase: location where to save the final walks
    :param num_paths: number of walks to do for each node
    :param path_length: lenght of each walks
    :param alpha: restart probability for the random walks
    :param rand: generator of random numbers
    :param num_workers: number of thread used to execute the job
    :return: list of the files written
    '''
    global __current_graph
    global __vertex2str
    __current_graph = G
    __vertex2str = {v:str(v) for v in G.nodes()}
    files_list = ["{}.{}".format(filebase, str(x)) for x in range(num_paths)]
    expected_size = len(G)
    args_list = []
    files = []

    log.info("file_base: {}".format(filebase))
    # Split num_paths across workers: one pass per worker when there are
    # enough workers, otherwise near-equal chunks.
    if num_paths <= num_workers:
        paths_per_worker = [1 for x in range(num_paths)]
    else:
        paths_per_worker = [len(list(filter(lambda z: z!= None, [y for y in x]))) for x in grouper(int(num_paths / num_workers)+1, range(1, num_paths+1))]

    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for size, file_, ppw in zip(executor.map(count_lines, files_list), files_list, paths_per_worker):
            # Each worker gets an independently seeded RNG.
            args_list.append((ppw, path_length, alpha, random.Random(rand.randint(0, 2**31)), file_))

    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for file_ in executor.map(_write_walks_to_disk, args_list):
            files.append(file_)

    return files
def combine_files_iter(file_list):
    """Yield one lazy row of ints (a map object) per line across all files.

    Files are opened with a context manager so each handle is closed as soon
    as its lines are exhausted (the original left handles open until GC).
    """
    for file in file_list:
        with open(file, 'r') as f:
            for line in f:
                yield map(int, line.split())
def count_lines(f):
    """Return the number of lines in file *f*, or 0 if it does not exist."""
    if path.isfile(f):
        # Context manager closes the handle deterministically (the original
        # left ``open(f)`` to be collected by GC).
        with open(f) as handle:
            return sum(1 for _ in handle)
    return 0
def build_deepwalk_corpus(G, num_paths, path_length, alpha=0, rand=random.Random(0)):
    '''
    extract the walks form the graph used for context embeddings
    :param G: graph
    :param num_paths: how many random walks to form a sentence
    :param path_length: how long each path -> length of the sentence
    :param alpha: restart probability
    :param rand: random function
    :return: array of walks (one per node per pass)
    '''
    walks = []

    nodes = list(G.nodes())

    for cnt in range(num_paths):
        rand.shuffle(nodes)
        for node in nodes:
            walks.append(__random_walk__(G, path_length, rand=rand, alpha=alpha, start=node))
    # NOTE(review): walks can be ragged (truncated at dead ends); np.array on
    # ragged lists produces an object array on modern NumPy — confirm callers.
    return np.array(walks)
def build_deepwalk_corpus_iter(G, num_paths, path_length, alpha=0, rand=random.Random(0)):
    """Lazily yield random walks: *num_paths* passes over all nodes in
    shuffled order, one truncated walk per node per pass.

    (An unused ``walks`` accumulator was removed — this is a generator.)
    """
    nodes = list(G.nodes())

    for cnt in range(num_paths):
        rand.shuffle(nodes)
        for node in nodes:
            yield __random_walk__(G, path_length, rand=rand, alpha=alpha, start=node)
def count_textfiles(files, workers=1):
    """Merge per-file word counts (see count_words) across *files* in parallel."""
    c = Counter()
    with ProcessPoolExecutor(max_workers=workers) as executor:
        for c_ in executor.map(count_words, files):
            c.update(c_)
    return c
def count_words(file):
    """ Counts the word frequences in a list of sentences.
    Note:
        This is a helper function for parallel execution of `Vocabulary.from_text`
        method.
    """
    counts = Counter()
    with open(file, 'r') as handle:
        for line in handle:
            # Tokens are whitespace-separated integer vertex ids.
            counts.update(int(token) for token in line.strip().split())
    return counts
def load_matfile(file_, variable_name="network", undirected=True):
    """Load a graph stored as a (sparse) adjacency matrix in a MATLAB .mat file."""
    mat_varables = loadmat(file_)
    mat_matrix = mat_varables[variable_name]

    return from_numpy(mat_matrix, undirected)
def from_numpy(x, undirected=True):
    """
    Load graph form adjmatrix
    :param x: scipy sparse adjacency matrix (dense input raises)
    :param undirected: convert the result to an undirected graph
    :return: networkx graph with an edge per non-zero entry
    """
    G = nx.Graph()

    if issparse(x):
        cx = x.tocoo()
        # ``v`` (the stored value) is intentionally ignored: any non-zero
        # entry becomes an unweighted edge.
        for i,j,v in zip(cx.row, cx.col, cx.data):
            G.add_edge(i, j)
    else:
        raise Exception("Dense matrices not yet supported.")

    if undirected:
        G = G.to_undirected()

    return G
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    # n references to the same iterator: zip_longest pulls n items per tuple,
    # padding the final tuple with padvalue.
    chunks = [iter(iterable)] * n
    return zip_longest(*chunks, fillvalue=padvalue)
37086365706 | from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
def main_keyboard(send_url):
    """Inline keyboard with a payment-link button and a 'check payment' button."""
    pay_button = InlineKeyboardButton(
        text='Ссылка на оплату 👁',
        url=f'{send_url}'
    )
    check_button = InlineKeyboardButton(
        text='Проверить оплату 🔎',
        callback_data='check'
    )
    markup = InlineKeyboardMarkup()
    markup.row_width = 1
    markup.add(pay_button, check_button)
    return markup
34655342851 | import langchain
from langchain.schema import SystemMessage
from langchain.agents import OpenAIFunctionsAgent,initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
#from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.memory import ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder
from dotenv import load_dotenv
from config import OPEN_AI_MODEL_NAME,DEBUG_MODE_LLM
from image_processor import ImageProcessor
langchain.debug = DEBUG_MODE_LLM
load_dotenv()

#img preproc and ocr helper
processor=ImageProcessor()

system_message = SystemMessage(content="""You are an expert invoice, receipt summarizer, you're supposed to analyze every text in english or spanish and return data like restaurant name, items or products bought and its price as well as the total amount, however you cannot read images so you must use a tool to convert and image to text""")
#initial system prompt
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)

#define LLM to use
# NOTE(review): this llm (temperature=0.1) is overwritten by the second
# ChatOpenAI below before ever being used — confirm which config is intended.
llm = ChatOpenAI(temperature=0.1, model=OPEN_AI_MODEL_NAME,)
#tools to use as functions to trigger from the llm
tools = [processor]
#memory placeholder
# conversational_memory = ConversationBufferWindowMemory(
#     memory_key='chat_history',
#     k=5,
#     return_messages=True
# )
# Inject the conversation memory into the agent's prompt.
agent_kwargs = {
    "extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
}
conversational_memory = ConversationBufferMemory(memory_key="memory", return_messages=True)

llm = ChatOpenAI(
    temperature=0,
    model_name=OPEN_AI_MODEL_NAME
)

# OpenAI function-calling agent with the OCR tool and buffer memory.
agent = initialize_agent(
    agent=AgentType.OPENAI_FUNCTIONS,
    tools=tools,
    llm=llm,
    max_iterations=10,
    verbose=False,
    memory=conversational_memory,
    agent_kwargs=agent_kwargs,
    prompt=prompt
)
##TO DO, Remove agent and test sequential chain
if __name__=="__main__":
image_path = "images/raw/invoice0.jpg"
user_question = "Hello, could you please tell me what products were bought in this receipt i'm attaching? return a python dictionary with all you find"
response = agent.run(f'{user_question}, here is the image path: {image_path}')
print(response)
##testing memory
response=agent.run("could you tell me what was the most expensive item in the last receipt?")
print(response)
| statscol/ocr-LLM-image-summarizer | src/text_summarizer.py | text_summarizer.py | py | 2,381 | python | en | code | 1 | github-code | 13 |
28880450855 | from checkers.constants import WHITE
from checkers import simulation
import random
def alpha_beta(board, depth, max_player, game, heuristic, max_color, min_color, alpha, beta):
    """
    Alpha-beta pruned minimax search over checkers positions.

    Recursively explores every legal move down to ``depth`` plies, scoring
    leaf positions with the selected heuristic. Alpha/beta bounds are
    threaded through the recursion so branches that cannot influence the
    result are pruned. Ties between equally-scored moves are broken
    randomly so the engine does not always play the same line.

    :param board: current board
    :param depth: remaining search depth in plies
    :param max_player: True when this node maximizes for ``max_color``
    :param game: object containing game logic and visual updates (passed
        through unchanged to recursive calls)
    :param heuristic: which heuristic evaluation function (1, 2 or 3) to use
    :param max_color: color to maximize on
    :param min_color: color to minimize on
    :param alpha: best score the maximizer can guarantee so far (start -inf)
    :param beta: best score the minimizer can guarantee so far (start +inf)
    :return: best evaluation score and the board produced by the best move
    :raises ValueError: if ``heuristic`` is not 1, 2 or 3
    """
    # Leaf node: search depth exhausted or the game is already decided.
    if depth == 0 or board.winner():
        return _leaf_value(board, heuristic, max_color), board

    if max_player:
        best_score = float('-inf')
        best_move = None
        for move in simulation.get_all_moves(board, max_color):
            score = alpha_beta(move, depth - 1, False, game, heuristic,
                               max_color, min_color, alpha, beta)[0]
            alpha = max(alpha, score)
            if best_move is None or score > best_score:
                best_score = score
                best_move = move
            elif score == best_score and random.choice((0, 1)):
                # BUG FIX: the original tie branch was a no-op
                # (``best_move = best_move``), so ties always kept the first
                # move found; now an equally good move replaces it half the
                # time, as the docstring intended.
                best_move = move
            if beta <= alpha:
                break
        return best_score, best_move

    best_score = float('inf')
    best_move = None
    for move in simulation.get_all_moves(board, min_color):
        score = alpha_beta(move, depth - 1, True, game, heuristic,
                           max_color, min_color, alpha, beta)[0]
        beta = min(beta, score)
        if best_move is None or score < best_score:
            best_score = score
            best_move = move
        elif score == best_score and random.choice((0, 1)):
            # Same random tie-breaking fix as the maximizing branch.
            best_move = move
        if beta <= alpha:
            break
    return best_score, best_move


def _leaf_value(board, heuristic, max_color):
    """Score a leaf ``board`` with heuristic 1, 2 or 3 from max_color's side."""
    if max_color == WHITE:
        evaluators = {
            1: board.white_heuristic_eval_1,
            2: board.white_heuristic_eval_2,
            3: board.white_heuristic_eval_3,
        }
    else:
        evaluators = {
            1: board.black_heuristic_eval_1,
            2: board.black_heuristic_eval_2,
            3: board.black_heuristic_eval_3,
        }
    try:
        return evaluators[heuristic]()
    except KeyError:
        # The original silently returned None for an unknown heuristic,
        # crashing later with a confusing TypeError; fail loudly instead.
        raise ValueError('unknown heuristic: {!r}'.format(heuristic))
| mh022396/Checkers-AI | src/minimax/alpha_beta.py | alpha_beta.py | py | 3,391 | python | en | code | 0 | github-code | 13 |
import numpy as np
import cv2
import pandas as pd

# Read the source image. cv2.imread returns None (instead of raising) when
# the path is wrong, which would otherwise surface as a cryptic cvtColor
# error below -- fail fast with a clear message instead.
img = cv2.imread('./synthetic.jpg')
if img is None:
    raise FileNotFoundError("could not read image './synthetic.jpg'")

# Convert to gray scale.
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
'''
If you have a multi-channel image, extract the channel you want to work on
instead of converting to gray scale. For example, for a 3-channel image the
first channel is: img_gray = img[:, :, 0]
'''
# Flatten the grayscale image so each pixel becomes one row of the table.
img2 = img_gray.reshape(-1)

# Collect the original pixels plus every Gabor response in one DataFrame.
df = pd.DataFrame()
df['original_image'] = img2

# Generate Gabor features over a small grid of filter parameters.
num = 1       # counter used to label the Gabor feature columns
kernels = []  # keep every generated kernel for inspection/reuse
for theta in range(2):  # two orientations
    theta = theta / 4. * np.pi  # convert to radians (0 and pi/4)
    for sigma in (1, 3):  # Gaussian envelope widths
        for lamda in np.arange(0, np.pi, np.pi / 4):  # wavelengths
            for gamma in (0.05, 0.5):
                # gamma near 1 -> almost circular envelope;
                # gamma near 0 -> strongly elliptical envelope.
                gabor_label = 'Gabor' + str(num)  # Gabor1, Gabor2, ...
                ksize = 5  # kernel size (ksize x ksize)
                kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, psi=0, ktype=cv2.CV_32F)
                kernels.append(kernel)
                # Filter the image and store the flattened response as a
                # new feature column.
                fimg = cv2.filter2D(img_gray, cv2.CV_8UC3, kernel)
                filtered_img = fimg.reshape(-1)
                df[gabor_label] = filtered_img
                print(gabor_label, ': theta=', theta, ': sigma=', sigma, ': lamda=', lamda, ': gamma=', gamma)
                num += 1

# Display the original image and the response of the LAST filter generated.
cv2.imshow('original', img)
cv2.imshow('filtered', fimg)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Quick sanity check of the feature table.
print(df.head())

# Persist the feature matrix.
df.to_csv('./Gabor_features.csv')
| ahmadSoliman94/Computer-Vision | Image Processing/Gabor filter/gabor_filter_banks.py | gabor_filter_banks.py | py | 2,953 | python | en | code | 0 | github-code | 13 |
import sqlite3

# Default path to the SQLite database holding the mcap_change table.
db_path = "data/bronze/comp_db_2.db"


def update_mcap_changes(db_path=db_path):
    """Fill the change_180d and change_90d columns of mcap_change.

    For every row, change_180d (change_90d) is the difference between the
    row's ``value`` and the same project's value 180 (90) ``date_key`` units
    earlier; rows with no matching earlier row are left NULL by the
    correlated subqueries.

    :param db_path: path to the SQLite database file.
    """
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    try:
        # Correlated subqueries match each row to the same project's row
        # exactly 180 / 90 date_key units in the past.
        cursor.execute(
            """
            UPDATE mcap_change AS m
            SET
                change_180d = (
                    SELECT (m.value - prev180.value) AS change_180d
                    FROM mcap_change AS prev180
                    WHERE m.project = prev180.project
                    AND m.date_key = (prev180.date_key + 180)
                ),
                change_90d = (
                    SELECT (m.value - prev90.value) AS change_90d
                    FROM mcap_change AS prev90
                    WHERE m.project = prev90.project
                    AND m.date_key = (prev90.date_key + 90)
                )
            """
        )
        conn.commit()
        print(
            "Changes in 'value' column over the past 180 days and 90 days calculated and updated in 'mcap_change' table successfully!"
        )
    except sqlite3.Error as e:
        # Best-effort script behavior: report the failure, do not re-raise.
        print("Error:", e)
    finally:
        conn.close()


if __name__ == "__main__":
    update_mcap_changes()
| PaulApivat/data_engineer | practice/data-pipeline-project/comp_scripts/add_mcap_change.py | add_mcap_change.py | py | 1,206 | python | en | code | 0 | github-code | 13 |
def simulate_population(starting_number, daily_increase, total_days):
    """Return a list of (day, population) pairs for days 1..total_days.

    :param starting_number: population on day 1.
    :param daily_increase: daily growth rate as a fraction (0.30 for 30%).
    :param total_days: number of days to simulate.
    :return: list of (day, approximate population) tuples.
    """
    populations = []
    population = starting_number
    for day in range(1, total_days + 1):
        if day > 1:
            # Each day after the first grows by population * rate.
            population = population + population * daily_increase
        populations.append((day, population))
    return populations


def main():
    """Prompt for inputs and print the day-by-day population table."""
    starting_number = int(input('Enter Number of Organisms: '))
    daily_increase = int(input('Enter Daily Increase: ')) / 100
    total_days = int(input('Enter Days Left to Multiply: '))
    print('Day Approximate',' ',' Population')
    print('------------------------------')
    # BUG FIX: the original loop was ``range(starting_number, total_days + 1)``,
    # so the printed day numbers depended on the organism count instead of
    # simply running from day 1 to total_days.
    for day, population in simulate_population(starting_number, daily_increase, total_days):
        print(day, '\t\t\t\t\t', population)


if __name__ == '__main__':
    main()
| alecmsmith18/Project-1 | pop.py | pop.py | py | 560 | python | en | code | 0 | github-code | 13 |
70054571859 | # pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    ''' ansible module for gcloud iam service-account keys

    Dispatches on the ``state`` parameter:
      * list    -- return the service account's existing keys
      * absent  -- delete the key identified by ``key_id``
      * present -- create a new key and write it to a temp file

    NOTE(review): each branch ends with module.exit_json()/fail_json(),
    which presumably terminate the run (standard Ansible behavior), so the
    final "unknown state" exit_json is only reached if no branch matched --
    confirm against the Ansible version in use.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            # credentials
            state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
            service_account_name=dict(required=True, type='str'),
            key_format=dict(type='str', choices=['p12', 'json']),
            key_id=dict(default=None, type='str'),
            display_name=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
    )
    # Wrapper around the gcloud CLI for the requested service account.
    gcloud = GcloudIAMServiceAccountKeys(module.params['service_account_name'],
                                         key_format=module.params['key_format'])
    state = module.params['state']
    #####
    # Get: list the existing keys for the service account.
    #####
    if state == 'list':
        api_rval = gcloud.list_service_account_keys()
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval, state="list")
        module.exit_json(changed=False, results=api_rval['results'], state="list")
    ########
    # Delete: remove the key named by key_id. Check mode reports without acting.
    ########
    if state == 'absent':
        if module.check_mode:
            module.exit_json(changed=False, msg='Would have performed a delete.')
        api_rval = gcloud.delete_service_account_key(module.params['key_id'])
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval)
        module.exit_json(changed=True, results=api_rval, state="absent")
    if state == 'present':
        ########
        # Create: generate a new key, written to a fixed temp file.
        ########
        if module.check_mode:
            module.exit_json(changed=False, msg='Would have performed a create.')
        # Create it here.
        # NOTE(review): 'glcoud' looks like a typo for 'gcloud' in this path;
        # kept as-is since other tooling may depend on the exact filename.
        outputfile = '/tmp/glcoud_iam_sa_keys'
        api_rval = gcloud.create_service_account_key(outputfile)
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval)
        module.exit_json(changed=True, results=api_rval, state="present")
    # Fallback: state matched none of the handled values.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import Ansible module snippets. The wildcard import is required: legacy
# Ansible rewrites this line at build time to inline the module utilities.
from ansible.module_utils.basic import *

# Legacy-style Ansible modules invoke main() unconditionally at import time
# (no __name__ guard); presumably intentional here -- confirm before changing.
main()
| openshift/openshift-tools | ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa_keys.py | gcloud_iam_sa_keys.py | py | 2,395 | python | en | code | 161 | github-code | 13 |
def count_subject_lines(fname):
    """Return the number of lines in ``fname`` starting with 'Subject:'.

    :param fname: path of the text file to scan.
    :raises OSError: if the file cannot be opened.
    """
    count = 0
    # ``with`` guarantees the file is closed (the original leaked the handle).
    with open(fname) as fhand:
        for line in fhand:
            if line.startswith("Subject:"):
                count = count + 1
    return count


def main():
    """Prompt for a filename and report its 'Subject:' line count."""
    fname = input("Enter a filename: ")
    try:
        count = count_subject_lines(fname)
    except OSError:
        # The original used a bare ``except:`` (which also swallows
        # KeyboardInterrupt/SystemExit) and ``quit()``; catch only I/O
        # failures and return instead.
        print("File cannot be found: ", fname)
        return
    print("There were", count, " subjects lines in ", fname)


if __name__ == "__main__":
    main()
| geniusboywonder/PY4E-Assignments | Course 2 - Python Data Structures/Openfile.py | Openfile.py | py | 342 | python | en | code | 1 | github-code | 13 |
from fastapi import APIRouter, status, UploadFile, File
from scripts.utils.s3_image_util import S3
from scripts.core.handlers.image_handler import ImageHandler

image_router = APIRouter(prefix='/api')


@image_router.post('/upload', status_code=status.HTTP_200_OK)
def upload_image(file: UploadFile = File(...)):
    """Upload an image via the image handler and return its URL."""
    image_handler = ImageHandler()
    resp = image_handler.upload_image(file, file.filename)
    # Removed leftover debug ``print(resp)`` from the request path.
    return {"success": True, 'url': resp}


@image_router.delete('/delete', status_code=status.HTTP_202_ACCEPTED)
def delete_image(filename: str):
    """Delete a previously uploaded image identified by ``filename``."""
    image_handler = ImageHandler()
    resp = image_handler.delete_image(filename)
    return resp
| Sayed-Imran/AWS-S3-fastapi | scripts/services/images_service.py | images_service.py | py | 666 | python | en | code | 0 | github-code | 13 |
34766458600 | from unittest import TestCase
from piicatcher.explorer.files import Tokenizer
from piicatcher.piitypes import PiiTypes
from piicatcher.scanner import ColumnNameScanner, NERScanner, RegexScanner
class RegexTestCase(TestCase):
    """Exercise RegexScanner against known PII and non-PII strings."""

    def setUp(self):
        self.parser = RegexScanner()

    def test_phones(self):
        # Every common phone formatting variant must be flagged as PHONE.
        samples = (
            "12345678900",
            "1234567890",
            "+1 234 567 8900",
            "234-567-8900",
            "1-234-567-8900",
            "1.234.567.8900",
            "5678900",
            "567-8900",
            "(123) 456 7890",
            "+41 22 730 5989",
            "(+41) 22 730 5989",
            "+442345678900",
        )
        for sample in samples:
            self.assertEqual(self.parser.scan(sample), [PiiTypes.PHONE])

    def test_emails(self):
        for sample in ("john.smith@gmail.com", "john_smith@gmail.com", "john@example.net"):
            self.assertEqual(self.parser.scan(sample), [PiiTypes.EMAIL])
        # A doubled dot makes the address invalid, so nothing should match.
        for sample in ("john.smith@gmail..com",):
            self.assertEqual(self.parser.scan(sample), [])

    def test_credit_cards(self):
        samples = (
            "0000-0000-0000-0000",
            "0123456789012345",
            "0000 0000 0000 0000",
            "012345678901234",
        )
        for sample in samples:
            self.assertTrue(PiiTypes.CREDIT_CARD in self.parser.scan(sample))

    def test_street_addresses(self):
        samples = (
            "checkout the new place at 101 main st.",
            "504 parkwood drive",
            "3 elm boulevard",
            "500 elm street ",
        )
        for sample in samples:
            self.assertEqual(self.parser.scan(sample), [PiiTypes.ADDRESS])
        # "straight" is not a street suffix, so this must not match.
        for sample in ("101 main straight",):
            self.assertEqual(self.parser.scan(sample), [])
class NERTests(TestCase):
    """Check that NERScanner spots named entities in free text."""

    def setUp(self):
        self.parser = NERScanner()

    def test_person(self):
        detected = self.parser.scan("Roger is in the office")
        self.assertTrue(PiiTypes.PERSON in detected)

    def test_location(self):
        detected = self.parser.scan("Jonathan is in Bangalore")
        self.assertTrue(PiiTypes.LOCATION in detected)

    def test_date(self):
        detected = self.parser.scan("Jan 1 2016 is a new year")
        self.assertTrue(PiiTypes.BIRTH_DATE in detected)
class ColumnNameScannerTests(TestCase):
    """Verify ColumnNameScanner maps column names to the right PII types."""

    def setUp(self):
        self.parser = ColumnNameScanner()

    def _assert_detected(self, pii_type, *columns):
        # Every listed column name must be tagged with pii_type.
        for column in columns:
            self.assertTrue(pii_type in self.parser.scan(column))

    def test_person(self):
        self._assert_detected(PiiTypes.PERSON, "fname", "full_name", "name")

    def test_person_upper_case(self):
        self._assert_detected(PiiTypes.PERSON, "FNAME", "FULL_NAME", "NAME")

    def test_email(self):
        self._assert_detected(PiiTypes.EMAIL, "email", "EMAIL")

    def test_birth_date(self):
        self._assert_detected(PiiTypes.BIRTH_DATE, "dob", "birthday")

    def test_gender(self):
        self._assert_detected(PiiTypes.GENDER, "gender")

    def test_nationality(self):
        self._assert_detected(PiiTypes.NATIONALITY, "nationality")

    def test_address(self):
        self._assert_detected(
            PiiTypes.ADDRESS,
            "address", "city", "state", "country", "zipcode", "postal",
        )

    def test_user_name(self):
        self._assert_detected(PiiTypes.USER_NAME, "user", "userid", "username")

    def test_password(self):
        self._assert_detected(PiiTypes.PASSWORD, "pass", "password")

    def test_ssn(self):
        self._assert_detected(PiiTypes.SSN, "ssn")
class TestTokenizer(TestCase):
    """Smoke-test the file Tokenizer."""

    def test_tokenization(self):
        # Four whitespace-separated words should produce four tokens.
        tokens = Tokenizer().tokenize("Jonathan is in Bangalore")
        self.assertEqual(4, len(tokens))
| dm03514/piicatcher | tests/test_scanner.py | test_scanner.py | py | 4,741 | python | en | code | null | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.