hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0fbd2799a670eb06525707d7b4600e472a360266 | 3,049 | py | Python | taskplus/apps/rest/database.py | Himon-SYNCRAFT/taskplus | 9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b | ["BSD-3-Clause"] | null | null | null | taskplus/apps/rest/database.py | Himon-SYNCRAFT/taskplus | 9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b | ["BSD-3-Clause"] | null | null | null | taskplus/apps/rest/database.py | Himon-SYNCRAFT/taskplus | 9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b | ["BSD-3-Clause"] | null | null | null | import os
from sqlalchemy import event, create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from taskplus.core.domain import Statuses
from taskplus.apps.rest.settings import ProdConfig, DevConfig, TestConfig
if os.environ.get('TESTING'):
config = TestConfig
elif os.environ.get('PRODUCTION'):
config = ProdConfig
else:
config = DevConfig
db_uri = config.DB_URI
engine = create_engine(db_uri, echo=False, convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=True,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def create_db():
# turn on foreign keys
if db_session.bind.driver == 'pysqlite':
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
from taskplus.apps.rest import models
Base.metadata.reflect(engine)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
creator_role = models.UserRole(name='creator')
doer_role = models.UserRole(name='doer')
admin_role = models.UserRole(name='admin')
db_session.add(creator_role)
db_session.add(doer_role)
db_session.add(admin_role)
db_session.commit()
creator = models.User(name='creator', roles=[creator_role],
password='creator')
doer = models.User(name='doer', roles=[doer_role], password='doer')
super_user = models.User(
name='super',
roles=[creator_role, doer_role, admin_role],
password='super'
)
db_session.add(creator)
db_session.add(doer)
db_session.add(super_user)
db_session.commit()
status_new = models.TaskStatus(id=Statuses.NEW, name='new')
status_in_progress = models.TaskStatus(
id=Statuses.IN_PROGRESS, name='in progress')
status_completed = models.TaskStatus(
id=Statuses.COMPLETED, name='completed')
status_canceled = models.TaskStatus(
id=Statuses.CANCELED, name='canceled')
db_session.add(status_new)
db_session.add(status_in_progress)
db_session.add(status_completed)
db_session.add(status_canceled)
db_session.commit()
task = models.Task(name='example task 1', content='lorem ipsum',
status_id=status_new.id, creator_id=creator.id,
doer_id=doer.id)
task2 = models.Task(name='example task 2', content='lorem ipsum2',
status_id=status_completed.id, creator_id=creator.id,
doer_id=doer.id)
task3 = models.Task(name='example task 3', content='lorem ipsum',
status_id=status_new.id, creator_id=creator.id)
db_session.add(task)
db_session.add(task2)
db_session.add(task3)
db_session.commit()
| 32.784946 | 77 | 0.681207 | 382 | 3,049 | 5.23822 | 0.259162 | 0.089955 | 0.077961 | 0.051974 | 0.113443 | 0.075962 | 0.075962 | 0.075962 | 0.075962 | 0.053973 | 0 | 0.003333 | 0.212857 | 3,049 | 92 | 78 | 33.141304 | 0.830417 | 0.00656 | 0 | 0.082192 | 0 | 0 | 0.069045 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027397 | false | 0.041096 | 0.109589 | 0 | 0.136986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
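The database bootstrap above registers the SQLite foreign-key pragma through a connect-time event. A minimal standalone sketch of that pattern, assuming SQLAlchemy 1.4+ and an illustrative in-memory database:

from sqlalchemy import create_engine, event
from sqlalchemy.engine import Engine

@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    # SQLite disables foreign-key enforcement by default; enable it per connection
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

engine = create_engine("sqlite://")  # illustrative in-memory database
with engine.connect() as conn:
    print(conn.exec_driver_sql("PRAGMA foreign_keys").scalar())  # -> 1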
0fbd33f2ae51a0fac3510fa7f66242f12103efe6 | 3,547 | py | Python | tftuner/tftuner.py | Emma926/mcbench | 14e4c4741fb823abb75b7bc5a68c88a7798ce904 | ["Apache-2.0"] | 13 | 2020-03-13T16:12:32.000Z | 2022-01-12T07:14:24.000Z | tftuner/tftuner.py | Emma926/mcbench | 14e4c4741fb823abb75b7bc5a68c88a7798ce904 | ["Apache-2.0"] | null | null | null | tftuner/tftuner.py | Emma926/mcbench | 14e4c4741fb823abb75b7bc5a68c88a7798ce904 | ["Apache-2.0"] | 6 | 2020-01-07T02:56:52.000Z | 2021-03-08T13:26:20.000Z | '''
TensorFlow Tuner
Improves a TensorFlow model's performance on CPU. This code is under the Apache 2.0 License.
For more details about this tuner, please refer to the following paper. If you find it
useful, please cite our paper.
Wang, Yu Emma, Carole-Jean Wu, Xiaodong Wang, Kim Hazelwood, and David Brooks.
"Exploiting Parallelism Opportunities with Deep Learning Frameworks."
arXiv preprint arXiv:1908.04705 (2019).
This code was tested with fc_tf.py, inception, and NCF.
Yu Emma Wang
10/16/2019
'''
# To simplify the model graph, this function finds
# the parent heavy ops of a given op.
def findparentop(curr, g_dict, heavy_ops):
l = []
node = []
seen = set()
for i in g_dict[curr]:
node.append(i)
while node:
n = node[0]
del node[0]
seen.add(n)
if n in heavy_ops:
l.append(n)
continue
for i in g_dict[n]:
if not i in seen:
node.append(i)
return list(set(l))
# find the nodes at the bottom of the graph
# such nodes are not parents of any nodes
def find_bottom_node(s_graph, heavy_ops):
p_set = set()
for op in heavy_ops:
for p in s_graph[op]:
p_set.add(p)
return set(heavy_ops) - p_set
# find the depth of the graph by depth first search
def dfs(n, s_graph):
if len(s_graph[n]) == 0:
return 1
depths = []
for p in s_graph[n]:
depths.append(dfs(p, s_graph))
return max(depths) + 1
# A heavy operator is defined as an op that takes
# much more execution time than other ops, such as
# MatMul, Conv and Embedding ops.
# It is important to note that heavy ops vary based
# on the model, hardware and framework.
# This function implements a heuristic to identify
# heavy ops; to generalize to more scenarios, it
# has to be validated.
def isheavy(n, embedding_flag):
if 'gradient' in n:
return False
if embedding_flag and 'embedding_lookup' in n.split('/')[-1]:
return True
    # parenthesised so MatMul/Conv only count as heavy when no embedding op is present
    if not embedding_flag and ('MatMul' in n.split('/')[-1] or 'Conv' in n.split('/')[-1]):
return True
return False
# the interface of TF-Tuner.
def tftuner(graph):
# Initialize data structures
embedding_flag = False
g_dict = {}
s_graph = {}
heavy_ops = []
for op in graph.get_operations():
g_dict[op.name] = []
for i in op.inputs:
g_dict[op.name].append(i.name.split(':')[0])
#print(op.name, '<-', i)
if 'embedding' in op.name:
embedding_flag = True
for op in graph.get_operations():
if isheavy(op.name, embedding_flag):
heavy_ops.append(op.name)
for op in heavy_ops:
s_graph[op] = []
print('=========== Graph Summary ===========')
total_nodes = 0
for op in g_dict:
total_nodes += len(g_dict[op])
print('Total Ops:', len(g_dict), 'Total nodes:', total_nodes)
print('Heavy Ops:', len(heavy_ops))
print('=========== Heavy Ops ===========')
for op in s_graph.keys():
print(op)
print('# of heavy ops:', len(heavy_ops))
#exit()
print('=========== Simplify Graph ===========')
for op in reversed(heavy_ops):
l = findparentop(op, g_dict, heavy_ops)
s_graph[op] = l
print(op, l)
print('=========== Find Graph Depth ===========')
bottoms = find_bottom_node(s_graph, heavy_ops)
print('Bottom nodes: ', bottoms)
depths = []
for node in bottoms:
d = dfs(node, s_graph)
depths.append(d)
heavy_op = len(s_graph)
heavy_layer = max(depths)
avg_width = heavy_op*1.0/heavy_layer
print('*** Heavy Ops = ', heavy_op)
print('*** Layers = ', heavy_layer)
print('*** Avg Graph Width = ', heavy_op/heavy_layer)
return avg_width
| 27.496124 | 87 | 0.649845 | 567 | 3,547 | 3.948854 | 0.301587 | 0.07146 | 0.021885 | 0.018758 | 0.136222 | 0.064314 | 0.025011 | 0 | 0 | 0 | 0 | 0.012531 | 0.212574 | 3,547 | 128 | 88 | 27.710938 | 0.789116 | 0.318579 | 0 | 0.144578 | 0 | 0 | 0.128291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0 | 0 | 0.168675 | 0.156627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
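A possible way to drive the tuner on a toy graph; a sketch only, assuming TensorFlow 2.x with the v1 compatibility API and that the file above is importable as tftuner:

import tensorflow.compat.v1 as tf
from tftuner import tftuner  # assumed import path for the module above

tf.disable_eager_execution()  # the tuner walks a static graph
graph = tf.Graph()
with graph.as_default():
    a = tf.get_variable("a", shape=(64, 64))
    b = tf.get_variable("b", shape=(64, 64))
    c = tf.matmul(a, b)  # op named MatMul, counted as heavy
    d = tf.matmul(c, b)  # op named MatMul_1, also heavy

avg_width = tftuner(graph)  # prints the graph summary and returns the average width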
0fbf9d0d72be7779952b7396a5dc680d7e0d159e | 746 | py | Python | asdfc/tools.py | surchs/ASD_subtype_code_supplement | 8ed67ada3cdceb2a45b53e5d69b2a0a8cd6035f1 | ["CC-BY-4.0"] | 1 | 2020-04-09T01:26:07.000Z | 2020-04-09T01:26:07.000Z | asdfc/tools.py | surchs/ASD_subtype_code_supplement | 8ed67ada3cdceb2a45b53e5d69b2a0a8cd6035f1 | ["CC-BY-4.0"] | null | null | null | asdfc/tools.py | surchs/ASD_subtype_code_supplement | 8ed67ada3cdceb2a45b53e5d69b2a0a8cd6035f1 | ["CC-BY-4.0"] | null | null | null | import sys
import time
import itertools as it
def find_all_combinations(n_elements, n_group=2):
# Define the session IDs
elements = list(range(n_elements))
# Find all combinations of 2 sessions to compute ICC on
icc_sessions = list(it.combinations(elements, n_group))
# Find the remaining sessions for each of the ICC sessions
remaining_sessions = [list(set(elements) - set(icc_s)) for icc_s in icc_sessions]
# Find all combinations of subtype sessions for 1 - 8 subtype sessions
session_pairs = [(icc, sbt) for rem, icc in zip(remaining_sessions, icc_sessions)
for n_sbt in range(1, len(rem) + 1)
for sbt in list(it.combinations(rem, n_sbt))]
return session_pairs
| 39.263158 | 85 | 0.697051 | 113 | 746 | 4.451327 | 0.362832 | 0.087475 | 0.11332 | 0.083499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010453 | 0.230563 | 746 | 18 | 86 | 41.444444 | 0.865854 | 0.270777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
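A quick check of the return shape, assuming the function above is in scope; for four sessions, the first pairing below means "compute ICC on sessions 0 and 1, build the subtype from session 2":

pairs = find_all_combinations(4, n_group=2)
print(pairs[0])    # ((0, 1), (2,))
print(len(pairs))  # 18: 6 ICC pairs x 3 non-empty subsets of the 2 remaining sessions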
0fc219ffeb544cb0eea7a82e76db069da521fd02 | 2,781 | py | Python | zsl_kg/gnn/mean_agg.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | ["Apache-2.0"] | 83 | 2021-08-30T02:50:37.000Z | 2022-02-22T09:37:36.000Z | zsl_kg/gnn/mean_agg.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | ["Apache-2.0"] | 2 | 2021-09-10T08:44:13.000Z | 2022-01-23T17:33:35.000Z | zsl_kg/gnn/mean_agg.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | ["Apache-2.0"] | 6 | 2021-09-10T07:09:41.000Z | 2021-11-07T14:31:33.000Z | import torch
import torch.nn as nn
from zsl_kg.common.graph import GraphFeature, NeighSampler
from zsl_kg.knowledge_graph.kg import KG
class MeanAggregator(nn.Module):
def __init__(
self,
features: object,
sampler: NeighSampler = None,
feature_dropout: float = 0.5,
self_loop: bool = True,
):
"""Mean aggregator from Inductive Representation Learning on
Large Graphs.
Args:
features (object): The combine function or None depending
on the layer number.
sampler (NeighSampler, optional): Graph sampler for the
knowledge graph. Defaults to None.
feature_dropout (float, optional): dropout for the node
features. Defaults to 0.5.
self_loop (bool, optional): includes a self loop of the
node in its neighbourhood. Defaults to True.
"""
super(MeanAggregator, self).__init__()
self.features = GraphFeature(features)
if sampler is None:
self.sampler = NeighSampler(-1, mode="none")
else:
self.sampler = sampler
self.feature_dropout = nn.Dropout(feature_dropout)
self.self_loop = self_loop
def forward(self, nodes: torch.tensor, kg: KG):
"""Forward function for the attention aggregator.
Args:
nodes (torch.tensor): nodes in the knowledge graph.
kg (KG): knowledge graph (ConceptNet or WordNet).
Returns:
torch.Tensor: features/embeddings for nodes.
"""
_neighs = self.sampler.sample([int(n) for n in nodes], kg)
samp_neighs = []
for i, adj_list in enumerate(_neighs):
samp_neighs.append(set([node_tuple[0] for node_tuple in adj_list]))
if self.self_loop:
samp_neighs[i].add(int(nodes[i]))
unique_nodes_list = sorted(list(set.union(*samp_neighs)))
unique_nodes = {n: i for i, n in enumerate(unique_nodes_list)}
mask = torch.zeros(len(samp_neighs), len(unique_nodes))
column_indices = [
unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh
]
row_indices = [
i
for i in range(len(samp_neighs))
for j in range(len(samp_neighs[i]))
]
mask[row_indices, column_indices] = 1
num_neigh = mask.sum(1, keepdim=True)
mask = mask.div(num_neigh.clamp(1e-8))
node_tensor = torch.tensor(unique_nodes_list).type_as(nodes)
embed_matrix = self.features(node_tensor, kg)
embed_matrix = self.feature_dropout(embed_matrix)
mask = mask.type_as(embed_matrix)
to_feats = mask.mm(embed_matrix)
return to_feats
| 33.914634 | 79 | 0.61165 | 347 | 2,781 | 4.720461 | 0.314121 | 0.04884 | 0.02381 | 0.028083 | 0.041514 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005149 | 0.30169 | 2,781 | 81 | 80 | 34.333333 | 0.838311 | 0.252787 | 0 | 0 | 0 | 0 | 0.002074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.085106 | 0 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
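The forward pass above averages neighbour features through a 0/1 mask and a matmul; a minimal torch illustration of that trick with toy neighbourhoods (not the zsl-kg API):

import torch

samp_neighs = [{0, 1}, {1, 2}]            # sampled neighbourhood per target node
unique = sorted(set.union(*samp_neighs))  # [0, 1, 2]
index = {n: i for i, n in enumerate(unique)}
mask = torch.zeros(len(samp_neighs), len(unique))
for row, neighs in enumerate(samp_neighs):
    for n in neighs:
        mask[row, index[n]] = 1
mask = mask.div(mask.sum(1, keepdim=True).clamp(1e-8))
feats = torch.tensor([[0.0], [2.0], [4.0]])  # one feature per unique node
print(mask.mm(feats))  # tensor([[1.], [3.]]) -- per-node neighbour means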
0fc750bf5e0808942c59d662eaf6775e93c1c3d6 | 819 | py | Python | backend/api/migrations/versions/df8b0d746557_.py | osmontrouge/osmybiz | 8253bb5a923f332c644db83f18dde48dde57f78c | ["MIT"] | null | null | null | backend/api/migrations/versions/df8b0d746557_.py | osmontrouge/osmybiz | 8253bb5a923f332c644db83f18dde48dde57f78c | ["MIT"] | 14 | 2022-02-10T22:25:41.000Z | 2022-03-02T09:40:55.000Z | backend/api/migrations/versions/df8b0d746557_.py | osmontrouge/osmybiz | 8253bb5a923f332c644db83f18dde48dde57f78c | ["MIT"] | null | null | null | """empty message
Revision ID: df8b0d746557
Revises: 21ed58aa3524
Create Date: 2018-11-06 14:08:37.197401
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df8b0d746557'
down_revision = '21ed58aa3524'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('temporary_osm_id', sa.BigInteger(), nullable=True))
op.execute("""
UPDATE "user"
SET temporary_osm_id = -1
""")
op.alter_column('user', 'temporary_osm_id', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'temporary_osm_id')
# ### end Alembic commands ###
| 24.088235 | 88 | 0.677656 | 102 | 819 | 5.303922 | 0.54902 | 0.088725 | 0.103512 | 0.085028 | 0.251386 | 0.162662 | 0.162662 | 0.162662 | 0 | 0 | 0 | 0.07958 | 0.186813 | 819 | 33 | 89 | 24.818182 | 0.732733 | 0.360195 | 0 | 0 | 0 | 0 | 0.297741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fc81f5133ddcd4f5e96b1eb61bb0ced6d2a95b6 | 1,979 | py | Python | scripts/fig2.py | Nanguage/miniMDS | 0502f33a01c4f7b1e5ef83a3079d7f2014f73f99 | ["MIT"] | null | null | null | scripts/fig2.py | Nanguage/miniMDS | 0502f33a01c4f7b1e5ef83a3079d7f2014f73f99 | ["MIT"] | null | null | null | scripts/fig2.py | Nanguage/miniMDS | 0502f33a01c4f7b1e5ef83a3079d7f2014f73f99 | ["MIT"] | null | null | null | import sys
sys.path.append("..")
from data_tools import ChromParameters
from tools import Tracker
import heatmap as hm
import simple_tad as tad
import numpy as np
def matFromDixon(path, chrom):
"""Creates contact matrix from Dixon tsv data"""
numBins = chrom.getLength()
mat = np.zeros((numBins, numBins))
tracker = Tracker("Reading " + path, chrom.size)
with open(path) as infile:
for line in infile:
line = line.strip().split()
pos1 = int(line[0])
pos2 = int(line[1])
if pos1 != pos2:
if pos1 >= chrom.minPos and pos1 <= chrom.maxPos and pos2 >= chrom.minPos and pos2 <= chrom.maxPos:
bin1 = chrom.getAbsoluteIndex(pos1)
bin2 = chrom.getAbsoluteIndex(pos2)
                    # store each contact in the lower triangle of the matrix
                    if bin1 > bin2:
                        row = bin1
                        col = bin2
                    else:
                        row = bin2
                        col = bin1
mat[row, col] += 1
tracker.increment()
infile.close()
return mat
def plotLevels(mat):
    smoothingFactors = [1, 2, 3, 8, 33] #these smoothing factors were selected to best demonstrate TAD levels
domainsToInclude = [list(range(1, 15)), [2,3,4,5], [7], [1,6], [3]] #selected domains from these smoothing factors to maximize prettiness
all_tads = []
for i in range(len(smoothingFactors)):
smoothingFactor = smoothingFactors[i]
indices = domainsToInclude[i]
tads = tad.getDomains(mat, smoothingFactor, 0)
for index in indices:
all_tads.append(tads[index])
hm.heatMapFromMat(mat, 100, all_tads, "Fig2") #all levels combined
minPos = 49000000 #from Dixon
maxPos = 54066692 #from Dixon
res = 40000 #from Dixon
name = "chr22"
size = 30949158
path = "mESC_chr6.tsv"
chrom = ChromParameters(minPos, maxPos, res, name, size)
mat = matFromDixon(path, chrom)
plotLevels(mat)
| 34.719298 | 142 | 0.597271 | 237 | 1,979 | 4.962025 | 0.434599 | 0.030612 | 0.035714 | 0.02381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054348 | 0.302678 | 1,979 | 56 | 143 | 35.339286 | 0.797826 | 0.123295 | 0 | 0.08 | 0 | 0 | 0.018561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
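A small numpy sketch of the lower-triangle storage convention used in matFromDixon above (toy bins, illustrative only):

import numpy as np

mat = np.zeros((3, 3))
for bin1, bin2 in [(0, 2), (2, 0), (1, 0)]:
    row, col = max(bin1, bin2), min(bin1, bin2)
    mat[row, col] += 1
print(mat)  # both (0, 2) and (2, 0) land in mat[2, 0]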
0fcb5f0d3c80784eb50df1215708bd48df188cf9 | 4,122 | py | Python | examples/PlainPython/Charts/Matplotlib/app.py | Kitware/py-web-vue | 46ae0f999572e6dbf617617f89e552dc3c781e75 | ["BSD-3-Clause"] | 14 | 2021-04-30T09:19:05.000Z | 2022-03-29T06:47:37.000Z | examples/PlainPython/Charts/Matplotlib/app.py | Kitware/py-web-vue | 46ae0f999572e6dbf617617f89e552dc3c781e75 | ["BSD-3-Clause"] | 11 | 2021-06-11T17:54:15.000Z | 2022-03-17T19:54:50.000Z | examples/PlainPython/Charts/Matplotlib/app.py | Kitware/py-web-vue | 46ae0f999572e6dbf617617f89e552dc3c781e75 | ["BSD-3-Clause"] | 5 | 2021-09-06T11:30:54.000Z | 2022-03-11T10:01:24.000Z | import numpy as np
import matplotlib.pyplot as plt
import mpld3
from pywebvue import App
from pywebvue.modules import Matplotlib
# -----------------------------------------------------------------------------
# App initialization
# -----------------------------------------------------------------------------
app = App("Matplotlib Demo")
app.state = {
"spec": None,
"active": "FirstDemo",
"examples": [
{"text": "First Demo", "value": "FirstDemo"},
{"text": "Multi Lines", "value": "MultiLines"},
{"text": "Dots and Points", "value": "DotsandPoints"},
{"text": "Moving Window Average", "value": "MovingWindowAverage"},
{"text": "Subplots", "value": "Subplots"},
],
}
app.enable_module(Matplotlib)
# -----------------------------------------------------------------------------
@app.change("active")
def update_chart():
chart_name = app.get("active")
globals()[chart_name]()
# -----------------------------------------------------------------------------
# Chart examples from:
# - http://jakevdp.github.io/blog/2013/12/19/a-d3-viewer-for-matplotlib/
# -----------------------------------------------------------------------------
def FirstDemo():
fig, ax = plt.subplots()
np.random.seed(0)
ax.plot(
np.random.normal(size=100), np.random.normal(size=100), "or", ms=10, alpha=0.3
)
ax.plot(
np.random.normal(size=100), np.random.normal(size=100), "ob", ms=20, alpha=0.1
)
ax.set_xlabel("this is x")
ax.set_ylabel("this is y")
ax.set_title("Matplotlib Plot Rendered in D3!", size=14)
ax.grid(color="lightgray", alpha=0.7)
# Push chart to client
app.set("spec", mpld3.fig_to_dict(fig))
# -----------------------------------------------------------------------------
def MultiLines():
fig, ax = plt.subplots()
x = np.linspace(0, 10, 1000)
for offset in np.linspace(0, 3, 7):
ax.plot(x, 0.9 * np.sin(x - offset), lw=5, alpha=0.4)
ax.set_ylim(-1.2, 1.0)
ax.text(5, -1.1, "Here are some curves", size=18)
ax.grid(color="lightgray", alpha=0.7)
# Push chart to client
app.set("spec", mpld3.fig_to_dict(fig))
# -----------------------------------------------------------------------------
def DotsandPoints():
fig, ax = plt.subplots()
ax.plot(
np.random.rand(20),
"-o",
alpha=0.5,
color="black",
linewidth=5,
markerfacecolor="green",
markeredgecolor="lightgreen",
markersize=20,
markeredgewidth=10,
)
ax.grid(True, color="#EEEEEE", linestyle="solid")
ax.set_xlim(-2, 22)
ax.set_ylim(-0.1, 1.1)
# Push chart to client
app.set("spec", mpld3.fig_to_dict(fig))
# -----------------------------------------------------------------------------
def MovingWindowAverage():
np.random.seed(0)
t = np.linspace(0, 10, 300)
x = np.sin(t)
dx = np.random.normal(0, 0.3, 300)
kernel = np.ones(25) / 25.0
x_smooth = np.convolve(x + dx, kernel, mode="same")
fig, ax = plt.subplots()
ax.plot(t, x + dx, linestyle="", marker="o", color="black", markersize=3, alpha=0.3)
ax.plot(t, x_smooth, "-k", lw=3)
ax.plot(t, x, "--k", lw=3, color="blue")
# Push chart to client
app.set("spec", mpld3.fig_to_dict(fig))
# -----------------------------------------------------------------------------
def Subplots():
fig = plt.figure(figsize=(8, 6))
fig.subplots_adjust(hspace=0.3)
np.random.seed(0)
for i in range(1, 5):
ax = fig.add_subplot(2, 2, i)
color = np.random.random(3)
ax.plot(np.random.random(30), lw=2, c=color)
ax.set_title("RGB = ({0:.2f}, {1:.2f}, {2:.2f})".format(*color), size=14)
ax.grid(color="lightgray", alpha=0.7)
# Push chart to client
app.set("spec", mpld3.fig_to_dict(fig))
# -----------------------------------------------------------------------------
# Start server
# -----------------------------------------------------------------------------
if __name__ == "__main__":
update_chart()
app.run_server()
| 27.851351 | 88 | 0.466521 | 486 | 4,122 | 3.884774 | 0.320988 | 0.04661 | 0.037076 | 0.045021 | 0.270657 | 0.246292 | 0.222987 | 0.222987 | 0.222987 | 0.222987 | 0 | 0.03829 | 0.182678 | 4,122 | 147 | 89 | 28.040816 | 0.522113 | 0.26395 | 0 | 0.204545 | 0 | 0 | 0.145038 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.056818 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
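The app pushes each chart to the client as an mpld3 JSON spec; the core conversion, stripped of the pywebvue plumbing (a sketch, not part of the py-web-vue API):

import matplotlib.pyplot as plt
import mpld3

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], "-o")
spec = mpld3.fig_to_dict(fig)  # JSON-serialisable dict, rendered client-side with D3
print(sorted(spec.keys()))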
0fd1d8425a9dd8536d7171c941d231615e229f90 | 9,274 | py | Python | kws/dataset/processor.py | ferb2015/wekws | 8db8007ca62382532bbaf791ace14bfc89d9b667 | ["Apache-2.0"] | 81 | 2021-05-22T17:21:05.000Z | 2021-11-28T06:56:01.000Z | kws/dataset/processor.py | ferb2015/wekws | 8db8007ca62382532bbaf791ace14bfc89d9b667 | ["Apache-2.0"] | 16 | 2021-11-30T08:56:15.000Z | 2022-03-23T03:17:28.000Z | kws/dataset/processor.py | ferb2015/wekws | 8db8007ca62382532bbaf791ace14bfc89d9b667 | ["Apache-2.0"] | 31 | 2021-12-06T04:52:32.000Z | 2022-03-22T08:28:13.000Z | # Copyright (c) 2021 Binbin Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import random
import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from torch.nn.utils.rnn import pad_sequence
def parse_raw(data):
""" Parse key/wav/txt from json line
Args:
data: Iterable[str], str is a json line has key/wav/txt
Returns:
Iterable[{key, wav, label, sample_rate}]
"""
for sample in data:
assert 'src' in sample
json_line = sample['src']
obj = json.loads(json_line)
assert 'key' in obj
assert 'wav' in obj
assert 'txt' in obj
key = obj['key']
wav_file = obj['wav']
txt = obj['txt']
try:
waveform, sample_rate = torchaudio.load(wav_file)
example = dict(key=key,
label=txt,
wav=waveform,
sample_rate=sample_rate)
yield example
except Exception as ex:
logging.warning('Failed to read {}'.format(wav_file))
def filter(data, max_length=10240, min_length=10):
""" Filter sample according to feature and label length
Inplace operation.
        Args:
data: Iterable[{key, wav, label, sample_rate}]
max_length: drop utterance which is greater than max_length(10ms)
min_length: drop utterance which is less than min_length(10ms)
Returns:
Iterable[{key, wav, label, sample_rate}]
"""
for sample in data:
assert 'sample_rate' in sample
assert 'wav' in sample
# sample['wav'] is torch.Tensor, we have 100 frames every second
num_frames = sample['wav'].size(1) / sample['sample_rate'] * 100
if num_frames < min_length:
continue
if num_frames > max_length:
continue
yield sample
def resample(data, resample_rate=16000):
""" Resample data.
Inplace operation.
Args:
data: Iterable[{key, wav, label, sample_rate}]
resample_rate: target resample rate
Returns:
Iterable[{key, wav, label, sample_rate}]
"""
for sample in data:
assert 'sample_rate' in sample
assert 'wav' in sample
sample_rate = sample['sample_rate']
waveform = sample['wav']
if sample_rate != resample_rate:
sample['sample_rate'] = resample_rate
sample['wav'] = torchaudio.transforms.Resample(
orig_freq=sample_rate, new_freq=resample_rate)(waveform)
yield sample
def speed_perturb(data, speeds=None):
""" Apply speed perturb to the data.
Inplace operation.
Args:
data: Iterable[{key, wav, label, sample_rate}]
speeds(List[float]): optional speed
Returns:
Iterable[{key, wav, label, sample_rate}]
"""
if speeds is None:
speeds = [0.9, 1.0, 1.1]
for sample in data:
assert 'sample_rate' in sample
assert 'wav' in sample
sample_rate = sample['sample_rate']
waveform = sample['wav']
speed = random.choice(speeds)
if speed != 1.0:
wav, _ = torchaudio.sox_effects.apply_effects_tensor(
waveform, sample_rate,
[['speed', str(speed)], ['rate', str(sample_rate)]])
sample['wav'] = wav
yield sample
def compute_mfcc(
data,
feature_type='mfcc',
num_ceps=80,
num_mel_bins=80,
frame_length=25,
frame_shift=10,
dither=0.0,
):
"""Extract mfcc
Args:
data: Iterable[{key, wav, label, sample_rate}]
Returns:
Iterable[{key, feat, label}]
"""
for sample in data:
assert 'sample_rate' in sample
assert 'wav' in sample
assert 'key' in sample
assert 'label' in sample
sample_rate = sample['sample_rate']
waveform = sample['wav']
waveform = waveform * (1 << 15)
# Only keep key, feat, label
mat = kaldi.mfcc(
waveform,
num_ceps=num_ceps,
num_mel_bins=num_mel_bins,
frame_length=frame_length,
frame_shift=frame_shift,
dither=dither,
energy_floor=0.0,
sample_frequency=sample_rate,
)
yield dict(key=sample['key'], label=sample['label'], feat=mat)
def compute_fbank(data,
feature_type='fbank',
num_mel_bins=23,
frame_length=25,
frame_shift=10,
dither=0.0):
""" Extract fbank
Args:
data: Iterable[{key, wav, label, sample_rate}]
Returns:
Iterable[{key, feat, label}]
"""
for sample in data:
assert 'sample_rate' in sample
assert 'wav' in sample
assert 'key' in sample
assert 'label' in sample
sample_rate = sample['sample_rate']
waveform = sample['wav']
waveform = waveform * (1 << 15)
# Only keep key, feat, label
mat = kaldi.fbank(waveform,
num_mel_bins=num_mel_bins,
frame_length=frame_length,
frame_shift=frame_shift,
dither=dither,
energy_floor=0.0,
sample_frequency=sample_rate)
yield dict(key=sample['key'], label=sample['label'], feat=mat)
def spec_aug(data, num_t_mask=2, num_f_mask=2, max_t=50, max_f=10):
""" Do spec augmentation
Inplace operation
Args:
data: Iterable[{key, feat, label}]
num_t_mask: number of time mask to apply
num_f_mask: number of freq mask to apply
max_t: max width of time mask
max_f: max width of freq mask
        Returns:
Iterable[{key, feat, label}]
"""
for sample in data:
assert 'feat' in sample
x = sample['feat']
assert isinstance(x, torch.Tensor)
y = x.clone().detach()
max_frames = y.size(0)
max_freq = y.size(1)
# time mask
for i in range(num_t_mask):
start = random.randint(0, max_frames - 1)
length = random.randint(1, max_t)
end = min(max_frames, start + length)
y[start:end, :] = 0
# freq mask
for i in range(num_f_mask):
start = random.randint(0, max_freq - 1)
length = random.randint(1, max_f)
end = min(max_freq, start + length)
y[:, start:end] = 0
sample['feat'] = y
yield sample
def shuffle(data, shuffle_size=1000):
""" Local shuffle the data
Args:
data: Iterable[{key, feat, label}]
shuffle_size: buffer size for shuffle
Returns:
Iterable[{key, feat, label}]
"""
buf = []
for sample in data:
buf.append(sample)
if len(buf) >= shuffle_size:
random.shuffle(buf)
for x in buf:
yield x
buf = []
# The sample left over
random.shuffle(buf)
for x in buf:
yield x
def batch(data, batch_size=16):
""" Static batch the data by `batch_size`
Args:
data: Iterable[{key, feat, label}]
batch_size: batch size
Returns:
Iterable[List[{key, feat, label}]]
"""
buf = []
for sample in data:
buf.append(sample)
if len(buf) >= batch_size:
yield buf
buf = []
if len(buf) > 0:
yield buf
def padding(data):
""" Padding the data into training data
Args:
data: Iterable[List[{key, feat, label}]]
Returns:
Iterable[Tuple(keys, feats, labels, feats lengths, label lengths)]
"""
for sample in data:
assert isinstance(sample, list)
feats_length = torch.tensor([x['feat'].size(0) for x in sample],
dtype=torch.int32)
order = torch.argsort(feats_length, descending=True)
feats_lengths = torch.tensor(
[sample[i]['feat'].size(0) for i in order], dtype=torch.int32)
sorted_feats = [sample[i]['feat'] for i in order]
sorted_keys = [sample[i]['key'] for i in order]
sorted_labels = torch.tensor([sample[i]['label'] for i in order],
dtype=torch.int64)
padded_feats = pad_sequence(sorted_feats,
batch_first=True,
padding_value=0)
yield (sorted_keys, padded_feats, sorted_labels, feats_lengths)
| 30.208469 | 78 | 0.558119 | 1,129 | 9,274 | 4.456156 | 0.204606 | 0.065593 | 0.026237 | 0.029815 | 0.443649 | 0.401113 | 0.337507 | 0.330352 | 0.330352 | 0.318028 | 0 | 0.016288 | 0.344619 | 9,274 | 306 | 79 | 30.30719 | 0.811451 | 0.277981 | 0 | 0.411765 | 0 | 0 | 0.045514 | 0 | 0 | 0 | 0 | 0 | 0.123529 | 1 | 0.058824 | false | 0 | 0.041176 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
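The processors above are generators meant to be chained; a toy pipeline, assuming the module is importable under the path below and that torchaudio is installed:

import torch
import kws.dataset.processor as processor  # assumed import path

data = [{"key": "utt1", "label": 0, "wav": torch.randn(1, 16000), "sample_rate": 16000}]
pipe = processor.resample(processor.filter(iter(data)), resample_rate=16000)
pipe = processor.compute_fbank(pipe, num_mel_bins=23)
for sample in pipe:
    print(sample["key"], sample["feat"].shape)  # utt1, roughly (98, 23)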
0fd37e4a94f005d2e4698ad102127c46078554ac | 530 | py | Python | 6 kyu/Product Partitions I.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 6 kyu/Product Partitions I.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 6 kyu/Product Partitions I.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | null | null | null | def prod_int_part(n):
res=helper(n, [], set())
return [len(res), [] if not res else list(res[0])]
def factorization(n):
res=set()
for i in range(2, int(n**0.5)+1):
if n%i==0:
res.add(i)
res.add(n//i)
res.add(n)
return sorted(res)
def helper(n, cur, memo):
if n==1:
if len(cur)>1:
memo.add(tuple(sorted(cur)))
return
res=factorization(n)
for i in res:
helper(n//i, cur+[i], memo)
return sorted(memo, key=lambda x: -len(x)) | 26.5 | 54 | 0.518868 | 90 | 530 | 3.033333 | 0.344444 | 0.076923 | 0.07326 | 0.058608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021563 | 0.3 | 530 | 20 | 55 | 26.5 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fd3f38639b58bfaa2d13a8e15df20cc8f1aedbf | 1,675 | py | Python | keckcode/esiredux/esi1d.py | cdfassnacht/keck_code | a952b3806b3e64eef70deec2b2d1352e6ef6dfa0 | ["MIT"] | null | null | null | keckcode/esiredux/esi1d.py | cdfassnacht/keck_code | a952b3806b3e64eef70deec2b2d1352e6ef6dfa0 | ["MIT"] | null | null | null | keckcode/esiredux/esi1d.py | cdfassnacht/keck_code | a952b3806b3e64eef70deec2b2d1352e6ef6dfa0 | ["MIT"] | 1 | 2020-07-15T23:16:36.000Z | 2020-07-15T23:16:36.000Z | import numpy as np
from astropy.table import Table
from specim.specfuncs import echelle1d
"""
============================== Esi1d class ==============================
"""
class Esi1d(echelle1d.Ech1d):
"""
A class for ESI 1D spectra, which have been extracted by the Esi2d
methods, but have not yet been combined into one final output spectrum.
Therefore, there are 10 extracted 1d spectra, one for each order.
These 10 extracted spectra will be stored in an array of Spec1d instances.
"""
def __init__(self, inspec, informat='text', summary=True, verbose=True):
"""
Initializes an Esi1d instance, essentially by initializing an
Ech1d instance with the ESI order information
"""
"""
Define the information pertaining to the ESI echelle orders
Note that the pixmin and pixmax are not used at this point since
any trimming of the orders should have been done in previous
steps.
"""
dtype = [('order', int), ('pixmin', int), ('pixmax', int)]
oinfo = np.array([
(1, 0, -1),
(2, 0, -1),
(3, 0, -1),
(4, 0, -1),
(5, 0, -1),
(6, 0, -1),
(7, 0, -1),
(8, 0, -1),
(9, 0, -1),
(10, 0, -1),
], dtype=dtype)
ordinfo = Table(oinfo)
# ordinfo = None
""" Initialize by calling the parent class """
super(Esi1d, self).__init__(inspec, informat=informat, ordinfo=ordinfo,
summary=summary, verbose=verbose)
| 31.603774 | 79 | 0.519403 | 194 | 1,675 | 4.443299 | 0.546392 | 0.023202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042962 | 0.346866 | 1,675 | 52 | 80 | 32.211538 | 0.744973 | 0.240597 | 0 | 0 | 0 | 0 | 0.024941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
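A bare instantiation sketch for the class above; the file name is hypothetical, and inspec follows whatever specim's Ech1d accepts:

spec = Esi1d("esi_spec1d_orders.txt", informat="text", summary=True, verbose=True)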
0fd4fa03a38f40f452e7288c8fe9015dd26bbabd | 6,292 | py | Python | astroquery/utils/tap/xmlparser/tableSaxParser.py | rickynilsson/astroquery | b7edec0d8e36b11c25baa39ad72e4160bc30d465 | ["BSD-3-Clause"] | 577 | 2015-02-12T18:23:49.000Z | 2022-03-22T21:38:58.000Z | astroquery/utils/tap/xmlparser/tableSaxParser.py | rickynilsson/astroquery | b7edec0d8e36b11c25baa39ad72e4160bc30d465 | ["BSD-3-Clause"] | 1,812 | 2015-01-01T08:02:20.000Z | 2022-03-31T13:03:52.000Z | astroquery/utils/tap/xmlparser/tableSaxParser.py | rickynilsson/astroquery | b7edec0d8e36b11c25baa39ad72e4160bc30d465 | ["BSD-3-Clause"] | 322 | 2015-02-23T19:31:29.000Z | 2022-03-25T18:51:30.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Juan Carlos Segovia
@contact: juan.carlos.segovia@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
import xml.sax
from astroquery.utils.tap.model.taptable import TapTableMeta
from astroquery.utils.tap.model.tapcolumn import TapColumn
from astroquery.utils.tap.xmlparser import utils as Utils
READING_SCHEMA = 10
READING_TABLE = 20
READING_TABLE_COLUMN = 30
class TableSaxParser(xml.sax.ContentHandler):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.__internal_init()
def __internal_init(self):
self.__concatData = False
self.__charBuffer = []
self.__tables = []
self.__status = 0
self.__currentSchemaName = None
self.__currentTable = None
self.__currentColumn = None
def __create_string_from_buffer(self):
return Utils.util_create_string_from_buffer(self.__charBuffer)
def __check_item_id(self, itemId, tmpValue):
if str(itemId).lower() == str(tmpValue).lower():
return True
return False
def __start_reading_data(self):
self.__concatData = True
del self.__charBuffer[:]
def __stop_reading_data(self):
self.__concatData = False
def parseData(self, data):
del self.__tables[:]
self.__status = READING_SCHEMA
xml.sax.parse(data, self)
return self.__tables
def startElement(self, name, attrs):
if self.__status == READING_SCHEMA:
self.__reading_schema(name, attrs)
elif self.__status == READING_TABLE:
self.__reading_table(name, attrs)
elif self.__status == READING_TABLE_COLUMN:
self.__reading_table_column(name, attrs)
def endElement(self, name):
if self.__status == READING_SCHEMA:
self.__end_schema(name)
elif self.__status == READING_TABLE:
self.__end_table(name)
elif self.__status == READING_TABLE_COLUMN:
self.__end_table_column(name)
def characters(self, content):
if self.__concatData:
self.__charBuffer.append(content)
def __reading_schema(self, name, attrs):
if self.__check_item_id("name", name):
self.__start_reading_data()
if self.__check_item_id("table", name):
self.__status = READING_TABLE
self.__currentTable = TapTableMeta()
self.__currentTable.schema = self.__currentSchemaName
def __end_schema(self, name):
if self.__check_item_id("name", name):
self.__currentSchemaName = self.__create_string_from_buffer()
self.__stop_reading_data()
def __reading_table(self, name, attrs):
if self.__check_item_id("name", name):
self.__start_reading_data()
elif self.__check_item_id("description", name):
self.__start_reading_data()
elif self.__check_item_id("column", name):
self.__status = READING_TABLE_COLUMN
self.__currentColumn = TapColumn(attrs.getValue('esatapplus:flags'))
def __end_table(self, name):
if self.__check_item_id("name", name):
self.__stop_reading_data()
self.__currentTable.name = self.__create_string_from_buffer()
elif self.__check_item_id("description", name):
self.__stop_reading_data()
self.__currentTable.description = self.__create_string_from_buffer()
elif self.__check_item_id("table", name):
self.__tables.append(self.__currentTable)
self.__status = READING_SCHEMA
def __reading_table_column(self, name, attrs):
if self.__check_item_id("name", name):
self.__start_reading_data()
elif self.__check_item_id("description", name):
self.__start_reading_data()
elif self.__check_item_id("unit", name):
self.__start_reading_data()
elif self.__check_item_id("ucd", name):
self.__start_reading_data()
elif self.__check_item_id("utype", name):
self.__start_reading_data()
elif self.__check_item_id("datatype", name):
self.__start_reading_data()
elif self.__check_item_id("flag", name):
self.__start_reading_data()
def __end_table_column(self, name):
if self.__check_item_id("name", name):
self.__currentColumn.name = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("description", name):
self.__currentColumn.description = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("unit", name):
self.__currentColumn.unit = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("ucd", name):
self.__currentColumn.ucd = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("utype", name):
self.__currentColumn.utype = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("datatype", name):
self.__currentColumn.data_type = self.__create_string_from_buffer()
self.__stop_reading_data()
elif self.__check_item_id("flag", name):
self.__currentColumn.flag = self.__create_string_from_buffer()
self.__stop_reading_data()
if self.__check_item_id("column", name):
self.__status = READING_TABLE
self.__currentTable.add_column(self.__currentColumn)
def __show_attributes(self, attrs):
return str(attrs.getNames())
def __nothing(self, name, attrs):
pass
def get_table(self):
if len(self.__tables) < 1:
return None
return self.__tables[0]
def get_tables(self):
return self.__tables
| 35.348315 | 82 | 0.632867 | 707 | 6,292 | 5.041018 | 0.164074 | 0.058361 | 0.07716 | 0.10101 | 0.557239 | 0.503086 | 0.463805 | 0.387205 | 0.381874 | 0.356622 | 0 | 0.003487 | 0.270661 | 6,292 | 177 | 83 | 35.548023 | 0.773153 | 0.045931 | 0 | 0.415385 | 0 | 0 | 0.026653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.007692 | 0.030769 | 0.023077 | 0.253846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
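A usage sketch for the parser, assuming a TAP /tables XML response has been saved locally (the file name is hypothetical):

parser = TableSaxParser()
with open("tap_tables_response.xml", "rb") as xml_file:
    tables = parser.parseData(xml_file)  # xml.sax.parse accepts file objects
for table in tables:
    print(table.schema, table.name)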
0fd7e0649ad5d2af858373d0d850045798bbb5c7 | 1,940 | py | Python | app/views.py | cfp2000/gender-decoder | a8f5477c55a802fa20a549401b1bb234f0f4f22f | ["MIT"] | null | null | null | app/views.py | cfp2000/gender-decoder | a8f5477c55a802fa20a549401b1bb234f0f4f22f | ["MIT"] | null | null | null | app/views.py | cfp2000/gender-decoder | a8f5477c55a802fa20a549401b1bb234f0f4f22f | ["MIT"] | null | null | null | from flask import render_template, redirect, request
import app.wordlists as wordlists
from app import app
from app.forms import JobAdForm
from app.models import JobAd, TranslatedWordlist
@app.route("/", methods=["GET", "POST"])
def home():
form = JobAdForm()
if request.method == "POST" and form.validate_on_submit():
ad = JobAd(form.texttotest.data, form.language.data)
return redirect("results/{0}".format(ad.hash))
return render_template(
"home.html", form=form, number_of_languages=len(wordlists.__all__)
)
@app.route("/about")
def about():
language = request.values.get("language")
if language not in wordlists.all_lists.keys():
language = "en"
return render_template(
"about.html",
language_code=language,
language_name=wordlists.all_lists[language]["language_name"],
masculine_coded_words=wordlists.all_lists[language]["masculine_coded_words"],
feminine_coded_words=wordlists.all_lists[language]["feminine_coded_words"],
domain=request.headers.get("Host"),
)
@app.route("/results/<ad_hash>")
def results(ad_hash):
job_ad = JobAd.query.get_or_404(ad_hash)
masculine_coded_words, feminine_coded_words = job_ad.list_words()
name, code, source = TranslatedWordlist.get_language_name_and_source(
job_ad.language
)
return render_template(
"results.html",
job_ad=job_ad,
masculine_coded_words=masculine_coded_words,
masculine_coded_word_count=job_ad.masculine_word_count,
feminine_coded_words=feminine_coded_words,
feminine_coded_word_count=job_ad.feminine_word_count,
explanation=job_ad.provide_explanation(),
language_name=name,
language_code=code,
source=source,
domain=request.headers.get("Host"),
)
@app.errorhandler(404)
def page_not_found(error):
return render_template("404.html"), 404
| 32.333333 | 85 | 0.706186 | 245 | 1,940 | 5.293878 | 0.297959 | 0.077101 | 0.073246 | 0.070933 | 0.249036 | 0.149576 | 0 | 0 | 0 | 0 | 0 | 0.008197 | 0.182474 | 1,940 | 59 | 86 | 32.881356 | 0.809584 | 0 | 0 | 0.1 | 0 | 0 | 0.081443 | 0.010825 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.1 | 0.02 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
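A quick smoke test of these routes with Flask's built-in test client; a sketch that assumes the app package initialises as imported above:

from app import app

client = app.test_client()
response = client.get("/about?language=en")
print(response.status_code)  # 200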
0fd83e14a102958fbbf95c7e479eab382f1d55e3 | 10,063 | py | Python | lit_ref_search/get_memberdb_pmid.py | ProteinsWebTeam/interpro-pfam-curation-tools | 41df7e4ad390ace8c68f137e582b6bd2bfe4b23a | ["MIT"] | null | null | null | lit_ref_search/get_memberdb_pmid.py | ProteinsWebTeam/interpro-pfam-curation-tools | 41df7e4ad390ace8c68f137e582b6bd2bfe4b23a | ["MIT"] | null | null | null | lit_ref_search/get_memberdb_pmid.py | ProteinsWebTeam/interpro-pfam-curation-tools | 41df7e4ad390ace8c68f137e582b6bd2bfe4b23a | ["MIT"] | null | null | null | import sys, os, json, ssl, re
import http.client  # needed for the http.client.RemoteDisconnected handler below
from urllib import request
from urllib.error import HTTPError
from time import sleep
import argparse
from configparser import ConfigParser
from multiprocessing import Pool
class memberdb_pmid:
def __init__(self, member_db, boringfile):
self.load_boring_pmids(boringfile)
self.database = member_db
self.sign_in = list()
def load_boring_pmids(self, boringfile):
print("Loading boring PMIDs into memory")
self.boring_pmids = list()
if not os.path.isfile(boringfile):
print(f"Error file not found '{boringfile}'")
sys.exit(1)
with open(boringfile, "r") as f:
for line in f:
pmid = line.strip("\n").split()[0]
self.boring_pmids.append(pmid)
def has_swissprot(self, signature):
# disable SSL verification to avoid config issues
context = ssl._create_unverified_context()
next = f"https://www.ebi.ac.uk/interpro/api/protein/entry/{self.database}/{signature}/"
attempts = 0
while next:
try:
req = request.Request(next, headers={"Accept": "application/json"})
res = request.urlopen(req, context=context)
                # If the API times out due to a long-running query
if res.status == 408:
# wait just over a minute
sleep(61)
# then continue this loop with the same URL
continue
elif res.status == 204:
# no data so leave loop
break
payload = json.loads(res.read().decode())
next = ""
attempts = 0
except HTTPError as e:
if e.code == 408:
sleep(61)
continue
else:
                    # If there is a different HTTP error, it will retry 3 times before failing
if attempts < 3:
attempts += 1
sleep(61)
continue
else:
print("LAST URL: " + next)
print(e)
next = ""
count_swissprot = 0
if "reviewed" in payload["proteins"]:
count_swissprot = payload["proteins"]["reviewed"]
# count_trembl = payload["proteins"]["unreviewed"]
if count_swissprot > 10:
return True
else:
# print(signature, count_trembl)
return False
def process_sign(self, signature):
url = f"https://www.ebi.ac.uk/interpro/api/protein/unreviewed/entry/{self.database}/{signature}/?page_size=200"
list_pmid_acc = self.search_trembl_pmid(url)
if len(list_pmid_acc) != 0:
text_complete = f"{signature}, "
for pmid, acc_list in list_pmid_acc.items():
text_complete += f"{pmid}: "
text = "; ".join(acc_list)
text_complete += f"{text} | "
text_complete = text_complete.strip(" | ")
return text_complete
return
def search_trembl_pmid(self, BASE_URL):
# disable SSL verification to avoid config issues
context = ssl._create_unverified_context()
next = BASE_URL
attempts = 0
list_pmid_acc = dict()
while next:
try:
req = request.Request(next, headers={"Accept": "application/json"})
res = request.urlopen(req, context=context)
                # If the API times out due to a long-running query
if res.status == 408:
# wait just over a minute
sleep(61)
# then continue this loop with the same URL
continue
elif res.status == 204:
# no data so leave loop
break
payload = json.loads(res.read().decode())
next = payload["next"]
attempts = 0
except HTTPError as e:
if e.code == 408:
sleep(61)
continue
else:
                    # If there is a different HTTP error, it will retry 3 times before failing
if attempts < 3:
attempts += 1
sleep(61)
continue
else:
print("LAST URL: " + next)
print(e)
next = ""
for i, item in enumerate(payload["results"]):
# get UniProt accession
accession = item["metadata"]["accession"]
# search for list of PMIDs
list_pmid = self.search_pmid(accession)
if len(list_pmid) != 0:
for pmid in list_pmid:
try:
list_pmid_acc[pmid].append(accession)
except KeyError:
list_pmid_acc[pmid] = [accession]
# Don't overload the server, give it time before asking for more
if next:
sleep(1)
return list_pmid_acc
def search_pmid(self, accession):
# disable SSL verification to avoid config issues
context = ssl._create_unverified_context()
next = f"https://www.ebi.ac.uk/proteins/api/proteins/{accession}"
        attempts = 0
        payload = None  # guard: stays None if every request attempt fails
while next:
try:
sleep(5)
req = request.Request(next, headers={"Accept": "application/json"})
# res = request.urlopen(req, context=context)
with request.urlopen(req) as res:
                    # If the API times out due to a long-running query
if res.status == 408:
# wait just over a minute
sleep(61)
# then continue this loop with the same URL
continue
elif res.status == 204:
# no data so leave loop
break
payload = json.loads(res.read().decode())
next = ""
                    attempts = 0
except HTTPError as e:
if e.code == 408:
sleep(61)
continue
else:
                    # If there is a different HTTP error, it will retry 3 times before failing
if attempts < 3:
attempts += 1
sleep(61)
continue
else:
print("LAST URL: " + next)
print(e)
next = ""
except http.client.RemoteDisconnected as e:
# http.client.RemoteDisconnected
if attempts < 3:
attempts += 1
sleep(61)
continue
else:
print("LAST URL: " + next)
print(e)
next = ""
list_pmids = set()
pmid = ""
if payload:
for i, item in enumerate(payload["references"]):
if "dbReferences" in item["citation"]:
title = item["citation"]["title"]
t = re.search(r"(gene|genome|Gene|Genome|Genomic|genomic|sequen|Sequen)", title)
for j, ref in enumerate(item["citation"]["dbReferences"]):
if ref["type"] == "PubMed":
pmid = ref["id"]
if not t and pmid not in self.boring_pmids:
list_pmids.add(pmid)
return list_pmids
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("config", metavar="FILE", help="configuration file")
args = parser.parse_args()
if not os.path.isfile(args.config):
parser.error(f"cannot open '{args.config}'': " f"no such file or directory")
config = ConfigParser()
config.read(args.config)
database = config["files"]["member_db"]
inputf = config["files"]["inputfile"]
outputf = config["files"]["outputfile"]
boringpmidf = config["files"]["boringpmidfile"]
# init
process = memberdb_pmid(database, boringpmidf)
if not os.path.isfile(inputf):
parser.error(f"Error file not found '{inputf}'")
# get list of panther accession from file
print(f"Searching data for {database} signatures")
with open(inputf, "r") as f:
count = 0
for line in f:
signature = line.strip("\n")
if process.has_swissprot(signature):
pass
else:
count += 1
process.sign_in.append(signature)
if count == 100:
break
print(len(process.sign_in))
# results = []
# with Pool(10) as p:
# results = p.map(process.process_sign, process.sign_in)
# print("Writing results in file")
# with open(outputf, "w") as outf:
# for item in results:
# # print(item)
# if item != None:
# outf.write(f"{item}\n")
with open(outputf, "a") as outf:
for signature in process.sign_in:
print(f"Processing {signature}")
results = process.process_sign(signature)
if results:
outf.write(f"{results}\n")
# get list of panther signatures unintegrated without comments:
# select m.method_ac
# from interpro.method m
# left join interpro.entry2method e2m on m.method_ac=e2m.method_ac
# left join interpro.method_comment c on c.method_ac=m.method_ac
# where m.method_ac like 'PTHR%' and m.method_ac not like 'PTHR%:%' and c.status is null and e2m.method_ac is null
# ;
| 35.433099 | 119 | 0.496472 | 1,063 | 10,063 | 4.609595 | 0.228598 | 0.014286 | 0.021429 | 0.027143 | 0.392245 | 0.368367 | 0.357755 | 0.357755 | 0.357755 | 0.347143 | 0 | 0.014756 | 0.414091 | 10,063 | 283 | 120 | 35.558304 | 0.816316 | 0.167544 | 0 | 0.464646 | 0 | 0.010101 | 0.107537 | 0.006601 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0.005051 | 0.035354 | 0 | 0.10101 | 0.065657 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fd88cb1e655d90ca250f694b15fe7f9133eee04 | 1,280 | py | Python | rester/apirunner.py | skyarch-networks/Rester | aaaf25320c11467d1089eb500567011b59a31864 | ["MIT"] | null | null | null | rester/apirunner.py | skyarch-networks/Rester | aaaf25320c11467d1089eb500567011b59a31864 | ["MIT"] | null | null | null | rester/apirunner.py | skyarch-networks/Rester | aaaf25320c11467d1089eb500567011b59a31864 | ["MIT"] | null | null | null | from .testcase import ApiTestCaseRunner
import argparse
import logging
import sys
DEFAULT_TEST_CASE = 'test_case.json'
def parse_cmdln_args():
parser = argparse.ArgumentParser(description='Process command line args')
parser.add_argument('--log', help='log help', default='INFO')
parser.add_argument(
'--tc', help='tc help')
parser.add_argument(
'--ts', help='ts help')
args = parser.parse_args()
return (args.log.upper(), args.tc, args.ts)
def run():
log_level, test_case_file, test_suite_file = parse_cmdln_args()
print(log_level, test_case_file, test_suite_file)
logging.basicConfig()
logger = logging.getLogger('rester')
logger.setLevel(log_level)
test_runner = ApiTestCaseRunner()
if test_case_file is not None:
print("test case has been specified")
test_runner.run_test_case(test_case_file)
elif test_suite_file is not None:
print("test suite has been specified")
test_runner.run_test_suite(test_suite_file)
else:
print("running the default test case")
test_runner.run_test_case(DEFAULT_TEST_CASE)
test_runner.display_report()
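# truthy return value signals that at least one test case failed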
return any((result.get('failed') for result in test_runner.results))
if __name__ == '__main__':
run()
| 29.767442 | 77 | 0.701563 | 174 | 1,280 | 4.856322 | 0.362069 | 0.104142 | 0.056805 | 0.067456 | 0.285207 | 0.208284 | 0.156213 | 0.078107 | 0 | 0 | 0 | 0 | 0.189844 | 1,280 | 42 | 78 | 30.47619 | 0.814851 | 0 | 0 | 0.058824 | 0 | 0 | 0.143862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fd8d2df985ceff73eec1be44e59b496e04d6eb1 | 2,582 | py | Python | logreg_predict.py | bcarlier75/dslr | ec9f8b676c136e5e3cf6aa6a11902caaa10adbd4 | [
"MIT"
] | null | null | null | logreg_predict.py | bcarlier75/dslr | ec9f8b676c136e5e3cf6aa6a11902caaa10adbd4 | [
"MIT"
] | null | null | null | logreg_predict.py | bcarlier75/dslr | ec9f8b676c136e5e3cf6aa6a11902caaa10adbd4 | [
"MIT"
] | 1 | 2021-06-15T13:44:24.000Z | 2021-06-15T13:44:24.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sys import argv
from collections import OrderedDict
from logreg_tools import plot_confusion_matrix, metrics, confusion_matrix, score
class LogisticRegressionOvrPredict(object):
def _normalize(self, x):
# compute the statistics once: recomputing x.mean()/x.std() inside the
# loop would mix already-normalized values into the estimates
mean, std = x.mean(), x.std()
for i in range(len(x)):
x[i] = (x[i] - mean) / std
return x
def preprocessing(self, df: pd.DataFrame):
# Features wrangling
df_features = df.iloc[:, 5:]
df_features = df_features.fillna(df.mean())
df_features = np.array(df_features)
np.apply_along_axis(self._normalize, 0, df_features)
return df_features
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def predict(self, classes, thetas, x):
x = np.insert(x, 0, 1, axis=1)  # prepend the intercept (bias) feature
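# one-vs-rest: score the sample against each class's theta and take the argmax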
preds = [np.argmax([self._sigmoid(np.dot(xi, theta))
for theta in thetas]) for xi in x]
return np.array([classes[p] for p in preds])
if __name__ == "__main__":
verbose = False
if len(argv) > 3 and argv[3] == '-v':
verbose = True
# Initialization and data wrangling
df_test = pd.read_csv(argv[1], index_col="Index")
logreg = LogisticRegressionOvrPredict()
x_test = logreg.preprocessing(df_test)
u_classes = ['Gryffindor', 'Hufflepuff', 'Ravenclaw', 'Slytherin']
# Compute predictions and save it to houses.csv
y_pred = logreg.predict(u_classes, np.load(argv[2], allow_pickle=True), x_test)
houses = pd.DataFrame(OrderedDict({'Index': range(len(y_pred)), 'Hogwarts House': y_pred}))
houses.to_csv('houses.csv', index=False)
print("Predictions saved to houses.csv.")
if verbose:
df_truth = pd.read_csv('datasets/dataset_truth.csv', index_col="Index")
y_true = df_truth.loc[:, 'Hogwarts House']
final_cm = confusion_matrix(u_classes, y_true, y_pred)
final_metrics = metrics(final_cm, u_classes, debug=False)
print(f'\n-------- Metrics on test dataset --------'
f'\n. . . . . . . . .\nAccuracy: {score(y_true, y_pred):.5f}'
f'\n. . . . . . . . .\nConfusion matrix:\n{final_cm}'
f'\n. . . . . . . . .\nMetrics:\n{final_metrics}'
f'\n. . . . . . . . .\n------------------------------------------\n')
# Plot confusion matrix.
# Change normalize to False for non-normalized version. (False by default)
plot_confusion_matrix(y_true, y_pred, classes=u_classes, cm=final_cm, normalize=True)
plt.show()
| 40.984127 | 95 | 0.611154 | 342 | 2,582 | 4.432749 | 0.359649 | 0.046174 | 0.037599 | 0.019789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006091 | 0.237026 | 2,582 | 62 | 96 | 41.645161 | 0.763452 | 0.08598 | 0 | 0 | 0 | 0.020833 | 0.178921 | 0.042924 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0.020833 | 0.3125 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fdad6edce7be163d62db6aac19cf576ededbecb | 1,506 | py | Python | tests/garage/envs/test_point_env.py | st2yang/garage | 50186a9630df038aeba36d6b06b006ab32ed48f5 | [
"MIT"
] | null | null | null | tests/garage/envs/test_point_env.py | st2yang/garage | 50186a9630df038aeba36d6b06b006ab32ed48f5 | [
"MIT"
] | null | null | null | tests/garage/envs/test_point_env.py | st2yang/garage | 50186a9630df038aeba36d6b06b006ab32ed48f5 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
from garage.envs.point_env import PointEnv
from tests.helpers import step_env
class TestPointEnv:
def test_pickleable(self):
env = PointEnv()
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
step_env(round_trip)
env.close()
round_trip.close()
def test_does_not_modify_action(self):
env = PointEnv()
a = env.action_space.sample()
a_copy = a.copy()
env.reset()
env.step(a)
assert a.all() == a_copy.all()
env.close()
def test_observation_space(self):
env = PointEnv()
obs_space = env.observation_space
a = env.action_space.sample()
obs, _, _, _ = env.step(a)
assert obs_space.contains(obs)
def test_reset(self):
env = PointEnv()
assert (env._point == np.array([0, 0])).all()
a = env.action_space.sample()
_ = env.step(a)
env.reset()
assert (env._point == np.array([0, 0])).all()
def test_task(self):
env = PointEnv()
tasks = env.sample_tasks(5)
assert len(tasks) == 5
for task in tasks:
env.set_task(task)
assert (env._goal == task['goal']).all()
def test_done(self):
env = PointEnv()
for _ in range(1000):
_, _, done, _ = env.step(env._goal)
if done:
break
else:
assert False, 'Should report done'
| 23.53125 | 53 | 0.552457 | 188 | 1,506 | 4.218085 | 0.308511 | 0.052963 | 0.113493 | 0.056747 | 0.145019 | 0.065574 | 0.065574 | 0.065574 | 0 | 0 | 0 | 0.009901 | 0.329349 | 1,506 | 63 | 54 | 23.904762 | 0.775248 | 0 | 0 | 0.3125 | 0 | 0 | 0.014608 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.125 | false | 0 | 0.083333 | 0 | 0.229167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fdb957e4ef0d8a22aad5d31d2e10a8b90f60975 | 4,140 | py | Python | src/sample.py | lorainemg/fuzzy-inference-system | 5fb16bb59aec9e43f5a6d93c7063cd629ba94920 | [
"MIT"
] | null | null | null | src/sample.py | lorainemg/fuzzy-inference-system | 5fb16bb59aec9e43f5a6d93c7063cd629ba94920 | [
"MIT"
] | null | null | null | src/sample.py | lorainemg/fuzzy-inference-system | 5fb16bb59aec9e43f5a6d93c7063cd629ba94920 | [
"MIT"
] | null | null | null | from system import FuzzyInferenceSystem
from membership import Triangular, Trapezoidal, Singleton
from rule import Antecedent, Consequent, Rule
from linguistic_var import Adjective, Variable
import matplotlib.pyplot as plt
import numpy as np
near = Adjective('near', Trapezoidal(-1, 0, 1, 10))
medium = Adjective('medium', Triangular(1, 10, 40))
far = Adjective('far', Trapezoidal(10, 40, 50, 60))
left = Variable('left', near, medium, far)
right = Variable('right', near, medium, far)
center = Variable('center', near, medium, far)
left.plot(np.arange(0, 50, 1))
center.plot(np.arange(0, 50, 1))
right.plot(np.arange(0, 50, 1))
low = Adjective('low', Trapezoidal(0, 0.10, 0.30, 0.40))
normal = Adjective('normal', Triangular(0.30, 0.40, 0.60))
high = Adjective('high', Triangular(0.50, 0.80, 0.90))
very_high = Adjective('very_high', Trapezoidal(0.60, 0.80, 1, 1.2))
plausibility_left = Variable('pl', low, normal, high, very_high)
plausibility_right = Variable('pr', low, normal, high, very_high)
plausibility_center = Variable('pc', low, normal, high, very_high)
adjectives = {value.name: value for var_name, value in locals().items() if isinstance(value, Adjective)}
variables = {value.name: value for var_name, value in locals().items() if isinstance(value, Variable)}
rule1 = Rule('if left is near then '
'pl is normal')
rule2 = Rule('if left is medium then '
'pl is high')
rule3 = Rule('if left is far then '
'pl is low')
rule4 = Rule('if left is near and center is near then '
'pl is low')
rule5 = Rule('if left is medium and center is medium then '
'pl is low')
ruleBlock1 = [rule1, rule2, rule3, rule4, rule5]
rule6 = Rule('if center is near then '
'pc is normal')
rule7 = Rule('if left is near and center is near and right is near then '
'pc is high')
rule8 = Rule('if center is far then '
'pc is low')
rule9 = Rule('if left is far and center is far then '
'pc is high')
rule10 = Rule('if left is medium then '
'pc is high')
rule11 = Rule('if left is medium and center is far then '
'pc is low')
rule12 = Rule('if right is medium and center is far then '
'pc is low')
rule13 = Rule('if left is medium and center is medium and right is medium then '
'pc is very_high')
ruleBlock2 = [rule6, rule7, rule8, rule9, rule10, rule11, rule12, rule13]
rule14 = Rule('if right is near then '
'pr is normal')
rule15 = Rule('if right is medium then '
'pr is high')
rule16 = Rule('if right is far then '
'pr is low')
rule17 = Rule('if right is near and center is near then '
'pr is low')
rule18 = Rule('if right is medium and center is medium then '
'pr is low')
ruleBlock3 = [rule14, rule15, rule16, rule17, rule18]
rules = ruleBlock1 + ruleBlock2 + ruleBlock3
inputs = {
'left': 40,
'right': 10,
'center': 10
}
def evaluate(fuzzy_system, rules, inputs):
fuzzy_system.infer(rules, variables, adjectives, (0, 1), 0.01)
return fuzzy_system.evaluate(inputs)
def plot_result(sample, membership, name):
fig = plt.figure()
plt.plot(sample, membership)
fig.savefig(f'img/{name}.png')
plt.close(fig)
if __name__ == "__main__":
left = int(input('Left distance: '))
right = int(input('Right distance: '))
center = int(input('Center distance: '))
agg_mth = input('Please specify aggregation method [mamdani, larsen]:\n> ')
defuzz_mth = input('Please specify defuzzification method [mean_of_max, left_of_max, right_of_max, median_of_max, centroid, bisector]:\n> ')
fuzzy_system = FuzzyInferenceSystem(agg_mth, defuzz_mth)
inputs = {'left': left, 'right': right, 'center': center}
result = evaluate(fuzzy_system, rules, inputs)
for var, output in result.items():
value = output['value']
print(f'The result of the variable corresponding to {var} is', value)
sample = output['sample']
plot_result(sample, output['membership'], 'ruleblock' + var)
# for var in variables.values():
# var.plot(sample) | 35.689655 | 144 | 0.652899 | 602 | 4,140 | 4.425249 | 0.212625 | 0.040541 | 0.037538 | 0.045045 | 0.307057 | 0.22973 | 0.17042 | 0.137763 | 0.118619 | 0.069069 | 0 | 0.042118 | 0.220048 | 4,140 | 116 | 145 | 35.689655 | 0.782905 | 0.012319 | 0 | 0.131868 | 0 | 0 | 0.294103 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021978 | false | 0 | 0.065934 | 0 | 0.098901 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fde7eeeb1b78941947e0e75f209e01cf5fed64b | 2,618 | py | Python | eval.py | chldkato/Tacotron | 004c3cf9d1006ddf48fa67d9e4cbd9a90f9f2001 | [
"MIT"
] | 1 | 2021-04-08T00:56:20.000Z | 2021-04-08T00:56:20.000Z | eval.py | chldkato/Tacotron | 004c3cf9d1006ddf48fa67d9e4cbd9a90f9f2001 | [
"MIT"
] | null | null | null | eval.py | chldkato/Tacotron | 004c3cf9d1006ddf48fa67d9e4cbd9a90f9f2001 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import os, re, io, argparse
from jamo import hangul_to_jamo
from hparams import hparams
from librosa import effects
from models import create_model
from util.text import text_to_sequence, sequence_to_text
from util import audio, plot
sentences = [
'흔들리는 꽃들 속에서 네 샴푸향이 느껴진거야'
]
class Synthesizer:
def load(self, checkpoint_path, model_name='tacotron'):
print('Constructing model: %s' % model_name)
inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
with tf.variable_scope('model') as scope:
self.model = create_model(model_name, hparams)
self.model.initialize(inputs, input_lengths)
self.wav_output = audio.inv_spectrogram_tensorflow(self.model.linear_outputs[0])
self.alignments = self.model.alignments[0]
self.inputs = self.model.inputs[0]
print('Loading checkpoint: %s' % checkpoint_path)
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(self.session, checkpoint_path)
def synthesize(self, text, base_path, idx):
seq = text_to_sequence(text)
feed_dict = {
self.model.inputs: [np.asarray(seq, dtype=np.int32)],
self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)
}
input_seq, wav, alignment = self.session.run([self.inputs, self.wav_output, self.alignments], feed_dict=feed_dict)
wav = audio.inv_preemphasis(wav)
wav = wav[:audio.find_endpoint(wav)]
out = io.BytesIO()
audio.save_wav(wav, out)
input_seq = sequence_to_text(input_seq)
plot.plot_alignment(alignment, '%s-%d-align.png' % (base_path, idx), input_seq)
return out.getvalue()
def get_output_base_path(checkpoint_path):
base_dir = os.path.dirname(checkpoint_path)
m = re.compile(r'.*?\.ckpt\-([0-9]+)').match(checkpoint_path)
name = 'eval-%d' % int(m.group(1)) if m else 'eval'
return os.path.join(base_dir, name)
def run_eval(args):
synth = Synthesizer()
synth.load(args.checkpoint)
base_path = get_output_base_path(args.checkpoint)
for i, text in enumerate(sentences):
jamo = ''.join(list(hangul_to_jamo(text)))
path = '%s-%d.wav' % (base_path, i)
print('Synthesizing: %s' % path)
with open(path, 'wb') as f:
f.write(synth.synthesize(jamo, base_path, i))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True)
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
run_eval(args)
if __name__ == '__main__':
main()
| 32.320988 | 118 | 0.704354 | 380 | 2,618 | 4.655263 | 0.352632 | 0.035613 | 0.013567 | 0.022612 | 0.023742 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007731 | 0.160046 | 2,618 | 80 | 119 | 32.725 | 0.796726 | 0 | 0 | 0 | 0 | 0 | 0.08136 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.138462 | 0 | 0.261538 | 0.046154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fe0a32cfa64a96e9ef052e933640c38630bf7bd | 21,258 | py | Python | weather/weather.py | FPVogel/Fixator10-Cogs | 002a90e06952b7bf7a0ffdbd93c9d423f238f124 | [
"MIT"
] | 76 | 2018-07-21T21:09:00.000Z | 2022-03-17T06:56:03.000Z | weather/weather.py | FPVogel/Fixator10-Cogs | 002a90e06952b7bf7a0ffdbd93c9d423f238f124 | [
"MIT"
] | 59 | 2019-01-23T08:13:13.000Z | 2022-03-13T16:39:05.000Z | weather/weather.py | FPVogel/Fixator10-Cogs | 002a90e06952b7bf7a0ffdbd93c9d423f238f124 | [
"MIT"
] | 63 | 2019-03-06T01:43:45.000Z | 2022-02-14T20:16:19.000Z | from functools import partial
from textwrap import shorten
import aiohttp
import discord
import forecastio
from forecastio.utils import PropertyUnavailable
from redbot.core import __version__ as redbot_ver
from redbot.core import commands
from redbot.core.config import Config
from redbot.core.i18n import Translator, cog_i18n, get_locale
from redbot.core.utils import chat_formatting as chat
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import HTTPError, Timeout
try:
from redbot import json # support of Draper's branch
except ImportError:
import json
FORECASTIO_SUPPORTED_LANGS = [
"ar",
"az",
"be",
"bg",
"bn",
"bs",
"ca",
"cs",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"fi",
"fr",
"he",
"hi",
"hr",
"hu",
"id",
"is",
"it",
"ja",
"ka",
"kn",
"ko",
"kw",
"lv",
"ml",
"mr",
"nb",
"nl",
"no",
"pa",
"pl",
"pt",
"ro",
"ru",
"sk",
"sl",
"sr",
"sv",
"ta",
"te",
"tr",
"uk",
"ur",
"x-pig-latin",
"zh",
"zh-tw",
]
WEATHER_STATES = {
"clear-day": "\N{Black Sun with Rays}",
"clear-night": "\N{Night with Stars}",
"rain": "\N{Cloud with Rain}",
"snow": "\N{Cloud with Snow}",
"sleet": "\N{Snowflake}",
"wind": "\N{Wind Blowing Face}",
"fog": "\N{Foggy}",
"cloudy": "\N{White Sun Behind Cloud}",
"partly-cloudy-day": "\N{White Sun with Small Cloud}",
"partly-cloudy-night": "\N{Night with Stars}",
}
# Emoji that will be used for "unknown" strings
UNKNOWN_EMOJI = "\N{White Question Mark Ornament}"
T_ = Translator("Weather", __file__)
_ = lambda s: s
UNITS = {
"si": {
"distance": _("km"),
"intensity": _("mm/h"),
"accumulation": _("cm"),
"temp": _("℃"),
"speed": _("m/s"),
"pressure": _("hPa"),
},
"ca": {
"distance": _("km"),
"intensity": _("mm/h"),
"accumulation": _("cm"),
"temp": _("℃"),
"speed": _("km/h"),
"pressure": _("hPa"),
},
"uk2": {
"distance": _("mi"),
"intensity": _("mm/h"),
"accumulation": _("cm"),
"temp": _("℃"),
"speed": _("mph"),
"pressure": _("hPa"),
},
"us": {
"distance": _("mi"),
"intensity": _("″"),
"accumulation": _("″"),
"temp": _("℉"),
"speed": _("mph"),
"pressure": _("mbar"),
},
}
PRECIP_TYPE_I18N = {"rain": _("Rain"), "snow": _("Snow"), "sleet": _("Sleet")}
_ = T_
@cog_i18n(_)
class Weather(commands.Cog):
"""Weather forecast"""
__version__ = "2.0.6"
# noinspection PyMissingConstructor
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=0xDC5A74E677F24720AA82AD1C237721E7)
default_guild = {"units": "si"}
self.config.register_guild(**default_guild)
self.session = aiohttp.ClientSession(
json_serialize=json.dumps,
raise_for_status=True,
)
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
async def red_delete_data_for_user(self, *, requester, user_id: int):
await self.config.user_from_id(user_id).clear()
@commands.command()
@commands.is_owner()
async def forecastapi(self, ctx):
"""Set API key for forecast.io"""
message = _(
"To get forecast.io API key:\n"
'1. Find your ["Your Secret Key"](https://darksky.net/dev/account)\n'
"2. Use `{}set api forecastio secret <your_apikey>`\n"
"Note: DarkSky API is going to close at end of 2021. "
"Im already aware of this issue and will change API later. "
"For now you can use cog with already existing API keys."
).format(ctx.clean_prefix)
await ctx.maybe_send_embed(message)
@commands.group(invoke_without_command=True)
async def forecastunits(self, ctx, units: str = None):
"""Set forecast units for yourself
Applicable units:
si - SI units (default)
us - Imperial units
uk2 - Same as si, but distance in miles and speed in mph
ca - Same as si, but speed in km/h
reset - reset your unit preference"""
if not units:
if ctx.guild:
await ctx.send(
chat.info(
_("Your current units are: {}").format(
await self.config.user(ctx.author).units()
or _("Not set, using server's default {}").format(
await self.config.guild(ctx.guild).units()
)
)
)
)
else:
await ctx.send(
chat.info(
_("Your current units are: {}").format(
await self.config.user(ctx.author).units() or "si"
)
)
)
return
units = units.casefold()
if units == "reset":
await self.config.user(ctx.author).units.clear()
await ctx.tick()
return
if units not in UNITS.keys():
await ctx.send(
chat.error(
_('Units "{}" are not supported, check {}help forecastunits').format(
units, ctx.clean_prefix
)
)
)
return
await self.config.user(ctx.author).units.set(units)
await ctx.tick()
@forecastunits.command(name="guild")
@commands.guild_only()
@commands.admin_or_permissions(manage_guild=True)
async def set_guild_units(self, ctx, units: str = None):
"""Set forecast units for this guild
Applicable units:
si - SI units (default)
us - Imperial units
uk2 - Same as si, but distance in miles and speed in mph
ca - Same as si, but speed in km/h"""
if not units:
await ctx.send(
chat.info(
_("Current units are: {}").format(await self.config.guild(ctx.guild).units())
)
)
return
units = units.casefold()
if units not in UNITS.keys():
await ctx.send(
chat.error(
_('Units "{}" are not supported, check {}help forecastunits guild').format(
units, ctx.clean_prefix
)
)
)
return
await self.config.guild(ctx.guild).units.set(units)
await ctx.tick()
@commands.command()
@commands.cooldown(1, 1, commands.BucketType.default)
@commands.bot_has_permissions(embed_links=True)
async def weather(self, ctx, *, place: str):
"""Shows weather in provided place"""
apikeys = await self.bot.get_shared_api_tokens("forecastio")
async with ctx.typing():
try:
async with self.session.get(
f"https://nominatim.openstreetmap.org/search?q={place}&format=jsonv2&addressdetails=1&limit=1",
headers={
"Accept-Language": get_locale(),
"User-Agent": f"Red-DiscordBot/{redbot_ver} Fixator10-Cogs/Weather/{self.__version__}",
},
) as r:
location = await r.json(loads=json.loads)
except aiohttp.ClientResponseError as e:
await ctx.send(
chat.error(
_("Cannot find a place {}. OSM returned {}").format(
chat.inline(place), e.status
)
)
)
return
if not location:
await ctx.send(chat.error(_("Cannot find a place {}").format(chat.inline(place))))
return
location = location[0]
try:
forecast = await self.bot.loop.run_in_executor(
None,
partial(
forecastio.load_forecast,
apikeys.get("secret"),
location.get("lat", 0),
location.get("lon", 0),
units=await self.get_units(ctx),
lang=await self.get_lang(),
),
)
except HTTPError:
await ctx.send(
chat.error(
_(
"This command requires API key. "
"Use {}forecastapi to get more information"
).format(ctx.clean_prefix)
)
)
return
except (RequestsConnectionError, Timeout):
await ctx.send(chat.error(_("Unable to get data from forecast.io")))
return
by_hour = forecast.currently()
em = discord.Embed(
title=_("Weather in {}").format(
shorten(location.get("display_name", UNKNOWN_EMOJI), 244, placeholder="…")
),
description=_("[View on Google Maps](https://www.google.com/maps/place/{},{})").format(
location.get("lat", 0), location.get("lon", 0)
),
color=await ctx.embed_color(),
timestamp=by_hour.time,
)
em.set_author(name=_("Powered by Dark Sky"), url="https://darksky.net/poweredby/")
em.add_field(
name=_("Summary"),
value="{} {}".format(
WEATHER_STATES.get(by_hour.icon, UNKNOWN_EMOJI),
by_hour.summary,
),
)
em.add_field(
name=_("Temperature"),
value=f"{by_hour.temperature} {await self.get_localized_units(ctx, 'temp')} "
f"({by_hour.apparentTemperature} {await self.get_localized_units(ctx, 'temp')})",
)
em.add_field(
name=_("Air pressure"),
value="{} {}".format(
by_hour.pressure, await self.get_localized_units(ctx, "pressure")
),
)
em.add_field(name=_("Humidity"), value=f"{int(by_hour.humidity * 100)}%")
em.add_field(
name=_("Visibility"),
value="{} {}".format(
by_hour.visibility, await self.get_localized_units(ctx, "distance")
),
)
em.add_field(
name=_("Wind speed"),
value="{} {} {}".format(
await self.wind_bearing_direction(by_hour.windBearing),
by_hour.windSpeed,
await self.get_localized_units(ctx, "speed"),
),
)
em.add_field(name=_("Cloud cover"), value=f"{int(by_hour.cloudCover * 100)}%")
em.add_field(
name=_("Ozone density"),
value="{} [DU](https://en.wikipedia.org/wiki/Dobson_unit)".format(by_hour.ozone),
)
em.add_field(name=_("UV index"), value=by_hour.uvIndex)
try:
preciptype = by_hour.precipType
except PropertyUnavailable:
preciptype = None
em.add_field(
name=_("Precipitation"),
value=_("Probability: {}%\n").format(int(by_hour.precipProbability * 100))
+ _("Intensity: {} {}").format(
int(by_hour.precipIntensity * 100),
await self.get_localized_units(ctx, "intensity"),
)
+ (
preciptype
and _("\nType: {}").format(_(PRECIP_TYPE_I18N.get(preciptype, preciptype)))
or ""
),
)
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 1, commands.BucketType.default)
@commands.bot_has_permissions(embed_links=True)
async def forecast(self, ctx, *, place: str):
"""Shows 7 days forecast for provided place"""
apikeys = await self.bot.get_shared_api_tokens("forecastio")
async with ctx.typing():
try:
async with self.session.get(
f"https://nominatim.openstreetmap.org/search?q={place}&format=jsonv2&addressdetails=1&limit=1",
headers={
"Accept-Language": get_locale(),
"User-Agent": f"Red-DiscordBot/{redbot_ver} Fixator10-Cogs/Weather/{self.__version__}",
},
) as r:
location = await r.json(loads=json.loads)
except aiohttp.ClientResponseError as e:
await ctx.send(
chat.error(
_("Cannot find a place {}. OSM returned {}").format(
chat.inline(place), e.status
)
)
)
return
if not location:
await ctx.send(chat.error(_("Cannot find a place {}").format(chat.inline(place))))
return
location = location[0]
try:
forecast = await self.bot.loop.run_in_executor(
None,
partial(
forecastio.load_forecast,
apikeys.get("secret"),
location.get("lat", 0),
location.get("lon", 0),
units=await self.get_units(ctx),
lang=await self.get_lang(),
),
)
except HTTPError:
await ctx.send(
chat.error(
_(
"This command requires API key. "
"Use {}forecastapi to get more information"
).format(ctx.clean_prefix)
)
)
return
except (RequestsConnectionError, Timeout):
await ctx.send(chat.error(_("Unable to get data from forecast.io")))
return
by_day = forecast.daily()
pages = []
for i in range(0, 8):
data = by_day.data[i]
em = discord.Embed(
title=_("Weather in {}").format(
shorten(
location.get("display_name", UNKNOWN_EMOJI),
244,
placeholder="…",
)
),
description=f"{by_day.summary}\n"
+ _("[View on Google Maps](https://www.google.com/maps/place/{},{})").format(
location.get("lat", 0),
location.get("lon", 0),
),
color=await ctx.embed_color(),
timestamp=data.time,
)
em.set_author(name=_("Powered by Dark Sky"), url="https://darksky.net/poweredby/")
em.set_footer(text=_("Page {}/8").format(i + 1))
try:
# FIXME: find a better way to do that
summary = data.summary
except PropertyUnavailable:
summary = _("No summary for this day")
em.add_field(
name=_("Summary"),
value="{} {}".format(
WEATHER_STATES.get(data.icon, UNKNOWN_EMOJI),
summary,
),
)
em.add_field(
name=_("Temperature"),
value=f"{data.temperatureMin} — {data.temperatureMax} {await self.get_localized_units(ctx, 'temp')}\n"
f"({data.apparentTemperatureMin} — {data.apparentTemperatureMax}{await self.get_localized_units(ctx, 'temp')})",
)
em.add_field(
name=_("Air pressure"),
value="{} {}".format(
data.pressure, await self.get_localized_units(ctx, "pressure")
),
)
em.add_field(name=_("Humidity"), value=f"{int(data.humidity * 100)}%")
em.add_field(
name=_("Visibility"),
value="{} {}".format(
data.visibility, await self.get_localized_units(ctx, "distance")
),
)
em.add_field(
name=_("Wind speed"),
value="{} {} {}".format(
await self.wind_bearing_direction(data.windBearing),
data.windSpeed,
await self.get_localized_units(ctx, "speed"),
),
)
em.add_field(name=_("Cloud cover"), value=f"{int(data.cloudCover * 100)}%")
em.add_field(
name=_("Ozone density"),
value="{} [DU](https://en.wikipedia.org/wiki/Dobson_unit)".format(data.ozone),
)
em.add_field(name=_("UV index"), value=data.uvIndex)
try:
preciptype = data.precipType
except PropertyUnavailable:
preciptype = None
try:
precipaccumulation = data.precipAccumulation
except PropertyUnavailable:
precipaccumulation = None
em.add_field(
name=_("Precipitation"),
value=_("Probability: {}%\n").format(int(data.precipProbability * 100))
+ _("Intensity: {} {}").format(
int(data.precipIntensity * 100),
await self.get_localized_units(ctx, "intensity"),
)
+ (
preciptype
and _("\nType: {}").format(_(PRECIP_TYPE_I18N.get(preciptype, preciptype)))
or ""
)
+ (
precipaccumulation
and _("\nSnowfall accumulation: {} {}").format(
precipaccumulation,
await self.get_localized_units(ctx, "accumulation"),
)
or ""
),
)
em.add_field(name=_("Moon phase"), value=await self.num_to_moon(data.moonPhase))
pages.append(em)
await menu(ctx, pages, DEFAULT_CONTROLS)
async def get_units(self, ctx: commands.Context):
return (
await self.config.user(ctx.author).units()
or (await self.config.guild(ctx.guild).units() if ctx.guild else None)
or "si"
)
async def get_localized_units(self, ctx: commands.Context, units_type: str):
"""Get translated contextual units for type"""
if not ctx.guild:
return _(
UNITS.get(await self.config.user(ctx.author).units(), UNITS["si"]).get(
units_type, "?"
)
)
current_system = (
await self.config.user(ctx.author).units()
or await self.config.guild(ctx.guild).units()
)
return _(UNITS.get(current_system, {}).get(units_type, "?"))
async def get_lang(self):
"""Get language for forecastio, based on current's bot language"""
locale = get_locale()
special_cases = {"lol-US": "x-pig-latin", "debugging": "en", "zh-TW": "zh-tw"}
lang = special_cases.get(locale, locale[:2])
if lang in FORECASTIO_SUPPORTED_LANGS:
return lang
return "en"
async def wind_bearing_direction(self, bearing: int):
"""Returns direction based on wind bearing"""
# https://github.com/pandabubblepants/forecastSMS/blob/e396d978e1ec47b5f3023ce13d5a5f55c57e4f6e/forecastSMS.py#L12-L16
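# each compass sector spans 22.5 degrees, e.g. bearing 90 maps to index 4 ("E")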
dirs = [
_("N"),
_("NNE"),
_("NE"),
_("ENE"),
_("E"),
_("ESE"),
_("SE"),
_("SSE"),
_("S"),
_("SSW"),
_("SW"),
_("WSW"),
_("W"),
_("WNW"),
_("NW"),
_("NNW"),
]
return dirs[int((bearing / 22.5) + 0.5) % 16]
async def num_to_moon(self, moonphase: float) -> str:
"""Converts lunation number to lunar phase emoji"""
if moonphase == 0:
return "\N{New Moon Symbol}"
if 0 < moonphase < 0.25:
return "\N{Waxing Crescent Moon Symbol}"
if moonphase == 0.25:
return "\N{First Quarter Moon Symbol}"
if 0.25 < moonphase < 0.5:
return "\N{Waxing Gibbous Moon Symbol}"
if moonphase == 0.5:
return "\N{First Quarter Moon Symbol}"
if 0.5 < moonphase < 0.75:
return "\N{Waning Gibbous Moon Symbol}"
if moonphase == 0.75:
return "\N{Last Quarter Moon Symbol}"
if 0.75 < moonphase < 1:
return "\N{Waning Crescent Moon Symbol}"
if moonphase == 1:
return "\N{Full Moon Symbol}"
return str(moonphase)
| 35.195364 | 128 | 0.488193 | 2,107 | 21,258 | 4.78785 | 0.220218 | 0.03301 | 0.020817 | 0.029144 | 0.600714 | 0.552835 | 0.535884 | 0.519528 | 0.492665 | 0.457772 | 0 | 0.012886 | 0.383056 | 21,258 | 603 | 129 | 35.253731 | 0.754785 | 0.012983 | 0 | 0.418182 | 0 | 0.007273 | 0.18408 | 0.022544 | 0 | 0 | 0.001688 | 0.001658 | 0 | 1 | 0.003636 | false | 0 | 0.030909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fe0a5afa56831ed4a0ceebc1b92d807eb17f30a | 7,589 | py | Python | Wiki_files/How-to-run-multiple-cases-using-PyCOMPSs/launch-multiple-simulations-pycompss.py | KratosMultiphysics/Documentation | 0db1d8f70fb1c60afce65ba9d85a54b84c03622d | [
"BSD-3-Clause"
] | 12 | 2017-02-19T22:27:08.000Z | 2022-03-12T14:57:06.000Z | Wiki_files/How-to-run-multiple-cases-using-PyCOMPSs/launch-multiple-simulations-pycompss.py | KratosMultiphysics/Documentation | 0db1d8f70fb1c60afce65ba9d85a54b84c03622d | [
"BSD-3-Clause"
] | 2 | 2019-04-25T10:27:25.000Z | 2021-11-22T10:19:10.000Z | Wiki_files/How-to-run-multiple-cases-using-PyCOMPSs/launch-multiple-simulations-pycompss.py | KratosMultiphysics/Documentation | 0db1d8f70fb1c60afce65ba9d85a54b84c03622d | [
"BSD-3-Clause"
] | 12 | 2017-07-13T11:17:42.000Z | 2022-01-09T01:10:03.000Z | """
This script provides a minimal example showing how to run multiple Kratos simulations in parallel, exploiting the concurrency capabilities of modern high performance computing systems.
The main operations we do are
* Create an analysis stage, here called SimulationScenario, which is derived from the analysis stage of our problem, in this case analysis stage.
* Serialize the Kratos project parameters and the Kratos model within a task.
* Run the Kratos simulation in parallel within a task.
The script works correctly under the following scenarios:
* workflow is serial,
* workflow is parallel and managed by the distributed environment scheduler PyCOMPSs.
To run the first scenario:
python3 launch-multiple-simulations-pycompss.py
To run the second scenario with runcompss:
sh run.sh
In this last case, the environment variable EXAQUTE_BACKEND has to be changed to pycompss; see the documentation related to the configuration of COMPSs for details.
Dependencies
------------
- KratosMultiphysics ≥ 9.0."Dev"-96fb824069, and applications:
- ConvectionDiffusionApplication,
- COMPSs ≥ 2.8 (to run in parallel).
"""
# Import Python libraries
import numpy as np
import pickle
# Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.ConvectionDiffusionApplication
from KratosMultiphysics.analysis_stage import AnalysisStage
# Import PyCOMPSs
from exaqute import task, FILE_IN, get_value_from_remote
from exaqute import init as exaqute_init
exaqute_init() # must not be called more than once
def GetValueFromListList(values,iteration):
"""
Return the sample for the given iteration from a precomputed list of values
"""
value = values[iteration]
return value
@task(returns=1)
def ExecuteInstance_Task(pickled_model,pickled_parameters,heat_flux_list,instance):
"""
Function executing an instance of the problem
input:
pickled_model: serialization of the model
pickled_parameters: serialization of the Project Parameters
heat_flux_list: list of sampled values for the random variable xi
instance: iteration number
output:
QoI: Quantity of Interest
"""
# overwrite the old model serializer with the unpickled one
model_serializer = pickle.loads(pickled_model)
current_model = KratosMultiphysics.Model()
model_serializer.Load("ModelSerialization",current_model)
del(model_serializer)
# overwrite the old parameters serializer with the unpickled one
serialized_parameters = pickle.loads(pickled_parameters)
current_parameters = KratosMultiphysics.Parameters()
serialized_parameters.Load("ParametersSerialization",current_parameters)
del(serialized_parameters)
# get sample
sample = GetValueFromListList(heat_flux_list,instance)
simulation = SimulationScenario(current_model,current_parameters,sample)
simulation.Run()
QoI = simulation.EvaluateQuantityOfInterest()
return QoI
@task(parameter_file_name=FILE_IN,returns=2)
def SerializeModelParameters_Task(parameter_file_name):
"""
Function serializing and pickling the model and the parameters of the problem
input:
parameter_file_name: path of the Project Parameters file
output:
pickled_model: model serializaton
pickled_parameters: project parameters serialization
"""
with open(parameter_file_name,'r') as parameter_file:
parameters = KratosMultiphysics.Parameters(parameter_file.read())
model = KratosMultiphysics.Model()
# parameters["solver_settings"]["model_import_settings"]["input_filename"].SetString(model_part_file_name[:-5])
fake_sample = 0.25
simulation = SimulationScenario(model,parameters,fake_sample)
simulation.Initialize()
# reset general flags
# it is not required to remove the materials, since the Kratos variable
# IS_RESTARTED is set to True
simulation.model.GetModelPart(parameters["solver_settings"]["model_part_name"].GetString()).ProcessInfo.SetValue(KratosMultiphysics.IS_RESTARTED,True)
# serialize
serialized_model = KratosMultiphysics.StreamSerializer()
serialized_model.Save("ModelSerialization",simulation.model)
serialized_parameters = KratosMultiphysics.StreamSerializer()
serialized_parameters.Save("ParametersSerialization",simulation.project_parameters)
# pickle dataserialized_data
pickled_model = pickle.dumps(serialized_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
pickled_parameters = pickle.dumps(serialized_parameters, 2)
KratosMultiphysics.Logger.PrintInfo("SerializeModelParameters_Task", "Model and parameters serialized correctly.")
return pickled_model,pickled_parameters
class SimulationScenario(AnalysisStage):
"""
This SimulationScenario analysis stage class solves the elliptic PDE in (0,1)^2 with zero Dirichlet boundary conditions
-lapl(u) = xi*f
f= -432*(x**2+y**2-x-y)
and computes the Quantity of Interest
Q = int_(0,1)^2 u(x,y)dxdy
where xi is the random variable and follows a beta distribution Beta(2,6)
"""
def __init__(self,input_model,input_parameters,sample):
self.sample = sample
super(SimulationScenario,self).__init__(input_model,input_parameters)
self._GetSolver().main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)
def _CreateSolver(self):
import KratosMultiphysics.ConvectionDiffusionApplication.convection_diffusion_stationary_solver
return KratosMultiphysics.ConvectionDiffusionApplication.convection_diffusion_stationary_solver.CreateSolver(self.model,self.project_parameters["solver_settings"])
def ModifyInitialProperties(self):
"""
Method introducing the stochasticity in the right hand side defining the forcing function and apply the stochastic contribute
"""
model_part_name = self.project_parameters["problem_data"]["model_part_name"].GetString()
for node in self.model.GetModelPart(model_part_name).Nodes:
coord_x = node.X
coord_y = node.Y
forcing = -432.0 * (coord_x**2 + coord_y**2 - coord_x - coord_y)
node.SetSolutionStepValue(KratosMultiphysics.HEAT_FLUX,forcing*self.sample)
def EvaluateQuantityOfInterest(self):
"""
Method evaluating the QoI of the problem: int_{domain} TEMPERATURE(x,y) dx dy
"""
KratosMultiphysics.CalculateNodalAreaProcess(self._GetSolver().main_model_part,2).Execute()
Q = 0.0
for node in self._GetSolver().main_model_part.Nodes:
Q = Q + (node.GetSolutionStepValue(KratosMultiphysics.NODAL_AREA)*node.GetSolutionStepValue(KratosMultiphysics.TEMPERATURE))
return Q
if __name__ == '__main__':
# set the ProjectParameters.json path
parameter_file_name = "problem_settings/project_parameters.json"
# create a serialization of the model and of the project parameters
pickled_model,pickled_parameters = SerializeModelParameters_Task(parameter_file_name)
# set batch size and initialize qoi list where to append Quantity of Interests values
batch_size = 20
qoi = []
# define the list for heat flux values
heat_flux_list = np.random.beta(2.0,6.0,batch_size)
# start algorithm
for instance in range (0,batch_size):
qoi.append(ExecuteInstance_Task(pickled_model,pickled_parameters,heat_flux_list,instance))
# synchronize to local machine
qoi = get_value_from_remote(qoi)
print("\nqoi values:\n",qoi)
| 45.443114 | 180 | 0.755831 | 904 | 7,589 | 6.178097 | 0.313053 | 0.027395 | 0.018263 | 0.02077 | 0.094539 | 0.054073 | 0.024351 | 0.024351 | 0.024351 | 0.024351 | 0 | 0.008288 | 0.173277 | 7,589 | 166 | 181 | 45.716867 | 0.881575 | 0.412307 | 0 | 0 | 0 | 0 | 0.067872 | 0.027008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094595 | false | 0 | 0.108108 | 0 | 0.283784 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fe3d823aba38786773c3821c4e10a00a0b2bba1 | 770 | py | Python | lib_bgp_data/forecast/api/roas.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 16 | 2018-09-24T05:10:03.000Z | 2021-11-29T19:18:59.000Z | lib_bgp_data/forecast/api/roas.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 4 | 2019-10-09T18:54:17.000Z | 2021-03-05T14:02:50.000Z | lib_bgp_data/forecast/api/roas.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 3 | 2018-09-17T17:35:18.000Z | 2020-03-24T16:03:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This file contains the blueprint for the ROAs API endpoint.
The ROAs API endpoint returns all ROAs.
Design Choices:
-A separate blueprint was used for readability
"""
from flask import Blueprint
from flasgger import swag_from
from .api_utils import format_json
__author__ = "Justin Furuness"
__credits__ = ["Justin Furuness"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Development"
roas_app = Blueprint("roas_app", __name__)
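# Note: this blueprint is assumed to be registered by the main application,
# which also attaches the database connection accessed below as roas_app.db.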
@roas_app.route("/roas_data/")
@swag_from("flasgger_docs/roas.yml")
@format_json(lambda: {"description": "All ROAs used"})
def roas():
"""Returns all roas data."""
return roas_app.db.execute("SELECT * FROM roas;")
| 23.333333 | 62 | 0.731169 | 103 | 770 | 5.087379 | 0.572816 | 0.053435 | 0.038168 | 0.068702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00303 | 0.142857 | 770 | 32 | 63 | 24.0625 | 0.790909 | 0.305195 | 0 | 0 | 0 | 0 | 0.310345 | 0.042146 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fe548f5abac07512e63108abaf0d1bc025d4642 | 6,356 | py | Python | bapsflib/_hdf/maps/controls/tests/test_nixz.py | BaPSF/bapsflib | 999c88f813d3a7c5c244a77873850c5c5a4042b8 | [
"BSD-3-Clause"
] | 11 | 2018-07-05T21:37:52.000Z | 2022-01-05T00:41:52.000Z | bapsflib/_hdf/maps/controls/tests/test_nixz.py | BaPSF/bapsflib | 999c88f813d3a7c5c244a77873850c5c5a4042b8 | [
"BSD-3-Clause"
] | 54 | 2018-08-19T00:28:52.000Z | 2022-03-22T17:16:22.000Z | bapsflib/_hdf/maps/controls/tests/test_nixz.py | rocco8773/bapsflib | 999c88f813d3a7c5c244a77873850c5c5a4042b8 | [
"BSD-3-Clause"
] | 9 | 2018-08-18T00:16:07.000Z | 2022-03-18T00:06:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2019 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import h5py
import unittest as ut
from bapsflib.utils.exceptions import HDFMappingError
from .. import ConType
from ..nixz import HDFMapControlNIXZ
from .common import ControlTestCase
class TestNIXZ(ControlTestCase):
"""Test class for HDFMapControlNIXZ"""
# define setup variables
DEVICE_NAME = "NI_XZ"
DEVICE_PATH = "Raw data + config/NI_XZ"
MAP_CLASS = HDFMapControlNIXZ
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def test_contype(self):
self.assertEqual(self.map.info["contype"], ConType.motion)
def test_map_failures(self):
"""Test conditions that result in unsuccessful mappings."""
# any failed build must throw a HDFMappingError
#
# 1. 'Run time list' is missing
# 2. dataset is missing 'Shot number' field
# 3. dataset is missing 'x' and 'z' fields
#
# make a default/clean 'NI_XZ' module
self.mod.knobs.reset()
# dataset 'Run time list' missing (1)
# - rename 'Run time list' dataset
self.mod.move("Run time list", "NIXZ data")
with self.assertRaises(HDFMappingError):
_map = self.map
self.mod.move("NIXZ data", "Run time list")
# dataset missing 'Shot number' field (2)
self.mod.move("Run time list", "NIXZ data")
odata = self.mod["NIXZ data"][...]
fields = list(odata.dtype.names)
fields.remove("Shot number")
data = odata[fields]
self.mod.create_dataset("Run time list", data=data)
with self.assertRaises(HDFMappingError):
_map = self.map
del self.mod["Run time list"]
self.mod.move("NIXZ data", "Run time list")
# dataset missing 'x' and 'z' fields (3)
self.mod.move("Run time list", "NIXZ data")
odata = self.mod["NIXZ data"][...]
fields = list(odata.dtype.names)
fields.remove("x")
fields.remove("z")
data = odata[fields]
self.mod.create_dataset("Run time list", data=data)
with self.assertRaises(HDFMappingError):
_map = self.map
del self.mod["Run time list"]
self.mod.move("NIXZ data", "Run time list")
def test_map_warnings(self):
"""Test conditions that issue a UserWarning"""
# Warnings relate to unexpected behavior that does not affect
# reading of data from the HDF5 file
#
# 1. No motion list group is found
# 2. motion list group is missing an attribute
# 3. dataset 'Run time list' is missing one of 'x' or 'z' fields
#
# make a default/clean 'NI_XZ' module
self.mod.knobs.reset()
# no motion list group is found (1)
del self.mod["ml-0001"]
with self.assertWarns(UserWarning):
_map = self.map
self.assertNIXZDetails(_map, self.dgroup)
self.mod.knobs.reset()
# motion list group is missing an attribute (2)
del self.mod["ml-0001"].attrs["Nx"]
with self.assertWarns(UserWarning):
_map = self.map
self.assertNIXZDetails(_map, self.dgroup)
self.mod.knobs.reset()
# dataset 'Run time list' is missing one of 'x' or 'z' (3)
# fields
self.mod.move("Run time list", "NIXZ data")
odata = self.mod["NIXZ data"][...]
fields = list(odata.dtype.names)
fields.remove("x")
data = odata[fields]
self.mod.create_dataset("Run time list", data=data)
del self.mod["NIXZ data"]
with self.assertWarns(UserWarning):
_map = self.map
self.assertNIXZDetails(_map, self.dgroup)
self.mod.knobs.reset()
def test_misc(self):
"""Test miscellaneous behavior"""
# 1. there are 2 motion list groups
# 2. motion list group is missing all key attributes
#
# make a default/clean 'NI_XZ' module
self.mod.knobs.reset()
# there are 2 motion list groups (1)
self.mod.knobs.n_motionlists = 2
_map = self.map
self.assertNIXZDetails(_map, self.dgroup)
for name in self.mod.configs["config01"]["motion lists"]:
self.assertIn(name, _map.configs["config01"]["motion lists"])
self.mod.knobs.reset()
# motion list group is missing all key attributes (2)
# key attributes: Nx, Nz, dx, dz, x0, z0
self.mod.knobs.n_motionlists = 2
for key in ("Nx", "Nz", "dx", "dz", "x0", "z0"):
del self.mod["ml-0001"].attrs[key]
_map = self.map
self.assertNIXZDetails(_map, self.dgroup)
self.assertNotIn("ml-0001", _map.configs["config01"]["motion lists"])
self.assertIn("ml-0002", _map.configs["config01"]["motion lists"])
def assertNIXZDetails(self, _map: HDFMapControlNIXZ, _group: h5py.Group):
"""Assert details of the 'NI_XZ' mapping."""
# confirm basics
self.assertControlMapBasics(_map, _group)
# check dataset names
self.assertEqual(_map.construct_dataset_name(), "Run time list")
# no command list
self.assertFalse(_map.has_command_list)
# there only ever one configuration
self.assertEqual(len(_map.configs), 1)
self.assertEqual(list(_map.configs), ["config01"])
self.assertTrue(_map.one_config_per_dset)
# test general items in configs
self.assertNIXZConfigItems(_map)
def assertNIXZConfigItems(self, _map):
"""
Test structure of the general, polymorphic elements of the
`configs` mapping dictionary
"""
config = _map.configs["config01"]
self.assertIn("motion lists", config)
self.assertIsInstance(config["motion lists"], dict)
if __name__ == "__main__":
ut.main()
| 35.116022 | 77 | 0.596916 | 775 | 6,356 | 4.814194 | 0.259355 | 0.054409 | 0.053069 | 0.031895 | 0.516752 | 0.493433 | 0.406593 | 0.394264 | 0.342536 | 0.312249 | 0 | 0.015722 | 0.28949 | 6,356 | 180 | 78 | 35.311111 | 0.810452 | 0.302077 | 0 | 0.536842 | 0 | 0 | 0.114061 | 0 | 0 | 0 | 0 | 0 | 0.273684 | 1 | 0.084211 | false | 0 | 0.063158 | 0 | 0.189474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fe61d9b600fd79ba20e0212c206870a63e509a8 | 6,551 | py | Python | onnxruntime/test/python/transformers/parity_utilities.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | onnxruntime/test/python/transformers/parity_utilities.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | onnxruntime/test/python/transformers/parity_utilities.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import os
import sys
import numpy
import torch
def find_transformers_source(sub_dir_paths=()):
source_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"python",
"tools",
"transformers",
*sub_dir_paths,
)
if os.path.exists(source_dir):
if source_dir not in sys.path:
sys.path.append(source_dir)
return True
return False
def create_inputs(
batch_size=1,
sequence_length=1,
hidden_size=768,
float16=False,
device=torch.device("cuda"),
):
float_type = torch.float16 if float16 else torch.float32
input = torch.normal(mean=0.0, std=10.0, size=(batch_size, sequence_length, hidden_size)).to(float_type).to(device)
return input
def export_onnx(model, onnx_model_path, float16, hidden_size, device):
from pathlib import Path
Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
input_hidden_states = create_inputs(hidden_size=hidden_size, float16=float16, device=device)
with torch.no_grad():
outputs = model(input_hidden_states)
dynamic_axes = {
"input": {0: "batch_size", 1: "seq_len"},
"output": {0: "batch_size", 1: "seq_len"},
}
torch.onnx.export(
model,
args=(input_hidden_states),
f=onnx_model_path,
input_names=["input"],
output_names=["output"],
dynamic_axes=dynamic_axes,
opset_version=11,
do_constant_folding=True,
)
print("exported:", onnx_model_path)
def optimize_onnx(
input_onnx_path,
optimized_onnx_path,
expected_op=None,
use_gpu=False,
opt_level=None,
):
if find_transformers_source():
from optimizer import optimize_model
else:
from onnxruntime.transformers.optimizer import optimize_model
onnx_model = optimize_model(input_onnx_path, model_type="gpt2", use_gpu=use_gpu, opt_level=opt_level)
onnx_model.save_model_to_file(optimized_onnx_path)
if expected_op is not None:
assert (
len(onnx_model.get_nodes_by_op_type(expected_op)) == 1
), f"Expected {expected_op} node not found in the optimized model {optimized_onnx_path}"
def diff_outputs(torch_outputs, ort_outputs, index):
"""Returns the maximum difference between PyTorch and OnnxRuntime outputs."""
expected_outputs = torch_outputs[index].cpu().numpy()
diff = numpy.abs(expected_outputs - ort_outputs[index])
return numpy.amax(diff)
def compare_outputs(torch_outputs, ort_outputs, atol=1e-06, verbose=True):
"""Compare outputs from PyTorch and OnnxRuntime
Args:
torch_outputs (Tuple[Torch.Tensor]): PyTorch model output
ort_outputs (List[numpy.ndarray]): OnnxRuntime output
atol (float, optional): Absolute tolerance. Defaults to 1e-06.
verbose (bool, optional): Print more information. Defaults to True.
Returns:
is_all_close(bool): whether all elements are close.
max_abs_diff(float): maximum absolute difference.
"""
same = numpy.asarray(
[
numpy.allclose(ort_outputs[i], torch_outputs[i].cpu().numpy(), atol=atol, rtol=0)
for i in range(len(ort_outputs))
]
)
max_abs_diff = [diff_outputs(torch_outputs, ort_outputs, i) for i in range(len(ort_outputs))]
is_all_close = same.all()
if (not is_all_close) and verbose:
for i in numpy.where(numpy.logical_not(same))[0]:
diff = numpy.fabs(ort_outputs[i] - torch_outputs[i].cpu().numpy())
idx = numpy.unravel_index(diff.argmax(), diff.shape)
print(
f"Output {i}, diff={diff[idx]:.9f} index={idx} ort={ort_outputs[i][idx]:.9f} torch={float(torch_outputs[i][idx]):.9f}"
)
return is_all_close, max(max_abs_diff)
def create_ort_session(onnx_model_path, use_gpu=True):
from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions
from onnxruntime import __version__ as onnxruntime_version
sess_options = SessionOptions()
sess_options.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL
sess_options.intra_op_num_threads = 2
sess_options.log_severity_level = 2
execution_providers = ["CPUExecutionProvider"] if not use_gpu else ["CUDAExecutionProvider", "CPUExecutionProvider"]
return InferenceSession(onnx_model_path, sess_options, providers=execution_providers)
def onnxruntime_inference(ort_session, input):
ort_inputs = {"input": numpy.ascontiguousarray(input.cpu().numpy())}
ort_outputs = ort_session.run(None, ort_inputs)
return ort_outputs
def run_parity(
model,
onnx_model_path,
batch_size,
hidden_size,
sequence_length,
float16,
device,
optimized,
test_cases=100,
verbose=False,
tolerance=None,
):
passed_cases = 0
max_diffs = []
printed = False # print only one sample
ort_session = create_ort_session(onnx_model_path, device.type == "cuda")
for i in range(test_cases):
input_hidden_states = create_inputs(batch_size, sequence_length, hidden_size, float16, device)
with torch.no_grad():
torch_outputs = model(input_hidden_states)
ort_outputs = onnxruntime_inference(ort_session, input_hidden_states)
if tolerance is None:
tolerance = 2e-03 if float16 else 1e-05
is_all_close, max_diff = compare_outputs(torch_outputs, ort_outputs, atol=tolerance, verbose=verbose)
max_diffs.append(max_diff)
if is_all_close:
passed_cases += 1
elif verbose and not printed:
printed = True
numpy.set_printoptions(precision=10, floatmode="fixed")
torch.set_printoptions(precision=10)
print("input", input_hidden_states)
print("torch_outputs", torch_outputs)
print("ort_outputs", ort_outputs)
max_diff = max(max_diffs)
diff_count = len([i for i in max_diffs if i > 0])
success_flag = "[FAILED]" if passed_cases < test_cases else "[OK]"
print(f"{success_flag} Passed_cases={passed_cases}/{test_cases}; Max_diff={max_diff}; Diff_count={diff_count}")
return test_cases - passed_cases
| 33.594872 | 134 | 0.66631 | 833 | 6,551 | 4.959184 | 0.253301 | 0.038732 | 0.025176 | 0.021302 | 0.155894 | 0.100702 | 0.046478 | 0.015493 | 0 | 0 | 0 | 0.012985 | 0.212334 | 6,551 | 194 | 135 | 33.768041 | 0.787597 | 0.12624 | 0 | 0.070423 | 0 | 0.007042 | 0.090989 | 0.030859 | 0 | 0 | 0 | 0 | 0.007042 | 1 | 0.06338 | false | 0.035211 | 0.06338 | 0 | 0.183099 | 0.077465 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fea213cee341cbe3ece6c51091f4ba3db5cfd56 | 1,638 | py | Python | tamizdat/convert.py | ioreshnikov/tamizdat | 847a4e9e5b80ffc7010d30d11feee5d3229aa46a | [
"MIT"
] | 3 | 2019-12-12T08:21:11.000Z | 2021-05-12T20:36:00.000Z | tamizdat/convert.py | ioreshnikov/tamizdat | 847a4e9e5b80ffc7010d30d11feee5d3229aa46a | [
"MIT"
] | 4 | 2019-04-29T22:50:27.000Z | 2022-02-08T13:58:32.000Z | tamizdat/convert.py | ioreshnikov/tamizdat | 847a4e9e5b80ffc7010d30d11feee5d3229aa46a | [
"MIT"
] | null | null | null | import logging
import os
import subprocess
from .models import File
def prepare_cover(book):
"""
Prepare a book cover.
:param book: a Book instance.
"""
if not book.cover_image:
return
input_path = book.cover_image.local_path
basename, ext = os.path.splitext(input_path)
output_path = "{}_cover{}".format(basename, ext)
subprocess.check_call([
"convert", input_path,
"-filter", "lanczos",
"-resize", "x650",
output_path
])
return output_path
def convert_book(book):
"""
Converts an ebook from .fb2.zip to .mobi with some extra enhancements.
:param book: a Book instance.
"""
if book.ebook_mobi is not None and os.path.exists(book.ebook_mobi.local_path):
logging.info("Converted book already exists. Doing nothing.")
return
input_path = book.ebook_fb2.local_path
basename, _ = input_path.split(os.extsep, 1)
output_path = "{}.mobi".format(basename)
command = [
"ebook-convert", input_path, output_path,
"--no-inline-fb2-toc",
"--sr1-search=(?s)<div><h3>Annotation</h3>.*<div class=\"paragraph\">.*</div>.*</div><hr/>",
"--sr1-replace=",
"--output-profile=kindle",
]
cover_path = prepare_cover(book)
if cover_path:
command.append("--cover={}".format(cover_path))
logging.info("Converting {} to {}".format(input_path, output_path))
subprocess.check_call(command)
logging.info("Conversion to {} done!".format(output_path))
book.ebook_mobi = File(local_path=output_path)
book.ebook_mobi.save()
book.save()
| 24.818182 | 100 | 0.63431 | 207 | 1,638 | 4.845411 | 0.371981 | 0.079761 | 0.055833 | 0.05683 | 0.093719 | 0.047856 | 0 | 0 | 0 | 0 | 0 | 0.008634 | 0.222222 | 1,638 | 65 | 101 | 25.2 | 0.77865 | 0.094017 | 0 | 0.05 | 0 | 0 | 0.201384 | 0.063668 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fecd15b4869f6133eef3d4d6ff422e23198906b | 2,645 | py | Python | pymidicontroller/extensions/volumemixer.py | Kamaroth92/pymidicontroller | ee7e5cc4280fdb9e4482a8e1b2b98d1eb51e4138 | [
"MIT"
] | 25 | 2021-09-06T21:52:18.000Z | 2022-02-04T13:42:18.000Z | pymidicontroller/extensions/volumemixer.py | Kamaroth92/pymidicontroller | ee7e5cc4280fdb9e4482a8e1b2b98d1eb51e4138 | [
"MIT"
] | null | null | null | pymidicontroller/extensions/volumemixer.py | Kamaroth92/pymidicontroller | ee7e5cc4280fdb9e4482a8e1b2b98d1eb51e4138 | [
"MIT"
] | 2 | 2021-09-07T18:36:21.000Z | 2021-09-13T01:08:25.000Z | from __future__ import print_function
from dataclasses import dataclass, field
from pycaw.pycaw import AudioUtilities, ISimpleAudioVolume, IAudioEndpointVolume
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL, COMError
from pymidicontroller.classes.controller import ControllerExtension
from pymidicontroller.extensions.common import translate
@dataclass()
class Device(ControllerExtension):
"""Device"""
min: float = -65.25
max: float = 0
def __post_init__(self):
super().__post_init__()
self.set_metadata('update_frequency', 0)
def update(self, attribute, value):  # optional override point for further processing of the value
self.set_metadata('post_flag', True)
super().update(attribute, value)
def get_device(self):
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
return volume
def execute(self):
post_flag = self.get_metadata('post_flag')
if post_flag:
translated_volume = translate(self.get_attribute('value'),0,127,self.min,self.max)
device = self.get_device()
try:
device.SetMasterVolumeLevel(translated_volume, None)
except COMError as ce:
print(ce)
pass
self.set_metadata('post_flag', False)
@dataclass()
class Application(ControllerExtension):
"""Application"""
application: str = 'default'
min: float = 0
max: float = 1
def __post_init__(self):
super().__post_init__()
self.set_metadata('update_frequency', 0)
def update(self, attribute, value): #Optional if you need to do further processing on the value
self.set_metadata('post_flag', True)
super().update(attribute, value)
def get_session(self):
session_list = []
sessions = AudioUtilities.GetAllSessions()
for session in sessions:
volume = session._ctl.QueryInterface(ISimpleAudioVolume)
if session.Process and session.Process.name() == self.application:
session_list.append(volume)
return session_list
def execute(self):
post_flag = self.get_metadata('post_flag')
if post_flag:
translated_volume = translate(self.get_attribute('value'),0,127,self.min,self.max)
for application in self.get_session():
application.SetMasterVolume(translated_volume, None)
self.set_metadata('post_flag', False) | 36.232877 | 99 | 0.666541 | 292 | 2,645 | 5.821918 | 0.318493 | 0.047059 | 0.052941 | 0.044706 | 0.398824 | 0.398824 | 0.365882 | 0.365882 | 0.365882 | 0.365882 | 0 | 0.008487 | 0.242722 | 2,645 | 73 | 100 | 36.232877 | 0.84024 | 0.05104 | 0 | 0.4 | 0 | 0 | 0.041216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.016667 | 0.116667 | 0 | 0.4 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
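
A hedged wiring sketch for the two extensions above. The update/execute call pattern mirrors the methods shown, but the MIDI event loop that would normally drive them (and the Spotify process name) are assumptions:

from pymidicontroller.extensions import volumemixer

master = volumemixer.Device()                                  # master output volume
spotify = volumemixer.Application(application='Spotify.exe')   # one app's volume

# A controller loop would call update() when a mapped knob moves and
# execute() on every tick; post_flag makes execute() a no-op otherwise.
spotify.update('value', 64)   # mid-range MIDI CC value (0-127)
spotify.execute()             # applies translate(64, 0, 127, 0, 1), about 0.5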
0fee34c2cafc34892c682b948893888fc81e10b5 | 24,638 | py | Python | fluent.syntax/fluent/syntax/parser.py | olleolleolle/python-fluent | 9730d3f90a4bff7f43614d85b5c9e20205c10d3b | [
"Apache-2.0"
] | null | null | null | fluent.syntax/fluent/syntax/parser.py | olleolleolle/python-fluent | 9730d3f90a4bff7f43614d85b5c9e20205c10d3b | [
"Apache-2.0"
] | null | null | null | fluent.syntax/fluent/syntax/parser.py | olleolleolle/python-fluent | 9730d3f90a4bff7f43614d85b5c9e20205c10d3b | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import re

from . import ast
from .stream import EOF, EOL, FluentParserStream
from .errors import ParseError


def with_span(fn):
    def decorated(self, ps, *args, **kwargs):
        if not self.with_spans:
            return fn(self, ps, *args, **kwargs)

        start = ps.index
        node = fn(self, ps, *args, **kwargs)

        # Don't re-add the span if the node already has it. This may happen
        # when one decorated function calls another decorated function.
        if node.span is not None:
            return node

        end = ps.index
        node.add_span(start, end)
        return node

    return decorated


class FluentParser(object):
    def __init__(self, with_spans=True):
        self.with_spans = with_spans

    def parse(self, source):
        ps = FluentParserStream(source)
        ps.skip_blank_block()

        entries = []
        last_comment = None

        while ps.current_char:
            entry = self.get_entry_or_junk(ps)
            blank_lines = ps.skip_blank_block()

            # Regular Comments require special logic. Comments may be attached
            # to Messages or Terms if they are followed immediately by them.
            # However they should parse as standalone when they're followed by
            # Junk. Consequently, we only attach Comments once we know that the
            # Message or the Term parsed successfully.
            if isinstance(entry, ast.Comment) and len(blank_lines) == 0 \
                    and ps.current_char:
                # Stash the comment and decide what to do with it
                # in the next pass.
                last_comment = entry
                continue

            if last_comment is not None:
                if isinstance(entry, (ast.Message, ast.Term)):
                    entry.comment = last_comment
                    if self.with_spans:
                        entry.span.start = entry.comment.span.start
                else:
                    entries.append(last_comment)
                # In either case, the stashed comment has been dealt with;
                # clear it.
                last_comment = None

            if isinstance(entry, ast.Comment) \
                    and ps.last_comment_zero_four_syntax \
                    and len(entries) == 0:
                comment = ast.ResourceComment(entry.content)
                comment.span = entry.span
                entries.append(comment)
            else:
                entries.append(entry)

            ps.last_comment_zero_four_syntax = False

        res = ast.Resource(entries)

        if self.with_spans:
            res.add_span(0, ps.index)

        return res

    def parse_entry(self, source):
        """Parse the first Message or Term in source.

        Skip all encountered comments and start parsing at the first Message
        or Term start. Return Junk if the parsing is not successful.

        Preceding comments are ignored unless they contain syntax errors
        themselves, in which case Junk for the invalid comment is returned.
        """
        ps = FluentParserStream(source)
        ps.skip_blank_block()

        while ps.current_char == '#':
            skipped = self.get_entry_or_junk(ps)
            if isinstance(skipped, ast.Junk):
                # Don't skip Junk comments.
                return skipped
            ps.skip_blank_block()

        return self.get_entry_or_junk(ps)

    def get_entry_or_junk(self, ps):
        entry_start_pos = ps.index

        try:
            entry = self.get_entry(ps)
            ps.expect_line_end()
            return entry
        except ParseError as err:
            error_index = ps.index
            ps.skip_to_next_entry_start(entry_start_pos)
            next_entry_start = ps.index
            if next_entry_start < error_index:
                # The position of the error must be inside of the Junk's span.
                error_index = next_entry_start

            # Create a Junk instance
            slice = ps.string[entry_start_pos:next_entry_start]
            junk = ast.Junk(slice)
            if self.with_spans:
                junk.add_span(entry_start_pos, next_entry_start)
            annot = ast.Annotation(err.code, err.args, err.message)
            annot.add_span(error_index, error_index)
            junk.add_annotation(annot)
            return junk

    def get_entry(self, ps):
        if ps.current_char == '#':
            return self.get_comment(ps)

        if ps.current_char == '/':
            return self.get_zero_four_style_comment(ps)

        if ps.current_char == '[':
            return self.get_group_comment_from_section(ps)

        if ps.current_char == '-':
            return self.get_term(ps)

        if ps.is_identifier_start():
            return self.get_message(ps)

        raise ParseError('E0002')

    @with_span
    def get_zero_four_style_comment(self, ps):
        ps.expect_char('/')
        ps.expect_char('/')
        ps.take_char(lambda x: x == ' ')

        content = ''

        while True:
            ch = ps.take_char(lambda x: x != EOL)
            while ch:
                content += ch
                ch = ps.take_char(lambda x: x != EOL)

            if ps.is_next_line_zero_four_comment():
                content += ps.current_char
                ps.next()
                ps.expect_char('/')
                ps.expect_char('/')
                ps.take_char(lambda x: x == ' ')
            else:
                break

        # Comments followed by Sections become GroupComments.
        if ps.peek() == '[':
            ps.skip_to_peek()
            self.get_group_comment_from_section(ps)
            return ast.GroupComment(content)

        ps.reset_peek()
        ps.last_comment_zero_four_syntax = True
        return ast.Comment(content)

    @with_span
    def get_comment(self, ps):
        # 0 - comment
        # 1 - group comment
        # 2 - resource comment
        level = -1
        content = ''

        while True:
            i = -1
            while ps.current_char == '#' \
                    and (i < (2 if level == -1 else level)):
                ps.next()
                i += 1

            if level == -1:
                level = i

            if ps.current_char != EOL:
                ps.expect_char(' ')
                ch = ps.take_char(lambda x: x != EOL)
                while ch:
                    content += ch
                    ch = ps.take_char(lambda x: x != EOL)

            if ps.is_next_line_comment(level=level):
                content += ps.current_char
                ps.next()
            else:
                break

        if level == 0:
            return ast.Comment(content)
        elif level == 1:
            return ast.GroupComment(content)
        elif level == 2:
            return ast.ResourceComment(content)

    @with_span
    def get_group_comment_from_section(self, ps):
        def until_closing_bracket_or_eol(ch):
            return ch not in (']', EOL)

        ps.expect_char('[')
        ps.expect_char('[')
        while ps.take_char(until_closing_bracket_or_eol):
            pass
        ps.expect_char(']')
        ps.expect_char(']')

        # A Section without a comment is like an empty Group Comment.
        # Semantically it ends the previous group and starts a new one.
        return ast.GroupComment('')

    @with_span
    def get_message(self, ps):
        id = self.get_identifier(ps)
        ps.skip_blank_inline()

        # XXX Syntax 0.4 compat
        if ps.current_char == '=':
            ps.next()
            value = self.maybe_get_pattern(ps)
        else:
            value = None

        attrs = self.get_attributes(ps)

        if value is None and len(attrs) == 0:
            raise ParseError('E0005', id.name)

        return ast.Message(id, value, attrs)

    @with_span
    def get_term(self, ps):
        ps.expect_char('-')
        id = self.get_identifier(ps)

        ps.skip_blank_inline()
        ps.expect_char('=')

        # Syntax 0.8 compat: VariantLists are supported but deprecated. They
        # can only be found as values of Terms. Nested VariantLists are not
        # allowed.
        value = self.maybe_get_variant_list(ps) or self.maybe_get_pattern(ps)
        if value is None:
            raise ParseError('E0006', id.name)

        attrs = self.get_attributes(ps)
        return ast.Term(id, value, attrs)

    @with_span
    def get_attribute(self, ps):
        ps.expect_char('.')

        key = self.get_identifier(ps)

        ps.skip_blank_inline()
        ps.expect_char('=')

        value = self.maybe_get_pattern(ps)
        if value is None:
            raise ParseError('E0012')

        return ast.Attribute(key, value)

    def get_attributes(self, ps):
        attrs = []
        ps.peek_blank()
        while ps.is_attribute_start():
            ps.skip_to_peek()
            attr = self.get_attribute(ps)
            attrs.append(attr)
            ps.peek_blank()
        return attrs

    @with_span
    def get_identifier(self, ps):
        name = ps.take_id_start()
        ch = ps.take_id_char()
        while ch:
            name += ch
            ch = ps.take_id_char()
        return ast.Identifier(name)

    def get_variant_key(self, ps):
        ch = ps.current_char

        if ch is EOF:
            raise ParseError('E0013')

        cc = ord(ch)
        if ((cc >= 48 and cc <= 57) or cc == 45):  # 0-9, -
            return self.get_number(ps)

        return self.get_identifier(ps)

    @with_span
    def get_variant(self, ps, has_default):
        default_index = False

        if ps.current_char == '*':
            if has_default:
                raise ParseError('E0015')
            ps.next()
            default_index = True

        ps.expect_char('[')
        ps.skip_blank()

        key = self.get_variant_key(ps)

        ps.skip_blank()
        ps.expect_char(']')

        value = self.maybe_get_pattern(ps)
        if value is None:
            raise ParseError('E0012')

        return ast.Variant(key, value, default_index)

    def get_variants(self, ps):
        variants = []
        has_default = False

        ps.skip_blank()
        while ps.is_variant_start():
            variant = self.get_variant(ps, has_default)

            if variant.default:
                has_default = True

            variants.append(variant)
            ps.expect_line_end()
            ps.skip_blank()

        if len(variants) == 0:
            raise ParseError('E0011')

        if not has_default:
            raise ParseError('E0010')

        return variants

    def get_digits(self, ps):
        num = ''

        ch = ps.take_digit()
        while ch:
            num += ch
            ch = ps.take_digit()

        if len(num) == 0:
            raise ParseError('E0004', '0-9')

        return num

    @with_span
    def get_number(self, ps):
        num = ''

        if ps.current_char == '-':
            num += '-'
            ps.next()

        num += self.get_digits(ps)

        if ps.current_char == '.':
            num += '.'
            ps.next()
            num += self.get_digits(ps)

        return ast.NumberLiteral(num)

    def maybe_get_pattern(self, ps):
        '''Parse an inline or a block Pattern, or None

        maybe_get_pattern distinguishes between patterns which start on the
        same line as the identifier (aka inline singleline patterns and inline
        multiline patterns), and patterns which start on a new line (aka block
        patterns). The distinction is important for the dedentation logic: the
        indent of the first line of a block pattern must be taken into account
        when calculating the maximum common indent.
        '''
        ps.peek_blank_inline()
        if ps.is_value_start():
            ps.skip_to_peek()
            return self.get_pattern(ps, is_block=False)

        ps.peek_blank_block()
        if ps.is_value_continuation():
            ps.skip_to_peek()
            return self.get_pattern(ps, is_block=True)

        return None

    def maybe_get_variant_list(self, ps):
        '''Parse a VariantList, or None

        Deprecated in Syntax 0.8. VariantLists are only allowed as values of
        Terms. Values of Messages, Attributes and Variants must be Patterns.
        This method is only used in get_term.
        '''
        ps.peek_blank()
        if ps.current_peek == '{':
            start = ps.peek_offset
            ps.peek()
            ps.peek_blank_inline()
            if ps.current_peek == EOL:
                ps.peek_blank()
                if ps.is_variant_start():
                    ps.reset_peek(start)
                    ps.skip_to_peek()
                    return self.get_variant_list(ps)

        ps.reset_peek()
        return None

    @with_span
    def get_variant_list(self, ps):
        ps.expect_char('{')
        variants = self.get_variants(ps)
        ps.expect_char('}')
        return ast.VariantList(variants)

    @with_span
    def get_pattern(self, ps, is_block):
        elements = []
        if is_block:
            # A block pattern is a pattern which starts on a new line. Measure
            # the indent of this first line for the dedentation logic.
            blank_start = ps.index
            first_indent = ps.skip_blank_inline()
            elements.append(self.Indent(first_indent, blank_start, ps.index))
            common_indent_length = len(first_indent)
        else:
            common_indent_length = float('infinity')

        while ps.current_char:
            if ps.current_char == EOL:
                blank_start = ps.index
                blank_lines = ps.peek_blank_block()
                if ps.is_value_continuation():
                    ps.skip_to_peek()
                    indent = ps.skip_blank_inline()
                    common_indent_length = min(common_indent_length, len(indent))
                    elements.append(self.Indent(blank_lines + indent, blank_start, ps.index))
                    continue

                # The end condition for get_pattern's while loop is a newline
                # which is not followed by a valid pattern continuation.
                ps.reset_peek()
                break

            if ps.current_char == '}':
                raise ParseError('E0027')

            if ps.current_char == '{':
                element = self.get_placeable(ps)
            else:
                element = self.get_text_element(ps)

            elements.append(element)

        dedented = self.dedent(elements, common_indent_length)
        return ast.Pattern(dedented)

    class Indent(ast.SyntaxNode):
        def __init__(self, value, start, end):
            super(FluentParser.Indent, self).__init__()
            self.value = value
            self.add_span(start, end)

    def dedent(self, elements, common_indent):
        '''Dedent a list of elements by removing the maximum common indent from
        the beginning of text lines. The common indent is calculated in
        get_pattern.
        '''
        trimmed = []

        for element in elements:
            if isinstance(element, ast.Placeable):
                trimmed.append(element)
                continue

            if isinstance(element, self.Indent):
                # Strip the common indent.
                element.value = element.value[:len(element.value) - common_indent]
                if len(element.value) == 0:
                    continue

            prev = trimmed[-1] if len(trimmed) > 0 else None
            if isinstance(prev, ast.TextElement):
                # Join adjacent TextElements by replacing them with their sum.
                sum = ast.TextElement(prev.value + element.value)
                if self.with_spans:
                    sum.add_span(prev.span.start, element.span.end)
                trimmed[-1] = sum
                continue

            if isinstance(element, self.Indent):
                # If the indent hasn't been merged into a preceding
                # TextElement, convert it into a new TextElement.
                text_element = ast.TextElement(element.value)
                if self.with_spans:
                    text_element.add_span(element.span.start, element.span.end)
                element = text_element

            trimmed.append(element)

        # Trim trailing whitespace from the Pattern.
        last_element = trimmed[-1] if len(trimmed) > 0 else None
        if isinstance(last_element, ast.TextElement):
            last_element.value = last_element.value.rstrip(' \t\n\r')
            if last_element.value == "":
                trimmed.pop()

        return trimmed

    @with_span
    def get_text_element(self, ps):
        buf = ''

        while ps.current_char:
            ch = ps.current_char

            if ch == '{' or ch == '}':
                return ast.TextElement(buf)

            if ch == EOL:
                return ast.TextElement(buf)

            buf += ch
            ps.next()

        return ast.TextElement(buf)

    def get_escape_sequence(self, ps):
        next = ps.current_char

        if next == '\\' or next == '"':
            ps.next()
            return '\\{}'.format(next), next

        if next == 'u':
            return self.get_unicode_escape_sequence(ps, next, 4)

        if next == 'U':
            return self.get_unicode_escape_sequence(ps, next, 6)

        raise ParseError('E0025', next)

    def get_unicode_escape_sequence(self, ps, u, digits):
        ps.expect_char(u)

        sequence = ''
        for _ in range(digits):
            ch = ps.take_hex_digit()
            if not ch:
                raise ParseError('E0026', '\\{}{}{}'.format(u, sequence, ps.current_char))
            sequence += ch

        codepoint = int(sequence, 16)
        if codepoint <= 0xD7FF or 0xE000 <= codepoint:
            # It's a Unicode scalar value. The escape sequence is 4 or 6 digits
            # long. Convert it to an 8-digit-long \UHHHHHHHH sequence and encode
            # it as bytes, because in Python 3 decode is not available on str.
            byte_sequence = "\\U{:08x}".format(codepoint).encode('utf-8')
            unescaped = byte_sequence.decode('unicode-escape')
        else:
            # Escape sequences representing surrogate code points are
            # well-formed but invalid in Fluent. Replace them with U+FFFD
            # REPLACEMENT CHARACTER.
            unescaped = '\uFFFD'

        return '\\{}{}'.format(u, sequence), unescaped

    @with_span
    def get_placeable(self, ps):
        ps.expect_char('{')
        ps.skip_blank()
        expression = self.get_expression(ps)
        ps.expect_char('}')
        return ast.Placeable(expression)

    @with_span
    def get_expression(self, ps):
        selector = self.get_inline_expression(ps)

        ps.skip_blank()

        if ps.current_char == '-':
            if ps.peek() != '>':
                ps.reset_peek()
                return selector

            if isinstance(selector, ast.MessageReference):
                raise ParseError('E0016')

            if isinstance(selector, ast.AttributeExpression) \
                    and isinstance(selector.ref, ast.MessageReference):
                raise ParseError('E0018')

            if isinstance(selector, ast.TermReference) \
                    or isinstance(selector, ast.VariantExpression):
                raise ParseError('E0017')

            if isinstance(selector, ast.CallExpression) \
                    and isinstance(selector.callee, ast.TermReference):
                raise ParseError('E0017')

            ps.next()
            ps.next()

            ps.skip_blank_inline()
            ps.expect_line_end()

            variants = self.get_variants(ps)
            return ast.SelectExpression(selector, variants)

        if isinstance(selector, ast.AttributeExpression) \
                and isinstance(selector.ref, ast.TermReference):
            raise ParseError('E0019')

        if isinstance(selector, ast.CallExpression) \
                and isinstance(selector.callee, ast.AttributeExpression):
            raise ParseError('E0019')

        return selector

    @with_span
    def get_inline_expression(self, ps):
        if ps.current_char == '{':
            return self.get_placeable(ps)

        expr = self.get_simple_expression(ps)

        if isinstance(expr, (ast.NumberLiteral, ast.StringLiteral,
                             ast.VariableReference)):
            return expr

        if isinstance(expr, ast.MessageReference):
            if ps.current_char == '.':
                ps.next()
                attr = self.get_identifier(ps)
                return ast.AttributeExpression(expr, attr)

            if ps.current_char == '(':
                # It's a Function. Ensure it's all upper-case.
                if not re.match('^[A-Z][A-Z_?-]*$', expr.id.name):
                    raise ParseError('E0008')

                func = ast.FunctionReference(expr.id)
                if self.with_spans:
                    func.add_span(expr.span.start, expr.span.end)
                return ast.CallExpression(func, *self.get_call_arguments(ps))

            return expr

        if isinstance(expr, ast.TermReference):
            if (ps.current_char == '['):
                ps.next()
                key = self.get_variant_key(ps)
                ps.expect_char(']')
                return ast.VariantExpression(expr, key)

            if (ps.current_char == '.'):
                ps.next()
                attr = self.get_identifier(ps)
                expr = ast.AttributeExpression(expr, attr)

            if (ps.current_char == '('):
                return ast.CallExpression(expr, *self.get_call_arguments(ps))

            return expr

        raise ParseError('E0028')

    @with_span
    def get_simple_expression(self, ps):
        if ps.is_number_start():
            return self.get_number(ps)

        if ps.current_char == '"':
            return self.get_string(ps)

        if ps.current_char == '$':
            ps.next()
            id = self.get_identifier(ps)
            return ast.VariableReference(id)

        if ps.current_char == '-':
            ps.next()
            id = self.get_identifier(ps)
            return ast.TermReference(id)

        if ps.is_identifier_start():
            id = self.get_identifier(ps)
            return ast.MessageReference(id)

        raise ParseError('E0028')

    @with_span
    def get_call_argument(self, ps):
        exp = self.get_inline_expression(ps)

        ps.skip_blank()

        if ps.current_char != ':':
            return exp

        if not isinstance(exp, ast.MessageReference):
            raise ParseError('E0009')

        ps.next()
        ps.skip_blank()

        value = self.get_literal(ps)
        return ast.NamedArgument(exp.id, value)

    def get_call_arguments(self, ps):
        positional = []
        named = []
        argument_names = set()

        ps.expect_char('(')
        ps.skip_blank()

        while True:
            if ps.current_char == ')':
                break

            arg = self.get_call_argument(ps)
            if isinstance(arg, ast.NamedArgument):
                if arg.name.name in argument_names:
                    raise ParseError('E0022')
                named.append(arg)
                argument_names.add(arg.name.name)
            elif len(argument_names) > 0:
                raise ParseError('E0021')
            else:
                positional.append(arg)

            ps.skip_blank()

            if ps.current_char == ',':
                ps.next()
                ps.skip_blank()
                continue

            break

        ps.expect_char(')')
        return positional, named

    @with_span
    def get_string(self, ps):
        raw = ''
        value = ''

        ps.expect_char('"')

        while True:
            ch = ps.take_char(lambda x: x != '"' and x != EOL)
            if not ch:
                break
            if ch == '\\':
                sequence, unescaped = self.get_escape_sequence(ps)
                raw += sequence
                value += unescaped
            else:
                raw += ch
                value += ch

        if ps.current_char == EOL:
            raise ParseError('E0020')

        ps.expect_char('"')

        return ast.StringLiteral(raw, value)

    @with_span
    def get_literal(self, ps):
        if ps.is_number_start():
            return self.get_number(ps)
        if ps.current_char == '"':
            return self.get_string(ps)
        raise ParseError('E0014')
| 30.492574 | 93 | 0.546635 | 2,842 | 24,638 | 4.566854 | 0.138987 | 0.027506 | 0.039063 | 0.031204 | 0.33038 | 0.260267 | 0.191617 | 0.158718 | 0.146776 | 0.129825 | 0 | 0.010534 | 0.360378 | 24,638 | 807 | 94 | 30.530359 | 0.813059 | 0.122291 | 0 | 0.417407 | 0 | 0 | 0.0135 | 0 | 0 | 0 | 0.000561 | 0 | 0 | 1 | 0.067496 | false | 0.001776 | 0.008881 | 0.001776 | 0.204263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
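
A short sketch of running the parser above on an FTL string. The sample message is made up, and Resource is assumed to expose its entries as .body, consistent with the fluent.syntax AST:

from fluent.syntax.parser import FluentParser
from fluent.syntax import ast

parser = FluentParser(with_spans=True)
resource = parser.parse("hello = Hello, { $name }!\n")

for entry in resource.body:
    if isinstance(entry, ast.Message):
        print(entry.id.name)          # -> hello
    elif isinstance(entry, ast.Junk):
        print(entry.annotations)      # parse errors are recovered as Junk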
0ff079035d60c3604243ac395ffd0160728e6f5a | 1,102 | py | Python | camera.py | buruhsd/live-stream-face-detection | 6e8da476e90779ff262cd5f4adeb36fd0a8091fb | [
"MIT"
] | null | null | null | camera.py | buruhsd/live-stream-face-detection | 6e8da476e90779ff262cd5f4adeb36fd0a8091fb | [
"MIT"
] | null | null | null | camera.py | buruhsd/live-stream-face-detection | 6e8da476e90779ff262cd5f4adeb36fd0a8091fb | [
"MIT"
] | null | null | null | import cv2

cascPath = 'haarcascade_frontalface_dataset.xml'  # dataset
faceCascade = cv2.CascadeClassifier(cascPath)

video_capture = cv2.VideoCapture('http://192.168.10.132:8081')  # 0 for web camera live stream
# for cctv camera: 'rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp'
# example of cctv or rtsp: 'rtsp://mamun:123456@101.134.16.117:554/user=mamun_password=123456_channel=1_stream=0.sdp'


def camera_stream():
    # Note: the loop returns after the first frame is encoded; callers are
    # expected to invoke camera_stream() once per frame they want to serve.
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Return the resulting frame as JPEG bytes for display in a browser
        return cv2.imencode('.jpg', frame)[1].tobytes()
| 32.411765 | 129 | 0.644283 | 146 | 1,102 | 4.753425 | 0.589041 | 0.034582 | 0.028818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084223 | 0.235027 | 1,102 | 33 | 130 | 33.393939 | 0.739027 | 0.342105 | 0 | 0 | 0 | 0 | 0.090656 | 0.048815 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
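
camera_stream() yields one annotated JPEG frame per call, which fits an MJPEG streaming route. A minimal wiring sketch, assuming Flask (the web framework is not shown in the file above, only OpenCV is):

from flask import Flask, Response
from camera import camera_stream

app = Flask(__name__)

def gen_frames():
    while True:
        frame = camera_stream()  # one JPEG-encoded frame per call
        # multipart/x-mixed-replace boundary for MJPEG streaming
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')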
0ff0c40fe551547d2768ef7a23a0b6f9b46d07d1 | 3,839 | py | Python | envs/atari/environment.py | silgence/synapse | 0bfba5e07b9d517288356bb6e76ef7c24a72b599 | [
"MIT"
] | null | null | null | envs/atari/environment.py | silgence/synapse | 0bfba5e07b9d517288356bb6e76ef7c24a72b599 | [
"MIT"
] | null | null | null | envs/atari/environment.py | silgence/synapse | 0bfba5e07b9d517288356bb6e76ef7c24a72b599 | [
"MIT"
] | null | null | null | """Environment for Atari 2600."""

from envs import environment as environment_lib
import gym
import numpy as np
import PIL.Image
import random

DEFAULT_GYM_ENV = 'MontezumaRevengeNoFrameskip-v4'
# Hyperparameters based on research consensus.
DEFAULT_SINGLE_STATE_SHAPE = (84, 84)
_STICKY_ACTION_PROBABILITY = 0.25
_GYM_STEPS_PER_STEP = 4


def _get_state(observations, single_state_shape):
    """Return state given list of gym observations."""
    state = None
    # Use np.maximum of last two observations to remove flickering (e.g. shots
    # in Space Invaders).
    for observation in observations[-2:]:
        observation = PIL.Image.fromarray(observation).convert('L')
        observation = observation.resize(single_state_shape, PIL.Image.LANCZOS)
        observation = np.array(observation, dtype=np.uint8)
        state = observation if state is None else np.maximum(state, observation)
    return state


def newEnvironment(single_state_shape=None, gym_env=None, sticky_actions=True,
                   get_state_fn=_get_state, rand_fn=random.random):
    """Return a new atari Environment.

    Args:
      single_state_shape: An optional tuple. Shape of a single state.
        Defaults to DEFAULT_SINGLE_STATE_SHAPE.
      gym_env: An optional gym.core.Env. Defaults to gym.core.Env running
        DEFAULT_GYM_ENV.
      sticky_actions: Optional boolean for if sticky actions are enabled.
        Defaults to True.
      get_state_fn: A function like _get_state.
      rand_fn: A function like random.random.

    Returns:
      An atari Environment instance.
    """
    if single_state_shape is None:
        single_state_shape = DEFAULT_SINGLE_STATE_SHAPE
    if gym_env is None:
        gym_env = gym.make(DEFAULT_GYM_ENV)
    return Environment(single_state_shape, gym_env, sticky_actions, get_state_fn,
                       rand_fn)


class Environment(environment_lib.Environment):
    """An environment for Atari 2600.

    Args:
      single_state_shape: Tuple. Shape of a single state.
      gym_env: A gym.core.Env.
      sticky_actions: Boolean for if sticky actions are enabled.
      get_state_fn: A function like _get_state.
      rand_fn: A function like random.random.
    """

    def __init__(self, single_state_shape, gym_env, sticky_actions, get_state_fn,
                 rand_fn):
        self._single_state_shape = single_state_shape
        self._gym_env = gym_env
        self._sticky_actions = sticky_actions
        self._get_state_fn = get_state_fn
        self._rand_fn = rand_fn

        # Mutable.
        self._previous_action = 0
        self._is_closed = False

    def reset(self):
        self._assert_not_closed()
        self._previous_action = 0
        observations = np.expand_dims(self._gym_env.reset(), axis=0)
        return environment_lib.ResetResult(
            state=self._get_state_fn(observations, self._single_state_shape))

    def step(self, action):
        self._assert_not_closed()
        observations = []
        reward = 0.0
        is_terminal = False
        for _ in range(_GYM_STEPS_PER_STEP):
            observation, partial_reward, is_terminal, info = self._gym_step(action)
            observations.append(observation)
            reward += partial_reward
            if is_terminal:
                break
        return environment_lib.StepResult(
            state=self._get_state_fn(observations, self._single_state_shape),
            reward=reward, is_terminal=is_terminal)

    def _gym_step(self, action):
        """Take a single step in the OpenAI gym environment."""
        if (self._sticky_actions and
                self._rand_fn() < _STICKY_ACTION_PROBABILITY):
            action = self._previous_action
        self._previous_action = action
        return self._gym_env.step(action)

    def close(self):
        self._assert_not_closed()
        self._is_closed = True
        self._gym_env.close()

    def _assert_not_closed(self):
        """Assert that the close method has not been called already."""
        if self._is_closed:
            raise Exception('Environment already closed.')
| 31.991667 | 79 | 0.727533 | 531 | 3,839 | 4.939736 | 0.241055 | 0.075486 | 0.097598 | 0.022875 | 0.212734 | 0.190621 | 0.151735 | 0.125048 | 0.125048 | 0.125048 | 0 | 0.007747 | 0.193019 | 3,839 | 119 | 80 | 32.260504 | 0.838928 | 0.276114 | 0 | 0.073529 | 0 | 0 | 0.021442 | 0.011091 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.117647 | false | 0 | 0.073529 | 0 | 0.279412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
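
A minimal driver sketch for the Atari Environment above; action 0 (NOOP in ALE's default action set) is used purely for illustration:

from envs.atari import environment

env = environment.newEnvironment()
result = env.reset()                  # ResetResult with an 84x84 uint8 state
step = env.step(0)                    # repeats the action for up to 4 gym frames
print(step.reward, step.is_terminal)
env.close()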
0ff1996cf102accc43ce9553ffcbb636a25e35f5 | 545 | py | Python | tema-3/flag-extract-file.py | cheez3d/acs-assembly-language-programming | ad50a87cbce973136f89faa0cb44fc579804fb2b | [
"MIT"
] | null | null | null | tema-3/flag-extract-file.py | cheez3d/acs-assembly-language-programming | ad50a87cbce973136f89faa0cb44fc579804fb2b | [
"MIT"
] | null | null | null | tema-3/flag-extract-file.py | cheez3d/acs-assembly-language-programming | ad50a87cbce973136f89faa0cb44fc579804fb2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3

import sys

from elftools.elf.elffile import ELFFile

from rc4 import RC4


def flag_extract(path):
    with open(path, 'rb') as file:
        elf_file = ELFFile(file)
        data = elf_file.get_section_by_name('.data').data()

    flag = bytearray(data[8 : data.find(b'All done!') - 1])
    key = bytes(data[-5 : -1])

    return flag, key


def flag_decrypt(flag, key):
    keystream = RC4(key)
    return ''.join(map(chr, [b ^ next(keystream) for b in flag]))


print(flag_decrypt(*flag_extract(sys.argv[1])))
| 20.961538 | 65 | 0.640367 | 84 | 545 | 4.047619 | 0.547619 | 0.041176 | 0.088235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02093 | 0.211009 | 545 | 25 | 66 | 21.8 | 0.769767 | 0.038532 | 0 | 0 | 0 | 0 | 0.030593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.5 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
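
The local rc4 module is not included here; all this script requires is that next(RC4(key)) yield one keystream byte per call. A standard RC4 keystream generator matching that contract might look like this (a sketch, not the repository's actual rc4 module):

def RC4(key):
    # Key-scheduling algorithm (KSA)
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key[i % len(key)]) % 256
        S[i], S[j] = S[j], S[i]
    # Pseudo-random generation algorithm (PRGA): yields keystream bytes
    i = j = 0
    while True:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        yield S[(S[i] + S[j]) % 256]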
0ff31ee85e1051a2e345d93bd918b1442b88b058 | 8,609 | py | Python | anomaly_detection/train.py | p829911/study | a31c2b1808297b854e2d7cb992113b7e98fcfb1c | [
"BSD-2-Clause"
] | 1 | 2019-03-20T12:08:37.000Z | 2019-03-20T12:08:37.000Z | anomaly_detection/train.py | p829911/study | a31c2b1808297b854e2d7cb992113b7e98fcfb1c | [
"BSD-2-Clause"
] | null | null | null | anomaly_detection/train.py | p829911/study | a31c2b1808297b854e2d7cb992113b7e98fcfb1c | [
"BSD-2-Clause"
] | 1 | 2018-11-19T11:16:28.000Z | 2018-11-19T11:16:28.000Z | from __future__ import print_function
import argparse
import random  # to set the python random seed
import numpy  # to set the numpy random seed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# Ignore excessive warnings
import logging
logging.propagate = False
logging.getLogger().setLevel(logging.ERROR)

# WandB – Import the wandb library
import wandb


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # In our constructor, we define our neural network architecture that we'll use in the forward pass.
        # Conv2d() adds a convolution layer that generates 2 dimensional feature maps to learn different aspects of our image
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        # Linear(x,y) creates dense, fully connected layers with x inputs and y outputs
        # Linear layers simply output the dot product of our inputs and weights.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Here we feed the feature maps from the convolutional layers into a max_pool2d layer.
        # The max_pool2d layer reduces the size of the image representation our convolutional layers learnt,
        # and in doing so it reduces the number of parameters and computations the network needs to perform.
        # Finally we apply the relu activation function which gives us max(0, max_pool2d_output)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        # Reshapes x into size (-1, 16 * 5 * 5) so we can feed the convolution layer outputs into our fully connected layer
        x = x.view(-1, 16 * 5 * 5)
        # We apply the relu activation function to the output of our fully connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # Finally we apply the log-softmax function to squash the probabilities of each class (0-9) and ensure they add to 1.
        return F.log_softmax(x, dim=1)


def train(args, model, device, train_loader, optimizer, epoch):
    # Switch model to training mode. This is necessary for layers like dropout, batchnorm etc which behave differently in training and evaluation mode
    model.train()

    # We loop over the data iterator, and feed the inputs to the network and adjust the weights.
    for batch_idx, (data, target) in enumerate(train_loader):
        if batch_idx > 20:
            break
        # Load the input features and labels from the training dataset
        data, target = data.to(device), target.to(device)
        # Reset the gradients to 0 for all learnable weight parameters
        optimizer.zero_grad()
        # Forward pass: Pass image data from training dataset, make predictions about class image belongs to (0-9 in this case)
        output = model(data)
        # Define our loss function, and compute the loss
        loss = F.nll_loss(output, target)
        # Backward pass: compute the gradients of the loss w.r.t. the model's parameters
        loss.backward()
        # Update the neural network weights
        optimizer.step()


def test(args, model, device, test_loader, classes):
    # Switch model to evaluation mode. This is necessary for layers like dropout, batchnorm etc which behave differently in training and evaluation mode
    model.eval()
    test_loss = 0
    correct = 0
    example_images = []
    with torch.no_grad():
        for data, target in test_loader:
            # Load the input features and labels from the test dataset
            data, target = data.to(device), target.to(device)
            # Make predictions: Pass image data from test dataset, make predictions about class image belongs to (0-9 in this case)
            output = model(data)
            # Compute the loss, sum up batch loss
            test_loss += F.nll_loss(output, target, reduction="sum").item()
            # Get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            # WandB – Log images in your test dataset automatically, along with predicted and true labels by passing pytorch tensors with image data into wandb.Image
            example_images.append(
                wandb.Image(
                    data[0],
                    caption="Pred: {} Truth: {}".format(
                        classes[pred[0].item()], classes[target[0]]
                    ),
                )
            )

    # WandB – wandb.log(a_dict) logs the keys and values of the dictionary passed in and associates the values with a step.
    # You can log anything by passing it to wandb.log, including histograms, custom matplotlib objects, images, video, text, tables, html, pointclouds and other 3D objects.
    # Here we use it to log test accuracy, loss and some test images (along with their true and predicted labels).
    wandb.log(
        {
            "Examples": example_images,
            "Test Accuracy": 100.0 * correct / len(test_loader.dataset),
            "Test Loss": test_loss,
        }
    )


# WandB – Initialize a new run
wandb.init(project="pytorch-intro", reinit=True)
wandb.watch_called = False  # Re-run the model without restarting the runtime, unnecessary after our next release

# WandB – Config is a variable that holds and saves hyperparameters and inputs
config = wandb.config  # Initialize config
config.batch_size = 4  # input batch size for training (default: 64)
config.test_batch_size = 10  # input batch size for testing (default: 1000)
config.epochs = 50  # number of epochs to train (default: 10)
config.lr = 0.1  # learning rate (default: 0.01)
config.momentum = 0.1  # SGD momentum (default: 0.5)
config.no_cuda = False  # disables CUDA training
config.seed = 42  # random seed (default: 42)
config.log_interval = 10  # how many batches to wait before logging training status


def main():
    use_cuda = not config.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    # Set random seeds and deterministic pytorch for reproducibility
    # random.seed(config.seed)  # python random seed
    torch.manual_seed(config.seed)  # pytorch random seed
    # numpy.random.seed(config.seed)  # numpy random seed
    torch.backends.cudnn.deterministic = True

    # Load the dataset: We're training our CNN on CIFAR10 (https://www.cs.toronto.edu/~kriz/cifar.html)
    # First we define the transformations to apply to our images
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )

    # Now we load our training and test datasets and apply the transformations defined above
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(root="./data", train=True, download=True, transform=transform),
        batch_size=config.batch_size,
        shuffle=True,
        **kwargs
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(
            root="./data", train=False, download=True, transform=transform
        ),
        batch_size=config.test_batch_size,
        shuffle=False,
        **kwargs
    )

    classes = (
        "plane",
        "car",
        "bird",
        "cat",
        "deer",
        "dog",
        "frog",
        "horse",
        "ship",
        "truck",
    )

    # Initialize our model, recursively go over all modules and convert their parameters and buffers to CUDA tensors (if device is set to cuda)
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=config.lr, momentum=config.momentum)

    # WandB – wandb.watch() automatically fetches all layer dimensions, gradients, model parameters and logs them automatically to your dashboard.
    # Using log="all" logs histograms of parameter values in addition to gradients
    wandb.watch(model, log="all")

    for epoch in range(1, config.epochs + 1):
        train(config, model, device, train_loader, optimizer, epoch)
        test(config, model, device, test_loader, classes)

    # WandB – Save the model checkpoint. This automatically saves a file to the cloud and associates it with the current run.
    torch.save(model.state_dict(), "model.h5")
    wandb.save("model.h5")


if __name__ == "__main__":
    main()
| 41.389423 | 172 | 0.667325 | 1,216 | 8,609 | 4.67023 | 0.306743 | 0.014087 | 0.002641 | 0.003522 | 0.1782 | 0.16834 | 0.135939 | 0.113048 | 0.10037 | 0.085226 | 0 | 0.019904 | 0.247183 | 8,609 | 207 | 173 | 41.589372 | 0.855269 | 0.482286 | 0 | 0.065041 | 0 | 0 | 0.038872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04065 | false | 0 | 0.089431 | 0 | 0.146341 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ff623ca3a173fe61be66ba417aa0b732dab5cef | 951 | py | Python | apfell-docker/app/attack_parse.py | gavz/Apfell | 715f692c30490e9d4c358e64aad3b5747d735a9b | [
"BSD-3-Clause"
] | 3 | 2020-07-28T01:47:29.000Z | 2022-03-03T22:24:11.000Z | apfell-docker/app/attack_parse.py | gavz/Apfell | 715f692c30490e9d4c358e64aad3b5747d735a9b | [
"BSD-3-Clause"
] | null | null | null | apfell-docker/app/attack_parse.py | gavz/Apfell | 715f692c30490e9d4c358e64aad3b5747d735a9b | [
"BSD-3-Clause"
] | null | null | null | import json as js
import pprint

file = open('full_attack.json', 'r')
output = open('small_attack.json', 'w')
attack = js.load(file)
attack_list = []
for obj in attack['objects']:
    if obj['type'] == 'attack-pattern':
        t_num = "Not Found"  # just an error case
        for ext_ref in obj['external_references']:
            if 'external_id' in ext_ref and ext_ref['source_name'] == 'mitre-attack':
                t_num = ext_ref['external_id']
        name = obj['name']
        os = ' '.join(obj['x_mitre_platforms'])
        tactics = [x['phase_name'] for x in obj['kill_chain_phases'] if x['kill_chain_name'] == 'mitre-attack']
        tactics = " ".join(tactics)
        # tactic = obj['kill_chain_phases'][0]['phase_name']
        attack_list.append({"t_num": t_num, "name": name, "os": os, "tactic": tactics})
full_output = {"techniques": attack_list}
output.write(js.dumps(full_output))
# Close the handles so the condensed file is flushed to disk.
file.close()
output.close()
| 45.285714 | 119 | 0.59306 | 130 | 951 | 4.115385 | 0.415385 | 0.029907 | 0.056075 | 0.06729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001399 | 0.24816 | 951 | 20 | 120 | 47.55 | 0.746853 | 0.071504 | 0 | 0 | 0 | 0 | 0.269318 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
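
Reading the condensed file back is a one-liner; the shape of each technique entry shown in the comment mirrors the dict built above, with values left illustrative:

import json

with open('small_attack.json') as f:
    techniques = json.load(f)['techniques']
# each entry: {"t_num": "T....", "name": ..., "os": ..., "tactic": ...}
print(len(techniques))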
0ff69146ceb6daa83ba78a6f123a8c4e207b0c89 | 5,615 | py | Python | evaluate.py | kevin1zc/csci662-project | f945d5fd7ef61393c8b8df6484e6ca76fbf12c9e | [
"MIT"
] | null | null | null | evaluate.py | kevin1zc/csci662-project | f945d5fd7ef61393c8b8df6484e6ca76fbf12c9e | [
"MIT"
] | null | null | null | evaluate.py | kevin1zc/csci662-project | f945d5fd7ef61393c8b8df6484e6ca76fbf12c9e | [
"MIT"
] | null | null | null | import json

from fairseq.models.roberta import RobertaModel
from collections import defaultdict
from tqdm import tqdm


def evaluate_structured_decode(data_file, roberta):
    if "test" in data_file:
        data_name = "DECODE"
    elif "human-bot" in data_file:
        data_name = "Human-Bot"
    elif "a2t" in data_file:
        data_name = "A2T"
    else:
        data_name = "RCT"
    print(f"Evaluating Utterance-based model on {data_name} dataset:")

    with open(data_file, 'r') as f:
        raw_data = f.read().splitlines()
    raw_data = [json.loads(line) for line in raw_data]

    tp = tn = fp = fn = 0
    instance_correct = 0
    instance_strict_correct = 0
    pairs = 0
    for instance in tqdm(raw_data):
        contradiction_idx = instance['aggregated_contradiction_indices']
        instance_label = 1 if instance["is_contradiction"] else 0

        # Group utterances by speaker; the model compares the last speaker's
        # final utterance against each of their earlier utterances.
        speaker_utterances = defaultdict(list)
        last_speaker = -1
        for turn in instance['turns']:
            speaker = turn['agent_id']
            last_speaker = speaker
            speaker_utterances[speaker].append(turn['text'])
        utterances = speaker_utterances[last_speaker]
        pairs += len(utterances) - 1

        instance_label_pred = 0
        all_pairs_correct = True
        for i in range(len(utterances) - 1):
            turn_idx = i * 2 + last_speaker
            pair_label = 1 if turn_idx in contradiction_idx else 0
            tokens = roberta.encode(utterances[i], utterances[-1])
            pair_label_pred = roberta.predict('decode_head', tokens).argmax()
            if pair_label_pred == pair_label == 1:
                instance_label_pred = 1
                tp += 1
            elif pair_label_pred == pair_label == 0:
                tn += 1
            elif pair_label_pred == 1 and pair_label == 0:
                instance_label_pred = 1
                fp += 1
                all_pairs_correct = False
            else:  # label_pred==0, label==1
                fn += 1
                all_pairs_correct = False
        if all_pairs_correct:
            instance_strict_correct += 1
        if instance_label_pred == instance_label:
            instance_correct += 1

    print(f" MT: {instance_correct / len(raw_data)}")
    if data_name == "DECODE":
        print(f" MT strict: {instance_strict_correct / len(raw_data)}")
        print(f" SE F1: {tp / (tp + 0.5 * (fp + fn))}")
        print(f" pairs: {(tp + tn) / pairs}")


def evaluate_unstructured_decode(data_file, roberta):
    if "test" in data_file:
        data_name = "DECODE"
    elif "human-bot" in data_file:
        data_name = "Human-Bot"
    elif "a2t" in data_file:
        data_name = "A2T"
    else:
        data_name = "RCT"
    print(f"Evaluating Unstructured model on {data_name} dataset:")

    with open(data_file, 'r') as f:
        raw_data = f.read().splitlines()
    raw_data = [json.loads(line) for line in raw_data]

    instance_correct = 0
    unevaluated = 0
    for instance in tqdm(raw_data):
        all_utterances = []
        label = 1 if instance["is_contradiction"] else 0
        for turn in instance['turns']:
            speaker = turn['agent_id']
            # prepend each utterance with a special token that denotes the speaker
            all_utterances.append('<{0}> {1}'.format(speaker, turn['text']))
        prev_utterances = ' '.join(all_utterances[:-1])
        tokens = roberta.encode(prev_utterances, all_utterances[-1])
        try:  # Concatenated utterances may exceed the max length of RoBERTa. Simply ignore this instance.
            label_pred = roberta.predict('decode_head', tokens).argmax()
            if label_pred == label:
                instance_correct += 1
        except:
            unevaluated += 1

    print(f" MT: {instance_correct / len(raw_data)}")
    print(f" Unevaluated: {unevaluated}")


if __name__ == "__main__":
    roberta = RobertaModel.from_pretrained(
        'model_structured/checkpoints',
        checkpoint_file='checkpoint_best.pt',
        data_name_or_path='decode-bin/structured'
    )
    roberta.eval().cuda()
    evaluate_structured_decode("decode_v0.1/test.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/human-bot.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/a2t.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/rct.jsonl", roberta)

    roberta = RobertaModel.from_pretrained(
        'model_unstructured/checkpoints',
        checkpoint_file='checkpoint_best.pt',
        data_name_or_path='decode-bin/unstructured'
    )
    roberta.eval().cuda()
    evaluate_unstructured_decode("decode_v0.1/test.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/human-bot.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/a2t.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/rct.jsonl", roberta)

    roberta = RobertaModel.from_pretrained(
        'model_unstructured_anli/checkpoints',
        checkpoint_file='checkpoint_best.pt',
        data_name_or_path='anli-r3-bin'
    )
    roberta.eval().cuda()
    evaluate_structured_decode("decode_v0.1/test.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/human-bot.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/a2t.jsonl", roberta)
    evaluate_structured_decode("decode_v0.1/rct.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/test.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/human-bot.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/a2t.jsonl", roberta)
    evaluate_unstructured_decode("decode_v0.1/rct.jsonl", roberta)
| 38.724138 | 106 | 0.647017 | 703 | 5,615 | 4.906117 | 0.180654 | 0.055668 | 0.064946 | 0.069585 | 0.625399 | 0.582778 | 0.574369 | 0.559872 | 0.538997 | 0.468832 | 0 | 0.019376 | 0.246305 | 5,615 | 144 | 107 | 38.993056 | 0.795605 | 0.032235 | 0 | 0.507937 | 0 | 0 | 0.206814 | 0.102578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0 | 0.031746 | 0 | 0.047619 | 0.063492 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
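
A quick single-pair sanity check using only the APIs already shown above; the checkpoint paths come from the script, while the utterance pair is made up:

from fairseq.models.roberta import RobertaModel

roberta = RobertaModel.from_pretrained(
    'model_structured/checkpoints',
    checkpoint_file='checkpoint_best.pt',
    data_name_or_path='decode-bin/structured')
roberta.eval()
tokens = roberta.encode("I love dogs.", "I have never liked dogs.")
print(roberta.predict('decode_head', tokens).argmax())  # 1 => contradiction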
0ff7d50a006b0f26b65e154b31ce5b6443c61206 | 6,675 | py | Python | deploy/deployer.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | deploy/deployer.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | deploy/deployer.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | [
"Apache-2.0"
] | null | null | null | import logging
import kubernetes.client
import kubernetes.client.rest
import os
import subprocess

from . import specs


def wrap_api_call(api_call):
    def _wrapped_api_call(*args, **kws):
        result = None
        try:
            result = api_call(*args, **kws)
        except kubernetes.client.rest.ApiException as exn:
            # a 404 usually indicates that istio is not active on the k8s cluster
            if exn.status == 404:
                logging.error('\n***\nIs istio installed? istioctl install --set profile=demo\n***')
            logging.exception(exn)
        return result
    return _wrapped_api_call


class Deployer:
    def __init__(self):
        self.core_v1 = kubernetes.client.CoreV1Api()
        self.create_namespace = wrap_api_call(self.core_v1.create_namespace)
        self.delete_namespace = wrap_api_call(self.core_v1.delete_namespace)
        self.create_namespaced_service_account = wrap_api_call(
            self.core_v1.create_namespaced_service_account)
        self.delete_namespaced_service_account = wrap_api_call(
            self.core_v1.delete_namespaced_service_account)
        self.create_namespaced_service = wrap_api_call(
            self.core_v1.create_namespaced_service)
        self.delete_namespaced_service = wrap_api_call(
            self.core_v1.delete_namespaced_service)
        self.apps_v1 = kubernetes.client.AppsV1Api()
        self.create_namespaced_deployment = wrap_api_call(
            self.apps_v1.create_namespaced_deployment)
        self.delete_namespaced_deployment = wrap_api_call(
            self.apps_v1.delete_namespaced_deployment)
        self.rbac_v1 = kubernetes.client.RbacAuthorizationV1Api()
        self.create_namespaced_role_binding = wrap_api_call(
            self.rbac_v1.create_namespaced_role_binding)
        self.delete_namespaced_role_binding = wrap_api_call(
            self.rbac_v1.delete_namespaced_role_binding)
        self.custom_api = kubernetes.client.CustomObjectsApi()
        self.create_namespaced_custom_object = wrap_api_call(
            self.custom_api.create_namespaced_custom_object)
        self.delete_namespaced_custom_object = wrap_api_call(
            self.custom_api.delete_namespaced_custom_object)

    def create(self, namespace):
        print("The deploy module is used for dev deployments. As such, we are now "
              "setting the kube context to docker-desktop.", flush=True)
        subprocess.check_output("kubectl config use-context docker-desktop".split())
        self.create_namespace(specs.rexflow_namespace_spec)
        # ETCD
        self.create_namespaced_service_account(
            'rexflow', specs.etcd_service_acct_spec)
        self.create_namespaced_service(
            'rexflow', specs.etcd_service_specs)
        self.create_namespaced_deployment(
            'rexflow', specs.etcd_deployment_spec)
        # flowd
        self.create_namespaced_service_account(
            'rexflow', specs.flowd_service_acct_spec)
        self.create_namespaced_service(
            'default', specs.flowd_service_specs['default'])
        self.create_namespaced_service(
            'rexflow', specs.flowd_service_specs['rexflow'])
        self.create_namespaced_deployment(
            'rexflow', specs.mk_flowd_deployment_spec('rexflow-etcd.rexflow',
                                                      namespace.kafka))
        self.create_namespaced_role_binding(
            'default', specs.flowd_edit_default_spec)
        # healthd
        self.create_namespaced_service_account(
            'rexflow', specs.healthd_service_acct_spec)
        self.create_namespaced_service(
            'rexflow', specs.healthd_service_spec)
        self.create_namespaced_deployment(
            'rexflow', specs.mk_healthd_deployment_spec('rexflow-etcd.rexflow',
                                                        namespace.kafka))
        self.create_namespaced_role_binding(
            'default', specs.healthd_edit_default_spec)
        # Gateway and virtual services
        self.create_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'gateways',
            specs.rexflow_gateway_spec)
        self.create_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'virtualservices',
            specs.flowd_virtual_service_spec)
        self.create_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'virtualservices',
            specs.healthd_virtual_service_spec)
        if namespace.kafka:
            os.system("kubectl create ns kafka")
            os.system("kubectl create -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka")
            os.system("kubectl create -f "
                      "https://strimzi.io/examples/latest/kafka/kafka-persistent-single.yaml -n kafka ")

    def delete(self, namespace):
        print("The deploy module is used for dev deployments. As such, we are now "
              "setting the kube context to docker-desktop.", flush=True)
        subprocess.check_output("kubectl config use-context docker-desktop".split())
        self.delete_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'virtualservices',
            'healthd')
        self.delete_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'virtualservices',
            'flowd')
        self.delete_namespaced_custom_object(
            'networking.istio.io', 'v1alpha3', 'default', 'gateways',
            'rexflow-gateway')
        self.delete_namespaced_service('flowd', 'default')
        self.delete_namespaced_service('flowd', 'rexflow')
        self.delete_namespaced_deployment('flowd', 'rexflow')
        self.delete_namespaced_service('healthd', 'rexflow')
        self.delete_namespaced_deployment('healthd', 'rexflow')
        self.delete_namespaced_role_binding('flowd-edit-default', 'default')
        self.delete_namespaced_role_binding('healthd-edit-default', 'default')
        self.delete_namespaced_service_account('healthd', 'rexflow')
        self.delete_namespaced_service_account('flowd', 'rexflow')
        self.delete_namespaced_service('rexflow-etcd', 'rexflow')
        self.delete_namespaced_deployment('rexflow-etcd', 'rexflow')
        self.delete_namespaced_service_account('rexflow-etcd', 'rexflow')
        self.delete_namespace('rexflow')
        if namespace.kafka:
            os.system("kubectl delete -f "
                      "https://strimzi.io/examples/latest/kafka/kafka-persistent-single.yaml -n kafka ")
            os.system("kubectl delete -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka")
            os.system("kubectl delete ns kafka")
| 48.021583 | 103 | 0.68015 | 746 | 6,675 | 5.776139 | 0.168901 | 0.092829 | 0.092829 | 0.041773 | 0.714319 | 0.632861 | 0.536783 | 0.457879 | 0.430958 | 0.291019 | 0 | 0.006771 | 0.225618 | 6,675 | 138 | 104 | 48.369565 | 0.826852 | 0.017228 | 0 | 0.308943 | 0 | 0.03252 | 0.216051 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04065 | false | 0 | 0.04878 | 0 | 0.113821 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
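
A hypothetical driver for the Deployer above. The create/delete methods only read the .kafka attribute of their argument, so a bare argparse Namespace stands in for the parsed CLI options; loading kube config first is an assumption about how the kubernetes clients get configured:

from argparse import Namespace
from kubernetes import config
from deploy.deployer import Deployer

config.load_kube_config()                 # assumed: clients need a configured context
Deployer().create(Namespace(kafka=False))
# ... later ...
Deployer().delete(Namespace(kafka=False))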
0ff9ccafc8706c01c8f4aa39cb1046dcdc0d559a | 3,358 | py | Python | setup.py | TomoyaFukui/Jupiter | 13f2433c9cf15053dc73c7718c56d0a2d060b723 | [
"MIT"
] | 6 | 2017-12-11T05:02:55.000Z | 2018-12-03T02:54:50.000Z | setup.py | TomoyaFukui/Jupiter | 13f2433c9cf15053dc73c7718c56d0a2d060b723 | [
"MIT"
] | 1 | 2018-04-10T03:55:14.000Z | 2018-11-02T15:02:02.000Z | setup.py | TomoyaFukui/Jupiter | 13f2433c9cf15053dc73c7718c56d0a2d060b723 | [
"MIT"
] | 5 | 2018-07-30T18:07:24.000Z | 2019-07-31T09:51:35.000Z | #!/usr/bin/env python

from __future__ import absolute_import
from __future__ import unicode_literals

import os
import sys
import glob
import site

from setuptools import setup, find_packages
# from distutils.extension import Extension
# from Cython.Distutils import build_ext
from setuptools.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy as np

try:
    with open('readme.md') as f:
        readme = f.read()
except IOError:
    readme = ''


def _requires_from_file(filename):
    return open(filename).read().splitlines()


extensions = [
    Extension(
        "jupiter.simulator.cython.make_bid",
        sources=["jupiter/simulator/cython/make_bid.pyx"],
        include_dirs=[np.get_include()],
    ),
]

# version
# here = os.path.dirname(os.path.abspath(__file__)) + '/jupiter'
here = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'jupiter')
version = next((line.split('=')[1].strip().replace("'", '')
                for line in open(os.path.join(here,
                                              'simulator',
                                              '__init__.py'))
                if line.startswith('__version__ = ')),
               '1.0.2')

# data_files
# Get the path of the site-packages directory.
# NOTE: site.getsitepackages() may list entries like "C:\Python34" first, so we
# assume the last entry is site-packages (this is not guaranteed; a more robust
# approach would be welcome).
site_dir = os.path.join(site.getsitepackages()[-1], "jupiter-negotiation")
domain_dir = os.path.join(here, 'domain')
datafiles = []
for filename in glob.glob(os.path.join(domain_dir, '*')):
    if os.path.isdir(filename):
        xmlfile_list = []
        for xmlfile_path in glob.glob(os.path.join(filename, '*.xml')):
            xmlfile_list.append(xmlfile_path[xmlfile_path.find("jupiter"):])
        domain_path = site_dir + "/" + filename[len(domain_dir):]
        datafiles.append((domain_path, xmlfile_list))

agents_list = []
agents_dir = os.path.join(here, 'agents')
for i in glob.glob(os.path.join(agents_dir, '*.py')):
    if i.find("__init__.py") > 0:
        continue
    agents_list.append(i[i.find("jupiter"):])
agents_dir_save = os.path.join(site_dir, "agents")
datafiles.append((agents_dir_save, agents_list))

print("-" * 100)
for i in datafiles:
    print(i[0])
    print("\t", i[1])
print("-" * 100)

setup(
    name="jupiter-negotiation",
    version=version,
    url='https://github.com/TomoyaFukui/Jupiter',
    author='TomoyaFukui',
    author_email='sumabura6581@gmail.com',
    maintainer='TomoyaFukui',
    maintainer_email='sumabura6581@gmail.com',
    description='Simulator for automated negotiation',
    long_description=readme,
    packages=find_packages(),
    ext_modules=cythonize(extensions),
    # data_files=datafiles,
    include_package_data=True,
    install_requires=_requires_from_file('requirements.txt'),
    license="MIT",
    keywords="negotiation, jupiter",
    classifiers=[
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
    ],
    entry_points={
        "console_scripts": [
            "jupiter=jupiter.__main__:main"
        ],
    },
    # cmdclass={'build_ext': build_ext}
)
| 31.092593 | 89 | 0.651876 | 398 | 3,358 | 5.301508 | 0.349246 | 0.03981 | 0.042654 | 0.061611 | 0.181043 | 0.08436 | 0 | 0 | 0 | 0 | 0 | 0.012739 | 0.205182 | 3,358 | 107 | 90 | 31.383178 | 0.777445 | 0.128648 | 0 | 0.048193 | 0 | 0 | 0.203844 | 0.049073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012048 | false | 0 | 0.13253 | 0.012048 | 0.156627 | 0.048193 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
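
For reference, the typical way a Cython-backed setup script like the one above is exercised (standard setuptools workflow, not specific to this repository):

# Standard invocations, run from the repository root:
#   python setup.py build_ext --inplace   # compile jupiter/simulator/cython/make_bid.pyx
#   pip install .                          # build and install jupiter-negotiation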
0ffb0ded1cbf32290c36cec8eaa3225171b8ee43 | 388 | py | Python | examples/ASPHERE/tri/tri.srd.viz.py | neoshanarayanan/lammps_simulations | 04e55e3b74da588e70a08b6b9f1d79fc4dc0b7d4 | [
"MIT"
] | null | null | null | examples/ASPHERE/tri/tri.srd.viz.py | neoshanarayanan/lammps_simulations | 04e55e3b74da588e70a08b6b9f1d79fc4dc0b7d4 | [
"MIT"
] | null | null | null | examples/ASPHERE/tri/tri.srd.viz.py | neoshanarayanan/lammps_simulations | 04e55e3b74da588e70a08b6b9f1d79fc4dc0b7d4 | [
"MIT"
] | null | null | null | # Pizza.py viz of triangle + SRD output
# Note: dump, tdump, gl, and vcr are tools provided by the Pizza.py runtime;
# this script is meant to be run from inside Pizza.py rather than plain Python.

d = dump("dump1.atom.srd dump2.atom.srd")
t = tdump("dump1.tri.srd dump2.tri.srd")
t.map(1, "id", 2, "type",
      3, "corner1x", 4, "corner1y", 5, "corner1z",
      6, "corner2x", 7, "corner2y", 8, "corner2z",
      9, "corner3x", 10, "corner3y", 11, "corner3z")
d.extra(t)

g = gl(d)
g.arad(1, 0.02)
g.acol(1, "green")
g.arad(2, 0.05)
g.acol(2, "green")

v = vcr(g)
| 20.421053 | 47 | 0.610825 | 74 | 388 | 3.202703 | 0.662162 | 0.059072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107463 | 0.136598 | 388 | 18 | 48 | 21.555556 | 0.6 | 0.095361 | 0 | 0 | 0 | 0 | 0.412607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ffbb929c85c36054a213f5d4c6886c74931b33c | 19,541 | py | Python | OREGON.py | oof-123/python-oregon-trail | 8a7df645469d555e950b99d1bfe79fe63fe58edb | [
"MIT"
] | null | null | null | OREGON.py | oof-123/python-oregon-trail | 8a7df645469d555e950b99d1bfe79fe63fe58edb | [
"MIT"
] | null | null | null | OREGON.py | oof-123/python-oregon-trail | 8a7df645469d555e950b99d1bfe79fe63fe58edb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys, os, subprocess, random, time

global w, ansi, g, enableRemovedFeatures

subprocess.call('', shell=True)  # This makes it possible to use color graphics on Windows
enableRemovedFeatures = False


# init graphics and stuff
class ANSI:
    def __init__(self):
        self.BLACK = "\u001b[30m"
        self.RED = "\u001b[31m"
        self.GREEN = "\u001b[32m"
        self.YELLOW = "\u001b[33m"
        self.BLUE = "\u001b[34m"
        self.MAGENTA = "\u001b[35m"
        self.CYAN = "\u001b[36m"
        self.RESET = "\u001b[0m"
        self.WHITE = "\u001b[37;1m"
        self.REVERSED = "\u001b[7m"
        self.BOLD = "\u001b[1m"
        self.BACKRED = "\u001b[41m"


ansi = ANSI()


class Window:
    def __init__(self):
        try:
            from PROGDETAILS import Program
            p = Program()
        except:
            print("The game appears to be missing files (or the PROGDETAILS file is corrupted)")
            print("If the file \"version\" exists, run the file \"UpdateVersionInfo.py\" to fix this error. If not, create")
            print("a file called \"version\" (no extension) with the contents \"1 0 0\" and run the \"UpdateVersionInfo.py\" file.")
            sys.exit(0)
        self.header = ansi.WHITE + "████████████████████████████████████████████████████████████████████████████\n█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█\n█▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█\n█▓▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▓█\n█▓▒░ " + ansi.CYAN + "The Oregon Trail 2018 Abridged: " + ansi.MAGENTA + "The Manga - " + ansi.RED + "The Netflix adaptation" + ansi.WHITE + " ░▒▓█\n█▓▒░ " + ansi.GREEN + " v" + p.version + " By Johnny" + ansi.WHITE + " ░▒▓█\n█▓▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▓█\n█▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█\n█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█\n████████████████████████████████████████████████████████████████████████████\n"

    def menu(self, items, choices):
        os.system("cls")
        print(self.header)
        if(choices == []):
            for item in items:
                print(item)
            return
        try:
            menu = True
            while(menu):
                os.system("cls")
                print(self.header)
                for item in items:
                    print(item)
                try:
                    choice = int(input("\nWhat is your choice?"))
                    if(choice in choices):
                        return choice
                except:
                    errors = 0
        except:
            # NOTE: Window defines no error() method (Game does); this call
            # would raise AttributeError if ever reached -- a latent bug kept
            # from the original.
            self.error("Error while creating list")

    def wait(self, text):
        print(text)
        os.system("pause >> nul")
        os.system("cls")


w = Window()


###################### GAME CLASS ######################
class Game:
    def __init__(self):
        self.party = []
        self.person = 0
        self.banker = 1
        self.carpenter = 2
        self.farmer = 3
        self.month = 0
        self.day = 0
        self.cash = 1600.00
        self.food = 0
        self.oxen = 0
        self.ammo = 0
        self.clothing = 0
        self.wheels = 0
        self.axles = 0
        self.tongues = 0
        self.weather = 0
        self.health = 100
        self.pricemodifier = 1
        self.sickness = 1
        random.seed(int(round(time.time() * 1000)))

    def error(self, err):
        print(ansi.BACKRED, err)
        print(self.cash, self.month, self.day, self.weather, self.wheels, self.axles, self.tongues, self.health)
        w.wait("Press any key...")
        sys.exit(1)

    def createstore(self):
        try:
            # Set up variables
            left = False
            oxencost = 0
            foodcost = 0
            clothingcost = 0
            ammocost = 0
            partscost = 0
            while not left:
                cost = oxencost + foodcost + clothingcost + ammocost + partscost
                choice = w.menu(["Matt's General Store", ["", "March", "April", "May", "June", "July"][self.month] + " 1st, 1848\n", "1. Oxen $" + str(oxencost), "2. Food $" + str(foodcost), "3. Clothing $" + str(clothingcost), "4. Ammunition $" + str(ammocost), "5. Spare parts $" + str(partscost), "6. Leave store\n", "Total bill: $" + str(cost)], [1, 2, 3, 4, 5, 6])
                # leave store
                if(choice == 6):
                    left = True
                # oxen
                if(choice == 1):
                    if(self.person == 1):
print("There are 2 oxen in a yoke. I recommend at least 3 yoke. I charge $24 a yoke.")
else:
print("There are 2 oxen in a yoke. I recommend at least 3 yoke. I charge $40 a yoke.")
self.oxen = int(input("How many do you want? "))
if(self.cash - self.oxen * (40 * self.pricemodifier) < 0):
print("You don't have enough for that!")
self.oxen = 0
oxencost = self.oxen * (40 * self.pricemodifier)
#food
if(choice == 2):
if(self.person == 1):
print("I recommend you take at least 200 pounds of food for each person in your family. I see that you have five people in all. You'll need flour, sugar, bacon and coffee. My price is 12 cents a pound.")
else:
print("I recommend you take at least 200 pounds of food for each person in your family. I see that you have five people in all. You'll need flour, sugar, bacon and coffee. My price is 20 cents a pound.")
self.food = int(input("How many do you want? "))
if(self.cash - self.food * (0.2 * self.pricemodifier) < 0):
print("You don't have enough for that!")
self.food = 0
foodcost = self.food * (0.2 * self.pricemodifier)
#clothing
if(choice == 3):
if(self.person == 1):
print("You'll need warm clothing in the mountains. I recommend taking at least 2 sets of clothes per person. Each set is $6.00.")
else:
print("You'll need warm clothing in the mountains. I recommend taking at least 2 sets of clothes per person. Each set is $10.00.")
self.clothing = int(input("How many do you want? "))
if(self.cash - self.clothing * (10 * self.pricemodifier) < 0):
print("You don't have enough for that!")
self.clothing = 0
clothingcost = self.clothing * (10 * self.pricemodifier)
#ammo
if(choice == 4):
if(self.person == 1):
print("I sell ammunition in boxes of 20 bullets. Each box costs $1.20.")
else:
print("I sell ammunition in boxes of 20 bullets. Each box costs $2.00.")
self.ammo = int(input("How many do you want? "))
if(self.cash - self.ammo * (2 * self.pricemodifier) < 0):
print("You don't have enough for that!")
                        self.ammo = 0
ammocost = self.ammo * (2 * self.pricemodifier)
#spare parts
if(choice == 5):
print("It's a good idea to have spare parts for your wagon. Here are the prices:")
if(self.person == 1):
print(" wagon wheel - $6 each\n wagon axle - $6 each\n wagon tongue - $6 each")
else:
print(" wagon wheel - $10 each\n wagon axle - $10 each\n wagon tongue - $10 each")
self.wheels = int(input("How many wheels do you want? "))
self.axles = int(input("How many axles do you want? "))
self.tongues = int(input("How many tongues do you want? "))
if(self.cash - (self.wheels + self.axles + self.tongues) * (10 * self.pricemodifier) < 0):
print("You don't have enough for that!")
self.wheels = 0
self.axles = 0
self.tongues = 0
partscost = (self.wheels + self.axles + self.tongues) * (10 * self.pricemodifier)
#buy
self.cash = self.cash - cost
except:
self.error("Error creating store page.")
def start(self):
if(enableRemovedFeatures):
while(self.person == 4 or self.person == 0):
self.person = w.menu(["Many kinds of people made the trip to Oregon.", "\nYou may:\n", " 1. Be a banker from Boston", " 2. Be a carpenter from Ohio", " 3. Be a farmer from Illinois", " 4. Find out the differences between the choices"], [1, 2, 3, 4])
if(self.person == 4):
print("Banker gets extra money, carpenter gets extra spare parts and can repair parts, farmer gets 4 free oxen.")
else:
self.person = w.menu(["Many kinds of people made the trip to Oregon.", "\nYou may:\n", " 1. Be a banker from Boston", " 2. Be a carpenter from Ohio", " 3. Be a farmer from Illinois"], [1, 2, 3])
#Class bonuses
if(self.person == 1):
self.cash = self.cash + 1000
self.pricemodifier = 0.6 #discount
elif(self.person == 2):
self.axles = 3
self.wheels = 3
self.tongues = 3
elif(self.person == 3):
self.oxen = 4
correct = False
while(correct != True):
os.system("cls")
print(w.header)
self.party.append(input("What is the first name of the wagon leader?"))
for i in range(4):
self.party.append(input("What is the first name of the next member of your party?"))
os.system("cls")
print(w.header)
for name in self.party:
print(name)
choice = input("Are these names correct? ")
if(choice in ['y', 'yes', 'absolutely', 'uh-huh', 'correct', 'true']):
correct = True
else:
self.party = []
if(enableRemovedFeatures):
#Removed 1/8/2019
self.month = w.menu(["It is 1848. Your jumping off place for Oregon is Independence, Missouri. You must decide which month to leave Independence.\n\n", " 1. March", " 2. April", " 3. May", " 4. June", " 5. July", " 6. Ask for advice"], [1,2,3,4,5,6])
if(self.month == 6):
os.system("cls")
print(w.header)
print("You attend a public meeting held for \"Folks with the California-Oregon Fever.\" You're told:\n\nIf you leave too early, there won't be any grass for your oxen to eat. If you leave too late, you may not get to Oregon before winter comes. If you leave at just the right time, there will be green grass and the weather will be cool.")
w.wait("Press any key...")
else:
self.month = 1
self.day = (self.month - 1) * 30
w.menu(["Before leaving Independence you should buy equipment and supplies. You have $" + str(int(self.cash)) + ".00 in cash, but you don't have to spend all of it now.", "You can buy what you need at Matt's General Store."], [])
w.wait("Press any key...")
w.menu(["Hello, I'm Matt. So you're going to Oregon! I can fix you up with what you need:\n"," - a team of oxen to pull your wagon"," - clothing for both summer and winter"," - plenty of food for the trip"," - ammunition for your rifles"," - spare parts for your wagon"], [])
w.wait("Press any key...")
self.createstore()
print("Well then, you're ready to start. Good luck! You have a long and difficult journey ahead of you.")
if(True): #This code used to be contained in a try/except block. IDLE lacks shift+tab, so I did this (it used to crash if you died)
self.weather = self.month * 10
w.wait("Press any key...")
gameLoop = True
while(gameLoop == True):
time.sleep(2)
#If the player's entire party dies or runs out of food or oxen, it's game over.
                if(self.health <= 19 or self.food < 0.5 * (self.health / 5) or self.oxen == 0):
print("Game over.")
w.wait("Press any key...")
sys.exit(3)
if(enableRemovedFeatures):
#Determine weather (Removed 1/9/2019)
if(self.weather > 7):
weather = "Hot"
elif(self.weather < 4):
weather = "Cold"
else:
weather = "Warm"
#Determine health
if(self.health > 70):
health = "Good"
elif(self.health < 40):
health = "Poor"
else:
health = "Fair"
#Determine how much food to take
if(self.food > (150 * 5)):
rations = "Filling"
elif(self.food < (70* 5)):
rations = "Bare Bones"
else:
rations = "Meager"
#3 months and you win.
if(self.day == 90):
print("You win!")
sys.exit(1)
#All the ANSI codes make the text look nicer.
print(ansi.RESET + ansi.WHITE)
choice = w.menu([ansi.RESET + ansi.WHITE + "Weather: " + ansi.REVERSED + weather, ansi.RESET + ansi.WHITE + "Health: " + ansi.REVERSED + health, ansi.RESET + ansi.WHITE + "Rations: " + ansi.REVERSED + rations, ansi.RESET + ansi.WHITE + "\nYou may:\n", " 1. Continue on trail", " 2. Buy supplies"], [1,2])
print(ansi.RESET + ansi.WHITE)
                if(choice == 2): #Pretty self-explanatory.
self.createstore()
elif(choice == 1):
self.day = self.day + 1
#Remove food
if(rations == "Filling"):
self.food = self.food - 2
elif(rations == "Meager"):
self.food = self.food - 1
else:
self.food = self.food - 0.5
#Choose if players die
if(random.randrange(10) >= 7):
try:
self.health = self.health - (self.health / 5)
ax = random.choice(self.party)
self.party.remove(ax)
print(ax + " has died.")
except:
#The above code will fail if there are no more characters to kill.
#If so, we know it's a game over.
print("Game over.")
w.wait("Press any key...")
sys.exit(3)
#This used to be impossible, but you kinda need it in case you run out of money
if(random.randrange(1000) > 450 and random.randrange(60) < 10):
print("Some indians helped you find food.")
self.food = self.food + 30
#Rare
if(random.randrange(100) == 69):
print("A nuclear warhead struck your cart, killing everyone.")
print("Game over.")
w.wait("Press any key...")
sys.exit(3)
#Made more common
if(random.randrange(100) < 80):
print("One of your oxen has died.")
self.oxen = self.oxen - 1
#Wagon parts can now break.
if(random.randrange(100) == 30):
ax = random.randrange(3)
if(self.person == 2 and random.randrange(20) == 10):
print("You broke a part on your wagon, but you were able to repair it.")
                            continue  # carpenter repaired the part; skip the breakage below ('break' would wrongly end the game loop)
if(ax == 1):
self.wheels = self.wheels - 1
print("You broke a wheel.")
if(self.wheels < 0):
print("You have run out of wheels. Game over.")
sys.exit(1)
elif(ax == 2):
self.tongues = self.tongues - 1
print("You broke a tongue.")
if(self.tongues < 0):
print("You have run out of tongues. Game over.")
sys.exit(1)
else:
self.axles = self.axles - 1
print("You broke an axle.")
if(self.axles < 0):
print("You have run out of axles. Game over.")
sys.exit(1)
###################### END GAME CLASS ######################
#make sure we aren't running in IDLE. Graphics don't work properly in IDLE so you end up with a mess.
if("idlelib" in sys.modules):
print("It looks like you're running in IDLE! This breaks the graphics, please run in \nterminal instead.")
sys.exit(0)
while(True):
g = Game()
choice = w.menu(["\nYou may:\n 1. Travel the trail\n 2. Run a Python script\n 3. End"], [1, 2, 3])
if(choice == 1):
g.start()
elif(choice == 2):
load = input("Please type the name of the python script (it should be in the same folder as the game)")
try:
            #You can't import files if the names aren't hard-coded directly in the game,
            #unless you use a library. To get around this, we read the contents of the file
            #into exec and run it like that. Works pretty well, too. The only real issue is
            #that classes and defs don't always work, because exec() runs the code in the
            #caller's namespace instead of a fresh module namespace (not a Python bug).
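            # A hedged stdlib alternative (illustrative, not part of the original game)
            # that avoids the exec() namespace issue is importlib, e.g.:
            #   import importlib.util
            #   spec = importlib.util.spec_from_file_location("user_script", load)
            #   module = importlib.util.module_from_spec(spec)
            #   spec.loader.exec_module(module)
            # which runs the script in its own fresh module namespace.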
exec(open(load).read())
print("Success.")
os.system("pause")
except:
print("There was an error during execution. Does the file exist?")
elif(choice == 3):
sys.exit(0)
| 50.363402 | 937 | 0.453815 | 2,242 | 19,541 | 4.228368 | 0.220785 | 0.013924 | 0.010127 | 0.01097 | 0.25538 | 0.215506 | 0.187342 | 0.176266 | 0.176266 | 0.161392 | 0 | 0.032484 | 0.409242 | 19,541 | 387 | 938 | 50.49354 | 0.734667 | 0.064685 | 0 | 0.324759 | 0 | 0.048232 | 0.30729 | 0.035688 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025723 | false | 0 | 0.006431 | 0 | 0.048232 | 0.170418 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ffc69fde3a58f420984e811e4c2ddadcc94445f | 3,095 | py | Python | bot/player_commands/kills.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 1 | 2021-06-15T07:31:13.000Z | 2021-06-15T07:31:13.000Z | bot/player_commands/kills.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 1 | 2021-06-01T10:14:32.000Z | 2021-06-02T10:54:12.000Z | bot/player_commands/kills.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 2 | 2021-06-01T10:59:15.000Z | 2021-06-03T18:29:36.000Z | import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import Option # type: ignore
from typing import Optional
import requests
from bisect import bisect
from parse_profile import get_profile_data
from utils import error, format_duration, clean, PROFILE_NAMES, guild_ids
def comma_seperate(num: float) -> str:
    return f"{int(num):,}"  # the ',' format spec inserts thousands separators, e.g. 10000 -> "10,000"
class kills_cog(commands.Cog):
def __init__(self, bot) -> None:
self.client = bot
@commands.command(name="kills", aliases=['k', 'kill'])
async def kills_command(self, ctx, provided_username: Optional[str] = None, provided_profile: Optional[str] = None) -> None:
await self.get_kills(ctx, provided_username, provided_profile, is_response=False)
@commands.slash_command(name="kills", description="Gets the entities the player has killed the most", guild_ids=guild_ids)
async def kills_slash(self, ctx, username: Option(str, "username:", required=False),
profile: Option(str, "profile", choices=PROFILE_NAMES, required=False)):
if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
await self.get_kills(ctx, username, profile, is_response=True)
#=========================================================================================================================================
async def get_kills(self, ctx, provided_username: Optional[str] = None, provided_profile_name: Optional[str] = None, is_response: bool = False) -> None:
player_data: Optional[dict] = await get_profile_data(ctx, provided_username, provided_profile_name, is_response=is_response)
if player_data is None:
return
username = player_data["username"]
stats = player_data["stats"]
total_mobs_killed = f"**{comma_seperate(stats['kills'])}**" if "kills" in stats else "Unknown"
kills_stats = {k: v for k, v in stats.items() if k.startswith("kills_")}
sorted_kills = dict(sorted(kills_stats.items(), key=lambda mob: mob[1], reverse=True)[:12])
embed = discord.Embed(title=f"{username}", url=f"https://sky.shiiyu.moe/stats/{username}", colour=0x3498DB)
embed.set_thumbnail(url=f"https://mc-heads.net/head/{username}")
embed.add_field(name=f"Kills Data", value=f"Total Mobs Killed {total_mobs_killed}", inline=False)
for index, (key, value) in enumerate(sorted_kills.items(), 1):
formatted_name = key.removeprefix("kills_").replace('_', ' ').title().replace('Unburried Zombie', 'Crypt Ghoul')
embed.add_field(name=f"#{index} {formatted_name}", value=f":knife: {comma_seperate(value)}", inline=True)
embed.set_footer(text=f"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.")
if is_response:
await ctx.respond(embed=embed)
else:
await ctx.send(embed=embed)
| 52.457627 | 156 | 0.65105 | 398 | 3,095 | 4.899497 | 0.351759 | 0.030769 | 0.038974 | 0.021538 | 0.126667 | 0.054359 | 0.054359 | 0.054359 | 0.054359 | 0 | 0 | 0.00554 | 0.183522 | 3,095 | 58 | 157 | 53.362069 | 0.766126 | 0.065267 | 0 | 0 | 0 | 0.02381 | 0.178386 | 0.029096 | 0 | 0 | 0.002771 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.190476 | 0.02381 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ffdc7e7822b61246aa3c0db4eea642a03dc2a4b | 17,144 | py | Python | integration/run.py | aryavohra/acme | bd9c84d5ac46356794f15d54dbb5fe122cb3b321 | [
"Apache-2.0"
] | null | null | null | integration/run.py | aryavohra/acme | bd9c84d5ac46356794f15d54dbb5fe122cb3b321 | [
"Apache-2.0"
] | null | null | null | integration/run.py | aryavohra/acme | bd9c84d5ac46356794f15d54dbb5fe122cb3b321 | [
"Apache-2.0"
] | null | null | null | """
Actor
- just run continuously, populating the self-play buffer until learner sends a terminate signal
Learner
- continuously sample from the replay buffer until has done sufficient learning steps
- at regular intervals, perform an evaluation
Cache # ? (a hedged sketch of this role appears after SharedStorage below)
- fetch params from the learner every x seconds
SharedStorage
- store the terminate signal
CustomConfig
- holds all the config-related stuff (e.g. print intervals, learning rate)
"""
import ray
import jax
import jax.numpy as jnp
import rlax
import optax
import reverb
import numpy as np
import haiku as hk
import time, datetime
import functools
import gym
from acme import wrappers
import uuid
import pickle
import argparse
import operator
import tree
import acme
from acme import specs
from acme import datasets
from acme.jax import utils
from acme.jax import networks as networks_lib
from acme.agents import replay
from acme.agents.jax import actors
from acme.adders import reverb as adders
from typing import Generic, List, Optional, Sequence, TypeVar
from acme import types
from acme.utils import counting
from acme.utils import loggers
from acme.jax import variable_utils
from custom_variable_utils import RayVariableClient
from acme.agents.jax.dqn import DQNConfig
from acme.agents.jax.dqn.agent import DQNFromConfig
from acme.agents.jax.dqn import learning
import custom_learning_lib
from custom_environment_loop import CustomEnvironmentLoop
from custom_config import RainbowDQNConfig
parser = argparse.ArgumentParser(description='Run integration tests of custom Acme features.')
parser.add_argument('--rainbow_config', help="Enables Rainbow DQN Config with lr=625e-7.", action="store_true")
parser.add_argument('--ram_states', help='Enables training on RAM states instead of images.', action="store_true")
parser.add_argument("--force_cpu", help="Force all workers to use CPU.", action="store_true")
parser.add_argument("--multicore_tpu", help="Enables custom learning_lib with cross-TPU-core training.", action="store_true")
parser.add_argument('--num_actors', type=int, default=1,help='Number of actors to run.')
parser.add_argument('--custom_variable_update', help="Enables custom variable_client with Ray compatibility.", action="store_true")
parser.add_argument('--episode_return_goal', type=float, default=100.0 ,help='Target max return for model test.')
parser.add_argument('--num_log_episodes', type=int, default=200 ,help='Number of episodes to store for plotting.')
parser.add_argument('--total_learning_steps', type=float, default=2e8 ,help='Number of training steps to run.')
parser.add_argument("--enable_checkpointing", help="Learner will checkpoint at preconfigured intervals.", action="store_true")
parser.add_argument("--initial_checkpoint", help="Learner will load from initial checkpoint before training.", action="store_true")
parser.add_argument("--initial_checkpoint_path", type=str, default="initial_checkpoint", help="Initial checkpoint for learner. `initial_checkpoint` must be True.")
def environment_factory(evaluation: bool = False, level: str = 'BreakoutNoFrameskip-v4', ram_states=False):
"""Creates environment."""
if ram_states:
env = gym.make(level, full_action_space=True, obs_type="ram")
else:
env = gym.make(level, full_action_space=True)
max_episode_len = 108_000 if evaluation else 50_000
if ram_states:
return wrappers.wrap_all(env, [
wrappers.GymAtariRAMAdapter,
functools.partial(
wrappers.AtariRAMWrapper,
to_float=True,
max_episode_len=max_episode_len,
# zero_discount_on_life_loss=True,
),
wrappers.SinglePrecisionWrapper,
])
else:
return wrappers.wrap_all(env, [
wrappers.GymAtariAdapter,
functools.partial(
wrappers.AtariWrapper,
to_float=True,
max_episode_len=max_episode_len,
# zero_discount_on_life_loss=True,
),
wrappers.SinglePrecisionWrapper,
])
def network_factory(ram_states, spec):
"""Creates network."""
def network(x):
if ram_states:
model = hk.Sequential([
hk.Flatten(),
hk.nets.MLP([256, 512, 1024, spec.actions.num_values])
])
else:
model = hk.Sequential([
networks_lib.AtariTorso(),
hk.Flatten(),
hk.nets.MLP([50, 50, spec.actions.num_values])
])
return model(x)
# Make network purely functional
network_hk = hk.without_apply_rng(hk.transform(network, apply_rng=True))
dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
network = networks_lib.FeedForwardNetwork(
init=lambda rng: network_hk.init(rng, dummy_obs),
apply=network_hk.apply)
return network
def make_actor(policy_network, random_key, adder = None, variable_source = None, temp_client_key=None):
"""Creates an actor."""
assert variable_source is not None, "make_actor doesn't support None for `variable_source` right now"
variable_client = RayVariableClient(
client=variable_source,
key='',
# variables={'policy': policy_network.variables},
update_period=100,
temp_client_key=temp_client_key
)
variable_client.update_and_wait()
actor = actors.FeedForwardActor(
policy=policy_network,
random_key=random_key,
variable_client=variable_client, # need to write a custom wrapper around learner so it calls .remote
adder=adder)
return actor
def make_adder(reverb_client):
"""Creates a reverb adder."""
return adders.NStepTransitionAdder(reverb_client, config.n_step, config.discount)
def make_learner(network, optimizer, data_iterator, reverb_client, random_key, logger=None, checkpoint=None, custom=False):
# TODO: add a sexy logger here
source = custom_learning_lib if custom else learning
learner = source.DQNLearner(
network=network,
random_key=random_key,
optimizer=optimizer,
discount=config.discount,
importance_sampling_exponent=config.importance_sampling_exponent,
target_update_period=config.target_update_period,
iterator=data_iterator,
replay_client=reverb_client,
logger=logger
)
return learner
def make_optimizer():
optimizer = optax.chain(
optax.clip_by_global_norm(config.max_gradient_norm),
optax.adam(config.learning_rate),
)
return optimizer
class ActorLogger():
def __init__(self, interval=1, disable_printing=False):
self.data = []
self.counter = 0
self.interval = interval
self.disable_printing = disable_printing
if self.disable_printing: print("actor logger printing temporarily disabled")
def write(self, s):
self.data.append(s)
if self.counter % self.interval == 0:
if not self.disable_printing: print(s)
self.counter += 1
@ray.remote
class SharedStorage():
"""
    Class which runs in a dedicated thread to store the network weights and some information.
"""
def __init__(self):
self.current_checkpoint = {}
def get_info(self, keys):
if isinstance(keys, str):
return self.current_checkpoint[keys]
elif isinstance(keys, list):
return {key: self.current_checkpoint[key] for key in keys}
else:
raise TypeError
def set_info(self, keys, values=None):
if isinstance(keys, str) and values is not None:
self.current_checkpoint[keys] = values
elif isinstance(keys, dict):
self.current_checkpoint.update(keys)
else:
raise TypeError
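
# Hedged sketch (an assumption, not part of the original file): the "Cache" role
# named in the module docstring -- an actor that periodically pulls parameters
# from the learner so many actors can read from it instead of hitting the learner
# directly. The class and method names here are illustrative only.
@ray.remote
class ParameterCacheSketch():
    def __init__(self, learner, refresh_secs=10.0):
        self._learner = learner  # Ray handle to a LearnerRay actor
        self._refresh_secs = refresh_secs
        self._params = ray.get(learner.get_variables.remote(""))

    def refresh_forever(self):
        # Poll the learner at a fixed interval; actors would then call
        # get_variables() on this cache via their variable client.
        while True:
            self._params = ray.get(self._learner.get_variables.remote(""))
            time.sleep(self._refresh_secs)

    def get_variables(self, names):
        # `names` is accepted for interface compatibility and ignored here.
        return self._params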
@ray.remote(num_cpus=1)
class ActorRay():
"""Glorified wrapper for environment loop."""
def __init__(self, reverb_address, variable_source, shared_storage, id=None, verbose=False, ram_states=False, spec=None):
self._verbose = verbose
self._id = str(id) or uuid.uuid1()
self._shared_storage = shared_storage
self._client = reverb.Client(reverb_address)
print("A - flag 0.5")
network = network_factory(ram_states, spec)
def policy(params: networks_lib.Params, key: jnp.ndarray,
observation: jnp.ndarray) -> jnp.ndarray:
action_values = network.apply(params, observation) # how will this work when they're on different devices?
return rlax.epsilon_greedy(config.epsilon).sample(key, action_values)
# print("A - flag 1")
# todo: make this proper splitting and everything
random_key=jax.random.PRNGKey(1701)
self._actor = make_actor(
policy,
random_key,
adder=make_adder(self._client),
variable_source=variable_source,
temp_client_key=self._id
)
print("A - flag 2")
        self._environment = environment_factory(ram_states=ram_states)
self._counter = counting.Counter() # prefix='actor'
self._logger = ActorLogger(
# interval=10, # log every 10 steps
# disable_printing=(type(id) == int and (id % 4 == 0)) # only get every 4th actor to print shit
) # TODO: use config for `interval` arg
self._env_loop = CustomEnvironmentLoop(
self._environment,
self._actor,
counter=self._counter,
logger=self._logger,
should_update=True
)
print("A - flag 3")
# TODO: migrate all print statements to the logger
# or should i? logger is for the environment loop
if self._verbose: print(f"Actor {self._id}: instantiated on {jnp.ones(3).device_buffer.device()}.")
def ready(self):
return True
def run(self):
if self._verbose: print(f"Actor {self._id}: beginning training.")
steps=0
result = self._env_loop.run_episode()
while result["episode_return"] < args.episode_return_goal and not ray.get(self._shared_storage.get_info.remote("terminate")):
#result["counts"] < args.total_learning_steps and \
result.update({
"id": self._id
})
self._logger.write(result)
steps += result['episode_length']
result = self._env_loop.run_episode()
#counts = result["counts"]
print("******************************************")
print("***** TEST COMPLETE *****")
print("******************************************")
print(f"Single-actor test reached episode_return_goal of {args.episode_return_goal}!")
#print(f"Took {counts} learner steps.")
print(f"Took {steps} self-play transitions.")
@ray.remote # max_concurrency=1 + N(cacher nodes)
class LearnerRay():
def __init__(self, reverb_address, shared_storage, enable_checkpointing=False, verbose=False, ram_states=False, spec=None):
self._verbose = verbose
self._enable_checkpointing = enable_checkpointing
self._shared_storage = shared_storage
self._client = reverb.Client(reverb_address)
print("L - flag 0.5")
data_iterator = datasets.make_reverb_dataset(
table="priority_table",
server_address=reverb_address,
batch_size=config.batch_size,
prefetch_size=4,
).as_numpy_iterator()
print("L - flag 1")
# todo: sort out the key
# disabled the logger because it's not toooo useful
# self._logger = ActorLogger()
random_key = jax.random.PRNGKey(1701)
self._learner = make_learner(
network_factory(ram_states, spec),
make_optimizer(),
data_iterator,
self._client,
random_key,
# logger=self._logger
)
print("L - flag 2")
print("devices:", jax.devices())
if self._verbose: print(f"Learner: instantiated on {jnp.ones(3).device_buffer.device()}.")
@staticmethod
def _calculate_num_learner_steps(num_observations: int, min_observations: int, observations_per_step: float) -> int:
"""Calculates the number of learner steps to do at step=num_observations."""
n = num_observations - min_observations
if observations_per_step > 1:
# One batch every 1/obs_per_step observations, otherwise zero.
return int(n % int(observations_per_step) == 0)
else:
# Always return 1/obs_per_step batches every observation.
return int(1 / observations_per_step)
def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]:
"""This has to be called by a wrapper which uses the .remote postfix."""
return self._learner.get_variables(names)
def save_checkpoint(self, path):
weights_to_save = self._learner.get_variables("")
# path = "/home/aryavohra/temp/acme/refactor_test/checkpoint"
# path = "checkpoint"
# todo: checkpoint_directory
with open(path, 'wb') as f:
pickle.dump(weights_to_save, f)
if self._verbose: print("Learner: checkpoint saved successfully.")
return True # todo: can we remove this?
def load_checkpoint(self, path):
with open(path, 'rb') as f:
weights = pickle.load(f)
self._learner.restore_from_single_weights(weights)
if self._verbose: print("Learner: checkpoint restored successfully.")
# once we've loaded the weights, wtf do we do with them
def run(self, total_learning_steps: int = 2e8):
if self._verbose: print("Learner: starting training.")
while self._client.server_info()["priority_table"].current_size < max(config.batch_size, config.min_replay_size):
time.sleep(0.1)
observations_per_step = config.batch_size / config.samples_per_insert
steps_completed = 0
# TODO: migrate to the learner internal counter instance
while steps_completed < total_learning_steps:
steps = self._calculate_num_learner_steps(
num_observations=self._client.server_info()["priority_table"].current_size,
min_observations=max(config.batch_size, config.min_replay_size),
observations_per_step=observations_per_step
)
for _ in range(steps):
self._learner.step()
steps_completed += 1
if self._enable_checkpointing and (steps_completed % config.checkpoint_interval == 0):
self.save_checkpoint(f"checkpoint-{steps_completed}.pickle")
# todo: add evaluation
# perhaps make a coordinator which runs learner for x steps, then calls an eval actor?
# if steps_completed % config.eval_interval == 0:
# pass
if self._verbose: print(f"Learner complete at {steps_completed}. Terminating actors.")
self._shared_storage.set_info.remote({
"terminate": True
})
if __name__ == '__main__':
ray.init(address="auto")
args = parser.parse_args()
if args.force_cpu: jax.config.update('jax_platform_name', "cpu")
config = RainbowDQNConfig() if args.rainbow_config else DQNConfig()
    spec = specs.make_environment_spec(environment_factory(ram_states=args.ram_states))
storage = SharedStorage.remote()
storage.set_info.remote({
"terminate": False
})
reverb_replay = replay.make_reverb_prioritized_nstep_replay(
environment_spec=spec,
n_step=config.n_step,
batch_size=config.batch_size,
max_replay_size=config.max_replay_size,
min_replay_size=config.min_replay_size,
priority_exponent=config.priority_exponent,
discount=config.discount,
)
if args.num_actors == 1:
# bone stock Acme DQN
network = network_factory(args.ram_states, spec)
        environment = environment_factory(ram_states=args.ram_states)
agent = DQNFromConfig(
environment_spec=spec,
network=network,
config=config
)
counter = counting.Counter()
logger = ActorLogger(
# interval=10, # log every 10 steps
# disable_printing=(type(id) == int and (id % 4 == 0)) # only get every 4th actor to print shit
)
loop = CustomEnvironmentLoop(
environment,
agent,
counter=counter,
logger=logger,
should_update=True
)
print("Single-actor test beginning training.")
steps=0
result = loop.run_episode()
while result["episode_return"] < args.episode_return_goal:# and result["counts"] < args.total_learning_steps:
logger.write(result)
steps += result['episode_length']
result = loop.run_episode()
#counts = result["counts"]
print("******************************************")
print("***** TEST COMPLETE *****")
print("******************************************")
print(f"Single-actor test reached episode_return_goal of {args.episode_return_goal}!")
#print(f"Took {counts} learner steps.")
print(f"Took {steps} self-play transitions.")
else:
# custom Ray Actor and Learner
learner = LearnerRay.options(max_concurrency=2).remote(
"localhost:8000",
storage,
enable_checkpointing=args.enable_checkpointing,
verbose=True
)
# important to force the learner onto TPU
ray.get(learner.get_variables.remote(""))
# load the initial checkpoint if relevant
if args.initial_checkpoint:
ray.get(learner.load_checkpoint.remote(args.initial_checkpoint_path))
actors = [ActorRay.remote(
"localhost:8000",
learner,
storage,
verbose=True,
id=i
) for i in range(args.num_actors)] # 50
[a.run.remote() for a in actors]
# actor.run.remote()
# learner.run.remote(total_learning_steps=200)
learner.run.remote(total_learning_steps=args.total_learning_steps)
while not ray.get(storage.get_info.remote("terminate")):
time.sleep(1)
| 32.469697 | 163 | 0.698203 | 2,191 | 17,144 | 5.256047 | 0.201734 | 0.01042 | 0.017714 | 0.012765 | 0.267714 | 0.21049 | 0.158388 | 0.148489 | 0.099687 | 0.099687 | 0 | 0.008297 | 0.191554 | 17,144 | 527 | 164 | 32.531309 | 0.822583 | 0.154223 | 0 | 0.235127 | 0 | 0 | 0.150778 | 0.032449 | 0 | 0 | 0 | 0.003795 | 0.002833 | 1 | 0.062323 | false | 0 | 0.107649 | 0.002833 | 0.226629 | 0.082153 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba006a7d739748f078a0267ea74b3bdbffc54922 | 1,752 | py | Python | 2021-11-24-pytest/test_caesar_cipher.py | Anastasiia-Grishina/simula-tools-meetup | 2a1d661e818fb31750ced15170797d6ad47c7996 | [
"Unlicense"
] | null | null | null | 2021-11-24-pytest/test_caesar_cipher.py | Anastasiia-Grishina/simula-tools-meetup | 2a1d661e818fb31750ced15170797d6ad47c7996 | [
"Unlicense"
] | null | null | null | 2021-11-24-pytest/test_caesar_cipher.py | Anastasiia-Grishina/simula-tools-meetup | 2a1d661e818fb31750ced15170797d6ad47c7996 | [
"Unlicense"
] | 2 | 2021-08-30T12:38:40.000Z | 2021-11-05T14:14:59.000Z | # https://cryptii.com/pipes/caesar-cipher
import pytest
import caesar_cipher
@pytest.mark.parametrize(
"msg, shift, expected_output", [("hello", 1, "ifmmp"), ("welcome", 7, "dlsjvtl")]
)
def test_encrypt(msg, shift, expected_output):
assert caesar_cipher.encrypt(msg, shift=shift) == expected_output
@pytest.mark.parametrize(
"encrypted_msg, shift, expected_output",
[("ifmmp", 1, "hello"), ("dlsjvtl", 7, "welcome")],
)
def test_decrypt(encrypted_msg, shift, expected_output):
assert caesar_cipher.decrypt(encrypted_msg, shift=shift) == expected_output
@pytest.mark.parametrize(
"msg, shift",
[
("programming", 3),
("math", 15),
("physics", -18),
("Hei", 6),
],
)
def test_encrypt_decrypt_yields_same_result(msg, shift):
encrypted_message = caesar_cipher.encrypt(message=msg, shift=shift)
decrypted_message = caesar_cipher.decrypt(
encrypted_message=encrypted_message, shift=shift
)
assert decrypted_message == msg.lower()
def test_encrypt_raises_TypeError_on_int_input():
with pytest.raises(TypeError):
caesar_cipher.encrypt(1910, 4)
@pytest.mark.parametrize(
"letter, new_letter, shift",
[
("a", "b", 1),
("m", "n", 1),
("z", "a", 1),
("a", "f", 5),
("m", "r", 5),
("z", "e", 5),
("a", "a", 26),
("m", "m", 26),
("z", "z", 26),
],
)
def test_create_shifted_alphabet(letter, new_letter, shift):
new_letters = caesar_cipher.create_shifted_alphabet(shift)
assert new_letters[letter] == new_letter
def test_rotate_string():
assert caesar_cipher.rotate_string("hello", 1) == "elloh"
assert caesar_cipher.rotate_string("hello", 2) == "llohe"
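
# --- Hedged sketch: the caesar_cipher module under test is not included in this
# --- file. A minimal implementation consistent with the assertions above might
# --- look like the following (names and behavior inferred from the tests):
import string

def create_shifted_alphabet(shift):
    """Map each lowercase letter to the letter `shift` positions later, wrapping at 'z'."""
    letters = string.ascii_lowercase
    return {letter: letters[(i + shift) % 26] for i, letter in enumerate(letters)}

def rotate_string(s, n):
    """Rotate a string left by n characters, e.g. rotate_string("hello", 1) == "elloh"."""
    n = n % len(s)
    return s[n:] + s[:n]

def encrypt(message, shift):
    """Shift-encrypt a lowercased message; raises TypeError for non-string input."""
    if not isinstance(message, str):
        raise TypeError("message must be a string")
    table = create_shifted_alphabet(shift)
    return "".join(table.get(ch, ch) for ch in message.lower())

def decrypt(encrypted_message, shift):
    """Decrypt by shifting in the opposite direction."""
    return encrypt(encrypted_message, -shift)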
| 26.545455 | 85 | 0.631849 | 213 | 1,752 | 4.962441 | 0.323944 | 0.113529 | 0.107852 | 0.083254 | 0.305582 | 0.232734 | 0.166509 | 0.090823 | 0 | 0 | 0 | 0.020729 | 0.201484 | 1,752 | 65 | 86 | 26.953846 | 0.734811 | 0.02226 | 0 | 0.117647 | 0 | 0 | 0.122735 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.117647 | false | 0 | 0.039216 | 0 | 0.156863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba04b160fcc3818b62a47ac901274bdc57870fc3 | 3,228 | py | Python | tractosplit/models/LSTM/lstm_classifier.py | Aleph-GORY/tractosplit | d20902e0fbd618bd1371cd69f28a598a5416a7a0 | [
"Apache-2.0"
] | null | null | null | tractosplit/models/LSTM/lstm_classifier.py | Aleph-GORY/tractosplit | d20902e0fbd618bd1371cd69f28a598a5416a7a0 | [
"Apache-2.0"
] | null | null | null | tractosplit/models/LSTM/lstm_classifier.py | Aleph-GORY/tractosplit | d20902e0fbd618bd1371cd69f28a598a5416a7a0 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import matplotlib.pyplot as plt
import tractosplit.utils.constants as constants
from tractosplit.models.generators import SL_generator
def plot_graphs(history, metric):
plt.plot(history.history[metric])
plt.plot(history.history["val_" + metric], "")
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, "val_" + metric])
# Small recurrent model
class lstmClassifier(tf.keras.Model):
_emb_size = 32
_rnn_size = 32
_int_size = 32
_s_batch = 12
_epochs = 25
def __init__(self):
super(lstmClassifier, self).__init__(name="lstm_classifier")
nclasses = constants.clusters["size"] + 1
self.embedding = tf.keras.layers.Dense(self._emb_size)
self.lstm = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self._rnn_size))
self.dropout1 = tf.keras.layers.Dropout(0.1)
self.dropout2 = tf.keras.layers.Dropout(0.1)
self.dense = tf.keras.layers.Dense(self._int_size, activation="relu")
self.final = tf.keras.layers.Dense(nclasses)
def call(self, inputs):
x = self.embedding(inputs)
x = self.dropout1(x)
x = self.lstm(x)
x = self.dropout2(x)
x = self.dense(x)
x = self.final(x)
return x
def train(self, train_subjects, test_subjects, train_id):
self.compile(
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=["accuracy"],
)
# Checkpoint
checkpoint_dir = constants.lstm_path + train_id
checkpoint_prefix = checkpoint_dir + constants.lstm_prefix
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self)
print("[INFO] Used for training:", train_subjects)
print("[INFO] Used for testing:", test_subjects)
# Training
training_generator = SL_generator(train_subjects)
validation_generator = SL_generator(test_subjects, batchsize=10000)
history = self.fit(
training_generator,
epochs=self._epochs,
validation_data=validation_generator,
)
checkpoint.save(file_prefix=checkpoint_prefix)
# Plots
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
plot_graphs(history, "accuracy")
plt.ylim(None, 1)
plt.subplot(1, 2, 2)
plot_graphs(history, "loss")
plt.ylim(0, None)
plt.savefig(constants.train_report_path + train_id + "accuracy_loss.png")
def restore(self, train_id):
self.compile(
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=["accuracy"],
)
# Checkpoint
checkpoint_dir = constants.lstm_path + train_id
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self)
# Load parameters saved in previous trainings
print("[INFO] Restoring lstm model:", train_id)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
status.assert_existing_objects_matched()
print("[INFO] Restored correctly")
| 36.681818 | 87 | 0.649938 | 386 | 3,228 | 5.256477 | 0.326425 | 0.0414 | 0.04485 | 0.026614 | 0.304091 | 0.282405 | 0.252341 | 0.226713 | 0.226713 | 0.17447 | 0 | 0.015886 | 0.239467 | 3,228 | 87 | 88 | 37.103448 | 0.810591 | 0.031599 | 0 | 0.166667 | 0 | 0 | 0.058993 | 0 | 0 | 0 | 0 | 0 | 0.013889 | 1 | 0.069444 | false | 0 | 0.055556 | 0 | 0.222222 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba05421c51a743c2a09b812bb6ea669fe17fefe2 | 2,201 | py | Python | testjs.py | rodolfomiranda/aries-cloudagent-python | 58071a82550c3c4852dc41f703d0a85649a673e4 | [
"Apache-2.0"
] | 1 | 2022-03-23T18:17:16.000Z | 2022-03-23T18:17:16.000Z | testjs.py | rodolfomiranda/aries-cloudagent-python | 58071a82550c3c4852dc41f703d0a85649a673e4 | [
"Apache-2.0"
] | null | null | null | testjs.py | rodolfomiranda/aries-cloudagent-python | 58071a82550c3c4852dc41f703d0a85649a673e4 | [
"Apache-2.0"
] | null | null | null | from ctypes import util
from email import utils
import codecs
import ecdsa
import subprocess
import os
import base64
import json
seed = os.urandom(ecdsa.SECP256k1.baselen)
secexp = ecdsa.util.randrange_from_seed__trytryagain(seed,ecdsa.SECP256k1.order)
sk = ecdsa.SigningKey.from_secret_exponent(secexp, curve=ecdsa.SECP256k1)
#d in base 64 (43 bytes)
d = codecs.encode(codecs.decode(sk.to_string().hex(), 'hex'), 'base64').decode()[:43]
vk = sk.get_verifying_key()
# x and y coordinates in base 64 (43 bytes)
x = codecs.encode(codecs.decode(vk.to_string().hex()[:64], 'hex'), 'base64').decode()[:43]
y = codecs.encode(codecs.decode(vk.to_string().hex()[64:], 'hex'), 'base64').decode()[:43]
####### TEST SIGN/VERIFY
# secret2 = sk.to_string().hex()
# sk2 = ecdsa.SigningKey.from_string(bytes.fromhex(secret2), curve=ecdsa.SECP256k1)
# sig = sk.sign(b"pepe")
# verkey2 = vk.to_string().hex()
# vk2 = ecdsa.VerifyingKey.from_string(bytes.fromhex(verkey2), curve=ecdsa.SECP256k1)
# print(vk.verify(sig, b"pepse"))
# key in a JWK format style
keyJWK = {
"publicJwk": {
"kty": 'EC',
"crv": 'secp256k1',
"x": codecs.encode(codecs.decode(vk.to_string().hex()[:64], 'hex'), 'base64').decode()[:43],
"y": codecs.encode(codecs.decode(vk.to_string().hex()[64:], 'hex'), 'base64').decode()[:43]
},
"privateJwk": {
"kty": 'EC',
"crv": 'secp256k1',
"d": codecs.encode(codecs.decode(sk.to_string().hex(), 'hex'), 'base64').decode()[:43],
"x": codecs.encode(codecs.decode(vk.to_string().hex()[:64], 'hex'), 'base64').decode()[:43],
"y": codecs.encode(codecs.decode(vk.to_string().hex()[64:], 'hex'), 'base64').decode()[:43]
}
}
# create a W3C DID Document
diddoc = {
"services": [
{
"id": 'domain-1',
"type": 'LinkedDomains',
"serviceEndpoint": 'https://foo.example.com'
}
]
}
diddocbase64 = base64.encodebytes(json.dumps(diddoc).encode())
# call ION create.js
did = subprocess.check_output(["node", "./aries_cloudagent/wallet/sidetree-cardano/create.js", x, y, diddocbase64]).decode('utf-8')
print(did)
| 31.898551 | 131 | 0.657428 | 302 | 2,201 | 4.698676 | 0.34106 | 0.056378 | 0.077519 | 0.135307 | 0.339676 | 0.318534 | 0.318534 | 0.318534 | 0.318534 | 0.318534 | 0 | 0.053411 | 0.140845 | 2,201 | 68 | 132 | 32.367647 | 0.696986 | 0.199 | 0 | 0.186047 | 0 | 0 | 0.151463 | 0.029834 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.209302 | 0 | 0.209302 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba05fe49a47d431fcdc95ab1f5dfbe49163bcab4 | 1,660 | py | Python | 9_Dimensionality_reduction_and_metric_learning/Code/kNN/Project_1(Sklearn)/kNN.py | jaheel/Machine-Learning-Method_Code | 6b2766a72ab9f4814d6f9e69080dc39e23a0000d | [
"MIT"
] | 2 | 2021-10-12T01:50:03.000Z | 2021-10-12T12:15:23.000Z | 9_Dimensionality_reduction_and_metric_learning/Code/kNN/Project_1(Sklearn)/kNN.py | jaheel/Machine-Learning-Method_Code | 6b2766a72ab9f4814d6f9e69080dc39e23a0000d | [
"MIT"
] | null | null | null | 9_Dimensionality_reduction_and_metric_learning/Code/kNN/Project_1(Sklearn)/kNN.py | jaheel/Machine-Learning-Method_Code | 6b2766a72ab9f4814d6f9e69080dc39e23a0000d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
# Assign column names to the dataset
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']
# Read dataset to pandas dataframe
dataset = pd.read_csv(url, names=names)
# Show the top 5 rows of dataset
# print(dataset.head())
# Preprocessing
# X : the first four columns of the dataset
# y : the labels
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# Train Test Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
# Feature scaling (kNN is distance-based, so features should share a common scale)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# classifier=KNeighborsClassifier(n_neighbors=5)
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# print(confusion_matrix(y_test, y_pred))
# print(classification_report(y_test, y_pred))
error=[]
for i in range(1,40):
knn=KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error.append(np.mean(pred_i != y_test))
plt.figure(figsize=(12,6))
plt.plot(range(1,40), error, color='red', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=10)
plt.title('Error Rate K Value')
plt.xlabel('K Value')
plt.ylabel('Mean Error')
plt.show() | 27.666667 | 112 | 0.753012 | 256 | 1,660 | 4.742188 | 0.441406 | 0.029654 | 0.034596 | 0.024712 | 0.024712 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01232 | 0.11988 | 1,660 | 60 | 113 | 27.666667 | 0.818617 | 0.274699 | 0 | 0 | 0 | 0.034483 | 0.144538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.241379 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba06a39cd57c07c463507d73f594a59a6dd51375 | 4,258 | py | Python | demo/inference_demo1.py | MY-Swich/SOLO | 1850ac37376e6ca1162741cff4b226ced321ea16 | [
"BSD-2-Clause"
] | null | null | null | demo/inference_demo1.py | MY-Swich/SOLO | 1850ac37376e6ca1162741cff4b226ced321ea16 | [
"BSD-2-Clause"
] | null | null | null | demo/inference_demo1.py | MY-Swich/SOLO | 1850ac37376e6ca1162741cff4b226ced321ea16 | [
"BSD-2-Clause"
] | null | null | null | from mmdet.apis import init_detector, inference_detector
import mmcv
import cv2
import numpy as np
from scipy import ndimage
config_file = '../configs/solov2/solov2_r50_fpn_8gpu_1x___.py'
# download the checkpoint from model zoo and put it in `checkpoints/`
checkpoint_file = '../work_dirs/solov2_12/epoch_12.pth'
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image
img = 'demo.jpg'
result = inference_detector(model, img)
def show_image_demo(img,
result,
class_names,
score_thr=0.3,
sort_by_density=False,
out_file=None):
"""Visualize the instance segmentation results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The instance segmentation result.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the masks.
sort_by_density (bool): sort the masks by their density.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
Returns:
np.ndarray or None: If neither `show` nor `out_file` is specified, the
visualized image is returned, otherwise None is returned.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
img_show = img.copy()
    h, w, _ = img.shape  # image height and width; _ discards the channel dim
    cur_result = result[0]  # result for the first (only) image
    seg_label = cur_result[0]  # result[0][0]: the predicted masks
    seg_label = seg_label.cpu().numpy().astype(np.uint8)  # convert to a numpy array
    cate_label = cur_result[1]  # result[0][1]: the category labels
    cate_label = cate_label.cpu().numpy()  # convert to a numpy array
    score = cur_result[2].cpu().numpy()  # result[0][2]: the confidence scores
    vis_inds = score > score_thr  # boolean mask of detections above the threshold
    seg_label = seg_label[vis_inds]  # keep only the masks that pass the threshold
    num_mask = seg_label.shape[0]  # number of remaining masks
    cate_label = cate_label[vis_inds]  # categories to display
    cate_score = score[vis_inds]  # scores of the displayed categories
    if sort_by_density:  # sort the masks by their pixel density
mask_density = []
for idx in range(num_mask):
cur_mask = seg_label[idx, :, :]
cur_mask = mmcv.imresize(cur_mask, (w, h))
cur_mask = (cur_mask > 0.5).astype(np.int32)
mask_density.append(cur_mask.sum())
orders = np.argsort(mask_density)
seg_label = seg_label[orders]
cate_label = cate_label[orders]
cate_score = cate_score[orders]
np.random.seed(42)
    # generate a distinct random color for each mask
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(num_mask)
]
for idx in range(num_mask):
        idx = -(idx + 1)  # negative index: iterate masks from the end of the (sorted) list
        cur_mask = seg_label[idx, :, :]  # select one mask
        cur_mask = mmcv.imresize(cur_mask, (w, h))
        cur_mask = (cur_mask > 0.5).astype(np.uint8)  # convert boolean values to 0/1 uint8
if cur_mask.sum() == 0:
continue
        color_mask = color_masks[idx][0]  # pick this mask's color
# cur_mask_bool = cur_mask.astype(np.bool)
# img_show[cur_mask_bool] = img[cur_mask_bool] * 0.5 + color_mask * 0.5
cur_cate = cate_label[idx]
cur_score = cate_score[idx]
b_boxs = np.argwhere(cur_mask == 1).T
y, x = b_boxs
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
cv2.rectangle(img_show, (xmin, ymin), (xmax, ymax), (color_mask[0].item(), color_mask[1].item(), color_mask[2].item()), 2)
label_text = class_names[cur_cate]
# label_text += '|{:.02f}'.format(cur_score)
center_y, center_x = ndimage.measurements.center_of_mass(cur_mask)
        vis_pos = (max(int(center_x) - 10, 0), int(center_y))  # position for the label text
        cv2.putText(img_show, label_text, vis_pos,
                    cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 255, 255))  # draw the class name on the image
if out_file is None:
return img
else:
mmcv.imwrite(img_show, out_file)
show_image_demo(img, result, model.CLASSES, score_thr=0.25, out_file="demo_out2.jpg") | 38.36036 | 130 | 0.634101 | 612 | 4,258 | 4.187909 | 0.318627 | 0.049161 | 0.015217 | 0.018728 | 0.087398 | 0.056184 | 0.040577 | 0.040577 | 0.040577 | 0.040577 | 0 | 0.025754 | 0.252231 | 4,258 | 111 | 131 | 38.36036 | 0.779209 | 0.297558 | 0 | 0.083333 | 0 | 0 | 0.037088 | 0.027816 | 0 | 0 | 0 | 0 | 0.013889 | 1 | 0.013889 | false | 0 | 0.069444 | 0 | 0.097222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba06be93e0c241acc2e0424d1b330846e207a2b5 | 2,783 | py | Python | src/data/clean_dataframe.py | TSGreen/newspaper-content-nlp-project | c1d6ac5ce6296a06a94a8fc6a947b4d2d4fe7ea6 | [
"MIT"
] | null | null | null | src/data/clean_dataframe.py | TSGreen/newspaper-content-nlp-project | c1d6ac5ce6296a06a94a8fc6a947b4d2d4fe7ea6 | [
"MIT"
] | null | null | null | src/data/clean_dataframe.py | TSGreen/newspaper-content-nlp-project | c1d6ac5ce6296a06a94a8fc6a947b4d2d4fe7ea6 | [
"MIT"
] | null | null | null |
"""
Opens and cleans the raw dataframe file, removing unnecessary fields and
setting the datatypes.
@author: tim
"""
import pandas as pd
from pathlib import Path
year = '2020'
filename = Path.cwd().parent.parent.joinpath('data',
'interim',
'full_dataframe_'+year+'.csv')
def open_csvfile(filename):
if filename.exists():
print(f'\nOpening file {filename} ...\n')
df = pd.read_csv(filename)
print(f'\nFile opened.\n')
else:
raise NameError(f'Could not find file "{filename}".')
return df
df = open_csvfile(filename)
def trim_df(dataframe, columns_keep):
'''
Take a large dataframe and return the columns given in the list
provided.
Parameters
----------
dataframe : dataframe
The dataframe to be reduced in size
columns_keep : list
List of column names to be kept in dataframe
Returns
-------
dataframe of columns specified
'''
print(f'\nTrimming dataframe..')
return dataframe[columns_keep]
col_keep = ['type', 'sectionName', 'webPublicationDate', 'webTitle',
'pillarName', 'headline', 'byline', 'webUrl', 'bodyText',
'wordcount', 'publication', 'charCount', 'productionOffice']
df = trim_df(df, col_keep)
def change_datatypes(dataframe, datatypes):
"""
Change the datatypes in a dataframe and check the number of null values
    before and after, flagging any inconsistency.
Parameters
----------
dataframe : dataframe
Dataframe to be acted on.
datatypes : dict
Dictionary of datatypes and associated columns.
Returns
-------
Dataframe with changed datatypes.
"""
print('\nChanging data types..')
pre_nans = dataframe.isnull().sum()
dataframe = dataframe.astype(datatypes)
if dataframe.isnull().sum().equals(pre_nans):
pass
else:
print('The number of nan values has increased, check data type conversion')
print('Changing data types complete.\n')
return dataframe
datatypes = {'charCount': 'int32',
'wordcount': 'int32',
'productionOffice': 'category',
'pillarName': 'category',
'type': 'category',
'publication': 'category',
'sectionName': 'category'}
df = change_datatypes(df, datatypes)
df['webPublicationDate'] = pd.to_datetime(df['webPublicationDate'])
save_filename = Path.cwd().parent.parent.joinpath('data',
'interim',
'cleaned_'+year+'.csv')
print(f'\nSaving file: {save_filename} ...')
df.to_csv(save_filename)
print(f'Saving file complete.\n')
| 26.504762 | 83 | 0.598994 | 295 | 2,783 | 5.576271 | 0.427119 | 0.018237 | 0.018237 | 0.025532 | 0.055927 | 0.055927 | 0.055927 | 0.055927 | 0 | 0 | 0 | 0.003992 | 0.279914 | 2,783 | 104 | 84 | 26.759615 | 0.816866 | 0.250449 | 0 | 0.085106 | 0 | 0 | 0.313486 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0.021277 | 0.042553 | 0 | 0.170213 | 0.170213 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0881ebcb1431973e05a78e1320511946f80cbd | 834 | py | Python | shellish/command/contrib/commands.py | mayfield/shellish | df0f0e4612d138c34d8cb99b66ab5b8e47f1414a | [
"MIT"
] | 4 | 2015-10-06T23:50:20.000Z | 2021-06-11T19:20:43.000Z | shellish/command/contrib/commands.py | mayfield/shellish | df0f0e4612d138c34d8cb99b66ab5b8e47f1414a | [
"MIT"
] | null | null | null | shellish/command/contrib/commands.py | mayfield/shellish | df0f0e4612d138c34d8cb99b66ab5b8e47f1414a | [
"MIT"
] | null | null | null | """
Command tree.
"""
import collections
from .. import command
from ... import layout
class Commands(command.Command):
""" Show a command tree. """
name = 'commands'
use_pager = True
def setup_args(self, parser):
pass
def command_choices(self, prefix, args):
return frozenset(x for x in self.parent.subcommands
if x.startswith(prefix))
def run(self, args):
root = self.parent
tree = self.walkinto(root)
layout.treeprint({root.name: tree})
def walkinto(self, command):
tree = collections.OrderedDict()
if not command.subcommands:
return command.title or command.name
else:
for key, cmd in command.subcommands.items():
tree[key] = self.walkinto(cmd)
return tree
| 23.166667 | 59 | 0.595923 | 96 | 834 | 5.145833 | 0.447917 | 0.066802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.302158 | 834 | 35 | 60 | 23.828571 | 0.848797 | 0.041966 | 0 | 0 | 0 | 0 | 0.010191 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0.043478 | 0.130435 | 0.043478 | 0.565217 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0aaf9e7b6cbea8d7049913cec04817b26efe9b | 22,943 | py | Python | package/tests/test_PartSeg/test_common_gui.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 15 | 2020-03-21T03:27:56.000Z | 2022-03-21T07:46:39.000Z | package/tests/test_PartSeg/test_common_gui.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 479 | 2019-10-27T22:57:22.000Z | 2022-03-30T12:48:14.000Z | package/tests/test_PartSeg/test_common_gui.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 5 | 2020-02-05T14:25:02.000Z | 2021-12-21T03:44:52.000Z | # pylint: disable=R0201
import os
import platform
import sys
from enum import Enum
from pathlib import Path
from unittest.mock import MagicMock
import numpy as np
import pytest
import qtpy
from qtpy.QtCore import QSize, Qt
from qtpy.QtWidgets import QFileDialog, QMainWindow, QWidget
from PartSeg.common_gui import select_multiple_files
from PartSeg.common_gui.custom_load_dialog import CustomLoadDialog, IOMethodMock, LoadProperty, PLoadDialog
from PartSeg.common_gui.custom_save_dialog import CustomSaveDialog, FormDialog, PSaveDialog
from PartSeg.common_gui.equal_column_layout import EqualColumnLayout
from PartSeg.common_gui.main_window import OPEN_DIRECTORY, OPEN_FILE, OPEN_FILE_FILTER, BaseMainWindow
from PartSeg.common_gui.multiple_file_widget import LoadRecentFiles, MultipleFileWidget, MultipleLoadDialog
from PartSeg.common_gui.qt_modal import QtPopup
from PartSeg.common_gui.searchable_combo_box import SearchComboBox
from PartSeg.common_gui.universal_gui_part import EnumComboBox
from PartSegCore.algorithm_describe_base import AlgorithmProperty, Register
from PartSegCore.analysis.calculation_plan import MaskSuffix
from PartSegCore.analysis.load_functions import LoadProject, LoadStackImage, load_dict
from PartSegCore.analysis.save_functions import SaveAsTiff, SaveProject, save_dict
from PartSegCore.io_utils import SaveBase
from PartSegImage import Image, ImageWriter
pyside_skip = pytest.mark.skipif(qtpy.API_NAME == "PySide2" and platform.system() == "Linux", reason="PySide2 problem")
IS_MACOS = sys.platform == "darwin"
class Enum1(Enum):
test1 = 1
test2 = 2
test3 = 3
class Enum2(Enum):
test1 = 1
test2 = 2
test3 = 3
test4 = 4
def __str__(self):
return self.name
@pytest.mark.filterwarnings("ignore:EnumComboBox is deprecated")
class TestEnumComboBox:
def test_enum1(self, qtbot):
widget = EnumComboBox(Enum1)
qtbot.addWidget(widget)
assert widget.count() == 3
assert widget.currentText() == "Enum1.test1"
with qtbot.waitSignal(widget.current_choose):
widget.set_value(Enum1.test2)
def test_enum2(self, qtbot):
widget = EnumComboBox(Enum2)
qtbot.addWidget(widget)
assert widget.count() == 4
assert widget.currentText() == "test1"
with qtbot.waitSignal(widget.current_choose):
widget.set_value(Enum2.test2)
@pytest.fixture
def mock_accept_files(monkeypatch):
def accept(*_):
return True
monkeypatch.setattr(select_multiple_files.AcceptFiles, "exec_", accept)
@pytest.fixture
def mock_warning(monkeypatch):
warning_show = [0]
def warning(*_):
warning_show[0] = 1
monkeypatch.setattr(select_multiple_files.QMessageBox, "warning", warning)
return warning_show
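# The list-based flag above is a mutable cell the closure can write to;
# unittest.mock's MagicMock (already imported at the top of this file) records
# calls directly -- a sketch of that alternative, not used by these fixtures:
_warning = MagicMock()
_warning("parent", "title", "text")
assert _warning.call_count == 1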
@pytest.mark.usefixtures("mock_accept_files")
class TestAddFiles:
def test_update_files_list(self, qtbot, tmp_path, part_settings):
for i in range(20):
with open(tmp_path / f"test_{i}.txt", "w") as f_p:
f_p.write("test")
widget = select_multiple_files.AddFiles(part_settings)
qtbot.addWidget(widget)
file_list1 = [str(tmp_path / f"test_{i}.txt") for i in range(15)]
widget.update_files_list(file_list1[:10])
assert len(widget.files_to_proceed) == 10
widget.update_files_list(file_list1[5:])
assert len(widget.files_to_proceed) == 15
def test_find_all(self, qtbot, tmp_path, part_settings, mock_warning):
for i in range(10):
with open(tmp_path / f"test_{i}.txt", "w") as f_p:
f_p.write("test")
widget = select_multiple_files.AddFiles(part_settings)
qtbot.addWidget(widget)
widget.paths_input.setText(str(tmp_path / "*.txt"))
widget.find_all()
assert mock_warning[0] == 0
assert len(widget.files_to_proceed) == 10
widget.find_all()
assert mock_warning[0] == 1
def test_parse_drop_file_list(self, qtbot, tmp_path, part_settings, mock_warning):
name_list = []
full_name_list = []
for i in range(10):
with open(tmp_path / f"test_{i}.txt", "w") as f_p:
f_p.write("test")
name_list.append(f"test_{i}.txt")
full_name_list.append(str(tmp_path / f"test_{i}.txt"))
widget = select_multiple_files.AddFiles(part_settings)
qtbot.addWidget(widget)
widget.paths_input.setText(str(tmp_path / "aaa"))
widget.parse_drop_file_list(name_list)
assert mock_warning[0] == 1
mock_warning[0] = 0
widget.parse_drop_file_list(full_name_list)
assert mock_warning[0] == 0
assert len(widget.files_to_proceed) == 10
widget.clean()
assert len(widget.files_to_proceed) == 0
widget.paths_input.setText(str(tmp_path))
widget.parse_drop_file_list(name_list)
assert mock_warning[0] == 0
assert len(widget.files_to_proceed) == 10
def test_delete_element(self, qtbot, tmp_path, part_settings):
for i in range(10):
with open(tmp_path / f"test_{i}.txt", "w") as f_p:
f_p.write("test")
widget = select_multiple_files.AddFiles(part_settings)
qtbot.addWidget(widget)
file_list = [str(tmp_path / f"test_{i}.txt") for i in range(10)]
widget.update_files_list(file_list)
assert len(widget.files_to_proceed) == 10
widget.selected_files.setCurrentRow(2)
widget.delete_element()
assert len(widget.files_to_proceed) == 9
def test_load_file(self, qtbot, tmp_path, part_settings):
for i in range(10):
with open(tmp_path / f"test_{i}.txt", "w") as f_p:
f_p.write("test")
widget = select_multiple_files.AddFiles(part_settings)
qtbot.addWidget(widget)
file_list = [str(tmp_path / f"test_{i}.txt") for i in range(10)]
widget.update_files_list(file_list)
widget.selected_files.setCurrentRow(2)
def check_res(val):
return val == [str(tmp_path / "test_2.txt")]
with qtbot.waitSignal(part_settings.request_load_files, check_params_cb=check_res):
widget._load_file()
mapper = MaskSuffix(name="", suffix="_mask")
def check_res2(val):
return val == [str(tmp_path / "test_2.txt"), str(tmp_path / "test_2_mask.txt")]
with qtbot.waitSignal(part_settings.request_load_files, check_params_cb=check_res2):
widget._load_file_with_mask(mapper)
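# The file-creation loop above repeats across four tests; a fixture could
# hoist it (a sketch -- the tests inline the loop, perhaps to vary the count):
@pytest.fixture
def txt_files(tmp_path):
    paths = []
    for i in range(10):
        file_path = tmp_path / f"test_{i}.txt"
        file_path.write_text("test")
        paths.append(str(file_path))
    return paths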
class _TestWidget(QWidget):
def __init__(self):
super().__init__()
self.setLayout(EqualColumnLayout())
class TestEqualColumnLayout:
def test_add(self, qtbot):
widget = _TestWidget()
qtbot.addWidget(widget)
w1 = QWidget()
w2 = QWidget()
widget.layout().addWidget(w1)
assert widget.layout().count() == 1
widget.layout().addWidget(w2)
assert widget.layout().count() == 2
assert widget.layout().itemAt(1).widget() == w2
assert widget.layout().itemAt(0).widget() == w1
assert widget.layout().itemAt(2) is None
def test_remove_item(self, qtbot):
widget = _TestWidget()
qtbot.addWidget(widget)
w1 = QWidget()
w2 = QWidget()
widget.layout().addWidget(w1)
widget.layout().addWidget(w2)
assert widget.layout().count() == 2
assert widget.layout().takeAt(0).widget() == w1
assert widget.layout().itemAt(0).widget() == w2
assert widget.layout().count() == 1
assert widget.layout().takeAt(2) is None
@pyside_skip
def test_geometry(self, qtbot):
widget = _TestWidget()
qtbot.addWidget(widget)
w1 = QWidget()
w2 = QWidget()
widget.layout().addWidget(w1)
widget.layout().addWidget(w2)
widget.show()
widget.resize(200, 200)
assert widget.width() == 200
assert w1.width() == 100
widget.hide()
@pyside_skip
def test_hidden_widget(self, qtbot):
widget = _TestWidget()
w1 = QWidget()
w2 = QWidget()
w3 = QWidget()
widget.layout().addWidget(w1)
widget.layout().addWidget(w2)
widget.layout().addWidget(w3)
w2.hide()
qtbot.addWidget(widget)
widget.show()
widget.resize(200, 200)
assert w1.width() == 100
widget.hide()
class TestSearchComboBox:
def test_create(self, qtbot):
widget = SearchComboBox()
qtbot.addWidget(widget)
def test_add_item(self, qtbot):
widget = SearchComboBox()
qtbot.addWidget(widget)
widget.addItem("test1")
assert widget.count() == 1
assert widget.itemText(0) == "test1"
def test_add_items(self, qtbot):
widget = SearchComboBox()
qtbot.addWidget(widget)
widget.addItems(["test1", "test2", "test3"])
assert widget.count() == 3
assert widget.itemText(0) == "test1"
assert widget.itemText(2) == "test3"
def test_create_load_dialog(qtbot):
dialog = CustomLoadDialog(load_dict, history=["/aaa/"])
assert dialog.acceptMode() == CustomLoadDialog.AcceptOpen
dialog = CustomLoadDialog(LoadProject, history=["/aaa/"])
assert dialog.acceptMode() == CustomLoadDialog.AcceptOpen
def test_create_save_dialog(qtbot):
dialog = CustomSaveDialog(save_dict, history=["/aaa/"])
assert dialog.acceptMode() == CustomSaveDialog.AcceptSave
dialog = CustomSaveDialog(SaveProject, history=["/aaa/"])
assert not hasattr(dialog, "stack_widget")
dialog = CustomSaveDialog(save_dict, system_widget=False)
assert hasattr(dialog, "stack_widget")
def test_p_save_dialog(part_settings, tmp_path, qtbot, monkeypatch):
def selected_files(self):
return [str(tmp_path / "test.tif")]
monkeypatch.setattr(QFileDialog, "selectedFiles", selected_files)
assert part_settings.get_path_history() == [str(Path.home())]
dialog = PSaveDialog(save_dict, settings=part_settings, path="io.test")
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == Path.home()
assert Path(part_settings.get("io.test")) == Path.home()
dialog = PSaveDialog(save_dict, settings=part_settings, path="io.test2", default_directory=str(tmp_path))
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == tmp_path
assert Path(part_settings.get("io.test2")) == tmp_path
part_settings.set("io.test3", str(tmp_path))
dialog = PSaveDialog(save_dict, settings=part_settings, path="io.test3")
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == tmp_path
assert Path(part_settings.get("io.test3")) == tmp_path
monkeypatch.setattr(QFileDialog, "result", lambda x: QFileDialog.Rejected)
part_settings.set("io.filter_save", SaveAsTiff.get_name())
assert part_settings.get_path_history() == [str(Path.home())]
dialog.show()
dialog.accept()
assert part_settings.get_path_history() == [str(Path.home())]
monkeypatch.setattr(QFileDialog, "result", lambda x: QFileDialog.Accepted)
dialog = PSaveDialog(save_dict, settings=part_settings, path="io.test4", filter_path="io.filter_save")
qtbot.addWidget(dialog)
assert SaveAsTiff.get_name() in dialog.nameFilters()
dialog.show()
dialog.selectFile(str(tmp_path / "test.tif"))
dialog.accept()
assert dialog.selectedNameFilter() == SaveAsTiff.get_name()
assert [Path(x) for x in part_settings.get_path_history()] == [tmp_path, Path.home()]
def test_form_dialog(qtbot):
fields = [
AlgorithmProperty("aaa", "Aaa", 1.0),
AlgorithmProperty("bbb", "Bbb", False),
]
form = FormDialog(fields, values={"aaa": 2.0})
assert form.get_values() == {"aaa": 2.0, "bbb": False}
form.set_values({"aaa": 5.0, "bbb": True})
assert form.get_values() == {"aaa": 5.0, "bbb": True}
def test_p_load_dialog(part_settings, tmp_path, qtbot, monkeypatch):
dialog = PLoadDialog(load_dict, settings=part_settings, path="io.load_test")
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == Path.home()
assert Path(part_settings.get("io.load_test")) == Path.home()
dialog = PLoadDialog(load_dict, settings=part_settings, path="io.load_test2", default_directory=str(tmp_path))
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == tmp_path
assert Path(part_settings.get("io.load_test2")) == tmp_path
part_settings.set("io.load_test3", str(tmp_path))
dialog = PLoadDialog(load_dict, settings=part_settings, path="io.load_test3")
qtbot.addWidget(dialog)
assert Path(dialog.directory().path()) == tmp_path
assert Path(part_settings.get("io.load_test3")) == tmp_path
monkeypatch.setattr(QFileDialog, "result", lambda x: QFileDialog.Rejected)
part_settings.set("io.filter_load", LoadStackImage.get_name())
assert part_settings.get_path_history() == [str(Path.home())]
dialog.show()
dialog.accept()
assert part_settings.get_path_history() == [str(Path.home())]
with (tmp_path / "test.tif").open("w") as f:
f.write("eeeeeee")
monkeypatch.setattr(QFileDialog, "result", lambda x: QFileDialog.Accepted)
dialog = PLoadDialog(load_dict, settings=part_settings, path="io.load_test4", filter_path="io.filter_load")
qtbot.addWidget(dialog)
assert LoadStackImage.get_name() in dialog.nameFilters()
dialog.show()
dialog.selectFile(str(tmp_path / "test.tif"))
if IS_MACOS:
monkeypatch.setattr(dialog, "selectedFiles", lambda: [str(tmp_path / "test.tif")])
dialog.accept()
assert dialog.selectedNameFilter() == LoadStackImage.get_name()
assert [Path(x) for x in part_settings.get_path_history()] == [tmp_path, Path.home()]
def test_str_filter(part_settings, tmp_path, qtbot, monkeypatch):
tiff_text = "Test (*.tiff)"
monkeypatch.setattr(QFileDialog, "result", lambda x: QFileDialog.Accepted)
monkeypatch.setattr(QFileDialog, "selectedFiles", lambda x: [str(tmp_path / "test.tif")])
dialog = PSaveDialog(tiff_text, settings=part_settings, path="io.save_test")
qtbot.addWidget(dialog)
assert tiff_text in dialog.nameFilters()
dialog.show()
dialog.selectFile(str(tmp_path / "test.tif"))
dialog.accept()
assert dialog.selectedNameFilter() == tiff_text
assert [Path(x) for x in part_settings.get_path_history()] == [tmp_path, Path.home()]
with (tmp_path / "test2.tif").open("w") as f:
f.write("eeeeeee")
dialog = PLoadDialog(tiff_text, settings=part_settings, path="io.load_test2")
qtbot.addWidget(dialog)
assert tiff_text in dialog.nameFilters()
dialog.show()
dialog.selectFile(str(tmp_path / "test2.tif"))
if IS_MACOS:
monkeypatch.setattr(dialog, "selectedFiles", lambda: [str(tmp_path / "test2.tif")])
dialog.accept()
assert dialog.selectedNameFilter() == tiff_text
assert [Path(x) for x in part_settings.get_path_history()] == [tmp_path, Path.home()]
def test_recent_files(part_settings, qtbot):
dial = LoadRecentFiles(part_settings)
qtbot.add_widget(dial)
assert dial.file_list.count() == 0
size = dial.size()
new_size = size.width() + 50, size.width() + 50
dial.resize(*new_size)
dial.accept()
assert part_settings.get_from_profile("multiple_files_dialog_size") == new_size
part_settings.add_last_files_multiple(["aaa.txt"], "method")
part_settings.add_last_files_multiple(["bbb.txt"], "method")
part_settings.add_last_files(["bbb.txt"], "method")
part_settings.add_last_files(["ccc.txt"], "method")
dial = LoadRecentFiles(part_settings)
qtbot.add_widget(dial)
assert dial.file_list.count() == 3
assert dial.size() == QSize(*new_size)
dial.file_list.selectAll()
assert dial.get_files() == [(["bbb.txt"], "method"), (["aaa.txt"], "method"), (["ccc.txt"], "method")]
class TestMultipleFileWidget:
def test_create(self, part_settings, qtbot):
widget = MultipleFileWidget(part_settings, {})
qtbot.add_widget(widget)
@staticmethod
def check_load_files(parameter, custom_name):
return not custom_name and os.path.basename(parameter.file_path) == "img_4.tif"
@pytest.mark.enablethread
@pytest.mark.enabledialog
def test_load_recent(self, part_settings, qtbot, monkeypatch, tmp_path):
widget = MultipleFileWidget(part_settings, {LoadStackImage.get_name(): LoadStackImage})
qtbot.add_widget(widget)
for i in range(5):
ImageWriter.save(
Image(np.random.random((10, 10)), image_spacing=(1, 1), axes_order="XY"), tmp_path / f"img_{i}.tif"
)
file_list = [
[
[
tmp_path / f"img_{i}.tif",
],
LoadStackImage.get_name(),
]
for i in range(5)
]
with qtbot.waitSignal(widget._add_state, check_params_cb=self.check_load_files):
widget.load_recent_fun(file_list, lambda x, y: True, lambda x: True)
assert part_settings.get_last_files_multiple() == file_list
assert widget.file_view.topLevelItemCount() == 5
widget.file_view.clear()
widget.state_dict.clear()
widget.file_list.clear()
monkeypatch.setattr(LoadRecentFiles, "exec_", lambda x: True)
monkeypatch.setattr(LoadRecentFiles, "get_files", lambda x: file_list)
with qtbot.waitSignal(widget._add_state, check_params_cb=self.check_load_files):
widget.load_recent()
assert part_settings.get_last_files_multiple() == file_list
assert widget.file_view.topLevelItemCount() == 5
@pytest.mark.enablethread
@pytest.mark.enabledialog
def test_load_files(self, part_settings, qtbot, monkeypatch, tmp_path):
widget = MultipleFileWidget(part_settings, {LoadStackImage.get_name(): LoadStackImage})
qtbot.add_widget(widget)
for i in range(5):
ImageWriter.save(
Image(np.random.random((10, 10)), image_spacing=(1, 1), axes_order="XY"), tmp_path / f"img_{i}.tif"
)
file_list = [[[str(tmp_path / f"img_{i}.tif")], LoadStackImage.get_name()] for i in range(5)]
load_property = LoadProperty(
[str(tmp_path / f"img_{i}.tif") for i in range(5)], LoadStackImage.get_name(), LoadStackImage
)
with qtbot.waitSignal(widget._add_state, check_params_cb=self.check_load_files):
widget.execute_load_files(load_property, lambda x, y: True, lambda x: True)
assert widget.file_view.topLevelItemCount() == 5
assert part_settings.get_last_files_multiple() == file_list
widget.file_view.clear()
widget.state_dict.clear()
widget.file_list.clear()
monkeypatch.setattr(MultipleLoadDialog, "exec_", lambda x: True)
monkeypatch.setattr(MultipleLoadDialog, "get_result", lambda x: load_property)
with qtbot.waitSignal(widget._add_state, check_params_cb=self.check_load_files):
widget.load_files()
assert widget.file_view.topLevelItemCount() == 5
assert part_settings.get_last_files_multiple() == file_list
part_settings.dump()
part_settings.load()
assert part_settings.get_last_files_multiple() == file_list
class TestBaseMainWindow:
def test_create(self, tmp_path, qtbot):
window = BaseMainWindow(config_folder=tmp_path)
qtbot.add_widget(window)
@pytest.mark.enablethread
@pytest.mark.enabledialog
def test_recent(self, tmp_path, qtbot, monkeypatch):
load_mock = MagicMock()
load_mock.load = MagicMock(return_value=1)
load_mock.get_name = MagicMock(return_value="test")
window = BaseMainWindow(config_folder=tmp_path, load_dict={"test": load_mock})
qtbot.add_widget(window)
assert window.recent_file_menu.isEmpty()
window.settings.add_last_files([tmp_path / "test.txt"], "test")
actions = window.recent_file_menu.actions()
assert len(actions) == 1
assert actions[0].data() == ([tmp_path / "test.txt"], "test")
monkeypatch.setattr(window, "sender", lambda: actions[0])
main_menu = MagicMock()
add_last_files = MagicMock()
monkeypatch.setattr(window, "main_menu", main_menu, raising=False)
monkeypatch.setattr(window.settings, "add_last_files", add_last_files)
window._load_recent()
window.settings.add_last_files.assert_called_once_with([tmp_path / "test.txt"], "test")
main_menu.set_data.assert_called_with(1)
assert window.settings.get(OPEN_DIRECTORY) == str(tmp_path)
assert str(window.settings.get(OPEN_FILE)) == str(tmp_path / "test.txt")
assert window.settings.get(OPEN_FILE_FILTER) == "test"
class TestQtPopup:
def test_show_above(self, qtbot):
popup = QtPopup(None)
qtbot.addWidget(popup)
popup.show_above_mouse()
popup.close()
def test_show_right(self, qtbot):
popup = QtPopup(None)
qtbot.addWidget(popup)
popup.show_right_of_mouse()
popup.close()
def test_move_to_error_no_parent(self, qtbot):
popup = QtPopup(None)
qtbot.add_widget(popup)
with pytest.raises(ValueError):
popup.move_to()
@pytest.mark.parametrize("pos", ["top", "bottom", "left", "right"])
def test_move_to(self, pos, qtbot):
window = QMainWindow()
qtbot.addWidget(window)
widget = QWidget()
window.setCentralWidget(widget)
popup = QtPopup(widget)
popup.move_to(pos)
def test_move_to_error_wrong_params(self, qtbot):
window = QMainWindow()
qtbot.addWidget(window)
widget = QWidget()
window.setCentralWidget(widget)
popup = QtPopup(widget)
with pytest.raises(ValueError):
popup.move_to("dummy_text")
with pytest.raises(ValueError):
popup.move_to({})
@pytest.mark.parametrize("pos", [[10, 10, 10, 10], (15, 10, 10, 10)])
def test_move_to_cords(self, pos, qtbot):
window = QMainWindow()
qtbot.addWidget(window)
widget = QWidget()
window.setCentralWidget(widget)
popup = QtPopup(widget)
popup.move_to(pos)
def test_click(self, qtbot, monkeypatch):
popup = QtPopup(None)
monkeypatch.setattr(popup, "close", MagicMock())
qtbot.addWidget(popup)
qtbot.keyClick(popup, Qt.Key_8)
popup.close.assert_not_called()
qtbot.keyClick(popup, Qt.Key_Return)
popup.close.assert_called_once()
@pytest.mark.parametrize("function_name", SaveBase.need_functions)
def test_IOMethodMock(function_name):
Register.check_function(IOMethodMock("test"), function_name, True)
getattr(IOMethodMock("test"), function_name)()
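# Equivalent check without parametrize (a sketch): the parametrized test above
# is preferable because each required function reports as its own test case.
def test_IOMethodMock_loop():
    for name in SaveBase.need_functions:
        Register.check_function(IOMethodMock("test"), name, True)
        getattr(IOMethodMock("test"), name)()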
| 39.218803 | 119 | 0.671316 | 2,868 | 22,943 | 5.142259 | 0.112622 | 0.030852 | 0.01763 | 0.009696 | 0.647206 | 0.587537 | 0.544345 | 0.490372 | 0.453756 | 0.424125 | 0 | 0.013003 | 0.205553 | 22,943 | 584 | 120 | 39.285959 | 0.796127 | 0.000915 | 0 | 0.461847 | 0 | 0 | 0.052967 | 0.001134 | 0 | 0 | 0 | 0 | 0.190763 | 1 | 0.088353 | false | 0 | 0.052209 | 0.012048 | 0.188755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0be54407485aabe4c942c65e5033d0178a10ec | 9,826 | py | Python | helper/views/group.py | Feng-Yz/Study-Helper | 0be95331bcdb8909fdd21b7eb025e9281b709726 | [
"MIT"
] | 11 | 2021-11-12T02:41:41.000Z | 2022-02-15T07:42:14.000Z | helper/views/group.py | Feng-Yz/Study-Helper | 0be95331bcdb8909fdd21b7eb025e9281b709726 | [
"MIT"
] | 1 | 2021-11-12T09:00:26.000Z | 2021-11-21T16:13:21.000Z | helper/views/group.py | Feng-Yz/Study-Helper | 0be95331bcdb8909fdd21b7eb025e9281b709726 | [
"MIT"
] | 1 | 2021-07-22T13:23:40.000Z | 2021-07-22T13:23:40.000Z | from django.shortcuts import render, get_object_or_404
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.urls import reverse
from helper import models
class SubAssignmentForm(forms.Form):
description = forms.CharField(label='Subtask description', required=True, max_length=50,
                              widget=forms.TextInput(attrs={'class': 'form-control form-control-user mb-5'}))
pre_sub_assignment = forms.CharField(label='Prerequisite subtask', required=False, max_length=50,
                                     widget=forms.TextInput(attrs={'class': 'form-control form-control-user mb-5'}))
start_time = forms.DateTimeField(label='Start time', required=True,
                                 widget=forms.DateTimeInput(attrs={'class': 'form-control form-control-user mb-5'}))
deadline = forms.DateTimeField(label='Deadline', required=True,
                               widget=forms.DateTimeInput(attrs={'class': 'form-control form-control-user mb-5'}))
user = forms.ModelChoiceField(label='User', queryset=User.objects.all(), required=True,
                              widget=forms.Select(attrs={'class': 'form-control form-control-user mb-5'}))
assignment = forms.ModelChoiceField(label='Parent assignment', queryset=models.GroupAssignment.objects.all(), required=True,
                                    widget=forms.Select(attrs={'class': 'form-control form-control-user mb-5'}))
weight = forms.IntegerField(label='Weight', required=True, max_value=100,
                            widget=forms.NumberInput(attrs={'class': 'form-control form-control-user mb-5'}))
expected_minutes_consumed = forms.IntegerField(label='Expected time (minutes)', required=True,
                                               widget=forms.NumberInput(
                                                   attrs={'class': 'form-control form-control-user mb-5'}))
class GroupForm(forms.Form):
type = forms.CharField(label='Group type', required=True, max_length=20)
group_name = forms.CharField(label='Group name', required=True, max_length=20)
class AssignmentForm(forms.Form):
description = forms.CharField(label='Assignment description', required=True, max_length=1000,
                              widget=forms.TextInput(attrs={'class': 'form-control form-control-user mb-5'}))
deadline = forms.DateTimeField(label='Deadline', required=True,
                               widget=forms.DateTimeInput(attrs={'class': 'form-control form-control-user mb-5'}))
@login_required
def group_admin(request):
user = request.user
leader_groups = None
message = None
if request.method == 'POST':
add_group = request.POST.get('group_name')
leader_id = request.POST.get('leader_id')
group_id = request.POST.get('group_id')
if add_group is not None:
type = request.POST.get('type')
name = request.POST.get('group_name')
group = models.Group(type=type, group_name=name, leader=user)
group.save()
models.UserGroup.objects.create(is_leader=True, group=group, user=user)
if leader_id is not None:
try:
leader = models.User.objects.filter(username=leader_id)[0]
leader_groups = models.Group.objects.filter(leader_id__exact=leader.id)
except IndexError:
leader_groups = None
message = "组长的学号不存在!"
if group_id is not None:
if len(models.UserGroup.objects.filter(group_id=group_id, user_id=user.id)) == 0 and \
len(models.Group.objects.filter(id=group_id)) != 0:
user_group = models.UserGroup(is_leader=False, group_id=group_id, user=user)
user_group.save()
else:
message = "请输入正确的组号!"
add_form = GroupForm()
user_groups = models.UserGroup.objects.filter(user_id=user.id)
groups = list(map(lambda k: k.group, user_groups))
return render(request, '../templates/group/groups_admin.html',
{
'add_form': add_form,
'groups': groups,
'leader_groups': leader_groups,
'message': message
})
@login_required
def add_sub_assign(request, pk):
user = request.user
group = get_object_or_404(models.Group, pk=pk)
if user.id != group.leader.id:
return HttpResponseForbidden()
if request.method == "GET":
form = SubAssignmentForm()
qs_user = User.objects.filter(usergroup__group_id=group.id)
qs_assign = models.GroupAssignment.objects.filter(group_id=group.id).distinct()
form.fields['user'].queryset = qs_user
form.fields['assignment'].queryset = qs_assign
return render(request, "../templates/group/add_sub_assign.html", {'form': form})
else:
form = SubAssignmentForm(request.POST)
if form.is_valid():
username = form.cleaned_data['user']
user = User.objects.filter(username=username)[0]
assignment_id = models.GroupAssignment.objects.filter(group=group,
description=form.cleaned_data['assignment'])[0].id
description = form.cleaned_data['description']
deadline = form.cleaned_data['deadline']
weight = form.cleaned_data['weight']
pre_sub_assignment = form.cleaned_data['pre_sub_assignment']
emc = form.cleaned_data['expected_minutes_consumed']
start_time = form.cleaned_data['start_time']
models.SubAssignment.objects.create(assignment_id=assignment_id,
pre_sub_assignment=pre_sub_assignment,
user_id=user.id,
description=description,
weight=weight, deadline=deadline, expected_minutes_consumed=emc)
models.Schedule.objects.create(user_id=user.id, description=description, type="学习", is_repeated=False,
is_done=False, start_time=start_time, weight=weight, deadline=deadline,
expected_minutes_consumed=emc)
return HttpResponseRedirect(reverse('helper:group_home', args=(pk, )))
else:
message = "添加失败!"
return render(request, "../templates/group/add_sub_assign.html", {'form': form, 'message': message})
@login_required
def add_assign(request, pk):
user = request.user
group = get_object_or_404(models.Group, pk=pk)
if user.id != group.leader.id:
return HttpResponseForbidden()
if request.method == "GET":
form = AssignmentForm()
return render(request, "../templates/group/add_assign.html", {'form': form})
else:
form = AssignmentForm(request.POST)
if form.is_valid():
models.GroupAssignment.objects.create(description=form.cleaned_data['description'],
deadline=form.cleaned_data['deadline'],
group_id=pk)
return HttpResponseRedirect(reverse('helper:group_home', args=(pk, )))
else:
return render(request, "../templates/group/add_assign.html", {'message': "添加失败!", 'form': form})
@login_required
def home(request, pk):
user = request.user
group = get_object_or_404(models.Group, pk=pk)
user_group = models.UserGroup.objects.filter(group=group)
participants = [ug.user.id for ug in user_group]
if user.id not in participants:
    return HttpResponseForbidden()
assignments = models.GroupAssignment.objects.filter(group=group)
sub_assignments = models.SubAssignment.objects.filter(assignment__group=group)
return render(request, '../templates/group/home.html', {
'group': group,
'partcipants': user_group,
'assignments': assignments,
'sub_assignments': sub_assignments
})
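# A lighter membership check than materializing every member id (a sketch;
# EXISTS runs in the database and is assumed behavior-equivalent):
def _is_member(user, group):
    return models.UserGroup.objects.filter(group=group, user=user).exists()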
# @login_required
# def add(request):
# if request.method == "GET":
# form = GroupForm()
# return render(request, "../templates/group/add.html", {'form': form})
# else:
# form = GroupForm(request.POST)
# if form.is_valid():
# user = request.user
#
# group = models.Group(type=form.cleaned_data['type'],
# group_name=form.cleaned_data['group_name'],
# leader=user)
# group.save()
#
# models.UserGroup.objects.create(is_leader=True,
# group=group,
# user=user)
#
# return render(request, '../templates/group/add.html', {'form': form})
# else:
# return render(request, '../templates/group/add.html', {'form': form, 'message': 'Invalid form!'})
#
#
# @login_required
# def join(request):
# if request.method == "GET":
# groups = models.Group.objects.all()
# return render(request, "../templates/group/join.html", {'groups': groups})
# if request.is_ajax():
# user = request.user
# group_id = request.POST.get('group_id')
# status = {'status': None}
#
# result = models.UserGroup.objects.filter(user=user, group_id=group_id)
# if result.count() == 0:
# models.UserGroup.objects.create(is_leader=False, group_id=group_id, user=user)
# status['status'] = 200
# else:
# status['status'] = 400
# return JsonResponse(status)
| 47.468599 | 120 | 0.593731 | 1,060 | 9,826 | 5.364151 | 0.136792 | 0.038692 | 0.031657 | 0.036933 | 0.52726 | 0.454098 | 0.370032 | 0.360183 | 0.325009 | 0.283855 | 0 | 0.006978 | 0.285365 | 9,826 | 206 | 121 | 47.699029 | 0.802763 | 0.154081 | 0 | 0.284722 | 0 | 0 | 0.12104 | 0.028174 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.034722 | 0.048611 | 0 | 0.256944 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0c61fe035483d37045ea75608f0afbc6103195 | 2,819 | py | Python | labpack/records/id.py | collectiveacuity/labPack | c8fb0d1ee23608f6dbcb99c232373eee886000fd | [
"MIT"
] | 2 | 2017-06-20T15:20:46.000Z | 2019-11-18T01:28:49.000Z | labpack/records/id.py | collectiveacuity/labPack | c8fb0d1ee23608f6dbcb99c232373eee886000fd | [
"MIT"
] | null | null | null | labpack/records/id.py | collectiveacuity/labPack | c8fb0d1ee23608f6dbcb99c232373eee886000fd | [
"MIT"
] | null | null | null | __author__ = 'rcj1492'
__created__ = '2015.09'
__license__ = 'MIT'
# pip install pytz
# pip install tzlocal
import uuid
import binascii
import os
import hashlib
import base64
from datetime import datetime
import pytz
class labID(object):
''' a class of methods for uniquely identifying objects
build-in methods:
self.uuid: uuid1 uuid object
self.id12: 12 character base 64 url safe string of posix time
self.id24: 24 character base 64 url safe string of md5 hash of uuid1
self.id36: 36 character base 64 url safe string of sha1 hash of uuid1
self.id48: 48 character base 64 url safe string of sha256 hash of uuid1
self.mac: string of mac address of device
self.epoch: current posix epoch timestamp with micro second resolution
self.iso: current iso utc datetime string
self.datetime: current python datetime
'''
def __init__(self):
''' a method to initialize a unique ID based upon the UUID1 method '''
# retrieve UUID
self.uuid = uuid.uuid1()
# calculate micro second posix timestamp of uuid
t = self.uuid.time
# uuid1 time counts 100 ns ticks since 1582-10-15 (Gregorian epoch);
# subtract the offset to the unix epoch, then convert ticks to seconds
t = t - 0x01b21dd213814000
v = t / 1e7
self.epoch = float(str(v)[0:17])
self.datetime = datetime.utcfromtimestamp(self.epoch).replace(tzinfo=pytz.utc)
self.iso = self.datetime.isoformat()
# create byte ids of various lengths using hash of uuid
# NOTE: unhexlify needs an even number of hex digits, but format(int(t), 'x')
# can yield an odd-length string; zero-padded formatting would be safer here
self.bytes_9 = os.urandom(2) + bytes(binascii.unhexlify(format(int(t), 'x')))
self.bytes_18 = os.urandom(2) + hashlib.md5(self.uuid.bytes).digest()
self.bytes_27 = os.urandom(7) + hashlib.sha1(self.uuid.bytes).digest()
self.bytes_36 = os.urandom(4) + hashlib.sha256(self.uuid.bytes).digest()
# convert byte ids into base 64 url safe id strings
self.id12 = base64.urlsafe_b64encode(self.bytes_9).decode()
self.id24 = base64.urlsafe_b64encode(self.bytes_18).decode()
self.id36 = base64.urlsafe_b64encode(self.bytes_27).decode()
self.id48 = base64.urlsafe_b64encode(self.bytes_36).decode()
# determine the mac address
mac = 0
test_mac = uuid.getnode()
counter = 0
while not test_mac == mac and counter < 5:
mac = test_mac
test_mac = uuid.getnode()
counter += 1
if counter < 5:
m = hex(mac)[2:14]
local_mac = m[0:2] + ':' + m[2:4] + ':' + m[4:6] + \
':' + m[6:8] + ':' + m[8:10] + ':' + m[10:12]
else:
local_mac = ''
self.mac = local_mac
if __name__ == '__main__':
print(labID().id12)
print(labID().uuid)
print(labID().epoch)
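# Quick sanity check of the id lengths (a sketch): url-safe base64 maps
# 9/18/27/36 bytes to exactly 12/24/36/48 characters.
record = labID()
assert len(record.id12) == 12
assert len(record.id48) == 48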
| 33.963855 | 87 | 0.596665 | 373 | 2,819 | 4.394102 | 0.364611 | 0.043929 | 0.027456 | 0.039658 | 0.213545 | 0.107383 | 0.073215 | 0 | 0 | 0 | 0 | 0.071174 | 0.302235 | 2,819 | 82 | 88 | 34.378049 | 0.762074 | 0.317488 | 0 | 0.044444 | 0 | 0 | 0.017867 | 0 | 0 | 0 | 0.010375 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.155556 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0cfa52069fe87f3f4262060234a0b81ee7c383 | 5,115 | py | Python | 2019/06_UniversalOrbitMap/uomap.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 1 | 2021-01-03T23:09:28.000Z | 2021-01-03T23:09:28.000Z | 2019/06_UniversalOrbitMap/uomap.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 6 | 2020-12-26T21:02:42.000Z | 2020-12-26T21:02:52.000Z | 2019/06_UniversalOrbitMap/uomap.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | null | null | null | # ======================================================================
# Universal Orbit Map
# Advent of Code 2019 Day 06 -- Eric Wastl -- https://adventofcode.com
#
# Computer simulation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# u o m a p . p y
# ======================================================================
"Map for Universal Orbit Map problem for Advent of Code 2019 Day 06"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import dag
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
COM = 'COM'
YOU = 'YOU'
SANTA = 'SAN'
# ======================================================================
# UOMap
# ======================================================================
class UOMap(dag.DAG):
"""Object representing a Universal Orbit Map"""
def __init__(self, pairs=None, text=None):
# 1. Start with an empty dag
super(UOMap, self).__init__(pairs=pairs)
# 2. If there is text, process it
if text is not None:
# 3. Loop for all of the lines
for line in text:
# 4. Split line into the two node names
nodes = line.split(')')
# 5. Add nodes to graph
self.add_node(nodes[0], nodes[1])
def orbits(self, node):
"Return the number of [in]direct orbits for a given node"
# Number of orbits is path length from COM minus 1
path = self.find_shortest_path(COM, node)
assert path is not None
#print("%s %d %s" % (node, len(path), path))
return len(path) - 1
def total_orbits(self):
"Return the number of direct and indirect orbits"
# 1. Start with no orbits
result = 0
# 2. Loop for all of the nodes
for node in self.nodes():
# 3. We only want terminal nodes
if node == COM:
continue
# 4. Orbits is one less than the length of path from COM
result += self.orbits(node)
# 5. Return total number of orbits
return result
def count_orbits(self):
"Count the orbits by walking the tree"
# 1. Start with nothing, but a list of things to do
orbits = {}
todo = [(COM, 0)]
# 2. Loop until there is nothing to do
while todo:
    # 3. Take a body off the list; its depth from COM is its orbit count
    node, depth = todo.pop()
    orbits[node] = depth
    # 4. Queue everything orbiting this body, one level deeper
    # (self.dag maps a body to the bodies orbiting it, as in orbiting())
    for moon in self.dag.get(node, []):
        todo.append((moon, depth + 1))
# 9. Return the sum of the orbits
return sum(orbits.values())
def bodies(self):
"Retnumber of orbit"
return self.nodes()
def minimum_transfers(self, from_node, to_node):
"Find the minimumal number of orbital transfers between two nodes"
# 1. Assume no path
result = []
# 2. If from you or Santa, find where orbiting
if from_node in [YOU, SANTA]:
from_node = self.orbiting(from_node)
# 3. If to you or Santa, find where orbiting
if to_node in [YOU, SANTA]:
to_node = self.orbiting(to_node)
# 4. Find the shortest path from the center to each
from_path = self.find_shortest_path(COM, from_node)
to_path = self.find_shortest_path(COM, to_node)
assert from_path is not None
assert to_path is not None
# 5. Keep only the unique parts
for indx in range(min(len(from_path), len(to_path))):
if from_path[indx] == to_path[indx]:
continue
result = from_path[indx:] + to_path[indx:]
break
# 6. Return the length of the unique legs
return len(result)
def orbiting(self, node):
"What is the node orbiting?"
# 1. Assume the worst
result = None
# 2. if not the center, find where it is orbiting
if node != COM:
for onode, bodies in self.dag.items():
if node in bodies:
result = onode
break
# 3. Return where orbiting or None
return result
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
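    # Worked example on the Advent of Code day 6 sample map (a sketch; relies
    # on the dag.DAG base class for add_node and find_shortest_path):
    sample = UOMap(text="COM)B B)C C)D D)E E)F B)G G)H D)I E)J J)K K)L".split())
    assert sample.total_orbits() == 42
    assert sample.count_orbits() == 42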
# ======================================================================
# end u o m a p . p y end
# ======================================================================
| 33.651316 | 75 | 0.403715 | 511 | 5,115 | 3.951076 | 0.30137 | 0.019812 | 0.017831 | 0.029718 | 0.133234 | 0.118375 | 0.035661 | 0 | 0 | 0 | 0 | 0.011955 | 0.345846 | 5,115 | 151 | 76 | 33.874172 | 0.591452 | 0.536266 | 0 | 0.126984 | 0 | 0 | 0.13269 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.111111 | false | 0.031746 | 0.015873 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0d648c1c33bdadb1e11eb0daaea9e418533e75 | 5,341 | py | Python | tests/test_35_ledger_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | 7 | 2021-11-02T05:58:58.000Z | 2022-03-04T22:16:20.000Z | tests/test_35_ledger_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | 5 | 2021-01-27T14:18:01.000Z | 2022-03-04T22:03:49.000Z | tests/test_35_ledger_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | null | null | null | # =============================================================================
#
# Jade Tree Personal Budgeting Application | jadetree.io
# Copyright (c) 2020 Asymworks, LLC. All Rights Reserved.
#
# =============================================================================
from datetime import date
from decimal import Decimal
import pytest # noqa: F401
from sqlalchemy import and_, func
from jadetree.domain.models import (
Account,
Category,
TransactionEntry,
TransactionLine,
TransactionSplit,
)
from jadetree.domain.types import (
AccountSubtype,
AccountType,
PayeeRole,
TransactionType,
)
from jadetree.service import (
account as account_service,
budget as budget_service,
ledger as ledger_service,
payee as payee_service,
)
from .helpers import check_transaction_entries as check_entries
@pytest.fixture(scope='function')
def budget_id(session, user_with_profile):
u = user_with_profile
b = budget_service.create_budget(session, u, 'Test Budget', 'USD')
return b.id
@pytest.fixture(scope='function')
def default_accounts(session, user_with_profile, budget_id):
accts = []
cats = []
# Create personal accounts for Checking, Savings and CC
accts.append(account_service.create_user_account(session, user_with_profile, 'Checking', AccountType.Asset, 'USD', Decimal(10000), date(2020, 1, 1), AccountSubtype.Checking, budget_id=budget_id)[0])
accts.append(account_service.create_user_account(session, user_with_profile, 'Savings', AccountType.Asset, 'USD', Decimal(50000), date(2020, 1, 1), AccountSubtype.Savings, budget_id=budget_id)[0])
accts.append(account_service.create_user_account(session, user_with_profile, 'Credit Card', AccountType.Liability, 'USD', Decimal(500), date(2020, 1, 1), AccountSubtype.CreditCard, budget_id=budget_id)[0])
# Create budget categories for Rent, Groceries, and Insurance
g1 = budget_service.create_budget_category_group(session, user_with_profile, budget_id, 'Monthly Expenses')
g2 = budget_service.create_budget_category_group(session, user_with_profile, budget_id, 'Yearly Expenses')
cats.append(budget_service.create_budget_category(session, user_with_profile, budget_id, g1.id, 'Rent'))
cats.append(budget_service.create_budget_category(session, user_with_profile, budget_id, g1.id, 'Groceries'))
cats.append(budget_service.create_budget_category(session, user_with_profile, budget_id, g2.id, 'Insurance'))
# Return ID List
return tuple([a.id for a in accts]), tuple([c.id for c in cats])
@pytest.fixture(scope='function')
def default_payees(session, user_with_profile):
u = user_with_profile
payees = []
payees.append(payee_service.create_payee(session, u, 'Vons'))
payees.append(payee_service.create_payee(session, u, 'Landlord'))
return tuple([p.id for p in payees])
def test_add_simple_transaction(
session, user_with_profile, budget_id, default_accounts, default_payees
):
(a_chk, a_svg, a_cc), (c_rent, c_groc, c_ins) = default_accounts
(p_vons, p_landlord) = default_payees
t = ledger_service.create_transaction(
session=session,
user=user_with_profile,
account_id=a_cc,
date=date(2020, 1, 2),
amount=Decimal(80),
payee_id=p_vons,
splits=[
dict(
category_id=c_groc,
amount=Decimal(80),
),
]
)
assert t.id > 0
a = session.query(Account).filter(Account.id == a_cc).one()
o = session.query(Account).filter(Account.name == '_expense').one()
c = session.query(Category).get(c_groc)
assert t.account == a
assert t.date == date(2020, 1, 2)
assert t.memo is None
assert t.currency == 'USD'
assert t.foreign_currency is None
assert t.foreign_exchrate is None
assert t.payee is not None
assert t.payee.user == user_with_profile
assert t.payee.name == 'Vons'
assert t.payee.role == PayeeRole.Expense
assert t.payee.system is False
assert t.payee.hidden is False
assert len(t.lines) == 2
assert t.lines[0].account == a
assert t.lines[0].amount == Decimal(80)
assert t.lines[0].cleared is False
assert t.lines[0].reconciled is False
assert t.lines[1].account == o
assert t.lines[1].amount == Decimal(80)
assert t.lines[1].cleared is False
assert t.lines[1].reconciled is False
assert len(t.splits) == 1
assert t.splits[0].amount == Decimal(80)
assert t.splits[0].left_line == t.lines[0]
assert t.splits[0].right_line == t.lines[1]
assert t.splits[0].category == c
assert t.splits[0].type == TransactionType.Outflow
check_entries(t.splits[0], [
(a, Decimal(80), 'USD'),
(o, Decimal(80), 'USD'),
])
assert t.amount == Decimal(80)
assert sum([ln.amount for ln in a.transaction_lines]) == Decimal(580)
assert sum([ln.amount for ln in o.transaction_lines]) == Decimal(580)
cat_balance = session.query(func.sum(TransactionEntry.amount)) \
.join(TransactionSplit) \
.join(TransactionLine, and_(
TransactionLine.id == TransactionEntry.line_id,
TransactionLine.account == o
)) \
.filter(TransactionSplit.category == c) \
.scalar()
assert cat_balance == Decimal(80)
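# The balance query above could be hoisted into a helper as more ledger tests
# accumulate (a sketch; same joins, parameterized over category and account):
def category_balance(session, category, account):
    return session.query(func.sum(TransactionEntry.amount)) \
        .join(TransactionSplit) \
        .join(TransactionLine, and_(
            TransactionLine.id == TransactionEntry.line_id,
            TransactionLine.account == account,
        )) \
        .filter(TransactionSplit.category == category) \
        .scalar()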
| 34.681818 | 209 | 0.673282 | 706 | 5,341 | 4.916431 | 0.206799 | 0.054451 | 0.069144 | 0.076059 | 0.396716 | 0.311726 | 0.230193 | 0.216364 | 0.169692 | 0.169692 | 0 | 0.023218 | 0.185546 | 5,341 | 153 | 210 | 34.908497 | 0.774713 | 0.076203 | 0 | 0.06087 | 0 | 0 | 0.032297 | 0 | 0 | 0 | 0 | 0 | 0.278261 | 1 | 0.034783 | false | 0 | 0.069565 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba0edb0ff2adbce3d0a45afde6ac18b4190da1f6 | 480 | py | Python | conda.recipe/sync_version.py | irisTa56/MyPlotUtils | a58d3ca3d6fed7aa4b973f42807eb7894392bf9a | [
"MIT"
] | 1 | 2019-05-30T07:49:22.000Z | 2019-05-30T07:49:22.000Z | conda.recipe/sync_version.py | irisTa56/MyPlotUtils | a58d3ca3d6fed7aa4b973f42807eb7894392bf9a | [
"MIT"
] | 2 | 2019-03-05T12:02:55.000Z | 2019-03-18T06:42:43.000Z | conda.recipe/sync_version.py | irisTa56/MyPlotUtils | a58d3ca3d6fed7aa4b973f42807eb7894392bf9a | [
"MIT"
] | 1 | 2019-10-31T17:52:09.000Z | 2019-10-31T17:52:09.000Z | import os
import yaml
from collections import OrderedDict
version_ns = {}
with open(os.path.join("..", "tk_plot_utils", "_version.py")) as f:
exec(f.read(), {}, version_ns)
with open("meta.yaml", "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if lines[i].startswith(" version: "):
lines[i] = " version: {}\n".format(
".".join(map(str, version_ns["version_info"][:3])))
break
with open("meta.yaml", "w") as f:
f.writelines(lines)
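# An equivalent in-place edit with a regex (a sketch shown on a literal string
# rather than the real meta.yaml; the backreference keeps the indentation):
import re

text = "package:\n  name: demo\n  version: 0.1.0\n"
print(re.sub(r"(?m)^(\s*version: ).*$", r"\g<1>1.2.3", text))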
| 22.857143 | 67 | 0.627083 | 73 | 480 | 4.027397 | 0.547945 | 0.091837 | 0.088435 | 0.115646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002506 | 0.16875 | 480 | 20 | 68 | 24 | 0.734336 | 0 | 0 | 0 | 0 | 0 | 0.177083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba100d3eb1b942b5ba9211fc93d938bd2f98f5f4 | 832 | py | Python | apps/publications/tests/test_title_manager.py | techlib/celus | f32a7a22be5f4613dcac10b8e02c5c5a9bc297cb | [
"MIT"
] | 7 | 2020-02-20T13:24:40.000Z | 2022-01-28T19:36:04.000Z | apps/publications/tests/test_title_manager.py | techlib/celus | f32a7a22be5f4613dcac10b8e02c5c5a9bc297cb | [
"MIT"
] | 15 | 2020-04-28T13:09:02.000Z | 2021-11-03T15:21:24.000Z | apps/publications/tests/test_title_manager.py | techlib/celus | f32a7a22be5f4613dcac10b8e02c5c5a9bc297cb | [
"MIT"
] | 4 | 2020-02-20T13:48:30.000Z | 2021-03-19T00:33:34.000Z | import pytest
from logs.logic.data_import import TitleManager, TitleRec
from publications.models import Title
@pytest.mark.django_db
class TestTitleManager(object):
def test_mangled_isbn(self):
"""
Test for a bug where TitleManager looked up records in the database
using a non-normalized ISBN but stored new data under the normalized
ISBN. This discrepancy may lead to a database-level integrity error
because of constraints.
"""
Title.objects.create(name='Foo', isbn='978-0-07-174521-5')
tm = TitleManager()
record = TitleRec(
name='Foo', isbn='978- 0-07-174521-5', issn='', eissn='', doi='', pub_type='U'
)
record = tm.normalize_title_rec(record)
tm.prefetch_titles(records=[record])
tm.get_or_create(record)
| 34.666667 | 92 | 0.661058 | 108 | 832 | 5 | 0.675926 | 0.044444 | 0.040741 | 0.051852 | 0.088889 | 0.088889 | 0.088889 | 0.088889 | 0 | 0 | 0 | 0.040945 | 0.236779 | 832 | 23 | 93 | 36.173913 | 0.809449 | 0.270433 | 0 | 0 | 0 | 0 | 0.075134 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba12e6ccdc84d30d13b6370cdedfea813b4cc46a | 2,654 | py | Python | myuw/test/api/test_banner_message.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | myuw/test/api/test_banner_message.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | myuw/test/api/test_banner_message.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.urls import reverse
from userservice.user import UserService
from myuw.models import MigrationPreference, User
from myuw.test import get_request_with_user
from myuw.test.api import MyuwApiTest, VALIDATE, OVERRIDE
class TestBannerMessage(MyuwApiTest):
def test_close_banner_msg(self):
self.set_user('bill')
resp = self.get_response_by_reverse('myuw_close_banner_message')
self.assertEqual(resp.content, b'{"done": true}')
# remove the entry in DB (delete CASCADE)
User.objects.get(uwnetid='bill').delete()
resp = self.get_response_by_reverse('myuw_close_banner_message')
self.assertEqual(resp.content, b'{"done": true}')
user = User.objects.get(uwnetid='bill')
self.assertIsNotNone(str(user))
pref = MigrationPreference.objects.get(user=user)
self.assertIsNotNone(str(pref))
def test_invalid_user_msg_error_case(self):
self.set_user('0000')
err_msg = (b'<p>MyUW cannot find data for this user account '
b'in the Person Registry services. '
b'If you have just created your UW NetID, '
b'please try signing in to MyUW again in one hour.</p>')
resp = self.get_response_by_reverse('myuw_close_banner_message')
self.assertEqual(resp.content, err_msg)
resp = self.get_response_by_reverse('myuw_turn_off_tour_popup')
self.assertEqual(resp.content, err_msg)
def test_turn_off_pop_up(self):
self.set_user('bill')
resp = self.get_response_by_reverse('myuw_turn_off_tour_popup')
self.assertEqual(resp.content, b'{"done": true}')
def test_close_banner_msg_when_override(self):
with self.settings(DEBUG=False,
MYUW_DISABLE_ACTIONS_WHEN_OVERRIDE=True):
self.set_user('javerage')
self.set_userservice_override("bill")
self.assertEqual(UserService().get_override_user(), "bill")
resp = self.get_response_by_reverse('myuw_close_banner_message')
self.assertEqual(resp.status_code, 403)
def test_turn_off_pop_up_when_override(self):
with self.settings(DEBUG=False,
MYUW_DISABLE_ACTIONS_WHEN_OVERRIDE=True):
self.set_user('javerage')
self.set_userservice_override("bill")
self.assertEqual(UserService().get_override_user(), "bill")
resp = self.get_response_by_reverse('myuw_turn_off_tour_popup')
self.assertEqual(resp.status_code, 403)
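# The two override tests share their setup; a helper method would keep them
# in sync (a sketch, using only methods already exercised above):
def _override_as_bill(self):
    self.set_user('javerage')
    self.set_userservice_override('bill')
    self.assertEqual(UserService().get_override_user(), 'bill')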
| 42.806452 | 76 | 0.678975 | 342 | 2,654 | 4.98538 | 0.312866 | 0.028739 | 0.045161 | 0.078006 | 0.637537 | 0.583578 | 0.539003 | 0.533724 | 0.533724 | 0.533724 | 0 | 0.007756 | 0.222683 | 2,654 | 61 | 77 | 43.508197 | 0.818711 | 0.045968 | 0 | 0.553191 | 0 | 0 | 0.173259 | 0.068038 | 0 | 0 | 0 | 0 | 0.234043 | 1 | 0.106383 | false | 0 | 0.106383 | 0 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba131bfa8864d7d5b3f82143287c76ccd40c968d | 11,723 | py | Python | dgdynamic/output.py | Ezbob/dgDynamic | 394de1c138c1517c4cdfead879c43db189752d92 | [
"MIT"
] | null | null | null | dgdynamic/output.py | Ezbob/dgDynamic | 394de1c138c1517c4cdfead879c43db189752d92 | [
"MIT"
] | null | null | null | dgdynamic/output.py | Ezbob/dgDynamic | 394de1c138c1517c4cdfead879c43db189752d92 | [
"MIT"
] | null | null | null | from dgdynamic.utils.project_utils import LogMixin, make_directory
from dgdynamic.config.settings import config
from dgdynamic.utils.plotter import matplotlib_plot
from scipy.interpolate import interpolate
import threading
import time
import csv
import matplotlib.pyplot as plt
import os.path
import enum
import collections
import array
import numpy
class SimulationOutput(LogMixin):
def __init__(self, solved_by, user_sim_range, symbols, dependent=(), independent=(), ignore=(),
solver_method=None, errors=(),):
self.dependent = numpy.asanyarray(dependent, dtype=float)
self.independent = numpy.asanyarray(independent, dtype=float)
self.errors = errors
self.solver_used = solved_by
self.solver_method_used = solver_method
self.requested_simulation_range = user_sim_range
if independent is not None and len(independent) >= 2:
self.simulation_duration = abs(independent[-1] - independent[0])
elif independent is not None and len(independent) == 1:
self.simulation_duration = independent[0]
else:
self.simulation_duration = 0.0
try:
self._ignored = tuple(item[1] for item in ignore)
except IndexError:
self._ignored = ignore
self._path = os.path.abspath(config['Output Paths']['DATA_DIRECTORY'])
self._file_writer_thread = None
self.symbols = tuple(symbols) if isinstance(symbols, collections.Generator) else symbols
def has_sim_prematurely_stopped(self, rel_tol=1e-05, abs_tol=1e-08):
if len(self.independent) > 0:
return not numpy.isclose(self.independent[-1], self.requested_simulation_range[1],
rtol=rel_tol, atol=abs_tol)
else:
return self.requested_simulation_range[1] != 0
def is_data_evenly_spaced(self, rel_tol=1e-05, abs_tol=1e-08):
delta_t = 0
time_vals = self.independent
if len(time_vals) >= 2:
delta_t = abs(time_vals[1] - time_vals[0])
for i in range(1, len(time_vals)):
curr_t = time_vals[i]
if i < len(time_vals) - 1:
next_t = time_vals[i + 1]
curr_dt = abs(next_t - curr_t)
if not numpy.isclose(curr_dt, delta_t, rtol=rel_tol, atol=abs_tol):
return False
return True
def interpolate_data(self, new_sample_resolution, kind='linear'):
"""Shall return a new evenly spaced interpolated version of the original output"""
if new_sample_resolution > 0:
new_independent = numpy.linspace(self.independent[0], self.independent[-1], num=new_sample_resolution)
interpolation_func = interpolate.interp1d(self.independent, self.dependent, axis=0, kind=kind)
return SimulationOutput(self.solver_used, self.requested_simulation_range, self.symbols,
dependent=interpolation_func(new_independent), independent=new_independent,
ignore=self._ignored, solver_method=self.solver_method_used, errors=self.errors)
return self
@property
def is_output_set(self):
return False
@property
def has_errors(self):
return len(self.errors) > 0
@property
def is_empty(self):
return len(self.independent) + len(self.dependent) == 0
@property
def dependent_dimension(self):
return len(self.dependent[0])
def plot(self, filename=None, labels=None, figure_size=None, axis_labels=None,
axis_limits=None, title=None, show_grid=True, has_tight_layout=True):
if title is None and isinstance(self.solver_used, (str, enum.Enum)):
if isinstance(self.solver_used, enum.Enum):
title = self.solver_used.name.title()
else:
title = self.solver_used
if self.solver_method_used is not None:
title += (" - " + self.solver_method_used.name)
input_values = {
'independent': self.independent,
'dependent': self.dependent,
'symbols': self.symbols,
'ignored': self._ignored,
'title': title,
'filename': filename,
'labels': labels,
'figure_size': figure_size,
'axis_labels': axis_labels,
'axis_limits': axis_limits,
'show_grid': show_grid,
'has_tight_layout': has_tight_layout,
}
matplotlib_plot(input_values)
return self
@staticmethod
def show(*args, **kwargs):
plt.show(*args, **kwargs)
def _get_file_prefix(self, name, extension=".tsv", prefix=None):
if prefix is None:
return os.path.join(self._path, "{}_{}{}".format(self.solver_used.value, name, extension))
else:
return os.path.join(self._path, "{}{}{}".format(prefix, name, extension))
def _filter_out_ignores(self):
for rows in self.dependent:
filtered_row = ()
for index, item in enumerate(rows):
if index not in self._ignored:
filtered_row += (item,)
yield filtered_row
@property
def filtered_output(self):
return SimulationOutput(self.solver_used,
dependent=tuple(self._filter_out_ignores()),
independent=self.independent, ignore=(),
solver_method=self.solver_method_used,
symbols=self.symbols, errors=self.errors,
user_sim_range=self.requested_simulation_range)
def save(self, filename, prefix=None, unfiltered=False, labels=None, stream=None):
"""
Saves the independent and dependent variables as a tab-separated values (TSV) file in the directory
specified by the DATA_DIRECTORY variable in the configuration file. The name of the TSV file is a
concatenation of the ODE solver name, an underscore, the 'filename' parameter and the file
extension.
:param prefix: name prefix for the data file. Default is the plugin name followed by an underscore.
:param unfiltered: whether to mark 'unchanging species' in the output data set
:param filename: a name for the data file
:param stream: use another stream than a file stream
:param labels: use custom header labels for species. Default is the symbols specified by the model.
:return:
"""
float_precision = config.getint('Simulation', 'FIXED_POINT_PRECISION', fallback=18)
if len(self.dependent) == 0 or len(self.independent) == 0:
self._logger.warning("No or mismatched data")
return
if unfiltered:
paired_data = zip(self.independent, self.dependent)
else:
paired_data = zip(self.independent, self._filter_out_ignores())
make_directory(config['Output Paths']['DATA_DIRECTORY'], pre_delete=False)
if unfiltered:
dependent_dimension = self.dependent_dimension
else:
dependent_dimension = max(self.dependent_dimension - len(self._ignored), 0)
self._logger.debug("Dimension of the dependent variable is {}".format(dependent_dimension))
header_labels = self.symbols if labels is None else labels
assert isinstance(header_labels, (list, set, tuple))
def header():
yield "time"
for index, label in enumerate(header_labels):
if unfiltered and index in self._ignored:
yield "_{}".format(label)
else:
yield label
def format_float(variable):
return "{:.{}f}".format(variable, float_precision)
def data_rows():
for independent, dependent in paired_data:
yield (format_float(independent),) + tuple(format_float(var) for var in dependent)
if stream is None:
file_path = self._get_file_prefix(filename, prefix=prefix)
self._logger.info("Saving data as {}".format(file_path))
stream = open(file_path, mode="w")
def write_data():
self._logger.info("Started on writing data to disk")
start_t = time.time()
with stream as outfile:
# writing header underscore prefix marks that the columns where ignored (for ODE only, since SPiM
# don't output data for a variable if it's not in the plot directive)
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(element for element in header())
for row in data_rows():
writer.writerow(row)
end_t = time.time()
self._logger.info("Finished writing to disk. Took: {} secs".format(end_t - start_t))
self._file_writer_thread = threading.Thread(target=write_data)
self._file_writer_thread.start()
return self
def __getitem__(self, index):
return self.independent[index], self.dependent[index]
def __iter__(self):
for i in range(len(self.independent)):
yield self.independent[i], self.dependent[i]
def __len__(self):
return (len(self.independent) + len(self.dependent)) // 2
def __str__(self):
return "independent variable: {}\ndependent variable: {}".format(self.independent,
self.dependent)
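# Usage sketch (the file names are illustrative): plot() and save() return
# self, so calls can be chained:
#   output.plot(filename="run.png").save("run_data")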
class SimulationOutputSet(LogMixin):
def __init__(self, output):
self.output_set = tuple(output)
def plot(self, filename=None, **kwargs):
        # collections.Iterable was removed in Python 3.10; use collections.abc
        if isinstance(filename, collections.abc.Iterable):
for filename, output in zip(filename, self.output_set):
output.plot(filename=filename, **kwargs)
elif filename is None:
for output in self.output_set:
output.plot(filename=filename, **kwargs)
else:
raise TypeError("Expected an iterable collection of file names; got {}"
.format(type(filename)))
return self
def save(self, filename, **kwargs):
        # collections.Iterable was removed in Python 3.10; use collections.abc
        if isinstance(filename, collections.abc.Iterable):
for filename, output in zip(filename, self.output_set):
output.save(filename=filename, **kwargs)
else:
raise TypeError("Expected an iterable collection of file names; got {}"
.format(type(filename)))
return self
@property
def is_output_set(self):
return True
@property
def filtered_output(self):
return SimulationOutputSet((out.filtered_output for out in self.output_set))
@property
def data_matrix(self):
return tuple((array.array('d', column) for column in out.columns) for out in self.output_set)
@property
def failure_indices(self):
return tuple(i for i, o in enumerate(self.output_set) if o.has_errors)
@property
    def failures(self):
        # runs that produced errors, consistent with failure_indices above
        return SimulationOutputSet(filter(lambda obj: obj.has_errors, self.output_set))
    @property
    def successes(self):
        # runs that completed without errors
        return SimulationOutputSet(filter(lambda obj: not obj.has_errors, self.output_set))
def __iter__(self):
return self.output_set.__iter__()
def __getitem__(self, key):
return self.output_set.__getitem__(key)
def __len__(self):
return self.output_set.__len__()
def __repr__(self):
return "<SimulationOutputSet with {} runs>".format(self.__len__())
| 40.14726 | 119 | 0.622537 | 1,384 | 11,723 | 5.077312 | 0.194364 | 0.038423 | 0.0222 | 0.014231 | 0.217447 | 0.176605 | 0.123097 | 0.104027 | 0.075423 | 0.058062 | 0 | 0.005493 | 0.285678 | 11,723 | 291 | 120 | 40.285223 | 0.833652 | 0.081634 | 0 | 0.216814 | 0 | 0 | 0.05392 | 0.001969 | 0 | 0 | 0 | 0 | 0.004425 | 1 | 0.154867 | false | 0 | 0.057522 | 0.084071 | 0.362832 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba13e81e8598e900c4a906d592cda958b24f530a | 7,899 | py | Python | biodada/sdf.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | biodada/sdf.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | biodada/sdf.py | simomarsili/biodada | 642fb440d8a66a0413deb69c8623ea3b61d41678 | [
"BSD-3-Clause"
] | null | null | null | """SequenceDataFrame class module."""
import logging
import pandas
from pandas import DataFrame
from biodada.utils import timeit
from biodada.pipelines import PipelinesMixin
logger = logging.getLogger(__name__)
class SequenceDataFrame(PipelinesMixin, DataFrame):
"""
In addition to the standard DataFrame constructor arguments,
SequenceDataFrame also accepts the following keyword arguments:
Parameters
----------
alphabet : str
Alphabet for the alignment. Default: None. See biodada.ALPHABETS.
"""
_metadata = ['alphabet']
def __init__(self, *args, **kwargs):
self.alphabet = kwargs.pop('alphabet', None)
logger.debug('init SequenceDataFrame, alphabet: %r', self.alphabet)
super().__init__(*args, **kwargs)
# set column labels
if isinstance(self.columns, pandas.RangeIndex):
lmax = max(len(x) for x in self[0])
if lmax == 1:
raise ValueError(
'The first data field must contain sequence identifiers')
else:
self.columns = ['id'] + list(range(self.shape[1] - 1))
@property
def _constructor(self):
return SequenceDataFrame
@classmethod
def from_sequence_records(cls, records, alphabet=None):
"""
Return a SequenceDataFrame from records iterable.
If alphabet, filter out records with symbols not in alphabet.
"""
from biodada.alphabets import check_alphabet, check_alphabet_records
if alphabet:
# check alphabet first
alphabet = check_alphabet(alphabet)
records = check_alphabet_records(records, alphabet)
return cls(([identifier] + list(sequence)
for identifier, sequence in records),
alphabet=alphabet)
@property
@timeit
def data(self):
"""Return an ndarray of one-letter codes."""
return self.to_numpy(copy=False, dtype='U1')[:, 1:]
@property
def records(self):
"""Iterable of frame records."""
return ((r[0], ''.join(r[1:]))
for r in self.itertuples(index=False, name=None))
def encoded(self, encoder='one-hot', dtype=None):
"""
Return sequence data encoded into integer labels.
Parameters
----------
encoder : 'one-hot', 'ordinal'
encoder class:sklearn OneHotEncoder or OrdinalEncoder
dtype : number type
Default: numpy.float64 (one-hot), numpy.int8 (ordinal)
Returns
-------
Encoded data : sparse matrix (one-hot) or numpy array (ordinal)
Transformed array
"""
encoder = self.encoder(encoder=encoder, dtype=dtype)
return encoder.fit_transform(self.data)
def principal_components(self, n_components=3, pca=None):
"""Return n_components principal components from PCA.
See SequenceDataFrame.pca method for details.
Attributes
----------
n_components : int
Number of components to keep.
pca : a fitted PCA pipeline.
If passed, just transform the data with `pca`
Returns
-------
array-like, shape=(n_records, n_components)
"""
from sklearn.exceptions import NotFittedError
if not pca:
pca = self.pca(n_components=n_components)
pca.fit(self.data)
try:
return pca.transform(self.data)
except NotFittedError:
raise
def clusters(self, n_clusters, n_components=3):
"""For a given number of clusters, return the cluster labels.
See SequenceDataFrame.clustering for details.
Parameters
----------
n_clusters : int
The number of clusters.
n_components : int
Number of principal components to keep in the dimensionality
reduction pre-processing step.
Returns
-------
cluster_labels : list
"""
clustering = self.clustering(
n_clusters=n_clusters, n_components=n_components)
labels = clustering.fit_predict(self.data)
return labels
def classify(self, labeled_data, n_neighbors=3, transformer=None):
"""Classify records from labeled data."""
classifier = self.classifier(n_neighbors=n_neighbors).fit(
*labeled_data)
if not transformer:
X1 = self.data
else:
X1 = transformer.transform(self.data)
return classifier.predict(X1)
@timeit
def save(self, target):
"""Save frame as bzipped json."""
import json
import codecs
from bz2 import BZ2File
dd = {}
dd['index'] = list(self.index)
dd['columns'] = [-1] + list(self.columns)[1:]
dd['records'] = list(self.records)
dd['alphabet'] = self.alphabet
handle = codecs.getwriter('utf8')(BZ2File(target, 'w'))
json.dump(dd, fp=handle)
def parse_records(source, frmt, uppercase=True):
"""Parse records from source."""
import lilbio # pylint: disable=import-error
preprocess = lilbio.uppercase_only if uppercase else None
return lilbio.parse(source, frmt, func=preprocess)
def non_redundant_records(records, threshold=0.9):
"""Return an iterable of non-redundant records."""
import pcdhit # pylint: disable=import-error
return pcdhit.filter(records, threshold)
@timeit
def ungap_frame(frame, threshold=0.1):
"""Return a copy of frame after removing gappy records/positions."""
import cleanset # pylint: disable=import-error
logger.debug('start filtering gaps')
cleaner = cleanset.Cleaner(
fna=threshold, condition=lambda x: x == '-' or x == 'X', axis=0.5)
frame = cleaner.fit_transform(frame)
logger.debug('stop filtering gaps')
return frame
@timeit
def read_alignment(source, fmt, uppercase=True, c=0.9, g=0.1, alphabet=None):
"""Parse a pandas dataframe from an alignment file.
Parameters
----------
source : filepath or file-like
The alignment file
fmt : str
Alignment format. Valid options are: 'fasta', 'stockholm'.
uppercase : boolean
        If True, return only uppercase symbols and the {'-', '*'} symbols.
c : float
Sequence identity threshold for redundancy filter. 0 < c < 1.
g : float
Gap fraction threshold for gap filter. 0 <= g <= 1.
Returns
-------
dataframe
A pandas dataframe.
"""
import itertools
from biodada.alphabets import check_alphabet, guess_alphabet
# parse records
records = parse_records(source, fmt, uppercase=uppercase)
# filter redundant records via cdhit
if c:
records = non_redundant_records(records, c)
if not alphabet:
records_head = itertools.islice(records, 50)
alphabet = guess_alphabet(records_head)
records = itertools.chain(records_head, records)
else:
alphabet = check_alphabet(alphabet)
# convert records to a dataframe
df = SequenceDataFrame.from_sequence_records(records, alphabet=alphabet)
# reduce gappy records/positions
if g:
df = ungap_frame(df, g)
return df
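# Usage sketch (file name and format are illustrative):
#   df = read_alignment('family.sto', 'stockholm', c=0.9, g=0.1)
#   X = df.encoded('one-hot')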
@timeit
def load(source):
"""Load a frame as bzipped json."""
import json
import gopen
with gopen.readable(source) as fp:
dd = json.load(fp)
    index = dd['index']
    columns = dd['columns']
    columns.sort()
    columns = ['id'] + columns[1:]
    # The saved index is already aligned with the saved records, so it is
    # passed as-is (note that list.sort() returns None, not the sorted list).
    df = SequenceDataFrame(([identifier] + list(sequence)
                            for identifier, sequence in dd['records']),
                           index=index,
                           columns=columns,
                           alphabet=dd['alphabet'])
return df
| 30.498069 | 77 | 0.614128 | 881 | 7,899 | 5.430193 | 0.275823 | 0.022993 | 0.017559 | 0.01505 | 0.058528 | 0.049331 | 0.033027 | 0 | 0 | 0 | 0 | 0.007078 | 0.284593 | 7,899 | 258 | 78 | 30.616279 | 0.839497 | 0.307507 | 0 | 0.136 | 0 | 0 | 0.044038 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.136 | 0.008 | 0.376 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba15521ef40d650dff171633da21a78beae9c5a4 | 2,180 | py | Python | picprime.py | BartMassey/prime-tree | e913495c215a4d898e13145ba12829b8650cb7d5 | [
"MIT"
] | 1 | 2020-05-11T06:31:58.000Z | 2020-05-11T06:31:58.000Z | picprime.py | BartMassey/prime-tree | e913495c215a4d898e13145ba12829b8650cb7d5 | [
"MIT"
] | null | null | null | picprime.py | BartMassey/prime-tree | e913495c215a4d898e13145ba12829b8650cb7d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Bart Massey
# Render an ASCII image from a template, constructed such
# that the digits in the resulting image form a large prime
# number.
# This code is licensed under the "MIT license". See
# the file `LICENSE` in this distribution for license terms.
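# A hypothetical template fragment, to illustrate the format: '.' cells are
# filled with random digits, existing digits are kept, and all other
# characters are copied verbatim:
#
#   .....#.....
#   ....###....
#   ...#####...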
import random
import sys
# Miller-Rabin test below is recursive, and this number is
# going to be big. Should probably re-implement Miller-Rabin
# iteratively.
sys.setrecursionlimit(10000)
if len(sys.argv) > 1:
picfile = open(sys.argv[1], "r")
else:
picfile = sys.stdin
pic = picfile.read()
# Miller-Rabin probabilistic primality test.
# Code based on a 1999 Nickle implementation by me.
def is_composite(n, d):
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def witness_exp(b, e, m):
if e == 0:
return (0, 1)
if e == 1:
return (b % m, 0)
p, w = witness_exp(b, e // 2, m)
if w != 0:
return (p, w)
t = (p ** 2) % m
if t == 1 and p != 1 and p != m - 1:
return (t, p)
if e % 2 == 0:
return (t, w)
return ((t * b) % m, w)
def witness(a, n):
p, w = witness_exp(a, n - 1, n)
if w != 0:
return True
if p != 1:
return True
return False
for p in primes:
if n % p == 0:
return True
for _ in range(d):
a = 1 + random.randrange(n - 1)
if witness(a, n):
return True
return False
# Repeatedly fill in the . characters in the template with
# random digits and see if the resulting number is prime.
while True:
tn = ""
for c in pic:
if c == '.':
tn += chr(ord('0') + random.randrange(10))
continue
if c.isnumeric():
tn += c
if not is_composite(int(tn), 40):
break
# Substitute the . characters in the original pic template
# to produce a prime-pic.
ti = 0
tl = list(pic)
for i, c in enumerate(tl):
if c.isnumeric():
assert tn[ti] == c
ti += 1
continue
if c == '.':
tl[i] = tn[ti]
ti += 1
# Render the pic.
print(''.join(tl))
| 24.222222 | 60 | 0.539908 | 331 | 2,180 | 3.537764 | 0.39577 | 0.029889 | 0.013664 | 0.020495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038408 | 0.343119 | 2,180 | 89 | 61 | 24.494382 | 0.77933 | 0.318349 | 0 | 0.266667 | 0 | 0 | 0.002723 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 1 | 0.05 | false | 0 | 0.033333 | 0 | 0.283333 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1597afda998cf11cce71daa14e435a88d232ec | 6,546 | py | Python | bokeh/tile_providers.py | areaweb/bokeh | 9d131e45d626a912e85aee5b2647139c194dc893 | [
"BSD-3-Clause"
] | null | null | null | bokeh/tile_providers.py | areaweb/bokeh | 9d131e45d626a912e85aee5b2647139c194dc893 | [
"BSD-3-Clause"
] | 1 | 2017-01-12T00:37:38.000Z | 2017-01-12T00:37:38.000Z | bokeh/tile_providers.py | areaweb/bokeh | 9d131e45d626a912e85aee5b2647139c194dc893 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Pre-configured tile sources for common third party tile services.
Attributes:
CARTODBPOSITRON
Tile Source for CartoDB Tile Service
.. raw:: html
<img src="http://tiles.basemaps.cartocdn.com/light_all/14/2627/6331.png" />
CARTODBPOSITRON_RETINA
Tile Source for CartoDB Tile Service (tiles at 'retina' resolution)
.. raw:: html
<img src="http://tiles.basemaps.cartocdn.com/light_all/14/2627/6331@2x.png" />
STAMEN_TERRAIN
Tile Source for Stamen Terrain Service
.. raw:: html
<img src="http://c.tile.stamen.com/terrain/14/2627/6331.png" />
STAMEN_TERRAIN_RETINA
Tile Source for Stamen Terrain Service
.. raw:: html
<img src="http://c.tile.stamen.com/terrain/14/2627/6331@2x.png" />
STAMEN_TONER
Tile Source for Stamen Toner Service
.. raw:: html
<img src="http://c.tile.stamen.com/toner/14/2627/6331.png" />
STAMEN_TONER_BACKGROUND
Tile Source for Stamen Toner Background Service which does not include labels
.. raw:: html
<img src="http://c.tile.stamen.com/toner-background/14/2627/6331.png" />
STAMEN_TONER_LABELS
Tile Source for Stamen Toner Service which includes only labels
.. raw:: html
<img src="http://c.tile.stamen.com/toner-labels/14/2627/6331.png" />
Additional information available at:
* Stamen tile service - http://maps.stamen.com/
* CartoDB tile service - https://carto.com/location-data-services/basemaps/
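Example:

    A minimal usage sketch (the figure ranges are illustrative):

    .. code-block:: python

        from bokeh.plotting import figure, show
        from bokeh.tile_providers import CARTODBPOSITRON

        p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000))
        p.add_tile(CARTODBPOSITRON)
        show(p)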
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import sys
import types
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# __all__ defined at the bottom of this module
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class _TileProvidersModule(types.ModuleType):
_CARTO_ATTRIBUTION = (
'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors,'
'© <a href="https://cartodb.com/attributions">CartoDB</a>'
)
_STAMEN_ATTRIBUTION = (
'Map tiles by <a href="http://stamen.com">Stamen Design</a>, '
'under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. '
'Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, '
'under %s.'
)
@property
def CARTODBPOSITRON(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tiles.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png',
attribution=self._CARTO_ATTRIBUTION
)
@property
def CARTODBPOSITRON_RETINA(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tiles.basemaps.cartocdn.com/light_all/{z}/{x}/{y}@2x.png',
attribution=self._CARTO_ATTRIBUTION
)
@property
def STAMEN_TERRAIN(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tile.stamen.com/terrain/{Z}/{X}/{Y}.png',
attribution=self._STAMEN_ATTRIBUTION % '<a href="http://creativecommons.org/licenses/by-sa/3.0">CC BY SA</a>'
)
@property
def STAMEN_TERRAIN_RETINA(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tile.stamen.com/terrain/{Z}/{X}/{Y}@2x.png',
attribution=self._STAMEN_ATTRIBUTION % '<a href="http://creativecommons.org/licenses/by-sa/3.0">CC BY SA</a>'
)
@property
def STAMEN_TONER(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tile.stamen.com/toner/{Z}/{X}/{Y}.png',
attribution=self._STAMEN_ATTRIBUTION % '<a href="http://www.openstreetmap.org/copyright">ODbL</a>'
)
@property
def STAMEN_TONER_BACKGROUND(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tile.stamen.com/toner-background/{Z}/{X}/{Y}.png',
attribution=self._STAMEN_ATTRIBUTION % '<a href="http://www.openstreetmap.org/copyright">ODbL</a>'
)
@property
def STAMEN_TONER_LABELS(self):
from bokeh.models.tiles import WMTSTileSource
return WMTSTileSource(
url='http://tile.stamen.com/toner-labels/{Z}/{X}/{Y}.png',
attribution=self._STAMEN_ATTRIBUTION % '<a href="http://www.openstreetmap.org/copyright">ODbL</a>'
)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_mod = _TileProvidersModule(str('bokeh.tile_providers'))
_mod.__doc__ = __doc__
_mod.__all__ = (
'CARTODBPOSITRON',
'CARTODBPOSITRON_RETINA',
'STAMEN_TERRAIN',
'STAMEN_TERRAIN_RETINA',
'STAMEN_TONER',
'STAMEN_TONER_BACKGROUND',
'STAMEN_TONER_LABELS',
)
sys.modules['bokeh.tile_providers'] = _mod
del _mod, sys, types
| 33.397959 | 121 | 0.528109 | 649 | 6,546 | 5.195686 | 0.228043 | 0.032028 | 0.038553 | 0.026987 | 0.591637 | 0.570878 | 0.515421 | 0.465599 | 0.465599 | 0.465599 | 0 | 0.016732 | 0.178277 | 6,546 | 195 | 122 | 33.569231 | 0.610151 | 0.484418 | 0 | 0.35443 | 0 | 0.063291 | 0.358021 | 0.01979 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088608 | false | 0 | 0.151899 | 0 | 0.367089 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1b2875425d87985d623e2b9aaa9ba8a6c4dc05 | 326 | py | Python | submissions/excel-sheet-column-number/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/excel-sheet-column-number/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/excel-sheet-column-number/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/excel-sheet-column-number
class Solution:
def titleToNumber(self, s):
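        # Treat the title as a base-26 number with A=1 .. Z=26; reversing the
        # string lets position i contribute digit * 26**i.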
char2int = {chr(64 + i): i for i in range(1, 27)}
s = s[::-1]
ans = 0
for i, char in enumerate(s):
num = char2int[char]
ans += num * (26 ** i)
return ans
| 25.076923 | 57 | 0.518405 | 45 | 326 | 3.755556 | 0.666667 | 0.047337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051163 | 0.340491 | 326 | 12 | 58 | 27.166667 | 0.734884 | 0.168712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1d34490f5d8eafffe1b2ed6c3b6fdeebb44a44 | 368 | py | Python | class11/class11_03.py | WesGtoX/python-selenium | 52f4fdca84a6f4139c8a8478435f4f1b12048258 | [
"MIT"
] | null | null | null | class11/class11_03.py | WesGtoX/python-selenium | 52f4fdca84a6f4139c8a8478435f4f1b12048258 | [
"MIT"
] | 1 | 2021-06-02T21:51:27.000Z | 2021-06-02T21:51:27.000Z | class11/class11_03.py | WesGtoX/python-selenium | 52f4fdca84a6f4139c8a8478435f4f1b12048258 | [
"MIT"
] | null | null | null | from time import sleep
from selenium.webdriver import Firefox
from selenium.webdriver.common.alert import Alert
url = 'https://selenium.dunossauro.live/aula_11_a.html'
browser = Firefox()
browser.get(url)
sleep(2)
browser.find_element_by_id('prompt').click()
alert = Alert(browser)
alert.send_keys('Wesley')
sleep(1)
alert.accept()  # confirm the alert by clicking OK
| 16 | 55 | 0.758152 | 54 | 368 | 5.055556 | 0.648148 | 0.087912 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 0.119565 | 368 | 22 | 56 | 16.727273 | 0.830247 | 0.057065 | 0 | 0 | 0 | 0 | 0.171014 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1e275e3805e253dac61d8e4dca4e8734856aa7 | 648 | py | Python | binarysearch.py | gary-mayfield/AOTW | e320342f8918d2bf0352d8479d866dbc7db58e5e | [
"MIT"
] | null | null | null | binarysearch.py | gary-mayfield/AOTW | e320342f8918d2bf0352d8479d866dbc7db58e5e | [
"MIT"
] | null | null | null | binarysearch.py | gary-mayfield/AOTW | e320342f8918d2bf0352d8479d866dbc7db58e5e | [
"MIT"
] | null | null | null | from random import shuffle
def binarysearch(arr, left, right, element):
if right >= left:
        middle = (left + right) // 2
if arr[middle] == element:
return middle
elif arr[middle] > element:
return binarysearch(arr, left, middle - 1, element)
else:
return binarysearch(arr, middle + 1, right, element)
else:
return -1
_list = [i for i in range(1,100)]
# shuffle(_list)  # note: binary search requires the list to be sorted
print(_list)
element = int(input("Select an element "))
index = binarysearch(_list, 0, len(_list) - 1, element)
print("Index of %s is %s" %(element, index))
| 24.923077 | 64 | 0.583333 | 81 | 648 | 4.604938 | 0.432099 | 0.120643 | 0.101877 | 0.117962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021978 | 0.29784 | 648 | 25 | 65 | 25.92 | 0.797802 | 0.021605 | 0 | 0.111111 | 0 | 0 | 0.055292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1f43156e727833f3124e7a717dcb003ddee0d9 | 13,660 | py | Python | Battleship.py | BroCode23/battleship | d2c0ab4fbaecf4a41f0a9d3b8311d602e762edc7 | [
"MIT"
] | null | null | null | Battleship.py | BroCode23/battleship | d2c0ab4fbaecf4a41f0a9d3b8311d602e762edc7 | [
"MIT"
] | null | null | null | Battleship.py | BroCode23/battleship | d2c0ab4fbaecf4a41f0a9d3b8311d602e762edc7 | [
"MIT"
] | null | null | null | from random import randint
from Boards import *
class Player():
def getVerticalOrHorizontal(self):
"""Used to place boats, return true if vertical, false if horizontal"""
choice = ""
while not (choice == "v" or choice == "h"): # input loop
choice = input("Orientation? (v/h)")
if not choice:
continue
choice = choice[0].lower()
if choice == "v":
return True
else: # choice == "h"
return False
def getBoatPlacementCoords(self, vertical, boatLength):
"""Gets the coordinates for the given boat for placement"""
top = -1
left = -1
if vertical:
maxSpaceFromTop = 10 - boatLength
maxSpaceFromLeft = 9
else: # horizontal
maxSpaceFromTop = 9
maxSpaceFromLeft = 10 - boatLength
# positions boat placement
while not 0 <= top <= maxSpaceFromTop:
while top not in NUMBERS:
top = input("Space from top?")
top = int(top)
while not 0 <= left <= maxSpaceFromLeft:
while left not in NUMBERS:
left = input("Space from left?")
left = int(left)
return top, left
def placeBoat(self, boatLength, board, vertical, top, left):
"""Places the boat on the board, if unsuccessful, it will remove the partial boat and return false"""
boatPegsPlaced = 0
while boatPegsPlaced < boatLength and board[top][left] != "O":
board[top][left] = "O"
if vertical: # if vertical
top += 1
else:
left += 1
boatPegsPlaced += 1
if boatPegsPlaced < boatLength:
# if it hits another boat while placing, it removes the partial of the boat placed
while boatPegsPlaced > 0:
if vertical:
top -= 1
else:
left -= 1
boatPegsPlaced -= 1
board[top][left] = "."
return False
return True
def placeBoats(self, board):
"""Places boats onto the board to set up the game"""
time = 1
while time <= 5: # game state
print("\n\n\n\n\n")
            printBoard(board, True)  # show the board being set up
if time == 1:
boat = 5 # variable for boat length (pegs)
print("Aircraft Carrier (5 pegs)")
elif time == 2:
boat = 4
print("Battleship (4 pegs)")
elif time == 3:
boat = 3
print("Submarine (3 pegs)")
elif time == 4:
boat = 3
print("Cruiser (3 pegs)")
elif time == 5:
boat = 2
print("Destroyer (2 pegs)")
vertical = self.getVerticalOrHorizontal()
top, left = self.getBoatPlacementCoords(vertical, boat)
if self.placeBoat(boat, board, vertical, top, left):
time += 1
else:
print("your boats collided! Reposition your boat.")
return
def makeTurn(self):
"""Player's turn to shoot at a spot on the board"""
top = -1
left = -1
while not onBoard(left): # coordinates for shot
while left not in NUMBERS:
left = input("X coordinate?")
left = int(left) - 1
while not onBoard(top): # coordinates for shot
while top not in NUMBERS:
top = input("Y coordinate?")
top = int(top) - 1
if hiddenBoard[top][left] == "." and computerBoard[top][left] == ".":
computerBoard[top][left] = "$"
print("We missed, Cap'n.")
return 0
elif hiddenBoard[top][left] == "O" and computerBoard[top][left] == ".":
computerBoard[top][left] = "X"
print("We got 'em!")
return 1
elif computerBoard[top][left] == "$" or computerBoard[top][left] == "X":
print("Oops, we already shot there.")
return 0
else:
raise EnvironmentError(
'hidden or computer board not set up correctly')
class Computer(Player):
def __init__(self):
self.vertical = randint(0, 1)
self.top = -1
self.left = -1
self.hit = False # if last shot hit
self.turnedAround = False # if the computer was shooting along the boat but missed
self.direction = '' # direction the boat is placed
self.shots = []
self.hits = []
def shotHereBefore(self, top, left):
"""returns true if the shot has already been taken, otherwise false"""
return coordsToString(top, left) in self.shots
def getPreviousHit(self):
"""grabs the last landed shot from self.hits"""
if len(self.hits) == 0:
raise IndexError("Must have a previous shot to use")
return stringToCoords(self.hits[-1])
def getVerticalOrHorizontal(self):
"""returns 1 or 0, vertical or horizontal"""
self.vertical = randint(0, 1)
def getBoatPlacementCoords(self, boatLength):
"""Gets the coordinates for the given boat for placement"""
self.top = -1
self.left = -1
if self.vertical:
maxSpaceFromTop = 10 - boatLength
maxSpaceFromLeft = 9
else: # horizontal
maxSpaceFromTop = 9
maxSpaceFromLeft = 10 - boatLength
self.top = randint(0, maxSpaceFromTop)
self.left = randint(0, maxSpaceFromLeft)
def placeBoats(self, board):
"""Places boats onto the board to set up the game"""
time = 1
while time <= 5:
if time == 1: # game state
boat = 5 # variable for boat length (pegs)
elif time == 2:
boat = 4
elif time == 3:
boat = 3
elif time == 4:
boat = 3
elif time == 5:
boat = 2
self.getVerticalOrHorizontal()
self.getBoatPlacementCoords(boat)
if self.placeBoat(boat, board, self.vertical, self.top, self.left):
time += 1
def tryContinueShot(self):
"""Tries to shoot along a boat after 2 successful hits"""
self.turnedAround = False
if self.direction:
# if last shot missed but shooting along boat
if not self.hit:
self.turnedAround = True
if self.direction == 'up':
self.direction = 'down'
elif self.direction == 'down':
self.direction = 'up'
elif self.direction == 'right':
self.direction = 'left'
elif self.direction == 'left':
self.direction = 'right'
else:
raise EnvironmentError('Unknown Direciton')
# find the next shot along the boat
iterations = 0
maxIterations = 1 # should only move once if not turned around
if self.turnedAround:
maxIterations = 5 # at max could go full length of boat
while iterations < maxIterations and self.shotHereBefore(self.top, self.left):
if self.direction == 'up':
self.top -= 1
if not onBoard(self.top):
self.direction = 'down'
elif self.direction == 'down':
self.top += 1
if not onBoard(self.top):
self.direction = 'up'
elif self.direction == 'right':
self.left += 1
if not onBoard(self.left):
self.direction = 'left'
elif self.direction == 'left':
self.left -= 1
if not onBoard(self.left):
self.direction = 'right'
else:
raise EnvironmentError('Unknown Direciton')
iterations += 1
if not onBoard(self.top, self.left):
iterations = 0
maxIterations = 5
self.turnedAround = True
# reset everything
if not onBoard(self.top, self.left) or self.shotHereBefore(self.top, self.left):
self.top = -1
self.left = -1
self.direction = ''
self.hit = False
self.turnedAround = False
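    # Overall targeting strategy: shoot randomly until a hit ("hunt"), probe
    # the four neighbours of the hit, lock onto a direction after a second
    # hit, and turn around once when the run of hits ends ("target").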
def handleHitOrMiss(self):
"""Logs the computer shot in the self.shots array and outputs text based on a hit or miss"""
if len(self.hits) > 0:
prevTop, prevLeft = self.getPreviousHit()
self.shots.append(coordsToString(self.top, self.left))
if playerBoard[self.top][self.left] == ".":
playerBoard[self.top][self.left] = "$"
print("The enemy missed at %i,%i." %
(self.left + 1, self.top + 1))
# want to shoot in all directions
if self.hit and len(self.hits) > 0 and not self.direction:
self.top, self.left = prevTop, prevLeft
else:
self.hit = False
if self.turnedAround: # if already turned around and missed, find a new boat to shoot
self.direction = ''
return 0 # returns 0 so the player doesn't lose a life
elif playerBoard[self.top][self.left] == "O":
if len(self.shots) > 0 and self.hit: # find direction
if self.top - prevTop == -1:
self.direction = 'up'
elif self.left - prevLeft == 1:
self.direction = 'right'
elif self.top - prevTop == 1:
self.direction = 'down'
elif self.left - prevLeft == -1:
self.direction = 'left'
else:
self.direction = ''
self.hit = True
playerBoard[self.top][self.left] = "X"
self.hits.append(coordsToString(self.top, self.left))
print("They hit us at %i,%i Cap'n!" %
(self.left + 1, self.top + 1))
return 1 # returns 1 because they hit the player's boat
else:
print("Their Circuits fried.")
raise EnvironmentError('Computer didn\'t hit or miss')
def makeTurn(self):
"""Computer shoots at a random spot on the board, and if it hits it tries to shoot around the same spot"""
self.tryContinueShot()
# loop through all directions if hit a boat for the first time
if not self.direction and self.hit:
prevTop, prevLeft = self.getPreviousHit()
if onBoard(prevTop - 1) and not self.shotHereBefore(prevTop - 1, prevLeft):
prevTop -= 1
self.top, self.left = prevTop, prevLeft
elif onBoard(prevLeft + 1) and not self.shotHereBefore(prevTop, prevLeft + 1):
prevLeft += 1
self.top, self.left = prevTop, prevLeft
elif onBoard(prevTop + 1) and not self.shotHereBefore(prevTop + 1, prevLeft):
prevTop += 1
self.top, self.left = prevTop, prevLeft
elif onBoard(prevLeft - 1) and not self.shotHereBefore(prevTop, prevLeft - 1):
prevLeft -= 1
self.top, self.left = prevTop, prevLeft
else:
self.hit = False
# otherwise take a random shot
if not onBoard(self.top, self.left) or self.shotHereBefore(self.top, self.left):
self.top = randint(0, 9)
self.left = randint(0, 9)
while self.shotHereBefore(self.top, self.left):
self.top = randint(0, 9)
self.left = randint(0, 9)
return self.handleHitOrMiss()
class BattleshipGame():
def __init__(self):
self.human = Player()
self.computer = Computer()
def playGame(self):
"""A game of battleship! Player places boats and will play against a computer player"""
# Board setup
print(" BATTLESHIP")
self.computer.placeBoats(hiddenBoard)
self.human.placeBoats(playerBoard)
# Shows completed board
printBoard(computerBoard, False)
printBoard(playerBoard, True)
# boats placed correctly
print("you're good to go Cap'n! Where should we shoot?")
print("\n\n\n\n\n\n")
print("X = hit, $ = miss") # key for characters
playerPegsLeft = 17
computerPegsLeft = 17
# checks if player won or lost, starts main game loop
while playerPegsLeft and computerPegsLeft:
# turn functions return 1 if hit, else 0
computerPegsLeft -= self.human.makeTurn()
playerPegsLeft -= self.computer.makeTurn()
printBoard(computerBoard, False)
printBoard(playerBoard, True)
print("\n\n\n\n\n\n")
if not computerPegsLeft and playerPegsLeft:
print("Cap'n we are victorious! Thanks to yerr fearless leadership.")
elif not playerPegsLeft and computerPegsLeft: # player not alive :(
print("They sunk our fleet Cap'n! I'm going down with the ship!")
print("It was an honor serving you...")
else:
print("It was a tie?? How is that possible??")
game = BattleshipGame()
game.playGame()
| 37.322404 | 114 | 0.523426 | 1,487 | 13,660 | 4.802959 | 0.172159 | 0.033324 | 0.032344 | 0.039905 | 0.406189 | 0.352702 | 0.294735 | 0.236628 | 0.211005 | 0.184542 | 0 | 0.015529 | 0.38243 | 13,660 | 365 | 115 | 37.424658 | 0.831081 | 0.14063 | 0 | 0.522491 | 0 | 0.00346 | 0.073749 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055363 | false | 0 | 0.00692 | 0 | 0.121107 | 0.093426 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba1fe3cc14ae2fb7a1f6917969b6ebe4bff3b179 | 587 | py | Python | mmf/datasets/subset_dataset.py | dk25021999/mmf | 218057265a3fc175f656b5ebe8fb44ef5ccca2e9 | [
"BSD-3-Clause"
] | 3,252 | 2018-07-27T02:32:24.000Z | 2020-05-07T17:54:46.000Z | mmf/datasets/subset_dataset.py | dk25021999/mmf | 218057265a3fc175f656b5ebe8fb44ef5ccca2e9 | [
"BSD-3-Clause"
] | 914 | 2020-05-07T18:36:26.000Z | 2022-03-31T05:45:26.000Z | mmf/datasets/subset_dataset.py | dk25021999/mmf | 218057265a3fc175f656b5ebe8fb44ef5ccca2e9 | [
"BSD-3-Clause"
] | 490 | 2020-05-07T20:05:10.000Z | 2022-03-31T14:17:23.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
from torch.utils.data.dataset import Subset
class MMFSubset(Subset):
def __init__(self, dataset, indices):
super().__init__(dataset, indices)
self._dir_representation = dir(self)
def __getattr__(self, name):
if "_dir_representation" in self.__dict__ and name in self._dir_representation:
return getattr(self, name)
elif "dataset" in self.__dict__ and hasattr(self.dataset, name):
return getattr(self.dataset, name)
else:
raise AttributeError(name)
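# Usage sketch (dataset and indices are illustrative): behaves like
# torch.utils.data.Subset, except that attribute lookups which fail on the
# subset are forwarded to the wrapped dataset:
#
#   subset = MMFSubset(full_dataset, indices=[0, 2, 5])
#   subset.some_dataset_attribute  # resolved on full_dataset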
| 32.611111 | 87 | 0.672913 | 70 | 587 | 5.271429 | 0.485714 | 0.089431 | 0.113821 | 0.070461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.235094 | 587 | 17 | 88 | 34.529412 | 0.821826 | 0.081772 | 0 | 0 | 0 | 0 | 0.048417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2119f32355417c2644809bb5d6b273bb820282 | 1,876 | py | Python | ppyt/decorators.py | yusukemurayama/ppytrading | 9804d0de870d77bf8a1c847736a636b1342d4600 | [
"MIT"
] | 4 | 2016-08-16T07:47:15.000Z | 2017-12-11T10:08:47.000Z | ppyt/decorators.py | yusukemurayama/ppytrading | 9804d0de870d77bf8a1c847736a636b1342d4600 | [
"MIT"
] | null | null | null | ppyt/decorators.py | yusukemurayama/ppytrading | 9804d0de870d77bf8a1c847736a636b1342d4600 | [
"MIT"
] | 2 | 2018-06-15T04:43:15.000Z | 2020-05-02T07:47:15.000Z | # coding: utf-8
import logging
from functools import wraps
from ppyt.exceptions import NoDataError
logger = logging.getLogger(__name__)
def handle_nodataerror(nodata_return):
"""NoDataErrorを処理するデコレータです。
このデコレータをつけておくと、内部でNoDataErrorが発生したときに[nodata_return]が返るようになります。
Args:
nodata_return: NoDataError発生時に返る値
Retusn:
関数・メソッドの実行結果
※関数・メソッドでNoDataErrorが発生したら、nodata_returnが返ります。
"""
def wrapper(func):
@wraps(func)
def inner(*args, **kwds):
try:
return func(*args, **kwds)
except NoDataError:
                # Return nodata_return when a NoDataError is raised.
return nodata_return
return inner
return wrapper
class cached_property(object):
"""プロパティの値をキャッシュします。それにより、2回目以降のアクセス時の負荷を下げます。
評価されたプロパティの結果は、そのプロパティが定義されているインスタンス自身に格納されます。"""
def __init__(self, func):
"""コンストラクタ
Args:
func: cache_propertyでデコレートされたメソッド
※cached_propertyをつけたときは、プロパティのように
()なしでメソッドが走るようになります。
"""
self._func = func
def __get__(self, obj, klass):
        # Fetch the cache dict from the instance that defines the property.
        cache_key = '__CACHED_PROPERTY_DICT'  # instance attribute holding the cached data
        cache = getattr(obj, cache_key, None)
        if cache is None:
            # No cache dict on the instance yet, so attach one.
            cache = {}
            setattr(obj, cache_key, cache)
        propname = self._func.__name__  # name of the property
        if propname not in cache:
            # Not cached yet: evaluate the method and cache the result.
            cache[propname] = self._func(obj)
            logger.debug('cached propname[{}].'.format(propname))
        else:
            # Log the cache hit.
            logger.debug('fetching propname[{}] from the cache.'.format(propname))
return cache[propname]
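# Usage sketch (the class and methods below are hypothetical):
#
#     class Stock:
#         @cached_property
#         def history(self):
#             return expensive_load()  # evaluated once per instance
#
#         @handle_nodataerror(nodata_return=0.0)
#         def latest_price(self):
#             ...  # returns 0.0 if a NoDataError is raised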
| 28 | 71 | 0.632196 | 160 | 1,876 | 7.20625 | 0.51875 | 0.041631 | 0.019081 | 0.036427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001486 | 0.282516 | 1,876 | 66 | 72 | 28.424242 | 0.852155 | 0.353412 | 0 | 0 | 0 | 0 | 0.06362 | 0.06362 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.1 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2239ef061f264402f17baa88727dae18cadae7 | 5,318 | py | Python | src/jsonapi/v1/rebases.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 216 | 2015-01-05T12:48:10.000Z | 2022-03-08T00:12:23.000Z | src/jsonapi/v1/rebases.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 55 | 2015-02-28T12:10:26.000Z | 2020-11-18T17:45:16.000Z | src/jsonapi/v1/rebases.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 34 | 2015-05-02T15:15:10.000Z | 2020-06-15T19:20:37.000Z | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2015 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
import jsonapi
@jsonapi.PrimaryResource
class Rebases(object):
"""The review rebases in this system."""
name = "rebases"
contexts = (None, "reviews")
value_class = (api.log.rebase.MoveRebase, api.log.rebase.HistoryRewrite)
exceptions = api.log.rebase.RebaseError
@staticmethod
def json(value, parameters):
"""{
"id": integer,
"review": integer,
"creator": integer,
"type": "history-rewrite" or "move"
"old_head": integer,
"new_head": integer,
// if |type| is "move":
"old_upstream": integer,
"new_upstream": integer,
"equivalent_merge": integer or null,
"replayed_rebase": integer or null,
}"""
old_head = value.old_head
new_head = value.new_head
data = { "id": value.id,
"review": value.review,
"creator": value.creator,
"old_head": old_head,
"new_head": new_head }
if isinstance(value, api.log.rebase.HistoryRewrite):
data.update({ "type": "history-rewrite" })
else:
data.update({ "type": "move",
"old_upstream": value.old_upstream,
"new_upstream": value.new_upstream,
"equivalent_merge": value.equivalent_merge,
"replayed_rebase": value.replayed_rebase })
return parameters.filtered("rebases", data)
@staticmethod
def single(parameters, argument):
"""Retrieve one (or more) rebases in this system.
REBASE_ID : integer
Retrieve a rebase identified by its unique numeric id."""
return Rebases.setAsContext(parameters, api.log.rebase.fetch(
parameters.critic, rebase_id=jsonapi.numeric_id(argument)))
@staticmethod
def multiple(parameters):
"""Retrieve all rebases in this system.
review : REVIEW_ID : -
Include only rebases of one review, identified by the review's unique
numeric id."""
review = jsonapi.deduce("v1/reviews", parameters)
return api.log.rebase.fetchAll(parameters.critic, review=review)
@staticmethod
def create(parameters, value, values, data):
critic = parameters.critic
user = critic.actual_user
converted = jsonapi.convert(
parameters,
{
"new_upstream?": str,
"history_rewrite?": bool
},
data)
new_upstream = converted.get("new_upstream")
history_rewrite = converted.get("history_rewrite")
if (new_upstream is None) == (history_rewrite is None):
raise jsonapi.UsageError(
"Exactly one of the arguments new_upstream and history_rewrite "
"must be specified.")
if history_rewrite == False:
raise jsonapi.UsageError(
"history_rewrite must be true, or omitted.")
review = jsonapi.deduce("v1/reviews", parameters)
if review is None:
raise jsonapi.UsageError(
"review must be specified when preparing a rebase")
if history_rewrite is not None:
expected_type = api.log.rebase.HistoryRewrite
else:
expected_type = api.log.rebase.MoveRebase
result = []
def collectRebase(rebase):
assert isinstance(rebase, expected_type), repr(rebase)
result.append(rebase)
with api.transaction.Transaction(critic) as transaction:
transaction \
.modifyReview(review) \
.prepareRebase(
user, new_upstream, history_rewrite,
callback=collectRebase)
assert len(result) == 1, repr(result)
return result[0], None
@staticmethod
def delete(parameters, value, values):
critic = parameters.critic
if value is None:
raise jsonapi.UsageError(
"Only one rebase can currently be deleted per request")
rebase = value
with api.transaction.Transaction(critic) as transaction:
transaction \
.modifyReview(rebase.review) \
.cancelRebase(rebase)
@staticmethod
def setAsContext(parameters, rebase):
parameters.setContext(Rebases.name, rebase)
# Also set the rebase's review (and repository and branch) as context.
jsonapi.v1.reviews.Reviews.setAsContext(parameters, rebase.review)
return rebase
| 33.2375 | 80 | 0.599285 | 559 | 5,318 | 5.618962 | 0.323792 | 0.049029 | 0.030564 | 0.018147 | 0.111429 | 0.069405 | 0.045209 | 0.045209 | 0.045209 | 0 | 0 | 0.003818 | 0.310455 | 5,318 | 159 | 81 | 33.446541 | 0.852741 | 0.243701 | 0 | 0.222222 | 0 | 0 | 0.113451 | 0 | 0 | 0 | 0 | 0 | 0.022222 | 1 | 0.077778 | false | 0 | 0.022222 | 0 | 0.211111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2331e6aeb4fecff47bd0b8b8611ca7b36d514a | 5,984 | py | Python | standoff2conll.py | aoldoni/standoff2conll | 951c9f1a3e151e82acc8abbbe711b04daed0fff6 | [
"MIT"
] | null | null | null | standoff2conll.py | aoldoni/standoff2conll | 951c9f1a3e151e82acc8abbbe711b04daed0fff6 | [
"MIT"
] | 3 | 2017-02-21T11:17:21.000Z | 2019-09-30T18:26:27.000Z | standoff2conll.py | aoldoni/standoff2conll | 951c9f1a3e151e82acc8abbbe711b04daed0fff6 | [
"MIT"
] | 2 | 2019-08-21T15:43:03.000Z | 2021-01-24T21:07:56.000Z | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
import codecs
from logging import error
from document import Document
from common import pairwise
from asciify import document_to_ascii
from unicode2ascii import log_missing_ascii_mappings
from tagsequence import TAGSETS, IO_TAGSET, IOBES_TAGSET, DEFAULT_TAGSET
from tagsequence import BIO_to_IO, BIO_to_IOBES
from standoff import OVERLAP_RULES, load_postags_into_document
OUTPUT_TYPES = {'CONLL': 0, 'ROTHANDYIH': 1}
def argparser():
import argparse
ap = argparse.ArgumentParser(description='Convert standoff to CoNLL format',
usage='%(prog)s [OPTIONS] DIRECTORY')
ap.add_argument('directory')
ap.add_argument('-1', '--singletype', default=None, metavar='TYPE',
help='replace all annotation types with TYPE')
ap.add_argument('-a', '--asciify', default=None, action='store_true',
help='map input to ASCII')
ap.add_argument('-n', '--no-sentence-split', default=False,
action='store_true',
help='do not perform sentence splitting')
ap.add_argument('-o', '--overlap-rule', choices=OVERLAP_RULES,
default=OVERLAP_RULES[0],
help='rule to apply to resolve overlapping annotations')
ap.add_argument('-s', '--tagset', choices=TAGSETS, default=None,
help='tagset (default %s)' % DEFAULT_TAGSET)
ap.add_argument('-p', '--postag', choices=TAGSETS, default=None,
help='tagset (default %s)' % DEFAULT_TAGSET)
ap.add_argument('--process', choices=['CONLL','ROTHANDYIH'], default='CONLL',
help='switch between processes for the output format CONLL, or ROTHANDYIH')
ap.add_argument('--process_pos_tag_input',
help='the pos tag input file used for ROTHANDYIH')
return ap
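# Example invocation (the path is illustrative); the converted data is
# written to standard output:
#
#   python standoff2conll.py /path/to/standoff/dir > corpus.conll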
def is_standoff_file(fn):
return os.path.splitext(fn)[1] == '.ann'
def txt_for_ann(filename):
return os.path.splitext(filename)[0]+'.txt'
def read_ann(filename, options, encoding='utf-8', filepos=False):
txtfilename = txt_for_ann(filename)
with codecs.open(txtfilename, 'rU', encoding=encoding) as t_in:
with codecs.open(filename, 'rU', encoding=encoding) as a_in:
return Document.from_standoff(
t_in.read(), a_in.read(),
sentence_split = not options.no_sentence_split,
overlap_rule = options.overlap_rule,
filepos = filepos
)
def replace_types_with(document, type_):
from tagsequence import OUT_TAG, parse_tag, make_tag
for sentence in document.sentences:
for token in sentence.tokens:
if token.tag != OUT_TAG:
token.tag = make_tag(parse_tag(token.tag)[0], type_)
def retag_document(document, tagset):
if tagset == IO_TAGSET:
mapper = BIO_to_IO
elif tagset == IOBES_TAGSET:
mapper = BIO_to_IOBES
else:
raise ValueError('tagset {}'.format(tagset))
for sentence in document.sentences:
for t, next_t in pairwise(sentence.tokens, include_last=True):
next_tag = next_t.tag if next_t is not None else None
t.tag = mapper(t.tag, next_tag)
def convert_directory_conll(directory, options):
files = [n for n in os.listdir(directory) if is_standoff_file(n)]
files = [os.path.join(directory, fn) for fn in files]
if not files:
error('No standoff files in {}'.format(directory))
return
conll_data = ''
for fn in sorted(files):
document = read_ann(fn, options)
if options.singletype:
replace_types_with(document, options.singletype)
if options.tagset:
retag_document(document, options.tagset)
if options.asciify:
document_to_ascii(document)
conll_data = conll_data + document.to_conll()
return conll_data.encode('utf-8')
def convert_directory_rothandyih(directory, options, filepos):
files = [n for n in os.listdir(directory) if is_standoff_file(n)]
files = [os.path.join(directory, fn) for fn in files]
if not files:
error('No standoff files in {}'.format(directory))
return
conll_data = ''
lines = []
with open(filepos) as f:
lines = f.readlines()
previous_position = 0
for fn in sorted(files):
document = read_ann(fn, options, filepos = filepos)
if options.singletype:
replace_types_with(document, options.singletype)
if options.tagset:
retag_document(document, options.tagset)
if options.asciify:
document_to_ascii(document)
previous_position = load_postags_into_document(document, filepos, previous_position, lines)
conll_data = conll_data + document.to_rothandyih()
return conll_data.encode('utf-8')
def conversion_entry(argv, which, filepos=False):
    # argv is expected to be the option list without the program name; parse
    # it the same way main() does before handing off.
    options = argparser().parse_args(argv)
    return convert_and_return(options, which, filepos)
def convert_and_return(argv, which, filepos):
if not os.path.isdir(argv.directory):
error('Not a directory: {}'.format(argv.directory))
return 1
if which == OUTPUT_TYPES['CONLL']:
data = convert_directory_conll(argv.directory, argv)
elif which == OUTPUT_TYPES['ROTHANDYIH']:
data = convert_directory_rothandyih(argv.directory, argv, filepos)
if argv.asciify:
log_missing_ascii_mappings()
return data
def main(argv):
argv = argparser().parse_args(argv[1:])
if argv.process == 'CONLL':
data = convert_and_return(argv, OUTPUT_TYPES[argv.process], False)
elif argv.process == 'ROTHANDYIH':
data = convert_and_return(argv, OUTPUT_TYPES[argv.process], argv.process_pos_tag_input)
sys.stdout.write(data)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 35.408284 | 99 | 0.657253 | 760 | 5,984 | 4.980263 | 0.218421 | 0.023778 | 0.030911 | 0.021136 | 0.303831 | 0.302774 | 0.253633 | 0.238838 | 0.238838 | 0.214531 | 0 | 0.003287 | 0.237299 | 5,984 | 168 | 100 | 35.619048 | 0.82603 | 0.011865 | 0 | 0.259542 | 0 | 0 | 0.112502 | 0.003891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083969 | false | 0 | 0.10687 | 0.015267 | 0.282443 | 0.007634 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba23604795f8acde70a121e9f64775847c2840a2 | 582 | py | Python | main.py | spencerteiknsmith/invisible-conway | 9b6b9cbfff2225b3262ef8ef4c27e7f63731ebcf | [
"MIT"
] | null | null | null | main.py | spencerteiknsmith/invisible-conway | 9b6b9cbfff2225b3262ef8ef4c27e7f63731ebcf | [
"MIT"
] | null | null | null | main.py | spencerteiknsmith/invisible-conway | 9b6b9cbfff2225b3262ef8ef4c27e7f63731ebcf | [
"MIT"
] | null | null | null | import numpy as np
h = 60
w = 60
p = .3
land = np.random.choice(a=[False, True], size=(h, w), p=[1-p, p])
prev = None
def update(land):
res = land.copy()
for i in range(h):
for j in range(w):
cell = land[i,j]
            # The slice includes the cell itself, so subtract it to get the
            # live-neighbour count required by Conway's rules.
            n = np.sum(land[max(i-1,0):min(i+2,h), max(j-1,0):min(j+2,w)]) - cell
if cell:
if n < 2 or n > 3:
res[i,j] = False
else:
if n == 3:
res[i,j] = True
return res
while not np.array_equal(land, prev):
prev = land
land = update(land)
| 21.555556 | 73 | 0.450172 | 100 | 582 | 2.61 | 0.43 | 0.022989 | 0.038314 | 0.045977 | 0.05364 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042373 | 0.391753 | 582 | 26 | 74 | 22.384615 | 0.694915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba264f7d2f72ee99e371653a2694a78a31c79f79 | 1,158 | py | Python | test/cnnl/test_logging_cnnl.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | 20 | 2022-03-01T11:40:51.000Z | 2022-03-30T08:17:47.000Z | test/cnnl/test_logging_cnnl.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | test/cnnl/test_logging_cnnl.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import os
import sys
import unittest
import logging
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir+"/../")
from common_utils import testinfo, TestCase # pylint: disable=C0413
logging.basicConfig(level=logging.DEBUG)
class TestLoggingCNNL(TestCase):
# @unittest.skip("not test")
@testinfo()
def test_cnnl_logging(self):
data_path = os.path.join(cur_dir, '../data/cnlog/')
cmd = 'python ' + os.path.join(data_path, 'cnlog_cnnl.py') + \
' 2>' + os.path.join(data_path, 'cnlog_cnnl.txt')
os.system(cmd)
with open(os.path.join(data_path, 'cnlog_cnnl.txt'), 'r') as f1:
temp = f1.readlines()
with open(os.path.join(data_path, 'cnlog_cnnl.err'), 'r') as f2:
msg2 = f2.readlines()
msg1 = []
for line in temp:
if line.startswith('[DEBUG]'):
msg1.append(line)
for i, line in enumerate(msg2):
self.assertNotEqual(msg1[i].find(line), -1)
os.remove(os.path.join(data_path, 'cnlog_cnnl.txt'))
if __name__ == '__main__':
unittest.main()
| 32.166667 | 72 | 0.626079 | 157 | 1,158 | 4.401274 | 0.426752 | 0.069465 | 0.086831 | 0.101302 | 0.231548 | 0.231548 | 0.231548 | 0.192475 | 0.101302 | 0 | 0 | 0.016741 | 0.226252 | 1,158 | 35 | 73 | 33.085714 | 0.754464 | 0.041451 | 0 | 0 | 0 | 0 | 0.102981 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba268da33508894c4cedb192dd049008b06b3802 | 11,157 | py | Python | scripts/create_2018_mpg_ranch_archive_data_yaml.py | RichardLitt/Vesper | 5360844f42a06942e7684121c650b08cf8616285 | [
"MIT"
] | 29 | 2017-07-10T14:49:15.000Z | 2022-02-02T23:14:38.000Z | scripts/create_2018_mpg_ranch_archive_data_yaml.py | Tubbz-alt/Vesper | 76e5931ca0c7fbe070c53b1362ec246ec9007beb | [
"MIT"
] | 167 | 2015-03-17T14:45:22.000Z | 2022-03-30T21:00:05.000Z | scripts/create_2018_mpg_ranch_archive_data_yaml.py | Tubbz-alt/Vesper | 76e5931ca0c7fbe070c53b1362ec246ec9007beb | [
"MIT"
] | 4 | 2015-02-06T03:30:27.000Z | 2020-12-27T08:38:52.000Z | """Creates an archive data YAML file from a stations CSV file."""
from collections import Counter, defaultdict
from pathlib import Path
import textwrap
from vesper.util.bunch import Bunch
WORKING_DIR_PATH = Path(
'/Users/Harold/Desktop/NFC/Data/MPG Ranch/2018 MPG Ranch Archive/'
'Archive Data YAML')
CSV_FILE_PATH = WORKING_DIR_PATH / 'Stations 2018.csv'
ARCHIVE_DATA_FILE_PATH = WORKING_DIR_PATH / 'Archive Data.yaml'
ALIASES_FILE_PATH = WORKING_DIR_PATH / 'Station Name Aliases.yaml'
sn_counts = Counter()
"""Counts of generated serial numbers by device model name."""
def main():
create_archive_data_yaml()
create_station_name_aliases_preset()
def create_archive_data_yaml():
lines = parse_csv_file()
text = '\n'.join([
create_stations_section(lines),
create_device_models_section(),
create_devices_section(lines),
create_station_devices_section(lines),
create_processor_sections(),
create_annotation_sections()
])
with open(ARCHIVE_DATA_FILE_PATH, 'wt') as yaml_file:
yaml_file.write(text)
def parse_csv_file():
with open(CSV_FILE_PATH, 'r', encoding='utf-8') as csv_file:
lines = csv_file.read().strip().split('\n')[1:]
return [parse_csv_file_line(l) for l in lines]
def parse_csv_file_line(line):
(station_name, _, recorder_model, recorder_sn, microphone_sn, latitude,
longitude, elevation, station_name_alias) = line.split(',')
if recorder_model == 'SM2':
recorder_model = 'SM2+'
if recorder_sn == '':
recorder_sn = create_sn(recorder_model)
microphone_model = '21c'
if microphone_sn == '':
microphone_sn = create_sn(microphone_model)
return Bunch(
station_name=station_name,
station_name_alias=station_name_alias,
description='',
time_zone='US/Mountain',
latitude=latitude,
longitude=longitude,
elevation=elevation,
recorder_model=recorder_model,
recorder_sn=recorder_sn,
microphone_model=microphone_model,
microphone_sn=microphone_sn)
def create_sn(device_model):
sn = 'PH{:02d}'.format(sn_counts[device_model])
sn_counts[device_model] += 1
return sn
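# For example, the first SM2+ recorder without a serial number gets 'PH00',
# the next 'PH01', and so on; counts are kept separately per device model.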
def create_stations_section(lines):
items = create_station_items(lines)
return create_section('stations', items)
def create_station_items(lines):
# Eliminate station duplicates and sort by name.
stations_dict = dict((l.station_name, l) for l in lines)
names = sorted(stations_dict.keys())
stations = [stations_dict[n] for n in names]
return [create_station_item(s) for s in stations]
def create_station_item(s):
f = '''
- name: {}
description: {}
time_zone: {}
latitude: {}
longitude: {}
elevation: {}
'''.lstrip()
return f.format(
s.station_name, q(s.description), s.time_zone, s.latitude,
s.longitude, s.elevation)
def q(s):
return s if len(s) != 0 else '""'
def create_section(title, items):
return title + ':\n\n' + indent('\n'.join(items))
def indent(text, num_spaces=4):
prefix = ' ' * num_spaces
return textwrap.indent(text, prefix)
def create_device_models_section():
return '''
device_models:
- name: SM2+
type: Audio Recorder
manufacturer: Wildlife Acoustics
model: Song Meter SM2+
description: ""
num_inputs: 2
- name: SM3
type: Audio Recorder
manufacturer: Wildlife Acoustics
model: Song Meter SM3
description: ""
num_inputs: 2
- name: Swift
type: Audio Recorder
manufacturer: Cornell Lab of Ornithology
model: Swift
description: ""
num_inputs: 1
- name: PC
type: Audio Recorder
manufacturer: Various
model: PC
description: Personal computer as an audio recorder.
num_inputs: 2
- name: SMX-NFC
type: Microphone
manufacturer: Wildlife Acoustics
model: SMX-NFC
description: ""
num_outputs: 1
- name: SMX-II
type: Microphone
manufacturer: Wildlife Acoustics
model: SMX-II
description: ""
num_outputs: 1
- name: 21c
type: Microphone
manufacturer: Old Bird
model: 21c
description: ""
num_outputs: 1
'''.lstrip()
def create_devices_section(lines):
recorder_items = create_recorder_items(lines)
microphone_items = create_microphone_items(lines)
return create_section('devices', recorder_items + microphone_items)
def create_recorder_items(lines):
recorders = sorted(set(
[(l.recorder_model, l.recorder_sn) for l in lines]))
return [create_device_item(*r) for r in recorders]
def create_device_item(model, sn):
name = '{} {}'.format(model, sn)
return '''
- name: {}
model: {}
serial_number: {}
description: ""
'''.lstrip().format(name, model, sn)
def create_microphone_items(lines):
microphones = sorted(set(
[(l.microphone_model, l.microphone_sn) for l in lines]))
return [create_device_item(*m) for m in microphones]
def create_station_devices_section(lines):
# Collect device connections by station.
connections = defaultdict(set)
for l in lines:
recorder_name = create_device_name(l.recorder_model, l.recorder_sn)
microphone_name = create_device_name(
l.microphone_model, l.microphone_sn)
connections[l.station_name].add((recorder_name, microphone_name))
# Create station devices items, one for each station.
station_names = sorted(connections.keys())
items = [
create_station_devices_item(station_name, connections[station_name])
for station_name in station_names]
return create_section('station_devices', items)
def create_device_name(model, sn):
return '{} {}'.format(model, sn)
def create_station_devices_item(station_name, connections):
header = '''
- station: {}
start_time: 2018-01-01
end_time: 2019-01-01
'''.lstrip().format(station_name)
connections = sorted(connections)
device_list = create_device_list(connections)
connection_list = create_connection_list(connections)
return header + indent(device_list, 2) + indent(connection_list, 2)
def create_device_list(connections):
recorder_names = sorted(set([c[0] for c in connections]))
microphone_names = sorted(set([c[1] for c in connections]))
device_names = recorder_names + microphone_names
device_items = ['- ' + n + '\n' for n in device_names]
return create_list('devices', device_items)
def create_list(name, items):
return name + ':\n' + indent(''.join(items))
def create_connection_list(connections):
connection_items = [create_connection_item(*c) for c in connections]
return create_list('connections', connection_items)
def create_connection_item(recorder_name, microphone_name):
output_name = microphone_name + ' Output'
# Get channel number text for recorder input name.
if recorder_name.startswith('Swift'):
channel_num = ''
elif recorder_name.startswith('SM3'):
channel_num = ' 1'
else:
channel_num = ' 0'
input_name = recorder_name + ' Input' + channel_num
return '''
- output: {}
input: {}
'''.lstrip().format(output_name, input_name)
def create_processor_sections():
return '''
detectors:
- name: Old Bird Thrush Detector Redux 1.1
description: Vesper reimplementation of Old Bird Thrush detector.
- name: Old Bird Tseep Detector Redux 1.1
description: Vesper reimplementation of Old Bird Tseep detector.
classifiers:
- name: MPG Ranch Outside Classifier 1.0
description: >
Classifies a clip as "Outside" if and only if its start time is
outside of the interval from one hour after sunset to one half
hour before sunrise.
- name: MPG Ranch NFC Coarse Classifier 2.0
description: >
Classifies an unclassified clip as a "Call" if it appears to be
a nocturnal flight call, or as a "Noise" otherwise. Does not
classify a clip that has already been classified, whether
manually or automatically.
'''.lstrip()
def create_annotation_sections():
return '''
annotation_constraints:
- name: Coarse Classification
description: Coarse classifications only.
type: Values
values:
- CHSP_DEJU
- Call
- Noise
- Other
- Outside
- Thrush
- Tone
- Tseep
- Unknown
- name: Classification
description: All classifications, including call subclassifications.
type: Hierarchical Values
extends: Coarse Classification
values:
- Call:
- AMPI
- AMRE
- AMRO
- ATSP
- BAIS
- CAWA
- CCSP_BRSP
- CHSP
- COYE
- CSWA
- DBUP
- DEJU
- GCKI
- GCTH
- GRSP
- GRYE
- HETH
- LALO
- LAZB
- LCSP
- LESA
- LISP
- MGWA
- NOWA
- OCWA
- OVEN
- PESA
- SAVS
- SNBU
- SORA
- SOSP
- SPSA_SOSA
- SWSP
- SWTH
- UPSA
- Unknown
- VEER
- VESP
- VIRA
- WCSP
- WIWA
- WTSP
- Weak
- YRWA
- YEWA
- Zeep
annotations:
- name: Classification
type: String
constraint: Classification
'''.lstrip()
def create_station_name_aliases_preset():
comment = '''
# A station name aliases preset is a mapping from station names as they appear
# in an archive to lists of aliases for them that appear in recording and clip
# file names. The archive station names should be capitalized exactly as they are
# in the archive. The capitalization of aliases is irrelevant since they and
# station names that appear in file names are converted to lower case before
# comparison.
'''.lstrip()
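    # A hypothetical example of a generated alias line (the station name and
    # alias below are made up for illustration):
    #
    #     Baldy: [baldy draw]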
lines = parse_csv_file()
lines.sort(key=lambda l: l.station_name)
aliases = []
for line in lines:
name = line.station_name
alias = line.station_name_alias.lower()
if alias != '' and name.lower() != alias:
aliases.append('{}: [{}]\n'.format(name, alias))
text = comment + '\n' + ''.join(aliases)
with open(ALIASES_FILE_PATH, 'wt') as aliases_file:
aliases_file.write(text)
if __name__ == '__main__':
main()
| 25.648276 | 78 | 0.609931 | 1,270 | 11,157 | 5.150394 | 0.227559 | 0.035316 | 0.012842 | 0.008409 | 0.155328 | 0.092035 | 0.076135 | 0.046476 | 0.046476 | 0.017429 | 0 | 0.008328 | 0.300439 | 11,157 | 434 | 79 | 25.707373 | 0.829725 | 0.022139 | 0 | 0.147436 | 0 | 0 | 0.4124 | 0.005259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.012821 | 0.022436 | 0.169872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba271bd1a281e785675d1c2320bdff1d4f51aa69 | 1,295 | py | Python | JumpscalePortalClassic/portal/macrohelper/PortalMacroHelper.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | null | null | null | JumpscalePortalClassic/portal/macrohelper/PortalMacroHelper.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | null | null | null | JumpscalePortalClassic/portal/macrohelper/PortalMacroHelper.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | null | null | null |
from jumpscale import j
class PortalMacroHelper():
def push2doc(self, args, params, objFetchManipulate):
params.merge(args)
doc = params.doc
idd = args.getTag("id")
if not idd:
params.result = ('Missing id param "id"', doc)
return params
obj = objFetchManipulate(idd)
if args.tags.labelExists("show"):
out = ""
keys = sorted(obj.keys())
for key in keys:
value = obj[key]
r = 0
for item in str(value).split("\n"):
if r == 0:
out += "- %-20s : %s\n" % (key, item)
else:
out += "- %-20s %s\n" % (" ", item)
r += 1
params.result = ("{{code:\n%s\n}}" % out, doc)
return params
objparams = {str(k).lower(): v for k, v in list(obj.items())}
# apply the properties of the object as parameters to the active wiki document
doc.content = doc.applyParams(objparams, content=doc.content)
        # IMPORTANT: return the doc twice, (doc, doc) rather than (out, doc);
        # this tells the appserver that the doc was manipulated
params.result = (doc, doc)
return params
| 30.833333 | 86 | 0.494208 | 149 | 1,295 | 4.295302 | 0.489933 | 0.05625 | 0.070313 | 0.025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011349 | 0.387645 | 1,295 | 41 | 87 | 31.585366 | 0.795712 | 0.14749 | 0 | 0.107143 | 0 | 0 | 0.066424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2789e19b758536393fb0d7f9e9f71794c32943 | 5,528 | py | Python | src/scripts/validators.py | ucfcbb/RNAMotifContrast | a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6 | [
"MIT"
] | 2 | 2021-04-14T15:13:25.000Z | 2021-06-27T08:54:27.000Z | src/scripts/validators.py | ucfcbb/RNAMotifContrast | a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6 | [
"MIT"
] | null | null | null | src/scripts/validators.py | ucfcbb/RNAMotifContrast | a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6 | [
"MIT"
] | null | null | null | import os
import sys
import logging
import pickle
sys.path.append('../../')
from config import *
sys.path.append(scripts_dir)
from my_log import *
from utils import *
def get_node_list_from_rmsd_data(rmsd_data_dict):
node_list_dict = {}
for c_id in rmsd_data_dict:
_, rmsd_data_list_dict = rmsd_data_dict[c_id]
key_list = list(rmsd_data_list_dict.keys())
if len(key_list) > 0:
(i, r1) = key_list[0]
node1 = strToNode(r1)
_, fit_ret = rmsd_data_list_dict[(i, r1)]
node2_list = list(map(lambda x: strToNode(x[1]), fit_ret))
node_list_dict[c_id] = [node1] + node2_list
return node_list_dict
# def get_formatted_rmsd_data(rmsd_data_dict):
# rmsd_formatted_data = {}
# for c_id in rmsd_data_dict:
# rmsd_formatted_data[c_id] = {}
# _, rmsd_data_list_dict = rmsd_data_dict[c_id]
# for i, r1 in rmsd_data_list_dict:
# node1 = strToNode(r1)
# if node1 not in rmsd_formatted_data[c_id]:
# rmsd_formatted_data[c_id][node1] = []
# _, fit_ret = rmsd_data_list_dict[(i, r1)]
# for _, r2, _, _ in fit_ret:
# node2 = strToNode(r2)
# if node2 not in rmsd_formatted_data[c_id][node1]:
# rmsd_formatted_data[c_id][node1].append(node2)
# return rmsd_formatted_data
def is_valid_graph(clusters, graph_fname):
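    # Checks that the pairwise-alignment graph file exists and that its edges
    # cover exactly the loops of the given clusters (each line of the file
    # lists an aligned loop pair plus three extra fields).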
if not os.path.isfile(graph_fname):
return False
fp = open(graph_fname)
lines = fp.readlines()
fp.close()
graph_data = {}
for line in lines:
r1, r2, _, _, _ = line.strip().split(' ')
if r1 not in graph_data:
graph_data[r1] = []
if r2 not in graph_data:
graph_data[r2] = []
graph_data[r1].append(r2)
graph_data[r2].append(r1)
if align_all_pair == False:
for c_id in clusters:
r1 = clusters[c_id][0]
if r1 not in graph_data:
return False
r1_graph_data = [r1]
r1_graph_data += graph_data[r1]
set_a = set(clusters[c_id])
set_b = set(r1_graph_data)
if not(len(set_a) == len(set_b) and len(set_a.intersection(set_b)) == len(set_a)):
return False
return True
else:
all_loops = []
for c_id in clusters:
all_loops += clusters[c_id]
r1 = all_loops[0]
if r1 not in graph_data:
return False
r1_graph_data = [r1]
r1_graph_data += graph_data[r1]
set_a = set(all_loops)
set_b = set(r1_graph_data)
if len(set_a) == len(set_b) and len(set_a.intersection(set_b)) == len(set_a):
return True
return False
def is_valid_pickle(pickle_fname, clusters):
if os.path.basename(pickle_fname).startswith('alignment'):
# check alignment pickle
alignment_data_fname = pickle_fname
if not os.path.isfile(alignment_data_fname):
return False
f = open(alignment_data_fname, 'rb')
cluster_alignment_data = pickle.load(f)
f.close()
for c_id in clusters:
if c_id not in cluster_alignment_data:
return False
for c_id in clusters:
loop_nodes = list(map(lambda x: strToNode(x), clusters[c_id]))
node1 = loop_nodes[0]
set_a = set(loop_nodes)
set_b = set(list(cluster_alignment_data[c_id][node1].keys()) + [node1])
if not(len(set_a) == len(set_b) and len(set_a.intersection(set_b)) == len(set_a)):
return False
elif os.path.basename(pickle_fname).startswith('rmsd'):
# hard to detect if this is invalid or not
# check rmsd pickle
rmsd_data_fname = pickle_fname
if not os.path.isfile(rmsd_data_fname):
return False
f = open(rmsd_data_fname, 'rb')
rmsd_data_dict = pickle.load(f)
f.close()
for c_id in clusters:
if c_id not in rmsd_data_dict:
return False
node_list_dict = get_node_list_from_rmsd_data(rmsd_data_dict)
for c_id in clusters:
loop_nodes = list(map(lambda x: strToNode(x), clusters[c_id]))
set_a = set(loop_nodes)
set_b = set(node_list_dict[c_id])
if not(len(set_a) == len(set_b) and len(set_a.intersection(set_b)) == len(set_a)):
return False
else:
return False
return True
def validate_all(input_fname, draw_figures):
if not os.path.isfile(input_fname):
logger.error('Input file does not exists.')
sys.exit()
if draw_figures == True:
if sys.version_info > (3, 0):
# if len(pymol_py3_dir) == 0:
if not os.path.exists(pymol_py3_dir):
logger.error('Please configure pymol, and set pymol directory in ' + os.path.join(root_dir, 'config.py')[base_path_len:] + ' file to generate images. (see instructions in README file)')
sys.exit()
else:
try:
import pymol
except Exception as e:
logger.error('Please install pymol to generate images. (see instructions in README file)')
sys.exit()
# if draw_figures == True and (sys.version_info > (3, 0)) and len(pymol_py3_dir) == 0:
# logger.error('Please set value for \'pymol_py3_dir\' in \'config.py\' file.')
# sys.exit()
| 32.139535 | 201 | 0.585926 | 782 | 5,528 | 3.847826 | 0.15601 | 0.024925 | 0.027916 | 0.02127 | 0.54769 | 0.501496 | 0.374211 | 0.344965 | 0.298438 | 0.229977 | 0 | 0.016566 | 0.312048 | 5,528 | 171 | 202 | 32.327485 | 0.774652 | 0.174566 | 0 | 0.391304 | 0 | 0 | 0.053744 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034783 | false | 0 | 0.069565 | 0 | 0.243478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba298a376da805eb670a48982febe4b8acc76aba | 1,731 | py | Python | ds_and_alg/remove_invalid_parentheses.py | and1can/and1can-data-structures-and-algorithms | f6aee3b6728f3e575465ce157a869ccebd45ef62 | [
"MIT"
] | 1 | 2021-05-13T20:52:42.000Z | 2021-05-13T20:52:42.000Z | ds_and_alg/remove_invalid_parentheses.py | and1can/and1can-data-structures-and-algorithms | f6aee3b6728f3e575465ce157a869ccebd45ef62 | [
"MIT"
] | null | null | null | ds_and_alg/remove_invalid_parentheses.py | and1can/and1can-data-structures-and-algorithms | f6aee3b6728f3e575465ce157a869ccebd45ef62 | [
"MIT"
] | null | null | null | class Solution:
def removeInvalidParentheses(self, s: str) -> List[str]:
self.results = set([])
self.min = len(s)
self.helper(list(s), '', 0, 0, len(s) // 2, 0)
        return list(self.results)
def helper(self, string, curr_pattern, open_count, close_count, half_length, remove_count):
if open_count < close_count:
return
if open_count > half_length:
return
if len(string) == 0:
if open_count == close_count and remove_count <= self.min:
if remove_count < self.min:
self.results = set([curr_pattern])
self.min = remove_count
else:
if curr_pattern not in self.results:
self.results.add(curr_pattern)
return
else:
return
if string[0] == '(':
new_list = list(curr_pattern)
new_list.append('(')
self.helper(string[1:], ''.join(new_list), open_count + 1, close_count, half_length, remove_count)
self.helper(string[1:], curr_pattern, open_count, close_count, half_length, remove_count + 1)
elif string[0] == ')':
new_list = list(curr_pattern)
new_list.append(')')
self.helper(string[1:], ''.join(new_list), open_count, close_count + 1, half_length, remove_count)
self.helper(string[1:], curr_pattern, open_count, close_count, half_length, remove_count + 1)
else:
new_list = list(curr_pattern)
new_list.append(string[0])
self.helper(string[1:], ''.join(new_list), open_count, close_count, half_length, remove_count) | 46.783784 | 110 | 0.558059 | 209 | 1,731 | 4.37799 | 0.172249 | 0.108197 | 0.107104 | 0.145355 | 0.605464 | 0.559563 | 0.548634 | 0.548634 | 0.487432 | 0.487432 | 0 | 0.014617 | 0.328134 | 1,731 | 37 | 111 | 46.783784 | 0.772141 | 0 | 0 | 0.333333 | 0 | 0 | 0.002309 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2a21ef5cf05cb9ac09d79ba540981fff7af731 | 5,886 | py | Python | login_webo.py | UranusResident/sina_weibo_login | 403a88f4b2f814d35ec7b34f8940095f5ca8db95 | [
"Apache-2.0"
] | 7 | 2018-05-09T07:04:34.000Z | 2020-02-26T12:43:19.000Z | login_webo.py | UranusResident/sina_weibo_login | 403a88f4b2f814d35ec7b34f8940095f5ca8db95 | [
"Apache-2.0"
] | null | null | null | login_webo.py | UranusResident/sina_weibo_login | 403a88f4b2f814d35ec7b34f8940095f5ca8db95 | [
"Apache-2.0"
] | 2 | 2019-06-01T16:19:48.000Z | 2022-03-24T11:38:44.000Z | # -*- coding:utf-8 -*-
# Author:longjiang
import random
import os
import sys
import json
import io
import time
import logging
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.remote.command import Command
from selenium.webdriver.common.action_chains import ActionChains
from math import sqrt
from train_model import joblib
from predict_result import image_identification
# reload(sys)
# sys.setdefaultencoding("utf-8")
PIXELS = []
def get_type(browser):
""" 识别图形路径 """
time.sleep(3.5)
im0 = Image.open(io.BytesIO(browser.get_screenshot_as_png()))
box = browser.find_element_by_id('patternCaptchaHolder')
im = im0.crop((int(box.location['x']) + 10, int(box.location['y']) + 100,
int(box.location['x']) + box.size['width'] - 10,
int(box.location['y']) + box.size['height'] - 10)).convert('L')
new_box = get_exactly(im)
im = im.crop(new_box)
px0_x = box.location['x'] + 40 + new_box[0]
px1_y = box.location['y'] + 130 + new_box[1]
PIXELS.append((px0_x, px1_y))
PIXELS.append((px0_x + 100, px1_y))
PIXELS.append((px0_x, px1_y + 100))
PIXELS.append((px0_x + 100, px1_y + 100))
    # Recognize the pattern class
result = image_identification(im, "lr")
m_dict = joblib.load("img/m_dict.pkl")
print("The picture class is: {}, and number path is {}".format(result, m_dict[result]))
t_type = m_dict[result]
return t_type
def get_exactly(im):
""" 精确剪切"""
imin = -1
imax = -1
jmin = -1
jmax = -1
row = im.size[0]
col = im.size[1]
for i in range(row):
for j in range(col):
if im.load()[i, j] != 255:
imax = i
break
if imax == -1:
imin = i
for j in range(col):
for i in range(row):
if im.load()[i, j] != 255:
jmax = j
break
if jmax == -1:
jmin = j
return imin + 1, jmin + 1, imax + 1, jmax + 1
def move(browser, coordinate, coordinate0):
""" 从坐标coordinate0,移动到坐标coordinate """
time.sleep(0.05)
    length = sqrt((coordinate[0] - coordinate0[0]) ** 2 + (coordinate[1] - coordinate0[1]) ** 2)  # straight-line distance between the two points
    if length < 4:  # if the two points are less than 4 px apart, slide straight over
ActionChains(browser).move_by_offset(coordinate[0] - coordinate0[0], coordinate[1] - coordinate0[1]).perform()
return
    else:  # recurse, sliding step by step toward the end point
step = random.randint(3, 5)
        x = int(step * (coordinate[0] - coordinate0[0]) / length)  # proportional step
y = int(step * (coordinate[1] - coordinate0[1]) / length)
ActionChains(browser).move_by_offset(x, y).perform()
move(browser, coordinate, (coordinate0[0] + x, coordinate0[1] + y))
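# Note: move() approaches the target recursively in random 3-5 px steps so the
# pointer trajectory looks human rather than a single straight jump.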
def draw(browser, ttype):
""" 滑动 """
if len(ttype) == 4:
px0 = PIXELS[int(ttype[0]) - 1]
login = browser.find_element_by_id('loginAction')
ActionChains(browser).move_to_element(login).move_by_offset(
px0[0] - login.location['x'] - int(login.size['width'] / 2),
px0[1] - login.location['y'] - int(login.size['height'] / 2)
).perform()
browser.execute(Command.MOUSE_DOWN, {})
px1 = PIXELS[int(ttype[1]) - 1]
move(browser, (px1[0], px1[1]), px0)
px2 = PIXELS[int(ttype[2]) - 1]
move(browser, (px2[0], px2[1]), px1)
px3 = PIXELS[int(ttype[3]) - 1]
move(browser, (px3[0], px3[1]), px2)
browser.execute(Command.MOUSE_UP, {})
else:
print('Sorry! Failed! Maybe you need to update the code.')
def my_default_get_cookie_from_weibo(account, password):
driver = webdriver.Chrome()
try:
driver.get(
r'https://passport.weibo.cn/signin/login?entry=mweibo&r=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt='
)
retry_count = 0
while retry_count < 5 and "微博" in driver.title:
retry_count += 1
js = 'var lo=document.getElementById("loginWrapper");lo.style.display="block";'
            # run the JS script to reveal the login form
driver.execute_script(js)
driver.find_element_by_id('loginName').clear()
driver.find_element_by_id('loginName').send_keys(account)
driver.find_element_by_id('loginPassword').clear()
driver.find_element_by_id('loginPassword').send_keys(password)
submit = driver.find_element_by_id('loginAction')
ActionChains(driver).double_click(submit).perform()
time.sleep(1)
try:
if driver.find_element_by_id('patternCaptchaHolder'):
                    t_type = get_type(driver)  # identify the trace path
                    draw(driver, t_type)  # slide to crack the captcha
except Exception as e:
print(e)
            # wait for the captcha to be passed manually (clever, I like it)
WebDriverWait(driver, 30).until(
EC.presence_of_element_located((By.XPATH, '//title[contains(text(),"我的首页")]')))
if "我的首页" not in driver.title:
time.sleep(4)
if '未激活微博' in driver.page_source:
                print('The account has not activated Weibo')
return {}
cookie = {}
if "我的首页" in driver.title:
for elem in driver.get_cookies():
cookie[elem["name"]] = elem["value"]
logging.warning("Get Cookie Success!( username:%s )" % account)
time.sleep(3)
return json.dumps(cookie)
except Exception as e:
logging.warning("登录失败 %s!" % account)
logging.error(e)
return ""
finally:
try:
driver.quit()
except Exception as e:
logging.error(e)
print(e)
if __name__ == '__main__':
my_default_get_cookie_from_weibo('username', 'password')
| 31.645161 | 120 | 0.585627 | 758 | 5,886 | 4.416887 | 0.319261 | 0.026284 | 0.031063 | 0.035842 | 0.202509 | 0.113501 | 0.01374 | 0 | 0 | 0 | 0 | 0.033787 | 0.275909 | 5,886 | 185 | 121 | 31.816216 | 0.75176 | 0.037037 | 0 | 0.142857 | 0 | 0.007143 | 0.098562 | 0.017759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.035714 | 0.121429 | 0 | 0.2 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2cf851d13cf912d5bef7aad6a06d2f7040d32c | 4,462 | py | Python | train_model.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | train_model.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | train_model.py | maxfrei750/CarbonBlackSegmentation | ff5aeaf03a9c60c1a0396f1d2b6d5a3347808a30 | [
"MIT"
] | null | null | null | import fire
import ignite.distributed as idist
import utils
from training import training
def run(
seed=42,
data_path="./data",
subset_train="train",
subset_val="val",
output_path="./output",
architecture="FPN",
encoder="resnet50",
encoder_weights="imagenet",
encoder_freeze_at=None,
batch_size=6,
optimizer="Adam",
weight_decay=1e-4,
num_workers=12,
num_iterations=10000,
learning_rate=0.0001,
learning_rate_milestone_iterations=(2000, 8000),
gamma=0.1,
num_warmup_iterations=1000,
warmup_factor=0.001,
validate_every=10,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=0,
nproc_per_node=None,
stop_iteration=None,
with_trains=False,
active_gpu_ids=(0,),
**spawn_kwargs,
):
"""Main entry to train a model on the semantic segmentation of carbon black agglomerate TEM images.
Args:
seed (int): random state seed to set. Default, 42.
data_path (str): input dataset path. Default, "./data".
subset_train (str): name of training subset. Default, "train".
subset_val (str): name of validation subset. Default, "val".
architecture (str): architecture (see https://github.com/qubvel/segmentation_models.pytorch#architectures-).
Default, "FPN".
encoder (str): encoder architecture (see https://github.com/qubvel/segmentation_models.pytorch#encoders-).
Default, "resnet50".
encoder_weights (str): pretrained weights (see https://github.com/qubvel/segmentation_models.pytorch#encoders-).
Default, "imagenet".
encoder_freeze_at (int or None): defines stages of the encoder which are frozen before the training (e.g. 2
means all stages including stage 2 and beyond). Default, None.
output_path (str): output path. Default, "./output".
batch_size (int): total batch size. Default, 6.
optimizer (str): optimizer. Default, "Adam".
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_iterations (int): number of iterations to train the model. Default, 10000.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.0001.
learning_rate_milestone_iterations (iterable of int): numbers of iterations where learning rate is each time
decreased by a factor gamma. Default, (2000, 8000).
gamma (float): factor to multiply learning rate with at each milestone. Default, 0.1.
num_warmup_iterations (int): number of warm-up iterations before learning rate decay. Default, 1000.
warmup_factor (float): learning rate starts at warmup_factor * learning_rate. Default, 0.001.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 10.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes. Default, None.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 0.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
Default, None.
with_trains (bool): if True, experiment Trains logger is setup. Default, False.
active_gpu_ids (tuple of int): ids of GPUs to use. Default, (0,).
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
utils.select_active_gpus(config["active_gpu_ids"])
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
if __name__ == "__main__":
fire.Fire(run)
| 46 | 120 | 0.690273 | 589 | 4,462 | 5.073005 | 0.342954 | 0.040161 | 0.016064 | 0.017068 | 0.104418 | 0.090361 | 0.066265 | 0.066265 | 0.066265 | 0.042169 | 0 | 0.025956 | 0.214254 | 4,462 | 96 | 121 | 46.479167 | 0.826298 | 0.6961 | 0 | 0 | 0 | 0 | 0.088384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.090909 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2d2052cc73fe2e3e9d2768655a369a01f2a33a | 13,155 | py | Python | project/controllers/admin/objects_ctrl.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | project/controllers/admin/objects_ctrl.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | project/controllers/admin/objects_ctrl.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | """
Objects controller.
"""
from typing import Any, Optional
from flask import Blueprint, request, abort, render_template, flash, redirect
from project.decorators.security_decorators import login_required
from project.decorators.context_decorators import process_context
from project.entities.object_entity import ObjectEntity
from project.models.object_model import ObjectModel
from project.utils import record_utils
from project.utils.data_utils import set_properties_value
from project.utils.ctrl_utils import generate_admin_url
from project.services import history_service
from project.services import object_service
from project.enums import object_enum
from project.enums import string_types_enum as str_type
from project.enums import table_enum
import json
# Blueprint data
blueprint = Blueprint(
name='admin_objects_ctrl',
import_name=__name__,
url_prefix='/<context>/admin/objects'
)
###############################################################################
# View Routes
###############################################################################
@blueprint.route(
rule='/',
methods=['GET'],
defaults={'object_name': None}
)
@blueprint.route(
rule='/<object_name>',
methods=['GET']
)
@login_required()
@process_context()
def list_objects_view(context: str, object_name: Optional[str] = None
) -> Any:
"""
List content view endpoint.
"""
root_url = generate_admin_url(context, 'objects')
back_url = generate_admin_url(
context, 'objects'
)
change_order_url = generate_admin_url(
context, 'objects', 'change_order', object_name if object_name else '',
)
referrer_url = request.referrer
headers = [
'#',
'Name',
'Type',
'Published',
'URL',
'Created On',
'Edit',
'Children',
]
objects: list[ObjectEntity] = list()
children: list[ObjectModel] = list()
data: list[Any] = list()
title = 'Root'
parent_name = None
# Get record and data
if object_name is not None:
entity = object_service.select_by_name(context, object_name)
if not entity:
return abort(400)
record = object_service.get_record_by_name(entity.object_type)
if not record:
return abort(400)
if entity.reference_name:
parent = object_service.select_by_name(
context, entity.reference_name
)
if not parent:
return abort(400)
parent_name = parent.name
back_url = generate_admin_url(
context, 'objects', parent.name,
)
title = entity.name
children = object_service.get_records_by_names(record.children)
objects = object_service.select_by_reference(context, entity.name)
else:
children = object_service.get_root_records()
objects = object_service.select_root_objects(context)
# Filter children
children = [child for child in children if child.allow_actions]
# Parse entities
for entity in objects:
record = object_service.get_record_by_name(entity.object_type)
if record is None:
continue
published = entity.properties.get('published', '1') == str_type.TRUE
if record.is_content and published:
url = f'<a href="{entity.url}" target="_blank">{entity.url}</a>'
else:
url = '-'
data.append((
entity.object_order,
f'<i class="bi {record.icon}"></i> '
f'{entity.name}',
f'{record.name}',
f'<i class="bi bi-broadcast"></i> True ' if published else 'False',
url,
entity.created_on,
f'<i class="bi bi-pencil"></i> '
f'<a href="{root_url}/edit/{entity.id}">Edit</a>',
f'<i class="bi bi-folder2-open"></i> '
f'<a href="{root_url}/{entity.name}">Children</a>'
))
# Render template
return render_template(
'/admin/object_list.html',
page_data=dict(
object_name=object_name,
headers=headers,
data=data,
root_url=root_url,
referrer_url=referrer_url,
title=title,
children=children,
back_url=back_url,
parent_name=parent_name,
change_order_url=change_order_url,
)
)
@blueprint.route(
rule='/change_order',
methods=['GET'],
defaults={'object_name': None}
)
@blueprint.route(
rule='/change_order/<object_name>',
methods=['GET']
)
@login_required()
@process_context()
def change_order_view(context: str, object_name: Optional[str] = None
) -> Any:
"""
Change order view endpoint.
"""
back_url = generate_admin_url(
context, 'objects'
)
action_url = generate_admin_url(
context, 'objects', 'save_order'
)
referrer_url = request.referrer
objects: list[ObjectEntity] = list()
data: list[Any] = list()
title = 'Root'
# Get record and data
if object_name is not None:
back_url = generate_admin_url(
context, 'objects', object_name,
)
title = object_name
objects = object_service.select_by_reference(context, object_name)
else:
objects = object_service.select_root_objects(context)
# Parse entities
for entity in objects:
object_type = object_service.get_record_by_name(
entity.object_type
)
if not object_type:
continue
data.append({
"id": entity.id,
"order": entity.object_order,
"name": f'<i class="bi {object_type.icon}"></i> {entity.name}',
"type": object_type.name
})
# Render template
return render_template(
'/admin/object_order_list.html',
page_data=dict(
object_name=object_name,
data=data,
referrer_url=referrer_url,
title=title,
back_url=back_url,
action_url=action_url,
)
)
@blueprint.route(
rule='/<object_type>/create',
methods=['GET'],
defaults={'reference_name': None}
)
@blueprint.route(
rule='/<object_type>/create/<reference_name>',
methods=['GET']
)
@login_required()
@process_context()
def create_view(context: str, object_type: str,
reference_name: Optional[str] = None) -> Any:
"""
Render create page.
"""
record = object_service.get_record_by_name(object_type)
referrer_url = request.referrer
if not record:
return abort(400)
back_url = generate_admin_url(
context, 'objects'
)
action_url = generate_admin_url(
context, 'objects', 'create'
)
if reference_name is not None:
parent = object_service.select_by_name(context, reference_name)
if parent:
back_url = generate_admin_url(
context, 'objects', parent.name
)
action_url = generate_admin_url(
context, 'objects', 'create', reference_name
)
return render_template(
'/admin/object_form.html',
page_data=dict(
context=context,
object_id=None,
edit=False,
object_type=object_type,
title=object_type,
action_url=action_url,
back_url=back_url,
properties=record.properties,
allow_actions=record.allow_actions,
referrer_url=referrer_url,
reference_name=reference_name,
)
)
@blueprint.route(
rule='/edit/<object_id>',
methods=['GET']
)
@login_required()
@process_context()
def edit_view(context: str, object_id: int) -> Any:
"""
Render edit page.
"""
# Get entity and record
entity = object_service.select_by_id(object_id)
if not entity:
return abort(400)
record = object_service.get_record_by_name(entity.object_type)
if not record:
return abort(400)
# URLs
back_url = generate_admin_url(
context, 'objects',
)
action_url = generate_admin_url(
context, 'objects', 'edit', str(object_id)
)
if entity.reference_name:
parent = object_service.select_by_name(
context, entity.reference_name
)
if parent:
back_url = generate_admin_url(
context, 'objects', parent.name
)
# Set props
props = set_properties_value(getattr(record, 'properties'), entity)
history = history_service.select_by_target_id(
context, table_enum.OBJECTS, object_id,
)
# Render
return render_template(
'/admin/object_form.html',
page_data=dict(
context=context,
object_id=object_id,
edit=True,
object_type=entity.object_type,
title=entity.object_type,
back_url=back_url,
action_url=action_url,
properties=props,
history=history,
name=entity.name,
allow_actions=record.allow_actions,
)
)
###############################################################################
# Action Routes
###############################################################################
@blueprint.route(
rule='/<object_type>/create',
methods=['POST']
)
@login_required()
@process_context()
def create_action(context: str, object_type: str) -> Any:
"""
Insert content to database.
"""
data = request.form.to_dict()
root_url = generate_admin_url(
context, 'objects', object_type,
)
new_object = ObjectEntity(
context=context,
name=data['name'],
properties=data,
object_type=object_type,
)
try:
entity_id = object_service.insert(new_object)
flash('Content created successfully!', category='success')
return redirect(f'{root_url}/edit/{entity_id}')
except Exception as err:
flash(str(err), category='danger')
return redirect(request.referrer)
@blueprint.route(
rule='/edit/<object_id>',
methods=['POST']
)
@login_required()
@process_context()
def edit_action(context: str, object_id: int) -> Any:
"""
Update content in database.
"""
data = request.form.to_dict()
root_url = generate_admin_url(
context, 'objects',
)
try:
object_service.update(object_id, data)
flash('Content updated successfully!', category='success')
return redirect(f'{root_url}/edit/{object_id}')
except Exception as err:
flash(str(err), category='danger')
return redirect(request.referrer)
@blueprint.route(
rule='/delete/<object_id>',
methods=['GET']
)
@login_required()
@process_context()
def delete_action(context: str, object_id: int) -> Any:
"""
Delete content from database.
"""
root_url = generate_admin_url(
context, 'objects',
)
try:
object_service.delete(object_id)
flash(f'Content {object_id} sent to trash bin', category='success')
return redirect(root_url)
except Exception as err:
flash(str(err), category='danger')
return redirect(request.referrer)
@blueprint.route(
rule='/duplicate/<object_id>/<to_context>/<new_name>',
methods=['GET']
)
@login_required()
@process_context()
def duplicate_action(context: str, object_id: int, to_context: str,
new_name: str) -> Any:
"""
Duplicate content.
"""
root_url = generate_admin_url(
context, 'objects',
)
try:
object_service.duplicate(object_id, to_context, new_name)
flash('Content duplicated successfully!', category='success')
return redirect(root_url)
except Exception as err:
flash(str(err), category='danger')
return redirect(request.referrer)
@blueprint.route(
rule='/save_order',
methods=['POST']
)
@login_required()
@process_context()
def save_order_action(context: str) -> Any:
"""
Save order endpoint.
"""
data = request.form.to_dict()
json_data: list[dict[str, Any]] = json.loads(data['json_data'])
try:
for item in json_data:
object_service.update_order(
int(item['id']), int(item['object_order'])
)
flash('Order updated successfully!', category='success')
return redirect(data['back_url'])
except Exception as err:
flash(str(err), category='danger')
return redirect(request.referrer)
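# Note: save_order_action expects a form field 'json_data' holding a JSON list
# of objects with 'id' and 'object_order' keys, plus a 'back_url' field to
# redirect to on success, e.g. (hypothetical values):
# [{"id": 3, "object_order": 1}, {"id": 5, "object_order": 2}]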
###############################################################################
# Ajax Routes
###############################################################################
@blueprint.route(
rule='/exists/<name>',
methods=['GET']
)
@login_required()
def object_exists(context: str, name: str) -> Any:
"""
Verify if object exists.
"""
exists = object_service.object_exists(
context, name
)
return dict(exists=exists)
| 27.989362 | 79 | 0.591106 | 1,439 | 13,155 | 5.165393 | 0.106324 | 0.040226 | 0.040899 | 0.046011 | 0.572447 | 0.528185 | 0.477196 | 0.364456 | 0.325306 | 0.242836 | 0 | 0.002069 | 0.265298 | 13,155 | 469 | 80 | 28.049041 | 0.766994 | 0.036488 | 0 | 0.478495 | 0 | 0 | 0.116385 | 0.040984 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026882 | false | 0 | 0.043011 | 0 | 0.126344 | 0.040323 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2dd4a9a2286a279356cd97a34845a8ae1f8694 | 622 | py | Python | tests/feeds/test_eth_jpy_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 7 | 2021-11-10T21:14:57.000Z | 2022-03-26T07:27:23.000Z | tests/feeds/test_eth_jpy_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 86 | 2021-11-09T13:12:58.000Z | 2022-03-31T17:28:56.000Z | tests/feeds/test_eth_jpy_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 2 | 2021-11-27T12:51:22.000Z | 2022-03-12T16:38:00.000Z | import statistics
import pytest
from telliot_feed_examples.feeds.eth_jpy_feed import eth_jpy_median_feed
@pytest.mark.asyncio
async def test_AssetPriceFeed():
"""Retrieve median ETH/JPY price."""
v, _ = await eth_jpy_median_feed.source.fetch_new_datapoint()
assert v is not None
assert v > 0
print(f"ETH/JPY Price: {v}")
    # Get the latest price from each data source
source_prices = [source.latest[0] for source in eth_jpy_median_feed.source.sources]
print(source_prices)
# Make sure error is less than decimal tolerance
    assert abs(v - statistics.median(source_prices)) < 10**-6
| 27.043478 | 87 | 0.734727 | 94 | 622 | 4.648936 | 0.542553 | 0.08238 | 0.08238 | 0.10984 | 0.100687 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009804 | 0.180064 | 622 | 22 | 88 | 28.272727 | 0.847059 | 0.143087 | 0 | 0 | 0 | 0 | 0.036437 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2e3dafa078bec8b05e729ede81943f7535f0fe | 1,719 | py | Python | Exercises/Exercise_Database-and-SQL/script1.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | 1 | 2021-10-16T16:22:14.000Z | 2021-10-16T16:22:14.000Z | Exercises/Exercise_Database-and-SQL/script1.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | null | null | null | Exercises/Exercise_Database-and-SQL/script1.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | null | null | null | import sqlite3
def create_table():
conn = sqlite3.connect('lite.db') # Create connection to database, if no database then it will be created with this line of code
cur = conn.cursor() # Create cursor object
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
conn.commit()
conn.close()
def insert(item,quantity,price):
conn = sqlite3.connect('lite.db') # Create connection to database, if no database then it will be created with this line of code
cur = conn.cursor() # Create cursor object
cur.execute("INSERT INTO store VALUES (?,?,?)",(item,quantity,price))
conn.commit()
conn.close()
#insert("Water Glass", 10, 5)
def view():
conn = sqlite3.connect('lite.db') # Create connection to database, if no database then it will be created with this line of code
cur = conn.cursor() # Create cursor object
cur.execute("SELECT * from store")
rows = cur.fetchall()
conn.close()
return rows
def delete(item):
conn = sqlite3.connect('lite.db') # Create connection to database, if no database then it will be created with this line of code
cur = conn.cursor() # Create cursor object
cur.execute("DELETE FROM store WHERE item=?",(item,))
conn.commit()
conn.close()
def update(quantity,price,item):
conn = sqlite3.connect('lite.db') # Create connection to database, if no database then it will be created with this line of code
cur = conn.cursor() # Create cursor object
cur.execute("UPDATE store SET quantity =?, price=? WHERE item = ?",(quantity,price,item))
conn.commit()
conn.close()
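# Example: assuming the table started empty and insert("Water Glass", 10, 5)
# was run once, view() after the update below returns:
#
#     [('Water Glass', 12, 6.0)]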
#delete("Wine Glass")
update(12,6,"Water Glass")
print(view()) | 37.369565 | 132 | 0.692263 | 250 | 1,719 | 4.756 | 0.248 | 0.046257 | 0.075694 | 0.092515 | 0.655172 | 0.595458 | 0.595458 | 0.595458 | 0.595458 | 0.595458 | 0 | 0.008621 | 0.190227 | 1,719 | 46 | 133 | 37.369565 | 0.845546 | 0.35893 | 0 | 0.542857 | 0 | 0 | 0.23211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.057143 | 0 | 0.228571 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba2fcdf85498c3f2c26386179c5cfe4baa28b67b | 2,925 | py | Python | scorer.py | amirveyseh/AAAI-22-SDU-shared-task-2-AD | 9f90ba25aca9bcc8454e91638aab733d2d56f4ec | [
"MIT"
] | 5 | 2021-09-17T01:53:01.000Z | 2022-02-26T08:32:50.000Z | scorer.py | amirveyseh/AAAI-22-SDU-shared-task-2-AD | 9f90ba25aca9bcc8454e91638aab733d2d56f4ec | [
"MIT"
] | null | null | null | scorer.py | amirveyseh/AAAI-22-SDU-shared-task-2-AD | 9f90ba25aca9bcc8454e91638aab733d2d56f4ec | [
"MIT"
] | 1 | 2021-09-21T12:00:33.000Z | 2021-09-21T12:00:33.000Z | import argparse
import json
from collections import defaultdict
def run_evaluation(args):
verbose = args.v
with open(args.g) as file:
gold = dict([(d['ID'], d['label']) for d in json.load(file)])
with open(args.p) as file:
pred = dict([(d['ID'], d['label']) for d in json.load(file)])
pred = [pred[k] for k,v in gold.items()]
gold = [gold[k] for k,v in gold.items()]
    p, r, f1 = score_expansion(gold, pred, verbose=verbose)
return p, r, f1
def score_expansion(key, prediction, verbose=False):
correct = 0
for i in range(len(key)):
if key[i] == prediction[i]:
correct += 1
acc = correct / len(prediction)
expansions = set()
correct_per_expansion = defaultdict(int)
total_per_expansion = defaultdict(int)
pred_per_expansion = defaultdict(int)
for i in range(len(key)):
expansions.add(key[i])
total_per_expansion[key[i]] += 1
pred_per_expansion[prediction[i]] += 1
if key[i] == prediction[i]:
correct_per_expansion[key[i]] += 1
precs = defaultdict(int)
recalls = defaultdict(int)
for exp in expansions:
precs[exp] = correct_per_expansion[exp] / pred_per_expansion[exp] if exp in pred_per_expansion else 1
recalls[exp] = correct_per_expansion[exp] / total_per_expansion[exp]
micro_prec = sum(correct_per_expansion.values()) / sum(pred_per_expansion.values())
micro_recall = sum(correct_per_expansion.values()) / sum(total_per_expansion.values())
micro_f1 = 2*micro_prec*micro_recall/(micro_prec+micro_recall) if micro_prec+micro_recall != 0 else 0
macro_prec = sum(precs.values()) / len(precs)
macro_recall = sum(recalls.values()) / len(recalls)
macro_f1 = 2*macro_prec*macro_recall / (macro_prec+macro_recall) if macro_prec+macro_recall != 0 else 0
    if verbose:
print('Accuracy: {:.3%}'.format(acc))
print('-'*10)
print('Micro Precision: {:.3%}'.format(micro_prec))
print('Micro Recall: {:.3%}'.format(micro_recall))
print('Micro F1: {:.3%}'.format(micro_f1))
print('-'*10)
print('Macro Precision: {:.3%}'.format(macro_prec))
print('Macro Recall: {:.3%}'.format(macro_recall))
print('Macro F1: {:.3%}'.format(macro_f1))
print('-'*10)
return macro_prec, macro_recall, macro_f1
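# A small worked example (toy data, not from a real run): with
# key = ['A', 'A', 'B'] and prediction = ['A', 'B', 'B'], per-expansion
# precision is A: 1/1 and B: 1/2, recall is A: 1/2 and B: 1/1, so the macro
# precision, recall and F1 are all 0.75, while the micro scores are all 2/3.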
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-g', type=str,
help='Gold file path')
parser.add_argument('-p', type=str,
help='Predictions file path')
parser.add_argument('-v', dest='v',
default=False, action='store_true',
help="Verbose Evaluation")
args = parser.parse_args()
p, r, f1 = run_evaluation(args)
print('Official Scores:')
print('P: {:.2%}, R: {:.2%}, F1: {:.2%}'.format(p,r,f1)) | 37.025316 | 109 | 0.61641 | 398 | 2,925 | 4.344221 | 0.218593 | 0.104106 | 0.065934 | 0.04627 | 0.245228 | 0.138809 | 0.055523 | 0.035859 | 0.035859 | 0.035859 | 0 | 0.017802 | 0.231795 | 2,925 | 79 | 110 | 37.025316 | 0.751669 | 0 | 0 | 0.107692 | 0 | 0 | 0.094668 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0 | 0.046154 | 0 | 0.107692 | 0.184615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e83b3fa2adcf8120002265b15bf8d59fd474f26b | 322 | py | Python | frameworks/Python/granian/run.py | http4k/FrameworkBenchmarks | ca11af53b90f92a12f987cc9aaf29ccaf3aec7e7 | [
"BSD-3-Clause"
] | 2 | 2019-09-23T16:12:35.000Z | 2020-08-29T05:59:51.000Z | frameworks/Python/granian/run.py | http4k/FrameworkBenchmarks | ca11af53b90f92a12f987cc9aaf29ccaf3aec7e7 | [
"BSD-3-Clause"
] | null | null | null | frameworks/Python/granian/run.py | http4k/FrameworkBenchmarks | ca11af53b90f92a12f987cc9aaf29ccaf3aec7e7 | [
"BSD-3-Clause"
] | 1 | 2019-09-23T16:12:41.000Z | 2019-09-23T16:12:41.000Z | import multiprocessing
import sys
from granian import Granian
if __name__ == '__main__':
interface = sys.argv[1]
Granian(
f"app_{interface}:main",
address="0.0.0.0",
port=8080,
workers=multiprocessing.cpu_count(),
backlog=2048,
interface=interface
).serve()
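# Example (hypothetical): `python run.py asgi` serves app_asgi:main on
# 0.0.0.0:8080 with one worker per CPU core.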
| 17.888889 | 44 | 0.614907 | 36 | 322 | 5.222222 | 0.638889 | 0.031915 | 0.031915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055319 | 0.270186 | 322 | 17 | 45 | 18.941176 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0.108696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e83d5d813be6f00c41b647168541843a2f0d4b8c | 7,428 | py | Python | pylogview/window.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | null | null | null | pylogview/window.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | 3 | 2020-11-01T23:57:39.000Z | 2020-11-02T01:21:48.000Z | pylogview/window.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | null | null | null | __all__ = ["Window"]
import locale
import os
import typing as t
from pylogview.common import (
ACTIVE_DELAY,
BLK,
BLK_B,
BLK_BL,
BLK_BR,
BLK_L,
BLK_R,
COLORS,
LOG_BG,
WIN_FRAME,
WIN_FRAME_ACTIVE,
WIN_FRAME_ERROR,
WIN_FRAME_LOAD,
WIN_FRAME_SELECT,
WIN_FRAME_SELECT_ACTIVE,
WIN_LINES,
WIN_TITLE,
tformat,
tprint,
)
from pylogview.reader import LogReader
if t.TYPE_CHECKING:
from pylogview.record import LogRecord
class Window(object):
__slots__ = [
"config",
"log",
"colors",
"name",
"term",
"_selected",
"active_delay",
"reader",
"_display_lines",
"_last_line",
"frame",
"x",
"y",
"w",
"h",
]
def __init__(self, filename, config, log):
self.config = config
self.log = log
self.colors = []
self.name = os.path.split(filename)[-1]
self.term = None
self._selected = False
self.active_delay = 0
self.reader = LogReader(self, filename)
self._display_lines = []
self._last_line = 0
self.frame = None
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@property
def selected(self):
return self._selected
@selected.setter
def selected(self, value):
self._selected = value
if value:
self.frame = self.colors[
WIN_FRAME_SELECT_ACTIVE if self.active_delay else WIN_FRAME_SELECT
]
else:
self.frame = self.colors[
WIN_FRAME_ACTIVE if self.active_delay else WIN_FRAME
]
def start(self, term):
self.term = term
self.colors.extend(COLORS[term.number_of_colors])
self.frame = self.colors[WIN_FRAME_LOAD]
self.draw_frame(True)
def load(self):
self.reader.preload()
self._update_display_lines()
if self.reader.isOpen:
self.frame = self.colors[WIN_FRAME_SELECT if self.selected else WIN_FRAME]
else:
self.frame = self.colors[WIN_FRAME_ERROR]
def resize(self, x, y, w, h, update):
self.x = x
self.y = y
self.w = w
self.h = h
if update:
self._update_display_lines()
self.refresh(True)
def refresh(self, force=False):
new_records = self.reader.read(0)
if new_records and not force:
self.active_delay = ACTIVE_DELAY
self.frame = self.colors[
WIN_FRAME_SELECT_ACTIVE if self._selected else WIN_FRAME_ACTIVE
]
self._update_display_lines(new_records)
if new_records or force:
self.draw_frame()
tformat(self.term.on_color(self.colors[LOG_BG]))
for i in range(self.h - 2):
print(
self.term.move(self.y + (self.h - 2 - i), self.x + 1)
+ (" " * (self.w - 2))
+ self.term.move(self.y + (self.h - 2 - i), self.x + 1)
+ (
self._display_lines[
len(self._display_lines) + self._last_line - i - 1
]
if len(self._display_lines) + self._last_line - i - 1 >= 0
else (" " * (self.w - 2))
)
)
if self._last_line == 0 and not new_records:
if self.active_delay > 0:
self.active_delay -= 1
else:
new_frame = self.colors[
WIN_FRAME_SELECT if self._selected else WIN_FRAME
]
if self.frame != new_frame:
self.frame = new_frame
self.draw_frame()
else:
self.frame = new_frame
def scroll_up(self, lines=1):
if len(self._display_lines) < self.h - 2:
return
max_scroll = 0 - len(self._display_lines) + 1 + (self.h - 2)
if self._last_line > max_scroll:
self._last_line -= lines
if self._last_line < max_scroll:
self._last_line = max_scroll
def scroll_down(self, lines=1):
if self._last_line < 0:
self._last_line += lines
if self._last_line > 0:
self._last_line = 0
def scroll_end(self):
self._last_line = 0
def page_up(self):
self.scroll_up(self.h - 2)
def page_down(self):
self.scroll_down(self.h - 2)
def draw_frame(self, fill=False):
tprint( # draw top edge and corners
self.term,
self.term.move(self.y, self.x),
self.term.color(self.frame) + self.term.on_color(self.colors[LOG_BG]),
BLK * int((self.w - len(self.name)) / 2),
self.term.color(self.colors[WIN_TITLE]) + self.term.on_color(self.frame),
self.term.bold,
self.name,
self.term.color(self.frame) + self.term.on_color(self.colors[LOG_BG]),
BLK
* int(
(
((self.w - len(self.name)) / 2)
+ (((self.w - len(self.name)) / 2) % 1)
)
- 18
),
self.term.color(self.colors[WIN_LINES]),
self.term.on_color(self.frame),
self.term.bold,
f"lines: {locale.format_string('%d', self.reader.lines, True):>9}",
self.term.color(self.frame) + self.term.on_color(self.colors[LOG_BG]),
BLK * 2,
)
tprint( # draw bottom edge and corners
self.term,
self.term.move(self.y + self.h - 1, self.x),
self.term.color(self.frame) + self.term.on_color(self.colors[LOG_BG]),
BLK_BL,
BLK_B * (self.w - 2),
BLK_BR,
)
# draw left and right edge and fill window
tformat(self.term.color(self.frame) + self.term.on_color(self.colors[LOG_BG]))
if fill:
for row in range(self.y + 1, self.y + self.h - 1):
print(
self.term.move(row, self.x) + BLK_L + (" " * (self.w - 2)) + BLK_R
)
else:
for row in range(self.y + 1, self.y + self.h - 1):
print(
self.term.move(row, self.x)
+ BLK_L
+ self.term.move(row, self.x + self.w - 1)
+ BLK_R
)
# reset formatting
tprint(self.term)
def _update_display_lines(self, new_records: "t.List[LogRecord]" = []):
if new_records:
scroll_at_end = self._last_line == 0
lines_added = 0
for record in new_records:
new_lines = record.display_lines
lines_added += len(new_lines)
self._display_lines.extend(new_lines)
if not scroll_at_end:
self.scroll_up(len(self._display_lines) - lines_added)
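            # NOTE: the slice expression below has no effect as written (its
            # result is discarded); it may have been intended to trim old
            # lines from the buffer.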
self._display_lines[
sum([len(record.display_lines) for record in new_records]) :
]
else:
self._display_lines = []
for record in self.reader.records:
self._display_lines.extend(record.display_lines)
self._last_line = 0
| 30.694215 | 86 | 0.511039 | 908 | 7,428 | 3.952643 | 0.135463 | 0.066871 | 0.050153 | 0.028977 | 0.458345 | 0.400947 | 0.351351 | 0.324882 | 0.28253 | 0.224575 | 0 | 0.011359 | 0.383683 | 7,428 | 241 | 87 | 30.821577 | 0.772608 | 0.015078 | 0 | 0.193548 | 0 | 0 | 0.023526 | 0.003693 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.02765 | 0.004608 | 0.110599 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e83e316f2e3b3f26c9825fbdb2f601a494f33be8 | 1,409 | py | Python | src/direct_kinematics.py | Foxnox/robotique-delpeyroux-monseigne | 304d027bb14506d79c57e178a0d7a92c509f7367 | [
"MIT"
] | null | null | null | src/direct_kinematics.py | Foxnox/robotique-delpeyroux-monseigne | 304d027bb14506d79c57e178a0d7a92c509f7367 | [
"MIT"
] | null | null | null | src/direct_kinematics.py | Foxnox/robotique-delpeyroux-monseigne | 304d027bb14506d79c57e178a0d7a92c509f7367 | [
"MIT"
] | null | null | null | from math import *
from Vertex import *
#Length of the three subparts of the robot leg
L1 = 51.0
L2 = 63.7
L3 = 93.0
Alpha = 20.69  # Mechanical constraint on Theta 2
Beta = 5.06  # Mechanical constraint on Theta 3
# Check that the given float is a valid radian value (between -2*pi and 2*pi)
def radValidation (radian):
return (radian <= 2 * pi and radian >= -2 * pi)
# Direct kinematics for our considered robot (specific to our leg setting)
def leg_dk(theta1, theta2, theta3, l1=L1, l2=L2, l3=L3, alpha=Alpha, beta=Beta):
Angle = Vertex(theta1,theta2,theta3)
    # Adjust theta2 and theta3 according to the mechanical constraints
theta2 += alpha
theta3 = 90-(alpha+beta+theta3)
#print "Angles : " + str(theta1) + " ; " + str(theta2) + " ; " + str(theta3)
theta1=radians(theta1)
theta2=-radians(theta2)
theta3=-radians(theta3)
    # Store the sine and cosine values in variables so each is computed only once
c_1 = cos(theta1)
c_2 = cos(theta2)
c_2_3 = cos(theta2 + theta3)
s_1 = sin(theta1)
s_2 = sin(theta2)
s_2_3 = sin(theta2 + theta3)
    # Compute the radial projection of the leg, given the robot's geometry
projection = l1 + (l2 * c_2) + (l3 * c_2_3)
#Calculation of the final position
Final = Vertex((projection * c_1), (projection * s_1), ((l2 * s_2) + (l3 * s_2_3)))
return Final
leg_dk(0, 0, 0)
leg_dk(90, 0, 0)
leg_dk(180, -30.501, -67.819)
leg_dk(0, -30.645, 38.501)
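# Sanity check (approximate values, computed by hand from the constants above):
# with all joint angles at zero, leg_dk(0, 0, 0) returns a Vertex at roughly
# (118.8, 0.0, -115.2).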
| 29.354167 | 104 | 0.69269 | 239 | 1,409 | 3.995816 | 0.405858 | 0.026178 | 0.039791 | 0.050262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098604 | 0.186657 | 1,409 | 47 | 105 | 29.978723 | 0.734729 | 0.412349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0.034483 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e840a2d9d608dab0f793efae68463a23df88258c | 2,395 | py | Python | examples/3Drendering/main.py | Sentient07/kivy | e5022e1cc84b1bcda6e4619d618509dc4ea7da04 | [
"MIT"
] | 2 | 2021-05-16T09:46:14.000Z | 2021-11-17T11:23:15.000Z | examples/3Drendering/main.py | Sentient07/kivy | e5022e1cc84b1bcda6e4619d618509dc4ea7da04 | [
"MIT"
] | 1 | 2016-11-11T13:45:42.000Z | 2016-11-11T13:45:42.000Z | examples/3Drendering/main.py | Sentient07/kivy | e5022e1cc84b1bcda6e4619d618509dc4ea7da04 | [
"MIT"
] | 2 | 2017-03-09T14:27:03.000Z | 2019-05-03T08:36:02.000Z | '''
3D Rotating Monkey Head
========================
This example demonstrates using OpenGL to display a rotating monkey head. This
includes loading a Blender OBJ file, shaders written in OpenGL's Shading
Language (GLSL), and using scheduled callbacks.
The monkey.obj file is an OBJ file output from the Blender free 3D creation
software. The file is text, listing vertices and faces and is loaded
using a class in the file objloader.py. The file simple.glsl is
a simple vertex and fragment shader written in GLSL.
'''
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.resources import resource_find
from kivy.graphics.transformation import Matrix
from kivy.graphics.opengl import *
from kivy.graphics import *
from objloader import ObjFile
class Renderer(Widget):
def __init__(self, **kwargs):
self.canvas = RenderContext(compute_normal_mat=True)
self.canvas.shader.source = resource_find('simple.glsl')
self.scene = ObjFile(resource_find("monkey.obj"))
super(Renderer, self).__init__(**kwargs)
with self.canvas:
self.cb = Callback(self.setup_gl_context)
PushMatrix()
self.setup_scene()
PopMatrix()
self.cb = Callback(self.reset_gl_context)
Clock.schedule_interval(self.update_glsl, 1 / 60.)
def setup_gl_context(self, *args):
glEnable(GL_DEPTH_TEST)
def reset_gl_context(self, *args):
glDisable(GL_DEPTH_TEST)
def update_glsl(self, *largs):
asp = self.width / float(self.height)
proj = Matrix().view_clip(-asp, asp, -1, 1, 1, 100, 1)
self.canvas['projection_mat'] = proj
self.canvas['diffuse_light'] = (1.0, 1.0, 0.8)
self.canvas['ambient_light'] = (0.1, 0.1, 0.1)
self.rot.angle += 1
def setup_scene(self):
Color(1, 1, 1, 1)
PushMatrix()
Translate(0, 0, -3)
self.rot = Rotate(1, 0, 1, 0)
m = list(self.scene.objects.values())[0]
UpdateNormalMatrix()
self.mesh = Mesh(
vertices=m.vertices,
indices=m.indices,
fmt=m.vertex_format,
mode='triangles',
)
PopMatrix()
class RendererApp(App):
def build(self):
return Renderer()
if __name__ == "__main__":
RendererApp().run()
| 31.513158 | 78 | 0.649269 | 323 | 2,395 | 4.681115 | 0.414861 | 0.042328 | 0.007937 | 0.007937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020274 | 0.237996 | 2,395 | 75 | 79 | 31.933333 | 0.808219 | 0.213779 | 0 | 0.076923 | 0 | 0 | 0.0416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.173077 | 0.019231 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
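# Note (usage sketch for the kivy example above): the working directory must
# contain the assets it loads at startup -- monkey.obj (the Blender-exported
# mesh), simple.glsl (the shader source) and objloader.py -- after which a
# plain `python main.py` with kivy installed should open the rotating head.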
e841a361de11568432b34d88eac781deb34afdb2 | 6,796 | py | Python | tests/tests_geomstats/test_quotient_metric.py | Maya95assal/geomstats | 628854a6ace5a9bfbdaa2a32be726ca61dc2d7a5 | [
"MIT"
] | null | null | null | tests/tests_geomstats/test_quotient_metric.py | Maya95assal/geomstats | 628854a6ace5a9bfbdaa2a32be726ca61dc2d7a5 | [
"MIT"
] | null | null | null | tests/tests_geomstats/test_quotient_metric.py | Maya95assal/geomstats | 628854a6ace5a9bfbdaa2a32be726ca61dc2d7a5 | [
"MIT"
] | null | null | null | """Unit tests for the quotient space."""
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.fiber_bundle import FiberBundle
from geomstats.geometry.general_linear import GeneralLinear
from geomstats.geometry.matrices import MatricesMetric
from geomstats.geometry.quotient_metric import QuotientMetric
from geomstats.geometry.spd_matrices import SPDMatrices, \
SPDMetricBuresWasserstein
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
class TestQuotientMetric(geomstats.tests.TestCase):
def setUp(self):
gs.random.seed(0)
n = 3
self.base = SPDMatrices(n)
self.base_metric = SPDMetricBuresWasserstein(n)
self.group = SpecialOrthogonal(n)
self.bundle = FiberBundle(
GeneralLinear(n), base=self.base, group=self.group,
ambient_metric=MatricesMetric(n, n))
self.quotient_metric = QuotientMetric(self.bundle)
def submersion(point):
return GeneralLinear.mul(point, GeneralLinear.transpose(point))
def tangent_submersion(tangent_vec, base_point):
product = GeneralLinear.mul(
base_point, GeneralLinear.transpose(tangent_vec))
return 2 * GeneralLinear.to_symmetric(product)
def horizontal_lift(tangent_vec, point, base_point=None):
if base_point is None:
base_point = submersion(point)
sylvester = gs.linalg.solve_sylvester(
base_point, base_point, tangent_vec)
return GeneralLinear.mul(sylvester, point)
self.bundle.submersion = submersion
self.bundle.tangent_submersion = tangent_submersion
self.bundle.horizontal_lift = horizontal_lift
self.bundle.lift = gs.linalg.cholesky
def test_belongs(self):
point = self.base.random_point()
result = self.bundle.belongs(point)
self.assertTrue(result)
def test_submersion(self):
mat = self.bundle.total_space.random_point()
point = self.bundle.submersion(mat)
result = self.bundle.belongs(point)
self.assertTrue(result)
def test_lift_and_submersion(self):
point = self.base.random_point()
mat = self.bundle.lift(point)
result = self.bundle.submersion(mat)
self.assertAllClose(result, point)
def test_tangent_submersion(self):
mat = self.bundle.total_space.random_point()
point = self.bundle.submersion(mat)
vec = self.bundle.total_space.random_point()
tangent_vec = self.bundle.tangent_submersion(vec, point)
result = self.base.is_tangent(tangent_vec, point)
self.assertTrue(result)
def test_horizontal_projection(self):
mat = self.bundle.total_space.random_point()
vec = self.bundle.total_space.random_point()
horizontal_vec = self.bundle.horizontal_projection(vec, mat)
product = GeneralLinear.mul(horizontal_vec, GeneralLinear.inverse(mat))
is_horizontal = GeneralLinear.is_symmetric(product)
self.assertTrue(is_horizontal)
def test_vertical_projection(self):
mat = self.bundle.total_space.random_point()
vec = self.bundle.total_space.random_point()
vertical_vec = self.bundle.vertical_projection(vec, mat)
result = self.bundle.tangent_submersion(vertical_vec, mat)
expected = gs.zeros_like(result)
self.assertAllClose(result, expected, atol=1e-5)
def test_horizontal_lift_and_tangent_submersion(self):
mat = self.bundle.total_space.random_point()
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_point())
horizontal = self.bundle.horizontal_lift(tangent_vec, mat)
result = self.bundle.tangent_submersion(horizontal, mat)
self.assertAllClose(result, tangent_vec)
def test_is_horizontal(self):
mat = self.bundle.total_space.random_point()
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_point())
horizontal = self.bundle.horizontal_lift(tangent_vec, mat)
result = self.bundle.is_horizontal(horizontal, mat)
self.assertTrue(result)
def test_is_vertical(self):
mat = self.bundle.total_space.random_point()
tangent_vec = self.bundle.total_space.random_point()
vertical = self.bundle.vertical_projection(tangent_vec, mat)
result = self.bundle.is_vertical(vertical, mat)
self.assertTrue(result)
def test_align(self):
point = self.bundle.total_space.random_point(2)
aligned = self.bundle.align(
point[0], point[1], tol=1e-10)
result = self.bundle.is_horizontal(
point[1] - aligned, point[1], atol=1e-5)
self.assertTrue(result)
def test_inner_product(self):
mat = self.bundle.total_space.random_point()
point = self.bundle.submersion(mat)
tangent_vecs = GeneralLinear.to_symmetric(
self.bundle.total_space.random_point(2)) / 10
result = self.quotient_metric.inner_product(
tangent_vecs[0], tangent_vecs[1], point=mat)
expected = self.base_metric.inner_product(
tangent_vecs[0], tangent_vecs[1], point)
self.assertAllClose(result, expected)
def test_exp(self):
mat = self.bundle.total_space.random_point()
point = self.bundle.submersion(mat)
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_point()) / 5
result = self.quotient_metric.exp(tangent_vec, point)
expected = self.base_metric.exp(tangent_vec, point)
self.assertAllClose(result, expected)
def test_log(self):
mats = self.bundle.total_space.random_point(2)
points = self.bundle.submersion(mats)
result = self.quotient_metric.log(points[1], points[0], tol=1e-10)
expected = self.base_metric.log(points[1], points[0])
self.assertAllClose(result, expected, atol=3e-4)
def test_squared_dist(self):
mats = self.bundle.total_space.random_point(2)
points = self.bundle.submersion(mats)
result = self.quotient_metric.squared_dist(
points[1], points[0], tol=1e-10)
expected = self.base_metric.squared_dist(points[1], points[0])
self.assertAllClose(result, expected, atol=1e-5)
def test_integrability_tensor(self):
mat = self.bundle.total_space.random_point()
point = self.bundle.submersion(mat)
tangent_vec = GeneralLinear.to_symmetric(
self.bundle.total_space.random_point()) / 5
self.assertRaises(
NotImplementedError,
lambda: self.bundle.integrability_tensor(
tangent_vec, tangent_vec, point))
| 40.694611 | 79 | 0.683343 | 802 | 6,796 | 5.591022 | 0.124688 | 0.115968 | 0.073595 | 0.098127 | 0.523194 | 0.499108 | 0.464764 | 0.41124 | 0.402988 | 0.355932 | 0 | 0.008311 | 0.221012 | 6,796 | 166 | 80 | 40.939759 | 0.838685 | 0.005003 | 0 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108696 | 1 | 0.137681 | false | 0 | 0.057971 | 0.007246 | 0.224638 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8422f23e2c70935609f3f6d65470b6a9595ada7 | 3,911 | py | Python | makeadditions/transform/llvm/cc_both.py | hutoTUM/MakeAdditions | 85fbe80c16d24abdddeee2d737f046a5bd1a9604 | [
"Apache-2.0"
] | null | null | null | makeadditions/transform/llvm/cc_both.py | hutoTUM/MakeAdditions | 85fbe80c16d24abdddeee2d737f046a5bd1a9604 | [
"Apache-2.0"
] | null | null | null | makeadditions/transform/llvm/cc_both.py | hutoTUM/MakeAdditions | 85fbe80c16d24abdddeee2d737f046a5bd1a9604 | [
"Apache-2.0"
] | null | null | null | """
C compiler
"""
from os import path
from ..Transformer import TransformerLlvm
from ...config import CLANG, LLVMLINK
from ...constants import (
COMPILERS,
DEPENDENCYFLAGS,
DEPENDENCYEMISSION,
EXECFILEEXTENSION,
OPTIMIZERFLAGS
)
from ...helper import no_duplicates
class TransformCCBoth(TransformerLlvm):
""" transform commands, that compile and link at the same time"""
@staticmethod
def can_be_applied_on(cmd):
return (any(cmd.bashcmd.startswith(s + " ") for s in COMPILERS) and
"-o /dev/null" not in cmd.bashcmd and
" -c " not in cmd.bashcmd and (
".c " in cmd.bashcmd or cmd.bashcmd.endswith(".c")))
@staticmethod
def apply_transformation_on(cmd, container):
        # tokenize the command, dropping the compiler name itself
tokens = cmd.bashcmd.split()[1:]
# remove optimizer flags
tokens = [t for t in tokens if t not in OPTIMIZERFLAGS]
# deactivate optimization
tokens.insert(0, "-O0")
# remove dependency emission
for deptoken in DEPENDENCYEMISSION:
if deptoken in tokens:
pos = tokens.index(deptoken)
del tokens[pos:pos + 2]
# Extract all c files
cfiles = [f for f in tokens if f.endswith(".c")]
# remove dependency flags
tokens = [t for t in tokens if t not in DEPENDENCYFLAGS]
if (len(cfiles) > 1):
tokens = [t for t in tokens if not t.endswith(".c")]
# Build the prepended compile flags
newcmd = ""
newtokens = tokens[:]
if "-o" in newtokens:
# remove output file
pos = tokens.index("-o")
newtokens.pop(pos)
newtokens.pop(pos)
for cfile in cfiles:
newpart = CLANG + " -c -emit-llvm "
# add -g flag, if it was not there before
if "-g" not in tokens:
newpart += "-g "
newcmd += (newpart + " ".join(newtokens) + " " + cfile +
" -o " + cfile[:-1] + "bc" + "; ")
# And build the link command
if "-o" in tokens:
# append .bc to the output file
pos = tokens.index("-o")
            # add a marker for executable files, i.e. files that are not .so
if ".so" not in tokens[pos + 1]:
tokens[pos + 1] += EXECFILEEXTENSION
tokens[pos + 1] += ".bc"
# replace -l flags, if the library was llvm-compiled earlier
tokens = [
container.libs.get("lib" + t[2:], t)
if t.startswith("-l") else t
for t in tokens]
# replace references to static libraries
tokens = [
container.libs.get(path.basename(t[:-2]), t)
if t.endswith(".a") else t
for t in tokens]
# filter all command line options except -o
flagstarts = ["-", "'-", '"-']
tokens = [t for t in tokens if not (
any(t.startswith(start) for start in flagstarts)) or t == "-o"]
cmd.bashcmd = (newcmd + LLVMLINK + " " +
" ".join([c[:-1] + "bc" for c in cfiles]) + " " +
" ".join(no_duplicates(tokens)))
return cmd
else:
# build the new command
newcmd = CLANG + " -c -emit-llvm "
# add -g flag, if it was not there before
if "-g" not in tokens:
newcmd += "-g "
if "-o" in tokens:
# append .x.bc to the output file
pos = tokens.index("-o")
tokens[pos + 1] += EXECFILEEXTENSION + ".bc"
cmd.bashcmd = newcmd + " ".join(tokens)
return cmd
| 32.591667 | 79 | 0.499872 | 438 | 3,911 | 4.447489 | 0.305936 | 0.053388 | 0.0154 | 0.021561 | 0.222279 | 0.198665 | 0.148871 | 0.148871 | 0.12423 | 0.091376 | 0 | 0.005481 | 0.393506 | 3,911 | 119 | 80 | 32.865546 | 0.815767 | 0.172079 | 0 | 0.236111 | 0 | 0 | 0.03773 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.069444 | 0.013889 | 0.152778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
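# Example (illustrative sketch): rough before/after shape of the rewrite that
# TransformCCBoth performs, using hypothetical stand-ins for the project's
# command/container objects, and assuming "gcc" is listed in COMPILERS and
# "-O2" in OPTIMIZERFLAGS (both come from makeadditions' constants/config).
#
# class FakeCmd:
#     def __init__(self, bashcmd):
#         self.bashcmd = bashcmd
#
# class FakeContainer:
#     libs = {}
#
# cmd = FakeCmd("gcc -O2 -o hello hello.c")
# if TransformCCBoth.can_be_applied_on(cmd):
#     out = TransformCCBoth.apply_transformation_on(cmd, FakeContainer())
#     print(out.bashcmd)
#     # roughly: <CLANG> -c -emit-llvm -g -O0 -o hello<EXECFILEEXTENSION>.bc hello.c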
e8434397239e2d89b6f53639c72378ceefd5c1c7 | 362 | py | Python | tests/template/test_text_node.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
] | null | null | null | tests/template/test_text_node.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
] | null | null | null | tests/template/test_text_node.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
] | null | null | null |
from unittest import TestCase
from peco.template import TextNode
from io import StringIO
class TestTextNode(TestCase):
def test_write(self):
content = "this is text nodes content."
node = TextNode(content)
with StringIO() as stream:
node.write(stream)
self.assertEqual(stream.getvalue(), content) # test
| 24.133333 | 64 | 0.665746 | 42 | 362 | 5.714286 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.256906 | 362 | 14 | 65 | 25.857143 | 0.892193 | 0.01105 | 0 | 0 | 0 | 0 | 0.076056 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e8434d08a34bc411454997420da18fb25ec59dd5 | 801 | py | Python | src/engine/main.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | src/engine/main.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | src/engine/main.py | loghinalexandru/blackboard-greenboard | 80332bf7709e602a4d5ada31b3cf95801c06190f | [
"MIT"
] | null | null | null | import cv2
import sys
def get_note(path):
    original = cv2.imread(path)
    # Mask the greenish hues in HSV space to isolate the board
    hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
    green_mask = cv2.inRange(hsv, (25, 52, 72), (102, 255, 255))
    filtered = cv2.bitwise_and(original, original, mask=green_mask)
    # Dilate the masked image with a 10x10 rectangle to thicken the strokes
    filtered = cv2.morphologyEx(filtered, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10)))
    # Binarize, erode away small noise, then invert so the writing is dark on white
    gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2GRAY)
    _, gray = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    gray = cv2.morphologyEx(gray, cv2.MORPH_ERODE, cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)))
    return cv2.bitwise_not(gray)
def process_image(path, new_path, _):
buffer = get_note(path)
cv2.imwrite(new_path, buffer)
if __name__ == '__main__':
cv2.imwrite('result.jpg', get_note(sys.argv[1])) | 40.05 | 111 | 0.717853 | 115 | 801 | 4.765217 | 0.452174 | 0.080292 | 0.040146 | 0.116788 | 0.131387 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072886 | 0.143571 | 801 | 20 | 112 | 40.05 | 0.725948 | 0 | 0 | 0 | 0 | 0 | 0.022444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
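# Usage sketch (paths are illustrative): the script takes a photo of a green
# board on the command line and writes the extracted writing to result.jpg,
#   python main.py photo_of_board.jpg
# or, from Python, via the batch-oriented helper:
#   process_image('photo_of_board.jpg', 'note.png', None)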
e84ada279ae6eea14855ee947b1f91a3c3a8e830 | 1,470 | py | Python | standup/utils.py | rlr/standup | 998341af354ed0ddcd15b673ea7af090a7efbce6 | [
"BSD-3-Clause"
] | 2 | 2015-06-21T16:04:07.000Z | 2015-11-09T00:30:19.000Z | standup/utils.py | rlr/standup | 998341af354ed0ddcd15b673ea7af090a7efbce6 | [
"BSD-3-Clause"
] | null | null | null | standup/utils.py | rlr/standup | 998341af354ed0ddcd15b673ea7af090a7efbce6 | [
"BSD-3-Clause"
] | null | null | null | import re
import simplejson as json
from flask import Response, request
from unidecode import unidecode
_PUNCT_RE = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _PUNCT_RE.split(text.lower()):
result.extend(unidecode(unicode(word)).split())
return unicode(delim.join(result))
def json_requested():
"""Check if json is the preferred output format for the request."""
best = request.accept_mimetypes.best_match(
['application/json', 'text/html'])
return (best == 'application/json' and
request.accept_mimetypes[best] >
request.accept_mimetypes['text/html'])
def jsonify(obj):
"""Dump an object to JSON and create a Response object from the dump.
Unlike Flask's native implementation, this works on lists.
"""
dump = json.dumps(obj)
return Response(dump, mimetype='application/json')
def truthify(s):
"""Returns a boolean from a string"""
try:
return str(s).lower() in ['true', 't', '1']
except (TypeError, ValueError, UnicodeEncodeError):
return False
def numerify(s, default=None, lower=None, upper=None):
"""Converts a string to an integer"""
if s is None:
s = default
num = int(s)
if lower is not None and num < lower:
num = lower
if upper is not None and num > upper:
num = upper
return num
| 27.222222 | 73 | 0.631293 | 194 | 1,470 | 4.731959 | 0.453608 | 0.042484 | 0.071895 | 0.056645 | 0.03268 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000883 | 0.229252 | 1,470 | 53 | 74 | 27.735849 | 0.809356 | 0.191156 | 0 | 0 | 0 | 0 | 0.07279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.121212 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
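# Examples (doctest-style sketch of the helpers above; Python 2 era, hence the
# `unicode` results -- shown for illustration, not executed here):
# >>> slugify(u'Hello, World!')
# u'hello-world'
# >>> truthify('T')
# True
# >>> truthify(None)
# False
# >>> numerify('15', lower=0, upper=10)
# 10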
e84bbaa86bf4132350630d8297f2d5f0334f7ce7 | 500 | py | Python | scripts/040_ld_ukb/utils/compute_neale_samples.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 3 | 2020-12-07T15:06:41.000Z | 2021-05-25T06:03:38.000Z | scripts/040_ld_ukb/utils/compute_neale_samples.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 1 | 2020-07-01T14:45:38.000Z | 2020-07-01T15:15:55.000Z | scripts/040_ld_ukb/utils/compute_neale_samples.py | miltondp/phenomexcan | 38390ac21987f1e72835c42919c53abd1a35cb7e | [
"MIT"
] | 1 | 2020-08-20T13:23:40.000Z | 2020-08-20T13:23:40.000Z | import os
import pandas as pd
# Load the Neale-lab sample sheet (both sexes) and the sample-QC table, keeping
# only the columns needed to match records by plate and well
samples_neale = pd.read_csv('samples.both_sexes.tsv.bgz', compression='gzip', delim_whitespace=True).drop_duplicates()
samples_qc = pd.read_csv('samplesqc.txt', sep=' ', usecols=['eid', 'Plate.Name', 'Well']).rename(columns={'Plate.Name': 'plate_name', 'Well': 'well'})
# Match QC rows to Neale samples on plate/well and write out the unique eids
samples_merge = pd.merge(samples_neale, samples_qc, on=['plate_name', 'well'])
assert samples_merge['eid'].is_unique
samples_merge['eid'].to_csv('samples_neale_eids.csv', index=False, header=True)
| 45.454545 | 150 | 0.742 | 75 | 500 | 4.706667 | 0.546667 | 0.101983 | 0.110482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072 | 500 | 10 | 151 | 50 | 0.760776 | 0 | 0 | 0 | 0 | 0 | 0.262525 | 0.096192 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e84d2dd77751173d0cb771aff7cdb6212c6bc70e | 7,341 | py | Python | networks/utils.py | cherise215/atria_segmentation_2018 | 6d17cfc8b948cb064c5d6a836d94b5540ecc0dcd | [
"MIT"
] | 14 | 2019-03-19T14:15:36.000Z | 2022-02-17T04:37:17.000Z | networks/utils.py | cherise215/atria_segmentation_2018 | 6d17cfc8b948cb064c5d6a836d94b5540ecc0dcd | [
"MIT"
] | 2 | 2020-10-30T02:21:53.000Z | 2021-12-06T20:54:25.000Z | networks/utils.py | cherise215/atria_segmentation_2018 | 6d17cfc8b948cb064c5d6a836d94b5540ecc0dcd | [
"MIT"
] | 4 | 2019-05-27T13:27:15.000Z | 2022-02-28T01:29:44.000Z | import torch
import torch.nn as nn
from torch.optim import lr_scheduler
class HookBasedFeatureExtractor(nn.Module):
def __init__(self, submodule, layername, upscale=False):
super(HookBasedFeatureExtractor, self).__init__()
self.submodule = submodule
self.submodule.eval()
self.layername = layername
self.outputs_size = None
self.outputs = None
self.inputs = None
self.inputs_size = None
self.upscale = upscale
def get_input_array(self, m, i, o):
if isinstance(i, tuple):
self.inputs = [i[index].data.clone() for index in range(len(i))]
self.inputs_size = [input.size() for input in self.inputs]
else:
            self.inputs = i.data.clone()
            self.inputs_size = self.inputs.size()
print('Input Array Size: ', self.inputs_size)
def get_output_array(self, m, i, o):
if isinstance(o, tuple):
self.outputs = [o[index].data.clone() for index in range(len(o))]
self.outputs_size = [output.size() for output in self.outputs]
else:
self.outputs = o.data.clone()
self.outputs_size = self.outputs.size()
print('Output Array Size: ', self.outputs_size)
def rescale_output_array(self, newsize):
us = nn.Upsample(size=newsize[2:], mode='bilinear')
        if isinstance(self.outputs, list):
            for index in range(len(self.outputs)):
                self.outputs[index] = us(self.outputs[index]).data
        else:
            self.outputs = us(self.outputs).data
def forward(self, x):
target_layer = self.submodule._modules.get(self.layername)
# Collect the output tensor
h_inp = target_layer.register_forward_hook(self.get_input_array)
h_out = target_layer.register_forward_hook(self.get_output_array)
self.submodule(x)
h_inp.remove()
h_out.remove()
# Rescale the feature-map if it's required
if self.upscale: self.rescale_output_array(x.size())
return self.inputs, self.outputs
import math
def spatial_pyramid_pool(previous_conv, batch_size, previous_conv_size, out_bin_sizes):
    '''
    ref: Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition
    previous_conv: output tensor of the previous convolution layer
    batch_size: number of images in the batch
    previous_conv_size: [height, width] of the previous convolution layer's feature map
    out_bin_sizes: expected output bin sizes of the max pooling levels
    returns: a tensor of shape [batch_size x n], the concatenation of the multi-level pooling results
    '''
# print(previous_conv.size())
for i in range(0, len(out_bin_sizes)):
        # print(previous_conv_size)  # debug output, disabled
        # assert previous_conv_size[0] % out_bin_sizes[i] == 0, 'please make sure feature size can be divided by bins'
h_wid = int(math.ceil(previous_conv_size[0] / out_bin_sizes[i]))
w_wid = int(math.ceil(previous_conv_size[1] / out_bin_sizes[i]))
# h_stride = int(math.floor(previous_conv_size[0] / out_bin_sizes[i]))
# w_stride = int(math.floor(previous_conv_size[1] / out_bin_sizes[i]))
h_pad = (h_wid * out_bin_sizes[i] - previous_conv_size[0] + 1) // 2
w_pad = (w_wid * out_bin_sizes[i] - previous_conv_size[1] + 1) // 2
maxpool = nn.MaxPool2d(kernel_size=(h_wid, w_wid), stride=(h_wid, w_wid),padding=(h_pad,w_pad))
x = maxpool(previous_conv)
if (i == 0):
spp = x.view(batch_size, -1)
#print("spp size:",spp.size())
else:
# print("size:",spp.size())
spp = torch.cat((spp, x.view(batch_size, -1)), dim=1)
# print("spp size:",spp.size())
return spp
'''
https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
'''
class GradientReversalFunction(torch.autograd.Function):
def __init__(self, Lambda):
super(GradientReversalFunction, self).__init__()
self.Lambda = Lambda
def forward(self, input):
return input.view_as(input)
def backward(self, grad_output):
# Multiply gradient by -self.Lambda
return self.Lambda * grad_output.neg()
class GradientReversalLayer(nn.Module):
def __init__(self, Lambda, use_cuda=False):
super(GradientReversalLayer, self).__init__()
self.Lambda = Lambda
if use_cuda:
self.cuda()
def forward(self, input):
return GradientReversalFunction(self.Lambda)(input)
def change_lambda(self, Lambda):
self.Lambda = Lambda
def gram_matrix_2D(y):
'''
give torch 4d tensor, calculate Gram Matrix
:param y:
:return:
'''
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
def adjust_learning_rate(optimizer, lr):
"""Sets the learning rate to a fixed number"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_scheduler(optimizer, lr_policy,lr_decay_iters=5,epoch_count=None,niter=None,niter_decay=None):
print('lr_policy = [{}]'.format(lr_policy))
if lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + epoch_count - niter) / float(niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_decay_iters, gamma=0.5)
elif lr_policy == 'step2':
scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_decay_iters, gamma=0.1)
elif lr_policy == 'plateau':
print('schedular=plateau')
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, threshold=0.01, patience=5)
elif lr_policy == 'plateau2':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif lr_policy == 'step_warmstart':
def lambda_rule(epoch):
#print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 100:
lr_l = 1
elif 100 <= epoch < 200:
lr_l = 0.1
elif 200 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif lr_policy == 'step_warmstart2':
def lambda_rule(epoch):
#print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 50:
lr_l = 1
elif 50 <= epoch < 100:
lr_l = 0.1
elif 100 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', lr_policy)
return scheduler
def cal_cls_acc(pred, gt):
    '''
    input tensor
    :param pred: network output N*n_classes
    :param gt: ground truth N [label ids]
    :return: (correct, count) for accuracy computation
    '''
    pred_class = pred.data.max(1)[1].cpu()
    correct = gt.cpu().eq(pred_class).sum()
    count = gt.size(0)
    return correct, count
| 37.263959 | 116 | 0.628797 | 1,001 | 7,341 | 4.413586 | 0.226773 | 0.039837 | 0.039837 | 0.019013 | 0.308737 | 0.269805 | 0.252829 | 0.210729 | 0.160706 | 0.106383 | 0 | 0.018155 | 0.257186 | 7,341 | 196 | 117 | 37.454082 | 0.792041 | 0.157608 | 0 | 0.214815 | 0 | 0 | 0.031521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140741 | false | 0 | 0.02963 | 0.022222 | 0.281481 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
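# Example (illustrative sketch, assuming the definitions above are in scope):
# spatial pyramid pooling over a dummy (N, C, H, W) feature map. Each sample
# yields C * sum(b*b) features for the chosen bin sizes.
import torch
feat = torch.randn(4, 8, 13, 13)                       # 4 images, 8 channels, 13x13 maps
spp = spatial_pyramid_pool(feat, 4, [13, 13], [1, 2, 4])
print(spp.shape)                                       # torch.Size([4, 168]) = [4, 8*(1+4+16)]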
e8528f5b3cf998738b7ebe3d8946d6b06021d610 | 2,844 | py | Python | src/sandbox/seccomp_loader.py | ospiper/Sandbox-Runner | d6a463fa7744ea2a88553eef197b6f8a9f4d91f0 | [
"MIT"
] | null | null | null | src/sandbox/seccomp_loader.py | ospiper/Sandbox-Runner | d6a463fa7744ea2a88553eef197b6f8a9f4d91f0 | [
"MIT"
] | null | null | null | src/sandbox/seccomp_loader.py | ospiper/Sandbox-Runner | d6a463fa7744ea2a88553eef197b6f8a9f4d91f0 | [
"MIT"
] | null | null | null | import sys
from seccomp import *
import errno
import os
from runner_config import RunnerConfig
def load_seccomp_rule(config, command):
if not isinstance(config, RunnerConfig) or len(command) <= 0:
return
if config is not None and config.seccomp_rule is not None:
rule = config.seccomp_rule
try:
f = None
# print('Loading seccomp rule:', config.seccomp_rule)
if rule == 'general':
f = SyscallFilter(defaction=ALLOW)
forbidden_syscalls = [
'clone', 'fork', 'vfork', 'kill'
]
for syscall in forbidden_syscalls:
f.add_rule(KILL, syscall)
f.add_rule(ERRNO(errno.EACCES), 'socket')
# f.add_rule(KILL, 'read', Arg(0, NE, sys.stdin.fileno()))
# f.add_rule(KILL, 'write', Arg(0, NE, sys.stdout.fileno()))
# f.add_rule(KILL, 'write', Arg(0, NE, sys.stderr.fileno()))
if not config.file_io:
f.add_rule(KILL, 'open', Arg(1, MASKED_EQ, os.O_WRONLY, os.O_WRONLY))
f.add_rule(KILL, 'open', Arg(1, MASKED_EQ, os.O_RDWR, os.O_RDWR))
f.add_rule(KILL, 'openat', Arg(2, MASKED_EQ, os.O_WRONLY, os.O_WRONLY))
f.add_rule(KILL, 'openat', Arg(2, MASKED_EQ, os.O_RDWR, os.O_RDWR))
# f.add_rule(KILL, "execve", Arg(1, NE, id(command)))
if rule == 'c/c++':
f = SyscallFilter(defaction=KILL)
f.add_rule(ALLOW, 'read', Arg(0, EQ, sys.stdin.fileno()))
f.add_rule(ALLOW, 'write', Arg(0, EQ, sys.stdout.fileno()))
f.add_rule(ALLOW, 'write', Arg(0, EQ, sys.stderr.fileno()))
f.add_rule(ALLOW, 'fstat')
f.add_rule(ALLOW, 'ioctl')
f.add_rule(ALLOW, 'sigaltstack')
f.add_rule(ALLOW, 'rt_sigaction')
f.add_rule(ALLOW, 'exit_group')
                if not config.file_io:
                    # kill any attempt to open a file for writing when file I/O is disabled
                    f.add_rule(KILL, 'open', Arg(1, MASKED_EQ, os.O_WRONLY, os.O_WRONLY))
                    f.add_rule(KILL, 'open', Arg(1, MASKED_EQ, os.O_RDWR, os.O_RDWR))
                    f.add_rule(KILL, 'openat', Arg(2, MASKED_EQ, os.O_WRONLY, os.O_WRONLY))
                    f.add_rule(KILL, 'openat', Arg(2, MASKED_EQ, os.O_RDWR, os.O_RDWR))
allowed_syscalls = [
'mmap', 'mprotect', 'munmap', 'uname', 'arch_prctl', 'brk', 'access', 'close',
'readlink', 'sysinfo', 'writev', 'lseek', 'clock_gettime'
]
for syscall in allowed_syscalls:
f.add_rule(ALLOW, syscall)
if f is not None:
f.load()
        except OSError:
            pass
| 48.20339 | 98 | 0.519691 | 370 | 2,844 | 3.824324 | 0.251351 | 0.065018 | 0.130035 | 0.110247 | 0.414841 | 0.401413 | 0.381625 | 0.381625 | 0.381625 | 0.380919 | 0 | 0.010741 | 0.345288 | 2,844 | 58 | 99 | 49.034483 | 0.749194 | 0.09775 | 0 | 0.04 | 0 | 0 | 0.085547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.02 | 0.1 | 0 | 0.14 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
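# Usage sketch (hedged): the filter is meant to be installed in the child
# process just before exec'ing the sandboxed command. RunnerConfig's real
# constructor lives in runner_config; the keyword form below is hypothetical.
#
# import os
# from runner_config import RunnerConfig
# config = RunnerConfig(seccomp_rule='general', file_io=False)  # hypothetical ctor
# command = ['/usr/bin/python3', 'solution.py']
# pid = os.fork()
# if pid == 0:
#     load_seccomp_rule(config, command)  # applies to this process only
#     os.execv(command[0], command)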
e852d9f92dbf12a736c246fd82bf621bb0b07ad9 | 6,278 | py | Python | tools/getrsttitle.py | collivier/doc | 700e42cd4e51e95af865d798ab5441fbf92c0852 | [
"Apache-2.0"
] | 11 | 2018-01-03T12:05:47.000Z | 2021-05-23T15:54:25.000Z | tools/getrsttitle.py | collivier/doc | 700e42cd4e51e95af865d798ab5441fbf92c0852 | [
"Apache-2.0"
] | null | null | null | tools/getrsttitle.py | collivier/doc | 700e42cd4e51e95af865d798ab5441fbf92c0852 | [
"Apache-2.0"
] | 6 | 2018-01-03T12:05:59.000Z | 2021-09-07T07:33:53.000Z | #!/usr/bin/env python3
### ===========================================================================
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
### Copyright (C) 2021 Deutsche Telekom AG
### ============LICENSE_END====================================================
#
# getrsttitle.py
# AUTHOR(S):
# Thomas Kulik, Deutsche Telekom AG, 2021
# DESCRIPTION:
# Processes a list of rst files and retrieves the first title for every single rst file.
# Copy program to {branch} directory of cloned ONAP documentation and run it.
# USAGE:
# python3 getrsttitle.py filename
#
# Helpful resources:
# https://regex101.com/r/YNYK2Q/1/
# https://stackoverflow.com/questions/20312443/how-to-find-title-a-la-restructuredtext
#
import re
import os.path
import sys
import argparse
#
# argument handling
#
parser = argparse.ArgumentParser(description='Processes a list of rst files and retrieves the first title for every single rst file.')
parser.add_argument('filename')
args = parser.parse_args()
# regex to find title underlined with various characters
#regex1 = r"(?:^|\n)(?!\=)([^\n\r]+)\r?\n(\=+)(?:\r?\n| *$)"
#regex2 = r"(?:^|\n)(?!\-)([^\n\r]+)\r?\n(\-+)(?:\r?\n| *$)"
#regex3 = r"(?:^|\n)(?!\~)([^\n\r]+)\r?\n(\~+)(?:\r?\n| *$)"
#regex4 = r"(?:^|\n)(?!\#)([^\n\r]+)\r?\n(\#+)(?:\r?\n| *$)"
#regex5 = r"(?:^|\n)(?!\*)([^\n\r]+)\r?\n(\*+)(?:\r?\n| *$)"
# there is a problem with raw strings (r"...") in the regex search below
# workaround: using \\ to mask special characters in regex
regex_list = [
"(?:^|\\n)(?!\\=)([^\\n\\r]+)\\r?\\n(\\=+)(?:\\r?\\n| *$)",
"(?:^|\\n)(?!\\-)([^\\n\\r]+)\\r?\\n(\\-+)(?:\\r?\\n| *$)",
"(?:^|\\n)(?!\\~)([^\\n\\r]+)\\r?\\n(\\~+)(?:\\r?\\n| *$)",
"(?:^|\\n)(?!\\#)([^\\n\\r]+)\\r?\\n(\\#+)(?:\\r?\\n| *$)",
"(?:^|\\n)(?!\\*)([^\\n\\r]+)\\r?\\n(\\*+)(?:\\r?\\n| *$)",
]
# DBUG only
#for regex in regex_list:
# print(repr(regex))
#filename = './master_indexrst_docs_root.log'
#filename = './master_rstfiles.log'
if os.path.isfile(args.filename):
with open(args.filename) as fn:
# read first line
line = fn.readline()
#print("DBUG: line={}".format(line))
file_cnt = 0
while line:
rstfile = "./" + re.sub('\[|\]', '', line).strip()
repository_tmp1 = re.sub('\].+$', '',line).strip()
repository = re.sub('\[', '',repository_tmp1).strip()
project_tmp1 = re.sub('\].+$', '',line).strip()
project_tmp2 = re.sub('\/.+$', '',project_tmp1).strip()
project = re.sub('\[', '',project_tmp2).strip()
#print("DBUG: file #{} {}".format(file_cnt, rstfile))
#print("DBUG: repository #{} {}".format(file_cnt, repository))
#print("DBUG: project #{} {}".format(file_cnt, project))
file_cnt += 1
if os.path.isfile(rstfile):
with open(rstfile, 'r') as content:
content_rstfile = content.read()
#print("DBUG: content_rstfile = \n{}".format(content_rstfile))
regex_cnt = 0
for regex in regex_list:
regex_cnt += 1
m = re.search(regex, content_rstfile, re.MULTILINE)
#print("DBUG: using regex " + repr(regex))
#print("DBUG: using regex1 " + repr(regex1))
#print("DBUG: regex_cnt = {}".format(regex_cnt))
if m:
match = m.group(1)
#print ("DBUG: |REGEX| {} |REGEXCNT| {} |FILECNT| {} |FILE| {} |MATCH| {}".format(repr(regex), regex_cnt, file_cnt, rstfile, match))
# end regex loop if we have a title
break
else:
match = "NO-TITLE-FOUND"
#print ("DBUG: NO-TITLE-FOUND")
            else:
                match = "FILE-NOT-FOUND"
                print("ERR: File {} does not exist".format(rstfile))
#print ("DBUG: |REGEX| {} |REGEXCNT| {} |FILECNT| {} |FILE| {} |MATCH| {}".format(repr(regex), regex_cnt, file_cnt, rstfile, match))
#print ("DBUG: file #{} '{}' '{}'".format(file_cnt, rstfile, match))
# clean up result and print
match_1 = match.replace(",", "") # remove ,
match_final = match_1.strip() # remove \n
print ("{},{},{},{}".format(project.strip(), repository.strip(), line.strip(), match_final.strip()))
# read next line and loop
line = fn.readline()
else:
print ("ERR: File {} does not exist".format(args.filename))
sys.exit()
#
# example code to show detailed regex matches and group content
# to be used in a future version of this program
#
# matches = re.finditer(regex2, content, re.MULTILINE)
# for matchNum, match in enumerate(matches, start=1):
# print ("Match {matchNum} was found at {start}-{end}: {match}".format(matchNum = matchNum, start = match.start(), end = match.end(), match = match.group()))
# print ("{match}".format(match = match.group()))
# for groupNum in range(0, len(match.groups())):
# groupNum = groupNum + 1
# print ("Group {groupNum} found at {start}-{end}: {group}".format(groupNum = groupNum, start = match.start(groupNum), end = match.end(groupNum), group = match.group(groupNum)))
# print ("Test:" "{group}".format(group = match.group(1)))
#
#
# example code for pandas
# to be used in a future version of this program
#
# import pandas as pd
# pd.set_option('display.max_rows', 500)
# pd.set_option('display.max_columns', 500)
# pd.set_option('display.width', 1000)
#
# table = pd.read_csv("master_table.csv")
# print(table)
# | 41.853333 | 185 | 0.536636 | 754 | 6,278 | 4.408488 | 0.301061 | 0.015042 | 0.009025 | 0.012034 | 0.234356 | 0.18201 | 0.18201 | 0.162154 | 0.129663 | 0.129663 | 0 | 0.012963 | 0.238133 | 6,278 | 150 | 186 | 41.853333 | 0.681999 | 0.585059 | 0 | 0.106383 | 0 | 0 | 0.19468 | 0.104796 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085106 | 0 | 0.085106 | 0.06383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
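# Example (illustrative sketch, independent of the file-list processing
# above): the '=' underline regex applied to a tiny in-memory rst snippet.
import re
sample = "Some preamble\n\nMy Title\n========\n\nBody text.\n"
regex = "(?:^|\\n)(?!\\=)([^\\n\\r]+)\\r?\\n(\\=+)(?:\\r?\\n| *$)"
m = re.search(regex, sample, re.MULTILINE)
print(m.group(1) if m else "NO-TITLE-FOUND")  # -> My Title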
e8547f5404fd566ef5609ffcc94cae8b77adc6a9 | 617 | py | Python | entropy/devideN.py | ChengyuSun/edGNN | 2721c2ae6ac5da20d353632b4b51f5cf2c2c7176 | [
"MIT"
] | 2 | 2020-03-19T04:54:52.000Z | 2022-02-04T03:34:43.000Z | entropy/devideN.py | ChengyuSun/edGNN_entropy | 2721c2ae6ac5da20d353632b4b51f5cf2c2c7176 | [
"MIT"
] | null | null | null | entropy/devideN.py | ChengyuSun/edGNN_entropy | 2721c2ae6ac5da20d353632b4b51f5cf2c2c7176 | [
"MIT"
] | null | null | null | Nn=20000
# Build an N x N table n[i][j] with the recurrence below; the script finally
# writes out the per-row sums of this table.
def devide(N):
    n = [[0] * N for i in range(0, N)]
    for i in range(N):
        for j in range((i + 1) // 2):
            if i == 0:
                n[i][j] = 1
            else:
                if j == 0:
                    # first column: sum of the previous row
                    n[i][j] = sum(n[i - 1])
                else:
                    if i + 1 == 2 * (j + 1):
                        n[i][j] = n[i - j - 1][j]
                    else:
                        n[i][j] = n[i - j - 1][j] + 1
        n[i][i] = 1
    return n
dN_matrix=devide(Nn)
with open("./data2/devide_"+str(Nn)+"_Nodes.csv","w") as fc:
for i in range(Nn):
fc.write(str(sum(dN_matrix[i]))+'\n')
| 25.708333 | 60 | 0.356564 | 103 | 617 | 2.097087 | 0.281553 | 0.074074 | 0.083333 | 0.152778 | 0.194444 | 0.194444 | 0.074074 | 0.074074 | 0 | 0 | 0 | 0.061584 | 0.447326 | 617 | 23 | 61 | 26.826087 | 0.571848 | 0 | 0 | 0.142857 | 0 | 0 | 0.045528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
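# Example (sanity-check sketch): re-run the recurrence for a tiny N and print
# the per-row sums, mirroring what the script writes to its CSV but without
# touching the filesystem.
small = devide(10)
for i in range(10):
    print(i, sum(small[i]))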
e85ae3b1c7bfa5f2342bfddbe14827386148b1a0 | 3,010 | py | Python | steem/markdown.py | steem-guides/roster | 73870f4d0928df4eac1bbdcaecb9850aa74c0a9e | [
"MIT"
] | null | null | null | steem/markdown.py | steem-guides/roster | 73870f4d0928df4eac1bbdcaecb9850aa74c0a9e | [
"MIT"
] | 1 | 2021-06-18T21:05:19.000Z | 2021-06-18T21:05:19.000Z | steem/markdown.py | steem-guides/roster | 73870f4d0928df4eac1bbdcaecb9850aa74c0a9e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import re
import html
from bs4 import BeautifulSoup
from markdown import markdown
REGEX_IMAGE_URL = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)\.(jpg|jpeg|png|gif|svg)"
# General URL pattern for get_links(); defined here because that method
# references URL_REGEX. Same scheme/host/path shape as the image pattern, with
# non-capturing groups so re.findall() returns whole URLs.
URL_REGEX = r"https?:\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b(?:[-a-zA-Z0-9@:%_\+.~#?&//=]*)"
class SteemMarkdown:
def __init__(self, text):
self.text = text
def get_top_image(self, regex=False):
if regex:
# follow markdown format
m = re.search(r"!\[(.*)\]\((\S+)\)", self.text)
if m:
pic_url = m.group(2)
return pic_url
# follow url format
m = re.search(REGEX_IMAGE_URL, self.text)
if m:
pic_url = m.group(0)
return pic_url
else:
links = self.get_img_links()
if links and len(links) > 0:
return links[0]
return None
def get_rendered_text(self):
""" Converts a markdown string to plaintext """
# md -> html -> text since BeautifulSoup can extract text cleanly
html = markdown(self.text)
# remove code snippets
html = re.sub(r'<pre>(.*?)</pre>', ' ', html)
html = re.sub(r'<code>(.*?)</code >', ' ', html)
# extract text
soup = BeautifulSoup(html, "html.parser")
text = ''.join(soup.findAll(text=True))
text = re.sub(REGEX_IMAGE_URL, '', text)
return text
def _get_valid_link(self, url):
url = url.strip()
if url[-1] == ")":
url = url[:-1]
# unescape HTML chars
return html.unescape(url)
def _is_img_link(self, url):
m = re.match(REGEX_IMAGE_URL, url)
return m is not None
def get_links(self, regex=True):
body = self.text
if regex:
# text = re.sub('<[^<]+?>', ' ', str(self.text))
links = re.findall(URL_REGEX, body)
else:
# md -> html -> text since BeautifulSoup can extract text cleanly
html = markdown(body)
# extract links
soup = BeautifulSoup(html, "html.parser")
tags = soup.findAll("a")
links = [tag.get("href") for tag in tags]
if len(links) > 0:
links = [self._get_valid_link(link) for link in links if link is not None]
return links or []
def get_img_links(self):
body = self.get_steem_markdown()
# md -> html -> text since BeautifulSoup can extract text cleanly
html = markdown(body)
# extract links
soup = BeautifulSoup(html, "html.parser")
tags = soup.findAll("img")
links = [tag.get("src") for tag in tags]
if len(links) > 0:
links = [self._get_valid_link(link) for link in links if link is not None]
return links or []
def get_steem_markdown(self):
text = self.text
text = re.sub(r"(?P<url>" + REGEX_IMAGE_URL + r")(?P<space>\s+)", r"\g<space>", text)
return text
| 28.666667 | 136 | 0.528904 | 389 | 3,010 | 3.974293 | 0.249357 | 0.046572 | 0.042044 | 0.029107 | 0.398448 | 0.352523 | 0.352523 | 0.352523 | 0.322768 | 0.322768 | 0 | 0.009862 | 0.326246 | 3,010 | 104 | 137 | 28.942308 | 0.752465 | 0.140864 | 0 | 0.328125 | 0 | 0.015625 | 0.100857 | 0.05296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.359375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
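# Example (illustrative sketch; output values are indicative and it needs the
# same third-party deps as the module -- markdown, beautifulsoup4):
# body = "Intro text\n\n![cat](https://example.com/images/cat.jpg)\n\nMore text."
# post = SteemMarkdown(body)
# print(post.get_top_image())      # -> https://example.com/images/cat.jpg
# print(post.get_rendered_text())  # -> plain text with the image URL stripped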
e85c7b98eb4f35e61e4c247a571f302c2668c17d | 1,924 | py | Python | devel/run_fake21cmmc.py | steven-murray/py21cmmc_fg | 438bfec9b1e7fb41eb9269a5fcdc42df217d89e0 | [
"MIT"
] | null | null | null | devel/run_fake21cmmc.py | steven-murray/py21cmmc_fg | 438bfec9b1e7fb41eb9269a5fcdc42df217d89e0 | [
"MIT"
] | null | null | null | devel/run_fake21cmmc.py | steven-murray/py21cmmc_fg | 438bfec9b1e7fb41eb9269a5fcdc42df217d89e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 2 15:01:35 2018
@author: bella
"""
import sys
import numpy as np
sys.path.insert(0, '../../EoR_21cmFAST_model/Codes')
from read_21cmFAST import load_binary_data
from Cores_py21cmmc import ForegroundCore
from Likelihood_py21cmmc import ForegroundLikelihood
class ctx:
def __init__(self):
lightcone_dir = '../../../../../../../data/phd/Runs/delta_T_v3_no_halos__zstart006.00000_zend011.41539_FLIPBOXES0_300_1500Mpc_lighttravel'
self.lightcone = load_binary_data(lightcone_dir, 300)
self.boxsize = 1500
redshift_dir = '../../../../../../../data/phd/Runs/zlistInterp_1500Mpc300.txt'
redshifts = np.genfromtxt(redshift_dir, delimiter=',')[:300+1]
# redshifts = (np.diff(redshifts)/2+redshifts[:-1])
self.redshifts = redshifts
    def get(self, key):
        if key == "lightcone":
            return self.lightcone
        if key == "boxsize":
            return self.boxsize
        if key == "redshifts":
            return self.redshifts
        if key == "foreground_lightcone":
            return self.foreground_lightcone
        if key == "frequencies":
            return self.frequencies
        if key == "observed_power":
            return self.observed_power
        if key == "sky_size":
            return self.sky_size
    def add(self, key, data):
        if key == "foreground_lightcone":
            self.foreground_lightcone = data
        if key == "frequencies":
            self.frequencies = data
        if key == "observed_power":
            self.observed_power = data
        if key == "sky_size":
            self.sky_size = data
stuff = ctx()
fg = ForegroundCore(0.5,1)
fg(stuff, 0.5,1)
likelihood = ForegroundLikelihood(stuff)
| 26 | 146 | 0.577963 | 211 | 1,924 | 5.075829 | 0.412322 | 0.051354 | 0.033613 | 0.026144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053558 | 0.29158 | 1,924 | 73 | 147 | 26.356164 | 0.732208 | 0.079002 | 0 | 0.190476 | 0 | 0 | 0.194665 | 0.11975 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.119048 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e85d27e18953cc5de4385d1d537a1384caa51850 | 2,388 | py | Python | audio_converter/__init__.py | mac641/audio-converter | abd9584a7a6b76285654f5647455e37776045d0c | [
"MIT"
] | null | null | null | audio_converter/__init__.py | mac641/audio-converter | abd9584a7a6b76285654f5647455e37776045d0c | [
"MIT"
] | null | null | null | audio_converter/__init__.py | mac641/audio-converter | abd9584a7a6b76285654f5647455e37776045d0c | [
"MIT"
] | null | null | null | import logging
from flask import Flask, request, g, redirect, url_for
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.menu import MenuLink
from flask_babelex import Babel
from flask_dropzone import Dropzone
from flask_mail import Mail
from flask_migrate import Migrate
from flask_security import Security, SQLAlchemyUserDatastore
from flask_sqlalchemy import SQLAlchemy
import system_config
app = Flask(__name__)
app.config.from_object(system_config)
# Instantiate logging
logging.basicConfig(filename='media/audio-converter.log', level=logging.DEBUG,
format=f'%(asctime)s %(levelname)s %(name)s : %(message)s')
app.logger.info('Set up database...')
db = SQLAlchemy(app)
app.logger.info('Set up migrate...')
migrate = Migrate(app, db)
from audio_converter import models
app.logger.info('Initialize database...')
user_datastore = SQLAlchemyUserDatastore(db, models.User, models.Role)
app.logger.info('Set up security...')
security = Security(app=app, datastore=user_datastore, register_blueprint=False)
# This class must be defined before Admin is instantiated; it removes the default Home entry from the admin menu.
class DashboardView(AdminIndexView):
def is_visible(self):
return False
@expose('/')
def index(self):
return self.render(
'/admin/master.html'
)
app.logger.info('Set up admin...')
admin = Admin(app, name='Admin Audio-Converter', template_mode='bootstrap3', index_view=DashboardView())
admin.add_link(MenuLink(name='Home', url='/'))
from audio_converter import admin_models
from audio_converter.blueprints.multilingual import routes, multilingual
app.register_blueprint(multilingual)
# Set up mail
app.logger.info('Set up mail...')
mail = Mail(app)
# Set up babel
app.logger.info('Set up babel...')
babel = Babel(app)
@babel.localeselector
def get_locale():
if not g.get('lang_code', None):
g.lang_code = request.accept_languages.best_match(app.config['LANGUAGES'])
app.logger.info('Import language codes...')
return g.lang_code
@app.route('/')
def home():
g.lang_code = request.accept_languages.best_match(app.config['LANGUAGES'])
return redirect(url_for('multilingual.index'))
# link: https://medium.com/@nicolas_84494/flask-create-a-multilingual-web-application-with-language-specific-urls-5d994344f5fd
# Flask-Dropzone
dropzone = Dropzone(app)
| 28.771084 | 126 | 0.747906 | 320 | 2,388 | 5.459375 | 0.359375 | 0.046365 | 0.059531 | 0.054951 | 0.12822 | 0.0664 | 0.0664 | 0.0664 | 0.0664 | 0.0664 | 0 | 0.006763 | 0.133166 | 2,388 | 82 | 127 | 29.121951 | 0.837198 | 0.115159 | 0 | 0.037736 | 0 | 0 | 0.150522 | 0.011871 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.283019 | 0.037736 | 0.45283 | 0.056604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e85fddfb91b1a2176f222671f32f20f8ba0e1902 | 827 | py | Python | nise-populator/app.py | chambridge/nise-populator | 30bfe7bb63cdf89ca86c0a47ffc7f907df18f903 | [
"MIT"
] | null | null | null | nise-populator/app.py | chambridge/nise-populator | 30bfe7bb63cdf89ca86c0a47ffc7f907df18f903 | [
"MIT"
] | 6 | 2021-04-30T21:10:30.000Z | 2021-08-12T01:11:54.000Z | nise-populator/app.py | chambridge/nise-populator | 30bfe7bb63cdf89ca86c0a47ffc7f907df18f903 | [
"MIT"
] | 2 | 2021-01-28T20:03:55.000Z | 2022-03-01T18:22:41.000Z | import logging
import os
import sys
import yaml
from sources.source_factory import SourceFactory
from utils import get_static_file_path
from utils import load_yaml_file
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
root.addHandler(handler)
LOG = logging.getLogger(__name__)
sources = None
sources_config = os.environ.get("SOURCES_CONFIG")
if sources_config:
    sources = yaml.safe_load(sources_config)
else:
# Load default sources list
default_sources_path = get_static_file_path("default_sources.yaml")
sources = load_yaml_file(default_sources_path)
factory = SourceFactory(sources)
factory.process()
| 22.351351 | 71 | 0.783555 | 108 | 827 | 5.777778 | 0.37037 | 0.083333 | 0.048077 | 0.054487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120919 | 827 | 36 | 72 | 22.972222 | 0.858322 | 0.03023 | 0 | 0 | 0 | 0 | 0.1075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.269231 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e861ff45f5576dd05916ac9bff6dd942a77f9c19 | 2,102 | py | Python | CNN/ResNet/train.py | JimCurryWang/Deep-Learning-Jot | b72e36b54089f7a8b92409b69b7187e84103f76e | [
"MIT"
] | null | null | null | CNN/ResNet/train.py | JimCurryWang/Deep-Learning-Jot | b72e36b54089f7a8b92409b69b7187e84103f76e | [
"MIT"
] | null | null | null | CNN/ResNet/train.py | JimCurryWang/Deep-Learning-Jot | b72e36b54089f7a8b92409b69b7187e84103f76e | [
"MIT"
] | null | null | null | from ResNet import Block
from ResNet import ResNet_test
from ResNet import ResNet50, ResNet101, ResNet152
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Check device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load the data
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
# Load train and test sets
train_data = torchvision.datasets.CIFAR10(
    root='../CIFAR10',
    train=True,
    download=True,
    transform=transform
)
test_data = torchvision.datasets.CIFAR10(
    root='../CIFAR10',
    train=False,
    download=True,
    transform=transform
)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=128, shuffle=False)
# Model, optimizer and loss function
model = ResNet101(img_channel=3, num_classes=10)  # CIFAR-10 has 10 classes
model = model.to(device)  # move the parameters to the selected device
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.9)
loss_function = nn.CrossEntropyLoss()
# training process
epochs = 2
for epoch in range(epochs):
closs = 0
for i,batch in enumerate(train_loader):
inputs, output = batch
inputs = inputs.to(device)
output = output.to(device)
# Forward
prediction = model(inputs)
# Backward
optimizer.zero_grad()
loss = loss_function(prediction, output)
        closs += loss.item()  # accumulate loss over the reporting window
loss.backward()
optimizer.step()
        # Report the (roughly) average loss every 100 batches
if i%100 == 0:
print('[{}/{}] Loss: {}'.format(epoch+1,epochs,closs/100))
closs = 0
    # Evaluate on the test set at the end of each epoch
    model.eval()
    correctHits = 0
    total = 0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            inputs, output = batch
            inputs = inputs.to(device)
            output = output.to(device)
            # Forward
            prediction = model(inputs)
            # returns max as well as its index
            _, prediction = torch.max(prediction.data, 1)
            total += output.size(0)
            correctHits += (prediction == output).sum().item()
    model.train()
print('Accuracy on epoch ',epoch+1,'= ',str((correctHits/total)*100)) | 26.948718 | 76 | 0.633206 | 261 | 2,102 | 5.045977 | 0.402299 | 0.009112 | 0.01139 | 0.015186 | 0.259681 | 0.259681 | 0.22779 | 0.22779 | 0.22779 | 0.22779 | 0 | 0.041985 | 0.252141 | 2,102 | 78 | 77 | 26.948718 | 0.795802 | 0.089914 | 0 | 0.2 | 0 | 0 | 0.028902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.14 | 0 | 0.14 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e865044dd06357bb318b97c09f3514c2b5a82814 | 4,861 | py | Python | examples/keras_cifar10.py | mjmikulski/elefas | 423e7180bd06eb4c51df4d22c3907e2a8423a4ea | [
"MIT"
] | 2 | 2018-02-22T17:46:13.000Z | 2020-03-30T12:49:32.000Z | examples/keras_cifar10.py | mjmikulski/elefas | 423e7180bd06eb4c51df4d22c3907e2a8423a4ea | [
"MIT"
] | null | null | null | examples/keras_cifar10.py | mjmikulski/elefas | 423e7180bd06eb4c51df4d22c3907e2a8423a4ea | [
"MIT"
] | null | null | null | '''
Example from keras
'''
import os
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from elefas.hyperparameters import Choice, Linear, Exponential, Boolean
from elefas.spaces import Random
SAVE_DIR = os.path.join(os.getcwd(), 'saved_models')
NUM_CLASSES = 10
# Load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# x_train = x_train[:2000]
# y_train = y_train[:2000]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Prepare hyper-parameters
space = Random(10)
space.add(Exponential('batch_size', 8, 128))
space.add(Exponential('lr', 0.00001, 0.001))
space.add(Linear('epochs', 20, 200))
space.add(Choice(['conv_activation', 'dense_activation'], ['relu', 'tanh', 'sigmoid']))
# space.add(Boolean('data_augmentation'))
data_augmentation = False
space.compile()
best_accuracy = 0
best_p = None
for p in space:
print('Exploring: ', p)
model_name = 'keras_cifar10_trained_model-{:04d}.h5'.format(space.n_explored)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation(p['conv_activation']))
model.add(Conv2D(32, (3, 3)))
model.add(Activation(p['conv_activation']))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation(p['conv_activation']))
model.add(Conv2D(64, (3, 3)))
model.add(Activation(p['conv_activation']))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation(p['dense_activation']))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=p['lr'], decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=p['batch_size'],
epochs=p['epochs'],
validation_data=(x_test, y_test),
shuffle=True,
verbose=2)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=p['batch_size']),
epochs=p['epochs'],
validation_data=(x_test, y_test),
workers=4)
# Save model and weights
os.makedirs(SAVE_DIR, exist_ok=True)
model_path = os.path.join(SAVE_DIR, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
accuracy = scores[1]
print('Test loss:', scores[0])
print('Test accuracy:', accuracy)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_p = p
print('This is new best accuracy')
else:
print('Best accuracy so far is {} for {}'.format(best_accuracy, best_p))
space.summary()
| 33.524138 | 98 | 0.658506 | 647 | 4,861 | 4.795981 | 0.306028 | 0.046407 | 0.034805 | 0.030616 | 0.178537 | 0.163068 | 0.152755 | 0.132775 | 0.132775 | 0.102481 | 0 | 0.029388 | 0.222999 | 4,861 | 144 | 99 | 33.756944 | 0.792163 | 0.177124 | 0 | 0.142857 | 0 | 0 | 0.126952 | 0.015365 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.091837 | 0 | 0.091837 | 0.112245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |

# --- file: distarray/metadata_utils.py | repo: sjperkins/distarray | license: BSD-3-Clause ---
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
Utility functions for dealing with DistArray metadata.
"""
from __future__ import division
import operator
from itertools import product
from functools import reduce
from numbers import Integral
try:  # Python >= 3.3: the ABCs live in collections.abc.
    from collections.abc import Sequence, Mapping
except ImportError:  # Python 2 fallback.
    from collections import Sequence, Mapping
import numpy
from distarray import utils
from distarray.externals.six import next
from distarray.externals.six.moves import map, zip
# Register numpy integer types with numbers.Integral ABC.
Integral.register(numpy.signedinteger)
Integral.register(numpy.unsignedinteger)


class InvalidGridShapeError(Exception):
    """Exception class when the grid shape is incompatible with the
    distribution or communicator."""
    pass


class GridShapeError(Exception):
    """Exception class when it is not possible to distribute the processes
    over the number of dimensions."""
    pass


def check_grid_shape_preconditions(shape, dist, comm_size):
    """
    Verify various distarray parameters are correct before making a grid_shape.
    """
    if comm_size < 1:
        raise ValueError("comm_size >= 1 not satisfied, comm_size = %s" %
                         (comm_size,))
    if len(shape) != len(dist):
        raise ValueError("len(shape) == len(dist) not satisfied, len(shape) ="
                         " %s and len(dist) = %s" % (len(shape), len(dist)))
    if any(i < 0 for i in shape):
        raise ValueError("shape must be a sequence of non-negative integers, "
                         "shape = %s" % (shape,))
    if any(i not in ('b', 'c', 'n', 'u') for i in dist):
        raise ValueError("dist must be a sequence of 'b', 'n', 'c', 'u' "
                         "strings, dist = %s" % (dist,))


def check_grid_shape_postconditions(grid_shape, shape, dist, comm_size):
    """Check grid_shape for reasonableness after creating it."""
    if not (len(grid_shape) == len(shape) == len(dist)):
        raise ValueError("len(grid_shape) == len(shape) == len(dist) not "
                         "satisfied, len(grid_shape) = %s and len(shape) = %s "
                         "and len(dist) = %s" % (len(grid_shape), len(shape),
                                                 len(dist)))
    if any(gs < 1 for gs in grid_shape):
        raise ValueError("all(gs >= 1 for gs in grid_shape) not satisfied, "
                         "grid_shape = %s" % (grid_shape,))
    if any(gs != 1 for (d, gs) in zip(dist, grid_shape) if d == 'n'):
        raise ValueError("all(gs == 1 for (d, gs) in zip(dist, grid_shape) if "
                         "d == 'n') not satisfied, dist = %s and grid_shape = "
                         "%s" % (dist, grid_shape))
    if any(gs > s for (s, gs) in zip(shape, grid_shape) if s > 0):
        raise ValueError("all(gs <= s for (s, gs) in zip(shape, grid_shape) "
                         "if s > 0) not satisfied, shape = %s and grid_shape "
                         "= %s" % (shape, grid_shape))
    if reduce(operator.mul, grid_shape, 1) > comm_size:
        raise ValueError("reduce(operator.mul, grid_shape, 1) <= comm_size not"
                         " satisfied, grid_shape = %s product = %s and "
                         "comm_size = %s" % (
                             grid_shape,
                             reduce(operator.mul, grid_shape, 1),
                             comm_size))


def normalize_grid_shape(grid_shape, shape, dist, comm_size):
    """Adds 1s to `grid_shape` so it has `ndims` dimensions.  Validates the
    `grid_shape` tuple against the `dist` tuple and `comm_size`.
    """
    def check_normalization_preconditions(grid_shape, dist):
        if any(i < 0 for i in grid_shape):
            raise ValueError("grid_shape must be a sequence of non-negative "
                             "integers, grid_shape = %s" % (grid_shape,))
        if len(grid_shape) > len(dist):
            raise ValueError("len(grid_shape) <= len(dist) not satisfied, "
                             "len(grid_shape) = %s and len(dist) = %s" %
                             (len(grid_shape), len(dist)))
    check_grid_shape_preconditions(shape, dist, comm_size)
    check_normalization_preconditions(grid_shape, dist)
    ndims = len(shape)
    grid_shape = tuple(grid_shape) + (1,) * (ndims - len(grid_shape))
    if len(grid_shape) != len(dist):
        msg = "grid_shape's length (%d) not equal to dist's length (%d)"
        raise InvalidGridShapeError(msg % (len(grid_shape), len(dist)))
    if reduce(operator.mul, grid_shape, 1) > comm_size:
        msg = "grid shape %r not compatible with comm size of %d."
        raise InvalidGridShapeError(msg % (grid_shape, comm_size))
    return grid_shape
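
# A quick illustration (values are hypothetical, not from the original docs):
# a partial grid shape is padded with 1s to match the array's dimensionality,
# then validated against dist and comm_size.
#   >>> normalize_grid_shape((2,), (6, 8), ('b', 'b'), 4)
#   (2, 1)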


def make_grid_shape(shape, dist, comm_size):
    """Generate a `grid_shape` from a `shape` tuple and a `dist` tuple.

    Does not assume that `dim_data` has `proc_grid_size` set for each
    dimension.  Attempts to allocate processes optimally for distributed
    dimensions.

    Parameters
    ----------
    shape : tuple of int
        The global shape of the array.
    dist : tuple of str
        dist_type character per dimension.
    comm_size : int
        Total number of processes to distribute.

    Returns
    -------
    dist_grid_shape : tuple of int

    Raises
    ------
    GridShapeError
        If it is not possible to distribute `comm_size` processes over the
        number of dimensions.
    """
    check_grid_shape_preconditions(shape, dist, comm_size)
    distdims = tuple(i for (i, v) in enumerate(dist) if v != 'n')
    ndistdim = len(distdims)
    if ndistdim == 0:
        dist_grid_shape = ()
    elif ndistdim == 1:
        # Trivial case: all processes used for the one distributed dimension.
        if comm_size >= shape[distdims[0]]:
            dist_grid_shape = (shape[distdims[0]],)
        else:
            dist_grid_shape = (comm_size,)
    elif comm_size == 1:
        # Trivial case: only one process to distribute over!
        dist_grid_shape = (1,) * ndistdim
    else:  # Main case: comm_size > 1, ndistdim > 1.
        factors = utils.mult_partitions(comm_size, ndistdim)
        if not factors:  # Can't factorize appropriately.
            raise GridShapeError("Cannot distribute array over processors.")
        reduced_shape = [shape[i] for i in distdims]
        # Reorder factors so they match the relative ordering in reduced_shape.
        factors = [utils.mirror_sort(f, reduced_shape) for f in factors]
        # Pick the "best" factoring from `factors` according to which matches
        # the ratios among the dimensions in `shape`.
        rs_ratio = _compute_grid_ratios(reduced_shape)
        f_ratios = [_compute_grid_ratios(f) for f in factors]
        distances = [rs_ratio - f_ratio for f_ratio in f_ratios]
        norms = numpy.array([numpy.linalg.norm(d, 2) for d in distances])
        index = norms.argmin()
        # We now have the grid shape for the distributed dimensions.
        dist_grid_shape = tuple(int(i) for i in factors[index])
    # Create the grid_shape, all 1s for now.
    grid_shape = [1] * len(shape)
    # Fill grid_shape in the distdim slots using dist_grid_shape.
    it = iter(dist_grid_shape)
    for distdim in distdims:
        grid_shape[distdim] = next(it)
    out_grid_shape = tuple(grid_shape)
    check_grid_shape_postconditions(out_grid_shape, shape, dist, comm_size)
    return out_grid_shape


def _compute_grid_ratios(shape):
    shape = tuple(map(float, shape))
    n = len(shape)
    ratios = []
    for (i, j) in product(range(n), range(n)):
        if i < j:
            ratios.append(shape[i] / shape[j])
    return numpy.array(ratios)
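
# For example (hypothetical input; array repr varies by numpy version), the
# pairwise ratios of (4, 2, 2), taken in index order, are 4/2, 4/2, and 2/2:
#   >>> _compute_grid_ratios((4, 2, 2))
#   array([2., 2., 1.])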


def normalize_dist(dist, ndim):
    """Return a tuple containing a dist-type for each dimension.

    Parameters
    ----------
    dist : str, list, tuple, or dict
    ndim : int

    Returns
    -------
    tuple of str
        Contains a string distribution type for each dim.

    Examples
    --------
    >>> normalize_dist({0: 'b', 3: 'c'}, 4)
    ('b', 'n', 'n', 'c')
    """
    if isinstance(dist, Sequence):
        return tuple(dist) + ('n',) * (ndim - len(dist))
    elif isinstance(dist, Mapping):
        return tuple(dist.get(i, 'n') for i in range(ndim))
    else:
        raise TypeError("Dist must be a string, tuple, list or dict.")


def _start_stop_block(size, proc_grid_size, proc_grid_rank):
    """Return `start` and `stop` for a regularly distributed block dim."""
    nelements = size // proc_grid_size
    if size % proc_grid_size != 0:
        nelements += 1
    start = proc_grid_rank * nelements
    if start > size:
        start = size
    stop = start + nelements
    if stop > size:
        stop = size
    return start, stop
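
# A quick illustration (hypothetical values): splitting size 10 over a grid
# of 3 gives ceil-sized blocks of 4, with the last rank clamped to the
# global size.
#   >>> _start_stop_block(10, 3, 0)
#   (0, 4)
#   >>> _start_stop_block(10, 3, 2)
#   (8, 10)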


def distribute_block_indices(dd):
    """Fill in `start` and `stop` in dim dict `dd`."""
    if ('start' in dd) and ('stop' in dd):
        return
    else:
        dd['start'], dd['stop'] = _start_stop_block(dd['size'],
                                                    dd['proc_grid_size'],
                                                    dd['proc_grid_rank'])


def distribute_cyclic_indices(dd):
    """Fill in `start` in dim dict `dd`."""
    if 'start' in dd:
        return
    else:
        dd['start'] = dd['proc_grid_rank']


def distribute_indices(dd):
    """Fill in index-related keys in dim dict `dd`."""
    dist_type = dd['dist_type']
    try:
        {'n': lambda dd: None,
         'b': distribute_block_indices,
         'c': distribute_cyclic_indices}[dist_type](dd)
    except KeyError:
        msg = "dist_type %r not supported."
        raise TypeError(msg % dist_type)
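
# A small illustration (hypothetical dim dict): a block-distributed dimension
# of global size 10 on rank 1 of a 2-process grid gets the second half.
#   >>> dd = {'dist_type': 'b', 'size': 10,
#   ...       'proc_grid_size': 2, 'proc_grid_rank': 1}
#   >>> distribute_indices(dd)
#   >>> dd['start'], dd['stop']
#   (5, 10)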


def normalize_dim_dict(dd):
    """Fill out some degenerate dim_dicts."""
    # TODO: Fill out empty dim_dict alias here?
    if dd['dist_type'] == 'n':
        dd['proc_grid_size'] = 1
        dd['proc_grid_rank'] = 0


def _positivify(index, size):
    """Return a positive index offset from a Sequence's start."""
    if index is None or index >= 0:
        return index
    elif index < 0:
        return size + index


def _check_bounds(index, size):
    """Check if an index is in bounds.

    Assumes a positive index as returned by _positivify.
    """
    if not 0 <= index < size:
        raise IndexError("Index %r out of bounds" % index)


def tuple_intersection(t0, t1):
    """Compute the intersection of a (start, stop, step) tuple and a
    (start, stop) tuple.

    Assumes all values are positive.

    Parameters
    ----------
    t0 : 2-tuple or 3-tuple
        Tuple of (start, stop, [step]) representing an index range.
    t1 : 2-tuple
        Tuple of (start, stop) representing an index range.

    Returns
    -------
    3-tuple or None
        A tightly bounded interval.
    """
    if len(t0) == 2 or t0[2] is None:
        # The default step is 1.
        t0 = (t0[0], t0[1], 1)
    start0, stop0, step0 = t0
    start1, stop1 = t1
    if start0 < start1:
        n = int(numpy.ceil((start1 - start0) / step0))
        start2 = start0 + n * step0
    else:
        start2 = start0
    max_stop = min(t0[1], t1[1])
    if (max_stop - start2) % step0 == 0:
        n = ((max_stop - start2) // step0) - 1
    else:
        n = (max_stop - start2) // step0
    stop2 = (start2 + n * step0) + 1
    return (start2, stop2, step0) if stop2 > start2 else None
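
# For example (hypothetical ranges): intersecting the strided range
# (0, 10, 3), i.e. {0, 3, 6, 9}, with the interval (4, 20) keeps {6, 9},
# returned as a tightly bounded strided tuple; disjoint ranges yield None.
#   >>> tuple_intersection((0, 10, 3), (4, 20))
#   (6, 10, 3)
#   >>> tuple_intersection((0, 4), (6, 9)) is None
#   True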


def positivify(index, size):
    """Check that an index is within bounds and return a positive version.

    Parameters
    ----------
    index : Integral or slice
    size : Integral

    Raises
    ------
    IndexError
        For out-of-bounds indices.
    """
    if isinstance(index, Integral):
        index = _positivify(index, size)
        _check_bounds(index, size)
        return index
    elif isinstance(index, slice):
        start = _positivify(index.start, size)
        stop = _positivify(index.stop, size)
        # Slice indexing doesn't check bounds.
        return slice(start, stop, index.step)
    else:
        raise TypeError("`index` must be of type Integral or slice.")
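
# A quick illustration (hypothetical values): negative indices are offset
# from the end, and only Integral indices are bounds-checked.
#   >>> positivify(-1, 5)
#   4
#   >>> positivify(slice(-3, None), 5)
#   slice(2, None, None)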


def sanitize_indices(indices, ndim=None, shape=None):
    """Classify and sanitize `indices`.

    * Wrap naked Integral, slice, or Ellipsis indices into tuples.
    * Classify the result as 'value' or 'view'.
    * Expand `Ellipsis` objects to slices.
    * If the length of the tuple-ized `indices` is < ndim (and it's
      provided), add slice(None)'s to `indices` until it is ndim long.
    * If `shape` is provided, call `positivify` on the indices.

    Raises
    ------
    IndexError
        If `indices` is other than Integral, slice, or a Sequence of these,
        or if len(indices) > ndim.

    Returns
    -------
    2-tuple of (str, n-tuple of slices and Integral values)
    """
    if isinstance(indices, Integral):
        rtype, sanitized = 'value', (indices,)
    elif isinstance(indices, slice) or indices is Ellipsis:
        rtype, sanitized = 'view', (indices,)
    elif all(isinstance(i, Integral) for i in indices):
        rtype, sanitized = 'value', indices
    elif all(isinstance(i, Integral)
             or isinstance(i, slice)
             or i is Ellipsis for i in indices):
        rtype, sanitized = 'view', indices
    else:
        msg = ("Index must be an Integral, a slice, or a sequence of "
               "Integrals and slices.")
        raise IndexError(msg)
    if Ellipsis in sanitized:
        if ndim is None:
            raise RuntimeError("Can't call `sanitize_indices` on Ellipsis "
                               "without providing `ndim`.")
        # Expand the first Ellipsis.
        diff = ndim - (len(sanitized) - 1)
        filler = (slice(None),) * diff
        epos = sanitized.index(Ellipsis)
        sanitized = sanitized[:epos] + filler + sanitized[epos + 1:]

        # Remaining Ellipsis objects are just converted to slices.
        def replace_ellipsis(idx):
            if idx is Ellipsis:
                return slice(None)
            else:
                return idx
        sanitized = tuple(replace_ellipsis(i) for i in sanitized)
    if ndim is not None:
        diff = ndim - len(sanitized)
        if diff < 0:
            raise IndexError("Too many indices.")
        if diff > 0:
            # Allow incomplete indexing.
            rtype = 'view'
            sanitized = sanitized + (slice(None),) * diff
    if shape is not None:
        sanitized = tuple(positivify(i, size) for (i, size) in zip(sanitized,
                                                                   shape))
    return (rtype, sanitized)
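
# An illustration (hypothetical indices): a bare integer against a 2-D array
# becomes a partial index, so the result is classified as a 'view'.
#   >>> sanitize_indices(2, ndim=2)
#   ('view', (2, slice(None, None, None)))
#   >>> sanitize_indices((Ellipsis, 3), ndim=3)
#   ('view', (slice(None, None, None), slice(None, None, None), 3))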


def normalize_reduction_axes(axes, ndim):
    if axes is None:
        axes = tuple(range(ndim))
    elif not isinstance(axes, Sequence):
        axes = (positivify(axes, ndim),)
    else:
        axes = tuple(positivify(a, ndim) for a in axes)
    return axes


# Functions for getting a size from a dim_data for each dist_type.

# n
def non_dist_size(dim_data):
    """Get a size from a nondistributed dim_data."""
    return dim_data['size']


# b
def block_size(dim_data):
    """Get a size from a block-distributed dim_data."""
    stop = dim_data['stop']
    start = dim_data['start']
    return stop - start


# Choose cyclic or block-cyclic based on block size.  This is necessary
# because they have the same dist_type character.
def c_or_bc_chooser(dim_data):
    """Get a size from a cyclic or block-cyclic dim_data."""
    block_size = dim_data.get('block_size', 1)
    if block_size == 1:
        return cyclic_size(dim_data)
    elif block_size > 1:
        return block_cyclic_size(dim_data)
    else:
        raise ValueError("block_size %s is invalid" % block_size)


# c
def cyclic_size(dim_data):
    """Get a size from a cyclic dim_data."""
    global_size = dim_data['size']
    grid_rank = dim_data.get('proc_grid_rank', 0)
    grid_size = dim_data.get('proc_grid_size', 1)
    return (global_size - 1 - grid_rank) // grid_size + 1


# c (block-cyclic shares the 'c' dist_type character)
def block_cyclic_size(dim_data):
    """Get a size from a block-cyclic dim_data."""
    global_size = dim_data['size']
    block_size = dim_data.get('block_size', 1)
    grid_size = dim_data.get('proc_grid_size', 1)
    grid_rank = dim_data.get('proc_grid_rank', 0)
    global_nblocks, partial = divmod(global_size, block_size)
    local_partial = partial if grid_rank == 0 else 0
    local_nblocks = (global_nblocks - 1 - grid_rank) // grid_size + 1
    return local_nblocks * block_size + local_partial
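
# A sanity check (hypothetical dim dicts): with 10 elements cycled over a
# 4-process grid, rank 1 owns indices {1, 5, 9}; with block size 2 over a
# 2-process grid, rank 0 owns blocks {0, 2, 4}, i.e. 6 elements.
#   >>> cyclic_size({'size': 10, 'proc_grid_rank': 1, 'proc_grid_size': 4})
#   3
#   >>> block_cyclic_size({'size': 10, 'block_size': 2,
#   ...                    'proc_grid_size': 2, 'proc_grid_rank': 0})
#   6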


# u
def unstructured_size(dim_data):
    """Get a size from an unstructured dim_data."""
    return len(dim_data.get('indices', None))


def size_from_dim_data(dim_data):
    """
    Get a size from a dim_data.
    """
    return size_chooser(dim_data['dist_type'])(dim_data)


def size_chooser(dist_type):
    """
    Get a size function from a dist_type.
    """
    chooser = {'n': non_dist_size,
               'b': block_size,
               'c': c_or_bc_chooser,
               'u': unstructured_size}
    return chooser[dist_type]


def shapes_from_dim_data_per_rank(ddpr):  # ddpr = dim_data_per_rank
    """
    Given a dim_data_per_rank object, return the shapes of the localarrays.

    This requires no communication.
    """
    # Create the list of shapes.
    shape_list = []
    for rank_dd in ddpr:
        shape = []
        for dd in rank_dd:
            shape.append(size_from_dim_data(dd))
        shape_list.append(tuple(shape))
    return shape_list
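
# An illustration (a hypothetical dim_data_per_rank for a 1-D block
# distribution of 10 elements over two ranks):
#   >>> ddpr = [({'dist_type': 'b', 'size': 10, 'start': 0, 'stop': 5},),
#   ...         ({'dist_type': 'b', 'size': 10, 'start': 5, 'stop': 10},)]
#   >>> shapes_from_dim_data_per_rank(ddpr)
#   [(5,), (5,)]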


# ----------------------------------------------------------------------------
# Redistribution-related utilities.
# ----------------------------------------------------------------------------

def _accum(start, next):
    return tuple(s * next for s in start) + (next,)


def strides_from_shape(shape):
    return reduce(_accum, tuple(shape[1:]) + (1,), ())


def ndim_from_flat(flat, strides):
    res = []
    for st in strides:
        res.append(flat // st)
        flat %= st
    return tuple(res)
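
# A round-trip check (hypothetical shape): row-major element strides for a
# (2, 3, 4) array are (12, 4, 1), and flat offset 23 maps back to (1, 2, 3)
# since 1*12 + 2*4 + 3*1 == 23.
#   >>> strides_from_shape((2, 3, 4))
#   (12, 4, 1)
#   >>> ndim_from_flat(23, (12, 4, 1))
#   (1, 2, 3)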


def _squeeze(accum, next):
    last = accum[-1]
    if not last:
        return [next]
    elif last[-1] != next[0]:
        return accum + [next]
    elif last[-1] == next[0]:
        return accum[:-1] + [(last[0], next[-1])]


def condense(intervals):
    intervals = reduce(_squeeze, intervals, [[]])
    return intervals
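
# For example (hypothetical intervals), adjacent (start, stop) pairs are
# merged while gaps are preserved:
#   >>> condense([(0, 2), (2, 5), (7, 9)])
#   [(0, 5), (7, 9)]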


# ----------------------------------------------------------------------------
# `apply` related utilities.
# ----------------------------------------------------------------------------

def arg_kwarg_proxy_converter(args, kwargs, module_name='__main__'):
    from importlib import import_module
    module = import_module(module_name)
    # Convert args.
    # In some situations, like redistributing a DistArray from one set of
    # targets to a disjoint set, the source and destination DistArrays (and
    # associated LocalArrays) are in different communicators with different
    # targets.  In those cases, it is possible for a proxy object for one
    # DistArray to not refer to anything on this target.  In that case,
    # `a.dereference()` raises an `AttributeError`.  We intercept that here
    # and assign `None` instead.
    args = list(args)
    for i, a in enumerate(args):
        if isinstance(a, module.Proxy):
            try:
                args[i] = a.dereference()
            except AttributeError:
                args[i] = None
    args = tuple(args)
    # Convert kwargs in the same way.
    for k in kwargs.keys():
        val = kwargs[k]
        if isinstance(val, module.Proxy):
            try:
                kwargs[k] = val.dereference()
            except AttributeError:
                kwargs[k] = None
    return args, kwargs

# --- file: ykdl/extractors/kankanews.py | repos: danxinshang/python, hpuyj/ykdl | license: MIT ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.util.html import get_content
from ykdl.util.match import match1, matchall


class KankanNews(VideoExtractor):
    name = u'看看新闻 (kankannews)'

    def prepare(self):
        info = VideoInfo(self.name)

        # Try to read the XML id straight from the article URL first.
        id1 = match1(self.url, r'a/([^\.]+)\.')
        api1 = 'http://www.kankanews.com/vxml/{}.xml'.format(id1)
        video_data1 = get_content(api1)
        self.vid = match1(video_data1, r'<omsid>([^<]+)<')

        # Fall back to scraping the page for an embedded XML id.
        if self.vid == '0' or not self.vid:
            html = get_content(self.url)
            id1 = match1(html, r'xmlid=([^\"]+)') or \
                match1(html, r'embed/([^\"]+)').replace('_', '/')
            api1 = 'http://www.kankanews.com/vxml/{}.xml'.format(id1)
            video_data1 = get_content(api1)
            self.vid = match1(video_data1, r'<omsid>([^<]+)<')

        assert self.vid != '0' and self.vid, \
            self.url + ': Not a video news link!'

        api2 = 'http://v.kankanews.com/index.php?app=api&mod=public&act=getvideo&id={}'.format(self.vid)
        video_data2 = get_content(api2)
        urls = matchall(video_data2, [r'<videourl><!\[CDATA\[([^\]]+)'])
        info.title = match1(video_data2, r'<otitle><!\[CDATA\[([^\]]+)')
        info.stream_types.append('current')
        info.streams['current'] = {'container': 'mp4',
                                   'video_profile': 'current',
                                   'src': urls,
                                   'size': 0}
        return info


site = KankanNews()
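
# A hypothetical usage sketch (assuming ykdl's usual extractor entry point,
# where the downloader calls `parser(url)` and that drives `prepare()`; the
# URL below is illustrative only, not a real article):
#   info = site.parser('http://www.kankanews.com/a/some-article.shtml')
#   print(info.title, info.streams['current']['src'])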

# --- file: functions/iptables_base.py | repo: MatthewDavidMiller/Bash_Python_Common_Functions | license: MIT ---
import subprocess
import re


def iptables_setup_base():
    # Allow established connections.
    subprocess.call(['iptables', '-A', 'INPUT', '-m', 'conntrack',
                     '--ctstate', 'ESTABLISHED,RELATED', '-j', 'ACCEPT'])
    subprocess.call(['ip6tables', '-A', 'INPUT', '-m', 'conntrack',
                     '--ctstate', 'ESTABLISHED,RELATED', '-j', 'ACCEPT'])

    # Save rules.
    with open('/etc/iptables/rules.v4', "w") as opened_file:
        subprocess.call(['iptables-save'], stdout=opened_file)
    with open('/etc/iptables/rules.v6', "w") as opened_file:
        subprocess.call(['ip6tables-save'], stdout=opened_file)


def iptables_set_defaults():
    # Drop inbound by default.
    subprocess.call(['iptables', '-P', 'INPUT', 'DROP'])
    subprocess.call(['ip6tables', '-P', 'INPUT', 'DROP'])

    # Allow outbound by default.
    subprocess.call(['iptables', '-P', 'OUTPUT', 'ACCEPT'])
    subprocess.call(['ip6tables', '-P', 'OUTPUT', 'ACCEPT'])

    # Drop forwarding by default.
    subprocess.call(['iptables', '-P', 'FORWARD', 'DROP'])
    subprocess.call(['ip6tables', '-P', 'FORWARD', 'DROP'])

    # Save rules.
    with open('/etc/iptables/rules.v4', "w") as opened_file:
        subprocess.call(['iptables-save'], stdout=opened_file)
    with open('/etc/iptables/rules.v6', "w") as opened_file:
        subprocess.call(['ip6tables-save'], stdout=opened_file)


def iptables_allow_forwarding():
    allow_forwarding_ipv4_regex = str('.*' + r'net.ipv4.ip_forward=' + '.*')
    allow_forwarding_ipv4_replace = str(r'net.ipv4.ip_forward=1')
    allow_forwarding_ipv6_regex = str(
        '.*' + r'net.ipv6.conf.all.forwarding=' + '.*')
    allow_forwarding_ipv6_replace = str(r'net.ipv6.conf.all.forwarding=1')

    # Allow IPv4 forwarding.  Rewrite any existing ip_forward line in place
    # (commented or not), write every other line back unchanged, and append
    # the setting only if no line matched.
    with open('/etc/sysctl.conf', "r") as opened_file:
        lines = opened_file.readlines()
    found = False
    with open('/etc/sysctl.conf', "w") as opened_file:
        for line in lines:
            if re.match(allow_forwarding_ipv4_regex, line):
                found = True
            # re.sub leaves the trailing newline intact because '.' does not
            # match '\n'.
            opened_file.write(
                re.sub(allow_forwarding_ipv4_regex,
                       allow_forwarding_ipv4_replace, line))
        if not found:
            opened_file.write(allow_forwarding_ipv4_replace + '\n')

    # Allow IPv6 forwarding, with the same rewrite-or-append logic.
    with open('/etc/sysctl.conf', "r") as opened_file:
        lines = opened_file.readlines()
    found = False
    with open('/etc/sysctl.conf', "w") as opened_file:
        for line in lines:
            if re.match(allow_forwarding_ipv6_regex, line):
                found = True
            opened_file.write(
                re.sub(allow_forwarding_ipv6_regex,
                       allow_forwarding_ipv6_replace, line))
        if not found:
            opened_file.write(allow_forwarding_ipv6_replace + '\n')
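

# Hypothetical driver (not part of the original module): these functions
# mutate live firewall state and /etc/sysctl.conf, so they must run as root.
# The established-connections rule is installed before the INPUT policy is
# set to DROP so that an active remote session is not cut off.
if __name__ == '__main__':
    iptables_setup_base()
    iptables_set_defaults()
    iptables_allow_forwarding()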