hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daad3d4644a92884bdcd19bcc887b15d6229fc69 | 849 | py | Python | mundo3-EstruturasCompostas/102-funcaoFatorial.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | mundo3-EstruturasCompostas/102-funcaoFatorial.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | mundo3-EstruturasCompostas/102-funcaoFatorial.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | # Python Exercise 102:
# Create a program with a fatorial() function
# that takes two parameters:
# the first indicating the number whose factorial to compute, and
# another called show, an optional boolean value
# indicating whether the factorial calculation
# should be displayed on screen.
def fatorial(n, show=False):
    '''
    -> computes the factorial of a number
    :param n: the number whose factorial to compute
    :param show: (optional) whether to display the calculation (False/True)
    :return: the factorial of the number
    '''
    fatorial = [i for i in range(1, 1 + n)]
    soma = 1
    resposta = ''
    for i in fatorial:
        soma *= i
    if show:
        for i in fatorial:
            resposta += f'{i} X '
    return f'{resposta[:-2]}= {soma}' if resposta else soma
print(fatorial(5, True))
print(fatorial(9))
help(fatorial)
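# Expected output of the two print() calls above:
#   1 X 2 X 3 X 4 X 5 = 120
#   362880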
| 28.3 | 61 | 0.632509 | 126 | 849 | 4.261905 | 0.531746 | 0.022346 | 0.03352 | 0.052142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014469 | 0.267373 | 849 | 29 | 62 | 29.275862 | 0.848875 | 0.526502 | 0 | 0.153846 | 0 | 0 | 0.081522 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.153846 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
daaeeb623720fda94eff96a09b813f98fd47e55e | 4,048 | py | Python | facebook_data_analysis/conversation_analysis/friends_network.py | Anogio/facebook_data_analysis | a43a64bf4892e8bb705a96805912f726f194f319 | [
"MIT"
] | null | null | null | facebook_data_analysis/conversation_analysis/friends_network.py | Anogio/facebook_data_analysis | a43a64bf4892e8bb705a96805912f726f194f319 | [
"MIT"
] | null | null | null | facebook_data_analysis/conversation_analysis/friends_network.py | Anogio/facebook_data_analysis | a43a64bf4892e8bb705a96805912f726f194f319 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from facebook_data_analysis.conversation_analysis.activity_graphs import save_graph
from facebook_data_analysis.global_vars import messages_cols
from facebook_data_analysis.tools.helpers import cached
from sklearn.manifold import MDS
from tqdm import tqdm
@cached("people_distance")
def get_people_distances(
messages_df, conversations_df, min_messages_to_appear=10
): # pylint: disable=too-many-locals
group_conversations = conversations_df.loc[conversations_df["n_participants"] > 2][
messages_cols.conv_id
]
messages_in_group_conversations = messages_df[
messages_df[messages_cols.conv_id].isin(group_conversations.values)
]
friends_in_group_conversations = messages_in_group_conversations.groupby(
messages_cols.sender
)[messages_cols.timestamp].count()
selected_friends = friends_in_group_conversations[
friends_in_group_conversations > min_messages_to_appear
].index.values
messages_in_group_conversations = messages_in_group_conversations[
messages_in_group_conversations[messages_cols.sender].isin(selected_friends)
]
participants_list_by_conversation = messages_in_group_conversations.groupby(
messages_cols.conv_id
)[messages_cols.sender].unique()
messages_by_person_by_conversation = messages_in_group_conversations.groupby(
[messages_cols.conv_id, messages_cols.sender]
)[messages_cols.timestamp].count()
total_messages_by_person = messages_in_group_conversations.groupby(
[messages_cols.sender]
)[messages_cols.timestamp].count()
print("Counting common conversation messages for all friends...")
co_occurrence = pd.DataFrame(0, index=selected_friends, columns=selected_friends)
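    # For each group conversation, credit every pair of participants with the
    # messages the two of them sent there; the matrix is filled symmetrically.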
for conversation in tqdm(participants_list_by_conversation.index):
participants = participants_list_by_conversation[conversation]
messages_by_person = messages_by_person_by_conversation[conversation]
for participant1 in range( # pylint: disable=consider-using-enumerate # TODO
len(participants)
):
for participant2 in range(participant1, len(participants)):
exchanged_messages = (
messages_by_person[participants[participant1]]
+ messages_by_person[participants[participant2]]
)
co_occurrence.loc[
participants[participant1], participants[participant2]
] += exchanged_messages
if participant1 != participant2:
co_occurrence.loc[
participants[participant2], participants[participant1]
] += exchanged_messages
print()
print("Adjusting for total number of messages...")
with tqdm(total=len(selected_friends) ** 2) as pbar:
for friend1 in selected_friends:
for friend2 in selected_friends:
co_occurrence.loc[friend1, friend2] = co_occurrence.loc[
friend1, friend2
] / (
total_messages_by_person[friend1]
+ total_messages_by_person[friend2]
)
pbar.update()
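    # The normalized co-occurrence lies in [0, 1], so 1 - co_occurrence acts as
    # a dissimilarity the precomputed-MDS projection below can consume.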
distance = 1 - co_occurrence
return distance
@cached("projection_coordinates")
def get_projection_coordinates(distance):
mds = MDS(n_components=2, verbose=1, n_jobs=-1, dissimilarity="precomputed")
coordinates = mds.fit_transform(distance.values)
return pd.DataFrame(coordinates, index=distance.index)
@save_graph("friends_graph")
def friends_plot(coordinates):
_, ax = plt.subplots()
coordinates.plot(0, 1, kind="scatter", ax=ax)
for k, v in coordinates.iterrows():
ax.annotate(k, v)
return ax
@save_graph("friends_network")
def friends_network(distance, threshold):
graph = nx.convert_matrix.from_pandas_adjacency(distance < threshold)
_, ax = plt.subplots()
nx.draw(graph, ax=ax)
return ax
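# Illustrative pipeline (messages_df, conversations_df and the threshold come
# from the calling code, not from this module):
#   distance = get_people_distances(messages_df, conversations_df)
#   coordinates = get_projection_coordinates(distance)
#   friends_plot(coordinates)
#   friends_network(distance, threshold=0.9)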
| 38.552381 | 87 | 0.704298 | 447 | 4,048 | 6.058166 | 0.270694 | 0.057607 | 0.081241 | 0.082718 | 0.259232 | 0.172083 | 0.172083 | 0.155096 | 0.155096 | 0.121123 | 0 | 0.00947 | 0.217391 | 4,048 | 104 | 88 | 38.923077 | 0.845328 | 0.019269 | 0 | 0.170455 | 0 | 0 | 0.048916 | 0.005547 | 0 | 0 | 0 | 0.009615 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0.034091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dab23516379326ead1d968570c74ed69eecbf055 | 2,863 | py | Python | frictionless/settings.py | Grandient/frictionless-py | 8c41b96f2047d573702fe7b34ff7ffbd32c3d815 | [
"MIT"
] | 247 | 2020-08-04T16:42:09.000Z | 2022-03-30T11:54:54.000Z | frictionless/settings.py | Grandient/frictionless-py | 8c41b96f2047d573702fe7b34ff7ffbd32c3d815 | [
"MIT"
] | 444 | 2020-07-29T09:13:59.000Z | 2022-03-31T14:54:57.000Z | frictionless/settings.py | Grandient/frictionless-py | 8c41b96f2047d573702fe7b34ff7ffbd32c3d815 | [
"MIT"
] | 60 | 2020-09-04T11:39:34.000Z | 2022-03-23T18:59:51.000Z | import os
import json
import gzip
import zipfile
# Helpers
def read_asset(*paths):
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, "assets", *paths)) as file:
return file.read().strip()
# General
UNDEFINED = object()
VERSION = read_asset("VERSION")
COMPRESSION_FORMATS = ["zip", "gz"]
INQUIRY_PROFILE = json.loads(read_asset("profiles", "inquiry.json"))
PIPELINE_PROFILE = json.loads(read_asset("profiles", "pipeline.json"))
REPORT_PROFILE = json.loads(read_asset("profiles", "report.json"))
STATUS_PROFILE = json.loads(read_asset("profiles", "status.json"))
SCHEMA_PROFILE = json.loads(read_asset("profiles", "schema", "general.json"))
RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "general.json"))
TABULAR_RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "tabular.json"))
PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "general.json"))
FISCAL_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "fiscal.json"))
TABULAR_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "tabular.json"))
GEOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "general.json"))
TOPOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "topojson.json"))
# Defaults
DEFAULT_SCHEME = "file"
DEFAULT_FORMAT = "csv"
DEFAULT_HASHING = "md5"
DEFAULT_ENCODING = "utf-8"
DEFAULT_INNERPATH = ""
DEFAULT_COMPRESSION = ""
DEFAULT_HEADER = True
DEFAULT_HEADER_ROWS = [1]
DEFAULT_HEADER_JOIN = " "
DEFAULT_HEADER_CASE = True
DEFAULT_FLOAT_NUMBERS = False
DEFAULT_MISSING_VALUES = [""]
DEFAULT_LIMIT_ERRORS = 1000
DEFAULT_LIMIT_MEMORY = 1000
DEFAULT_BUFFER_SIZE = 10000
DEFAULT_SAMPLE_SIZE = 100
DEFAULT_ENCODING_CONFIDENCE = 0.5
DEFAULT_FIELD_CONFIDENCE = 0.9
DEFAULT_PACKAGE_PROFILE = "data-package"
DEFAULT_RESOURCE_PROFILE = "data-resource"
DEFAULT_TABULAR_RESOURCE_PROFILE = "tabular-data-resource"
DEFAULT_TRUE_VALUES = ["true", "True", "TRUE", "1"]
DEFAULT_FALSE_VALUES = ["false", "False", "FALSE", "0"]
DEFAULT_DATETIME_PATTERN = "%Y-%m-%dT%H:%M:%S%z"
DEFAULT_DATE_PATTERN = "%Y-%m-%d"
DEFAULT_TIME_PATTERN = "%H:%M:%S%z"
DEFAULT_BARE_NUMBER = True
DEFAULT_FLOAT_NUMBER = False
DEFAULT_GROUP_CHAR = ""
DEFAULT_DECIMAL_CHAR = "."
DEFAULT_SERVER_PORT = 8000
DEFAULT_CANDIDATES = [
{"type": "yearmonth"},
{"type": "geopoint"},
{"type": "duration"},
{"type": "geojson"},
{"type": "object"},
{"type": "array"},
{"type": "datetime"},
{"type": "time"},
{"type": "date"},
{"type": "integer"},
{"type": "number"},
{"type": "boolean"},
{"type": "year"},
{"type": "string"},
]
# Backports
# It can be removed after dropping support for Python 3.6 and Python 3.7
COMPRESSION_EXCEPTIONS = (
(zipfile.BadZipFile, gzip.BadGzipFile)
if hasattr(gzip, "BadGzipFile")
    else (zipfile.BadZipFile,)
)
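# (gzip.BadGzipFile only exists on Python 3.8+, hence the hasattr() guard above.)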
| 29.515464 | 89 | 0.71219 | 358 | 2,863 | 5.438547 | 0.354749 | 0.064715 | 0.098613 | 0.123267 | 0.259887 | 0.248588 | 0.163842 | 0.122753 | 0 | 0 | 0 | 0.013142 | 0.122948 | 2,863 | 96 | 90 | 29.822917 | 0.762246 | 0.036675 | 0 | 0 | 0 | 0 | 0.21766 | 0.007631 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.053333 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dab536d1d9d21df99c2db8bf403753c5070c87f7 | 4,747 | py | Python | A_gen_literal_po.py | Dharma-Sagar/tx-template | 93a3005445a21a249107e01c446c9237e886de8e | [
"Apache-2.0"
] | null | null | null | A_gen_literal_po.py | Dharma-Sagar/tx-template | 93a3005445a21a249107e01c446c9237e886de8e | [
"Apache-2.0"
] | null | null | null | A_gen_literal_po.py | Dharma-Sagar/tx-template | 93a3005445a21a249107e01c446c9237e886de8e | [
"Apache-2.0"
] | 1 | 2022-03-25T15:25:00.000Z | 2022-03-25T15:25:00.000Z | from pathlib import Path
import re
import sys
from uuid import uuid4
import polib
from antx import transfer
from botok import Text
class Po:
def __init__(self):
self.transfer = Transfer()
self.file = polib.POFile()
self.file.metadata = {
'MIME-Version': '1.0',
'Content-Type': 'text/plain; charset=utf-8',
'Content-Transfer-Encoding': '8bit',
}
def _create_entry(self, msgid, msgstr="", msgctxt=None, comment=None, tcomment=None):
"""
:param msgid: string, the entry msgid.
:param msgstr: string, the entry msgstr.
:param msgctxt: string, the entry context.
:param comment: string, the entry comment.
:param tcomment: string, the entry translator comment.
"""
entry = polib.POEntry(
msgid=msgid,
msgstr=msgstr,
msgctxt=msgctxt,
comment=comment,
tcomment=tcomment
)
self.file.append(entry)
def write_to_file(self, filename):
self.file.save(filename)
def lines_to_entries(self, dump, po_file):
lines = self.transfer.generate_entries(dump, po_file)
for num, l in enumerate(lines):
line, ctxt = l
no_notes = self.remove_peydurma_notes(line)
if no_notes == "":
no_notes, line = line, no_notes
            no_notes = re.sub(r'\[.+?\]', '', no_notes)
# segment
t = Text(no_notes)
no_notes = t.tokenize_words_raw_text
# format tokens
            no_notes = re.sub(r'([^།་_]) ([^_།་])', r'\g<1>␣\g<2>', no_notes)  # affixed particles
            no_notes = re.sub('_', ' ', no_notes)  # spaces
self._create_entry(msgid=no_notes, msgctxt=ctxt, tcomment=line)
def txt_to_po(self, filename):
lines = filename.read_text(encoding='utf-8')
outfile = filename.parent / (filename.stem + ".po")
self.lines_to_entries(lines, outfile)
self.write_to_file(outfile)
@staticmethod
def remove_pagination(line):
note = re.split(r'(\[.*?\])', line)
if len(note) > 1:
            return ''.join([a for a in note if not a.startswith('[')])
else:
return ""
@staticmethod
def remove_peydurma_notes(line):
note = re.split(r'(<.*?>)', line)
if len(note) > 1:
return ''.join([a for a in note if not a.startswith('<')]).replace(':', '')
else:
return ""
class Transfer:
"""
limitation : in case a line is split on two lines in the updated .txt, it will keep
the same uuid on the second line and only add a new uuid on the first line.
"""
def __init__(self):
self.transfer = transfer
def generate_entries(self, dump, po_file):
if po_file.is_file():
dump = self.extract_entries(dump, po_file)
updated = self.add_missing_uuids(dump)
entries = []
for line in updated.strip().split('\n'):
line = line.strip()
line = self.remove_extra_uuid(line)
txt, ctxt = line[:-1].split('—')
entries.append([txt, ctxt])
return entries
def extract_entries(self, dump, po_file):
po_file = polib.pofile(po_file)
po_entries = []
for p in po_file:
if p.tcomment:
line = p.tcomment
else:
line = p.msgid.replace(' ', '').replace(' ', ' ')
po_entries.append([line, p.msgctxt])
po_dump = '\n'.join([''.join((a, f'—{b}—')) for a, b in po_entries])
pattern = [['uuid', '(—.+?—)']]
transfered = self.transfer(po_dump, pattern, dump, 'txt')
return transfered
def add_missing_uuids(self, dump):
lines = dump.strip().split('\n')
for num, l in enumerate(lines):
l = l.strip()
if not l.endswith('—'):
lines[num] = l + f'—{self.get_unique_id()}—'
return '\n'.join(lines)
@staticmethod
def remove_extra_uuid(line):
if line.count('—') > 2:
idx1 = line.find('—')
idx2 = line.find('—', idx1+1)
return (line[:idx1] + line[idx2+1:]).strip()
else:
return line
def get_unique_id(self):
return uuid4().hex
if __name__ == '__main__':
folder = 'literal/tibetan'
if len(sys.argv) > 1:
stem = sys.argv[1]
file = Path(folder) / (stem + '.txt')
print(file)
po = Po()
po.txt_to_po(file)
else:
files = sorted(list(Path(folder).glob('*.txt')))
for file in files:
print(file)
po = Po()
po.txt_to_po(file)
| 31.026144 | 97 | 0.540552 | 593 | 4,747 | 4.202361 | 0.251265 | 0.036517 | 0.02809 | 0.020465 | 0.158507 | 0.133226 | 0.074639 | 0.074639 | 0.074639 | 0.053772 | 0 | 0.006834 | 0.321888 | 4,747 | 152 | 98 | 31.230263 | 0.762349 | 0.090162 | 0 | 0.188034 | 0 | 0 | 0.055896 | 0.011557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.059829 | 0.008547 | 0.273504 | 0.017094 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dab78841b63bd54072dd390557aad814bcd2c522 | 2,845 | py | Python | cvm/tests/test_trec.py | CortexFoundation/tvm-cvm | d8941dc60a51dd27a6d2accc1eff2eced3b3640d | [
"Apache-2.0"
] | 6 | 2019-07-04T09:42:53.000Z | 2021-12-28T13:19:48.000Z | cvm/tests/test_trec.py | CortexFoundation/tvm-cvm | d8941dc60a51dd27a6d2accc1eff2eced3b3640d | [
"Apache-2.0"
] | 4 | 2019-06-27T08:05:18.000Z | 2021-09-09T18:59:11.000Z | cvm/tests/test_trec.py | CortexFoundation/tvm-cvm | d8941dc60a51dd27a6d2accc1eff2eced3b3640d | [
"Apache-2.0"
] | null | null | null | import gluon_zoo as gz
import mxnet as mx
from mxnet import ndarray as nd
from mxnet import gluon
import tvm
from tvm.contrib import graph_runtime
import nnvm
import pickle
import sym_pass as spass
import dataset as ds
import sym_calib as calib
import sim_quant_helper as sim
import ops_generator as opg
import utils
import mrt as _mrt
def load_fname(suffix=None, with_ext=False):
suffix = "."+suffix if suffix is not None else ""
prefix = "./data/trec%s" % (suffix)
return utils.extend_fname(prefix, with_ext=with_ext)
batch_size = 16
ctx = mx.gpu()
inputs_ext = { 'data': {
'shape': (38, batch_size)
}}
inputs = [mx.sym.var(n) for n in inputs_ext]
utils.log_init()
data_iter = ds.load_trec(batch_size)
def data_iter_func():
return next(data_iter)
data, label = data_iter_func()
sym_file, param_file = load_fname()
net1 = utils.load_model(sym_file, param_file, inputs, ctx=ctx)
def trec(data):
res = net1(data.as_in_context(ctx))
return res
sym, params = mx.sym.load(sym_file), nd.load(param_file)
sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
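# Two quantization paths follow: the MRT flow (enabled below) and the older
# pure-int8 calibration flow, kept in the else branch for reference.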
if True:
mrt = _mrt.MRT(sym, params, inputs_ext)
mrt.set_data('data', data)
mrt.calibrate(ctx=ctx)
mrt.set_input_prec('data', 16)
mrt.set_fixed('data')
mrt.set_output_prec(8)
qsym, qparams, inputs_ext = mrt.quantize()
else:
inputs_ext['data']['data'] = data
th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
qsym, qparams, _ = calib.pure_int8_quantize(sym, params, inputs_ext, th_dict)
net2 = gluon.nn.SymbolBlock(qsym, inputs)
utils.load_parameters(net2, qparams, ctx=ctx)
def quantize(data):
data = sim.load_real_data(data, 'data', inputs_ext)
res = net2(data.as_in_context(ctx))
return res
quant_sym, quant_params, quant_ext = load_fname("sym.quantize", with_ext=True)
open(quant_sym, "w").write(qsym.tojson())
if False:
inputs_ext['data']['shape'] = (38, 1)
data = data[:, 0].reshape(38, 1)
_mrt.std_dump(qsym, qparams, inputs_ext, data, "trec",
batch=True, data_dtype="int32", max_num=1000,
dump_ops=["sentimentnet0_embedding0_fwd"])
opg.dump_file("take",
["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.in.npy",
"/data/std_out/trec/sentimentnet0_embedding0_fwd_1.mrt.dump.in.npy"],
["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.out.npy"],
"/data/std_out/trec/sentimentnet0_embedding0_fwd.attr")
exit()
if False:
while True:
data, _ = next(data_iter)
data = sim.load_real_data(data, 'data', inputs_ext)
inputs_ext['data']['data'] = data
spass.sym_dump_ops(qsym, qparams, inputs_ext,
ctx=mx.gpu(3))
exit()
utils.multi_eval_accuracy(trec, data_iter_func,
quantize,
iter_num=1000)
| 30.265957 | 83 | 0.693497 | 442 | 2,845 | 4.219457 | 0.273756 | 0.06756 | 0.034853 | 0.038606 | 0.216622 | 0.172654 | 0.172654 | 0.1437 | 0.120643 | 0.082038 | 0 | 0.018844 | 0.179262 | 2,845 | 93 | 84 | 30.591398 | 0.779872 | 0 | 0 | 0.125 | 0 | 0 | 0.130098 | 0.097046 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.0375 | 0.1875 | 0.0125 | 0.2875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dab7892075e86447b7eb58c074c9b44136134f5d | 9,856 | py | Python | public/scripts/node_process_scripts/vamps_script_parse.py | avoorhis/vamps-node.js | 7071da6e569a669a06a6d21c23cc9c667ec49250 | [
"MIT"
] | 1 | 2015-10-28T14:48:38.000Z | 2015-10-28T14:48:38.000Z | public/scripts/node_process_scripts/vamps_script_parse.py | avoorhis/vamps-node.js | 7071da6e569a669a06a6d21c23cc9c667ec49250 | [
"MIT"
] | 48 | 2015-10-23T16:02:14.000Z | 2022-02-15T00:46:39.000Z | public/scripts/node_process_scripts/vamps_script_parse.py | avoorhis/vamps-node.js | 7071da6e569a669a06a6d21c23cc9c667ec49250 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011, Marine Biological Laboratory
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
from stat import * # ST_SIZE etc
import sys
import shutil
import types
import time
import random
import logging
import csv, json
import configparser as ConfigParser
import fastalibAV as fastalib
import datetime
today = str(datetime.date.today())
import subprocess
import pymysql as MySQLdb
"""
"""
# class FastaReader:
# def __init__(self,file_name=None):
# self.file_name = file_name
# self.h = open(self.file_name, 'rb')
# #self.h = open(self.file_name)
# self.seq = ''
# self.id = None
#
# def next(self):
# def read_id():
# #return self.h.readline().decode('utf-8').strip()[1:]
# #print(self.h.readline())
# return self.h.readline().strip()[1:]
#
# def read_seq():
# #ret = bytearray(b'')
# ret = ''
# #ret = ''
# while True:
# line = self.h.readline()
# print(str(line))
# while len(line) and not len(line.strip()):
# # found empty line(s)
#
# line = self.h.readline()
# print(str(line))
# if not len(line):
# # EOF
# break
#
# if str(line).startswith('>'):
# # found new defline: move back to the start
# self.h.seek(-len(line), os.SEEK_CUR)
# break
#
# else:
# ret += str(line).strip()
#
# return ret
#
# self.id = read_id()
# self.seq = read_seq()
#
# if self.id:
# return True
#
def get_data(args):
pass
def parse_matrix(args):
print('running matrix')
n = 0
dirty_datasets = {}
clean_datasets = {}
project_count = 0
max_ds_count = 0
with open(args.file, mode='r') as infile:
for line in infile:
items = line.strip().split('\t')
#print('items',items)
if not line or items[0][:5] == 'VAMPS':
print('found vamps')
continue
if n==0:
            ds_items = items[1:]  # line.strip('\n').split('\t')[1:]  # strip original line on '\n' only, to retain first '\t' if present
#print('ds_items',ds_items)
for ds in ds_items:
dirty_datasets[ds] = 0
else:
line_items = items #line.strip().split('\t')
#print('line_items',line_items)
counts = line_items[1:]
for i,cnt in enumerate(counts):
#print(i,cnt)
if cnt == '' or not cnt:
cnt = 0
print('MISSING COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')')
try:
cnt = int(cnt)
except:
cnt = 0
print('NON-INTEGER COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')')
project_count += int(cnt)
dirty_datasets[ds_items[i]] += int(cnt)
tax = line_items[0]
n+=1
for ds in dirty_datasets:
if not ds or ds == '':
print('REMOVING EMPTY Dataset and data')
else:
clean_datasets[ds] = dirty_datasets[ds]
print('clean datasets',clean_datasets.keys())
for ds in clean_datasets:
if clean_datasets[ds] > max_ds_count:
max_ds_count = clean_datasets[ds]
return(clean_datasets, project_count, max_ds_count)
def find_dataset_name(args, id):
"""
This should be the same fxn as in demultiplex
"""
# adjust to your specific defline
if args.verbose:
print(id)
if args.separator == 'space':
sampleName_items = id.split()[0].split('_')
else:
sampleName_items = id.split(args.separator)[0].split('_')
test = sampleName_items[-1]
try:
int(test)
sampleName = '_'.join(sampleName_items[:-1])
if args.verbose:
print('INT',sampleName_items[-1])
except:
sampleName = '_'.join(sampleName_items)
if args.verbose:
print('NO INT',sampleName_items[-1])
return sampleName
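# e.g. 'PC.354_3 FLP3FBN01EEWKD ...' yields 'PC.354' (the trailing '_3' parses
# as an integer counter), while 'dataset1 FLP...' yields 'dataset1' (no trailing int).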
def parse_fasta(args):
print('running fasta')
f = fastalib.SequenceSource(args.file)
#f = FastaReader(args.file)
datasets={}
project_count = 0
max_ds_count = 0
# sample f.id:
# 308_1|M01028:283:000000000-CPKFG:1:1101:16551:1784 1:N:0:15|o:99|m/o:0.030303|MR:n=0;r1=3;r2=0|Q30:p=93;p=98|CO:0|mismatches:3
# PC.354_3 FLP3FBN01EEWKD orig_bc=AGCACGAGCCTA new_bc=AGCACGAGCCTA bc_diffs=0
# dataset1 FLP3FBN01EEWKD orig_bc=AGCACGAGCCTA new_bc=AGCACGAGCCTA bc_diffs=0
while f.next():
#print(f.seq)
#print(f.id)
project_count += 1
if args.separator == 'space':
defline_pts = f.id.split() # splits on white space
else:
            defline_pts = f.id.split(args.separator)  # splits on the given separator
dataset = find_dataset_name(args, f.id)
seq_id = defline_pts[1]
if args.verbose:
print(dataset)
print(seq_id)
sys.exit()
if dataset in datasets:
datasets[dataset] += 1
else:
datasets[dataset] = 1
# max_ds_count; number_of_ds; total_seqs;
for ds in datasets:
if datasets[ds] > max_ds_count:
max_ds_count = datasets[ds]
#print(datasets)
return(datasets, project_count, max_ds_count)
def write_config(args, datasets, project_count, max_ds_count):
ini_file = os.path.join(args.project_dir,'INFO.config')
print( 'Writing INFO.config file:',ini_file )
f = open(ini_file, 'w')
f.write('[MAIN]'+"\n")
f.write('project_name='+args.project+"\n")
f.write("total_seq_count="+str(project_count)+"\n")
f.write('owner='+args.owner+"\n")
f.write("max_dataset_count="+str(max_ds_count)+"\n")
f.write('public=0'+"\n")
f.write('project_dir='+args.project_dir+"\n")
f.write('type='+args.type+"\n")
f.write('number_of_datasets='+str(len(datasets))+"\n")
f.write("\n")
f.write('[MAIN.dataset]'+"\n")
for ds in datasets:
f.write(ds+'='+str(datasets[ds])+"\n")
f.close()
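# Resulting INFO.config layout (values are illustrative):
#   [MAIN]
#   project_name=myproject
#   total_seq_count=12345
#   owner=someuser
#   max_dataset_count=678
#   public=0
#   project_dir=/path/to/project
#   type=fasta
#   number_of_datasets=2
#   [MAIN.dataset]
#   dataset1=678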
if __name__ == '__main__':
import argparse
myusage = """usage: 5-vamps-clean-db.py [options]
where
-pid/--project_id clean this pid only
-p/--project_name clean this name only
-site/--site vamps, vampsdev or localhost
-all/--all Remove ALL Data for fresh install
Be Careful -- will remove ALL data from db
"""
parser = argparse.ArgumentParser(description="" ,usage=myusage)
parser.add_argument("-f","--file",
required=True, action="store", dest = "file",
help="""ProjectID""")
parser.add_argument("-p", "--project",
required=True, action='store', dest = "project",
help=" ")
parser.add_argument("-d", "--project_dir",
required=True, action='store', dest = "project_dir",
help=" ")
# parser.add_argument("-host", "--host",
# required=True, action='store', dest = "host", default='localhost',
# help=" ")
parser.add_argument("-t", "--type",
required=True, action='store', dest = "type",
help=" ")
parser.add_argument("-u", "--user",
required=True, action='store', dest = "owner",
help=" ")
parser.add_argument("-sep", "--separator",
required=False, action='store', dest = "separator", default='space',
help=" ")
parser.add_argument("-v", "--verbose",
required=False, action="store_true", dest = "verbose", default=False,
help="""JSON Files Directory""")
# parser.add_argument("-data_dir", "--data_dir",
# required=True, action='store', dest = "data_dir", default='user_data',
# help=" config.USER_FILES_BASE ")
args = parser.parse_args()
# if args.host == 'vamps':
# #db_host = 'vampsdb'
# db_host = 'bpcweb8'
# args.NODE_DATABASE = 'vamps2'
# db_home = '/groups/vampsweb/vamps/'
# elif args.host == 'vampsdev':
# #db_host = 'vampsdev'
# db_host = 'bpcweb7'
# args.NODE_DATABASE = 'vamps2'
# db_home = '/groups/vampsweb/vampsdev/'
# else:
# db_host = 'localhost'
# db_home = '~/'
# args.NODE_DATABASE = 'vamps_development'
#
# args.obj = MySQLdb.connect( host=db_host, db=args.NODE_DATABASE, read_default_file=os.path.expanduser("~/.my.cnf_node") )
#
# #db = MySQLdb.connect(host="localhost", # your host, usually localhost
# # read_default_file="~/.my.cnf" )
# args.cur = args.obj.cursor()
#(args.proj, args.pid, args.dids, args.dsets) = get_data(args)
if args.type == 'fasta':
(datasets, project_count, max_ds_count) = parse_fasta(args)
elif args.type == 'matrix':
(datasets, project_count, max_ds_count) = parse_matrix(args)
write_config(args, datasets, project_count, max_ds_count)
print('Finished')
| 32.104235 | 138 | 0.542106 | 1,193 | 9,856 | 4.331098 | 0.251467 | 0.013548 | 0.027095 | 0.023224 | 0.203406 | 0.168183 | 0.13083 | 0.093284 | 0.053029 | 0.035611 | 0 | 0.017682 | 0.317167 | 9,856 | 306 | 139 | 32.20915 | 0.750074 | 0.356027 | 0 | 0.173913 | 0 | 0 | 0.161483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031056 | false | 0.006211 | 0.093168 | 0 | 0.130435 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dab9da99251eb13cbf19a332bec3cc7115eec8f3 | 859 | py | Python | yahoo_nsfw.py | m0r13/tensorflow-open_nsfw | 35da5a6eb71ad5490b25ad433cfee02986f123eb | [
"BSD-2-Clause"
] | null | null | null | yahoo_nsfw.py | m0r13/tensorflow-open_nsfw | 35da5a6eb71ad5490b25ad433cfee02986f123eb | [
"BSD-2-Clause"
] | null | null | null | yahoo_nsfw.py | m0r13/tensorflow-open_nsfw | 35da5a6eb71ad5490b25ad433cfee02986f123eb | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import tensorflow as tf
from image_utils import create_raw_image_loader
from model import OpenNsfwModel, InputType
class YahooNSFWClassifier:
def __init__(self, weights_path):
self.session = tf.Session()
self.model = OpenNsfwModel()
self.model.build(weights_path=weights_path)
self.session.run(tf.global_variables_initializer())
self.fn_load_image = create_raw_image_loader()
def classify(self, image):
image = self.fn_load_image(image)
predictions = self.session.run(self.model.predictions, feed_dict={self.model.input: image})
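        # predictions is a (1, 2) softmax output; __main__ below reads
        # predictions[0][1] as the NSFW score.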
return predictions
if __name__ == "__main__":
from PIL import Image
classifier = YahooNSFWClassifier("data/open_nsfw-weights.npy")
print("NSFW score: %f" % classifier.classify(Image.open(sys.argv[1]))[0][1])
| 33.038462 | 99 | 0.718277 | 111 | 859 | 5.288288 | 0.486486 | 0.061329 | 0.0477 | 0.068143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004237 | 0.175786 | 859 | 25 | 100 | 34.36 | 0.824859 | 0.023283 | 0 | 0 | 0 | 0 | 0.057279 | 0.031026 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.473684 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dabd1be8c1b91392a0032b7379bdd13519e05e0b | 4,823 | py | Python | crazyflie-lib-python/cflib/crazyflie/localization.py | loujiabin1994/crazyswarm | 893325b63b3b19015fe261bfa989846a1c82dc82 | [
"MIT"
] | null | null | null | crazyflie-lib-python/cflib/crazyflie/localization.py | loujiabin1994/crazyswarm | 893325b63b3b19015fe261bfa989846a1c82dc82 | [
"MIT"
] | null | null | null | crazyflie-lib-python/cflib/crazyflie/localization.py | loujiabin1994/crazyswarm | 893325b63b3b19015fe261bfa989846a1c82dc82 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2017-2020 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Subsystem handling localization-related data communication
"""
import collections
import logging
import struct
from cflib.crtp.crtpstack import CRTPPacket
from cflib.crtp.crtpstack import CRTPPort
from cflib.utils.callbacks import Caller
__author__ = 'Bitcraze AB'
__all__ = ['Localization', 'LocalizationPacket']
logger = logging.getLogger(__name__)
# A generic location packet contains type and data. When received the data
# may be decoded by the lib.
LocalizationPacket = collections.namedtuple('localizationPacket',
['type', 'raw_data', 'data'])
class Localization():
"""
Handle localization-related data communication with the Crazyflie
"""
# Implemented channels
POSITION_CH = 0
GENERIC_CH = 1
    # Localization message types for the generic channel
RANGE_STREAM_REPORT = 0
RANGE_STREAM_REPORT_FP16 = 1
LPS_SHORT_LPP_PACKET = 2
EMERGENCY_STOP = 3
EMERGENCY_STOP_WATCHDOG = 4
COMM_GNSS_NMEA = 6
COMM_GNSS_PROPRIETARY = 7
EXT_POSE = 8
EXT_POSE_PACKED = 9
def __init__(self, crazyflie=None):
"""
Initialize the Extpos object.
"""
self._cf = crazyflie
self.receivedLocationPacket = Caller()
self._cf.add_port_callback(CRTPPort.LOCALIZATION, self._incoming)
def _incoming(self, packet):
"""
Callback for data received from the copter.
"""
if len(packet.data) < 1:
            logger.warning('Localization packet received with incorrect '
                           'length (length is {})'.format(len(packet.data)))
return
pk_type = struct.unpack('<B', packet.data[:1])[0]
data = packet.data[1:]
# Decoding the known packet types
# TODO: more generic decoding scheme?
decoded_data = None
if pk_type == self.RANGE_STREAM_REPORT:
if len(data) % 5 != 0:
                logger.error('Wrong range stream report data length')
return
decoded_data = {}
raw_data = data
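            # each report entry is 5 bytes: a uint8 anchor id + a float32 distance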
for i in range(int(len(data) / 5)):
anchor_id, distance = struct.unpack('<Bf', raw_data[:5])
decoded_data[anchor_id] = distance
raw_data = raw_data[5:]
pk = LocalizationPacket(pk_type, data, decoded_data)
self.receivedLocationPacket.call(pk)
def send_extpos(self, pos):
"""
Send the current Crazyflie X, Y, Z position. This is going to be
forwarded to the Crazyflie's position estimator.
"""
pk = CRTPPacket()
pk.port = CRTPPort.LOCALIZATION
pk.channel = self.POSITION_CH
pk.data = struct.pack('<fff', pos[0], pos[1], pos[2])
self._cf.send_packet(pk)
def send_extpose(self, pos, quat):
"""
Send the current Crazyflie pose (position [x, y, z] and
attitude quaternion [qx, qy, qz, qw]). This is going to be forwarded
to the Crazyflie's position estimator.
"""
pk = CRTPPacket()
pk.port = CRTPPort.LOCALIZATION
pk.channel = self.GENERIC_CH
pk.data = struct.pack('<Bfffffff',
self.EXT_POSE,
pos[0], pos[1], pos[2],
quat[0], quat[1], quat[2], quat[3])
self._cf.send_packet(pk)
def send_short_lpp_packet(self, dest_id, data):
"""
Send ultra-wide-band LPP packet to dest_id
"""
pk = CRTPPacket()
pk.port = CRTPPort.LOCALIZATION
pk.channel = self.GENERIC_CH
pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data
self._cf.send_packet(pk)
| 33.262069 | 79 | 0.609579 | 575 | 4,823 | 4.843478 | 0.393043 | 0.012567 | 0.024417 | 0.020467 | 0.202154 | 0.159785 | 0.131059 | 0.113106 | 0.113106 | 0.113106 | 0 | 0.015707 | 0.287166 | 4,823 | 144 | 80 | 33.493056 | 0.794357 | 0.37259 | 0 | 0.191176 | 0 | 0 | 0.069538 | 0 | 0 | 0 | 0 | 0.006944 | 0 | 1 | 0.073529 | false | 0 | 0.088235 | 0 | 0.367647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dabfe56a60fe696acd94d21ad2b11875bcc6001b | 1,396 | py | Python | uibcdf_stdlib/exceptions.py | uibcdf/UIBCDF-Standard-Library | 16665d12d94c8d82b17356f79795d3741cc86324 | [
"MIT"
] | 1 | 2021-07-06T18:51:45.000Z | 2021-07-06T18:51:45.000Z | uibcdf_stdlib/exceptions.py | dprada/UIBCDF-Standard-Library | 96906e631623ff8de59bd5e15fe25d87a9d2a4f4 | [
"MIT"
] | null | null | null | uibcdf_stdlib/exceptions.py | dprada/UIBCDF-Standard-Library | 96906e631623ff8de59bd5e15fe25d87a9d2a4f4 | [
"MIT"
] | 1 | 2021-07-06T18:51:48.000Z | 2021-07-06T18:51:48.000Z | class BadCallError(ValueError):
def __init__(self, message=None, documentation_web=None):
if message is None:
            message = ('Wrong way of invoking this method. Check the online '
                       'documentation for more information.')
if documentation_web is not None:
message = message[:-1] + ': {}'.format(documentation_web)
super().__init__(message)
class NotImplementedError(NotImplementedError):
def __init__(self, message=None, issues_web=None):
if message is None:
if issues_web is not None:
message = ('It has not been implemeted yet. Write a new issue in'
'{} asking for it.'.format(issues_web))
super().__init__(message)
class LibraryNotFound(NotImplementedError):
def __init__(self, library):
message = 'The python library {} was not found.'.format(library)
super().__init__(message)
class InputArgumentError(NotImplementedError):
def __init__(self, argument, method, documentation_web=None):
        message = ('Invalid value for input argument "{}" in method or class "{}". '
                   'Check the online documentation for more information.'.format(argument, method))
if documentation_web is not None:
message = message[:-1] + ': {}'.format(documentation_web)
super().__init__(message)
| 31.022222 | 96 | 0.637536 | 151 | 1,396 | 5.622517 | 0.344371 | 0.113074 | 0.051826 | 0.042403 | 0.454653 | 0.34629 | 0.294464 | 0.188457 | 0.188457 | 0.188457 | 0 | 0.001936 | 0.260029 | 1,396 | 44 | 97 | 31.727273 | 0.819942 | 0 | 0 | 0.384615 | 0 | 0 | 0.224373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dac5823dc3e2b862d786e19df82399b8a50a961d | 2,757 | py | Python | feature-detection.py | ronheywood/opencv | 664de6f5b3708af6c00348a31a37db484137bb8d | [
"MIT"
] | 7 | 2021-07-08T14:57:22.000Z | 2022-01-12T09:15:01.000Z | feature-detection.py | ronheywood/opencv | 664de6f5b3708af6c00348a31a37db484137bb8d | [
"MIT"
] | 1 | 2021-08-18T20:15:44.000Z | 2022-02-04T18:04:39.000Z | feature-detection.py | ronheywood/opencv | 664de6f5b3708af6c00348a31a37db484137bb8d | [
"MIT"
] | 2 | 2021-10-14T18:40:26.000Z | 2022-01-10T00:36:29.000Z | # import the necessary packages
import cv2
import sys
import os
import numpy as np
import argparse
sys.path.append(os.path.abspath('./modules/'))
import detection
import helpers
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--debug", type=bool, default=False,
help="experiment with different image filters")
ap.add_argument("-i", "--imagepath", type=str,
default = None,
help="path to test image")
args = ap.parse_args()
if(args.imagepath is None):
image_path = helpers.get_random_test_image()
else:
image_path = args.imagepath
image = cv2.imread(image_path,1)
ball = detection.GolfBallDetection(image)
if ball:
(x,y,w,h) = ball
x_plus_w = x+w
y_plus_h = y+h
section = image.copy()[y:y+h, x:x+w]
cv2.imshow("Crop to ball",section)
# generating the kernels
kernel1 = np.array([[0, -1, -1], # kernel for embossing bottom left side
[1, 0, -1],
[1, 1, 0]])
# you can generate kernels for embossing top as well
gray = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
if(args.debug):
cv2.imshow("Gray",gray)
if(args.debug):
embossdepth = np.ones((h, w), np.uint8) * 128
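        # offset by mid-gray (128) so negative filter responses remain visible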
embossed = cv2.add(cv2.filter2D(gray, -1, kernel1),embossdepth) # emboss on bottom left side
cv2.imshow("Embossed",embossed)
if(args.debug):
_, binary = cv2.threshold(cv2.cvtColor(section, cv2.COLOR_BGR2GRAY), 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
binary = 255 - binary
cv2.imshow("Binary",binary)
if(args.debug):
edges = cv2.Canny(binary,100,200)
cv2.imshow("Edges",edges)
if(args.debug):
embossed_edges = cv2.Canny(embossed,100,200)
cv2.imshow("Embossed Edges",embossed_edges)
contours,heirarchy = cv2.findContours(embossed_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
im2 = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
dimples = []
for contour in contours:
approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
area = cv2.contourArea(contour)
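            # keep contours that are roughly circular (many approx vertices)
            # and large enough to plausibly be a dimple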
if ((len(approx) > 8) & (area > 30) ):
dimples.append(contour)
cv2.drawContours(im2, dimples, -1, (0,255,0), 3)
cv2.imshow("Contours",im2)
circle = detection.get_ball_circle(image,x,y,w,h)
if circle is not None:
detection.draw_circle_around_ball(image,circle,(x,y))
else:
detection.draw_boundaries_and_label(image,(x,y),(w,h),(0,255,0),'Ball')
else:
print('[INFO] Failed to find ball')
cv2.imshow("Detecting features",image)
cv2.waitKey(0)
cv2.destroyAllWindows() | 32.05814 | 122 | 0.629307 | 376 | 2,757 | 4.526596 | 0.364362 | 0.042303 | 0.032315 | 0.007051 | 0.070505 | 0.059929 | 0 | 0 | 0 | 0 | 0 | 0.046919 | 0.234675 | 2,757 | 86 | 123 | 32.05814 | 0.759716 | 0.060936 | 0 | 0.117647 | 0 | 0 | 0.075077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.102941 | 0 | 0.102941 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dac5b2ff80767ae00c126aba31c2851cfe3769ef | 7,538 | py | Python | loopy/execution.py | xywei/loopy | f176bc70ef52862ee0dfcc661fbe4b86b1726f55 | [
"MIT"
] | null | null | null | loopy/execution.py | xywei/loopy | f176bc70ef52862ee0dfcc661fbe4b86b1726f55 | [
"MIT"
] | null | null | null | loopy/execution.py | xywei/loopy | f176bc70ef52862ee0dfcc661fbe4b86b1726f55 | [
"MIT"
] | null | null | null | from __future__ import division, with_statement, absolute_import
__copyright__ = "Copyright (C) 2012-16 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
import numpy as np
from pytools import ImmutableRecord, memoize_method
from loopy.diagnostic import LoopyError
import logging
logger = logging.getLogger(__name__)
from pytools.persistent_dict import PersistentDict
from loopy.tools import LoopyKeyBuilder
from loopy.version import DATA_MODEL_VERSION
# {{{ object array argument packing
class _PackingInfo(ImmutableRecord):
"""
.. attribute:: name
.. attribute:: sep_shape
.. attribute:: subscripts_and_names
A list of type ``[(index, unpacked_name), ...]``.
"""
class SeparateArrayPackingController(object):
"""For argument arrays with axes tagged to be implemented as separate
arrays, this class provides preprocessing of the incoming arguments so that
all sub-arrays may be passed in one object array (under the original,
un-split argument name) and are unpacked into separate arrays before being
passed to the kernel.
It also repacks outgoing arrays of this type back into an object array.
"""
def __init__(self, kernel):
# map from arg name
self.packing_info = {}
from loopy.kernel.array import ArrayBase
for arg in kernel.args:
if not isinstance(arg, ArrayBase):
continue
if arg.shape is None or arg.dim_tags is None:
continue
subscripts_and_names = arg.subscripts_and_names()
if subscripts_and_names is None:
continue
self.packing_info[arg.name] = _PackingInfo(
name=arg.name,
sep_shape=arg.sep_shape(),
subscripts_and_names=subscripts_and_names,
is_written=arg.name in kernel.get_written_variables())
def unpack(self, kernel_kwargs):
if not self.packing_info:
return kernel_kwargs
kernel_kwargs = kernel_kwargs.copy()
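        # replace each packed object array with one kwarg per sub-array, using
        # the (index, unpacked_name) pairs recorded in __init__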
for packing_info in six.itervalues(self.packing_info):
arg_name = packing_info.name
if packing_info.name in kernel_kwargs:
arg = kernel_kwargs[arg_name]
for index, unpacked_name in packing_info.subscripts_and_names:
assert unpacked_name not in kernel_kwargs
kernel_kwargs[unpacked_name] = arg[index]
del kernel_kwargs[arg_name]
return kernel_kwargs
def pack(self, outputs):
if not self.packing_info:
return outputs
for packing_info in six.itervalues(self.packing_info):
if not packing_info.is_written:
continue
result = outputs[packing_info.name] = \
np.zeros(packing_info.sep_shape, dtype=np.object)
for index, unpacked_name in packing_info.subscripts_and_names:
result[index] = outputs.pop(unpacked_name)
return outputs
# }}}
# {{{ KernelExecutorBase
typed_and_scheduled_cache = PersistentDict(
"loopy-typed-and-scheduled-cache-v1-"+DATA_MODEL_VERSION,
key_builder=LoopyKeyBuilder())
class KernelExecutorBase(object):
"""An object connecting a kernel to a :class:`pyopencl.Context`
for execution.
.. automethod:: __init__
.. automethod:: __call__
"""
def __init__(self, kernel):
"""
:arg kernel: a loopy.LoopKernel
"""
self.kernel = kernel
self.packing_controller = SeparateArrayPackingController(kernel)
self.output_names = tuple(arg.name for arg in self.kernel.args
if arg.name in self.kernel.get_written_variables())
self.has_runtime_typed_args = any(
arg.dtype is None
for arg in kernel.args)
def get_typed_and_scheduled_kernel_uncached(self, arg_to_dtype_set):
from loopy.kernel.tools import add_dtypes
kernel = self.kernel
if arg_to_dtype_set:
var_to_dtype = {}
for var, dtype in arg_to_dtype_set:
try:
dest_name = kernel.impl_arg_to_arg[var].name
except KeyError:
dest_name = var
try:
var_to_dtype[dest_name] = dtype
except KeyError:
raise LoopyError("cannot set type for '%s': "
"no known variable/argument with that name"
% var)
kernel = add_dtypes(kernel, var_to_dtype)
from loopy.type_inference import infer_unknown_types
kernel = infer_unknown_types(kernel, expect_completion=True)
if kernel.schedule is None:
from loopy.preprocess import preprocess_kernel
kernel = preprocess_kernel(kernel)
from loopy.schedule import get_one_scheduled_kernel
kernel = get_one_scheduled_kernel(kernel)
return kernel
@memoize_method
def get_typed_and_scheduled_kernel(self, arg_to_dtype_set):
from loopy import CACHING_ENABLED
cache_key = (type(self).__name__, self.kernel, arg_to_dtype_set)
if CACHING_ENABLED:
try:
return typed_and_scheduled_cache[cache_key]
except KeyError:
pass
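                # cache miss: fall through and rebuild the kernel below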
logger.debug("%s: typed-and-scheduled cache miss" % self.kernel.name)
kernel = self.get_typed_and_scheduled_kernel_uncached(arg_to_dtype_set)
if CACHING_ENABLED:
typed_and_scheduled_cache[cache_key] = kernel
return kernel
def arg_to_dtype_set(self, kwargs):
if not self.has_runtime_typed_args:
return None
from loopy.types import NumpyType
target = self.kernel.target
impl_arg_to_arg = self.kernel.impl_arg_to_arg
arg_to_dtype = {}
for arg_name, val in six.iteritems(kwargs):
arg = impl_arg_to_arg.get(arg_name, None)
if arg is None:
# offsets, strides and such
continue
if arg.dtype is None and val is not None:
try:
dtype = val.dtype
except AttributeError:
pass
else:
arg_to_dtype[arg_name] = NumpyType(dtype, target)
return frozenset(six.iteritems(arg_to_dtype))
# }}}
# vim: foldmethod=marker
| 32.076596 | 79 | 0.648183 | 926 | 7,538 | 5.046436 | 0.278618 | 0.035309 | 0.0214 | 0.019474 | 0.152579 | 0.109566 | 0.064199 | 0.040659 | 0.040659 | 0.021828 | 0 | 0.001309 | 0.290395 | 7,538 | 234 | 80 | 32.213675 | 0.872313 | 0.107588 | 0 | 0.214286 | 0 | 0 | 0.181173 | 0.005289 | 0 | 0 | 0 | 0 | 0.007143 | 1 | 0.05 | false | 0.014286 | 0.114286 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dac7f27abecc653f42dd61c7ee788bece9231835 | 3,419 | py | Python | rebound/python_examples/megno_interactive/problem.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | rebound/python_examples/megno_interactive/problem.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | rebound/python_examples/megno_interactive/problem.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import rebound
from rebound.interruptible_pool import InterruptiblePool
# Import other modules
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from matplotlib.colors import LogNorm
# Runs one simulation.
def simulation(par):
saturn_a, saturn_e = par
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.integrator_whfast_safe_mode = 0
sim.dt = 5.
# These parameters are only approximately those of Jupiter and Saturn.
sim.add(m=1.)
sim.add(m=0.000954, a=5.204, anom=0.600, omega=0.257, e=0.048)
sim.add(m=0.000285, a=saturn_a, anom=0.871, omega=1.616, e=saturn_e)
sim.move_to_com()
sim.init_megno()
    sim.integrate(5e2*2.*np.pi) # integrate for 500 years
    return [sim.calculate_megno(),1./(sim.calculate_lyapunov()*2.*np.pi)] # returns MEGNO and Lyapunov timescale in years
def updatePlot(first=False):
# This constructs a 2d array.
# The current implementation is slow, but simple.
keys = np.array(resd.keys())
x1 = np.unique(keys.T[0])
x2 = np.unique(keys.T[1])
res = np.empty((len(x2),len(x1),2))
for i,_x1 in enumerate(x1):
for j,_x2 in enumerate(x2):
res[j][i] = resd[(_x1,_x2)]
# Clip arrays
megno = np.clip(res[:,:,0],1.8,4.)
lyaptimescale = np.clip(np.absolute(res[:,:,1]),1e1,4e3)
# Plot MEGNO
im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
# Plot Lyapunov timescale
im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=4e3, norm=LogNorm(), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
if first:
cb1 = plt.colorbar(im1, ax=axarr[0])
cb1.solids.set_rasterized(True)
cb1.set_label("MEGNO $\\langle Y \\rangle$")
cb2 = plt.colorbar(im2, ax=axarr[1])
cb2.solids.set_rasterized(True)
cb2.set_label("Lyapunov timescale [years]")
plt.draw()
pool = InterruptiblePool() # number of threads defaults to the number of CPUs on the system
def runSim(p):
print("Running %d simulations." % len(p))
res = np.nan_to_num(np.array(pool.map(simulation,p)))
for i,r in enumerate(res):
resd[p[i]] = r
# Setup grid and run many simulations in parallel
a = np.array([7.,10.]) # range of saturn semi-major axis in AU
e = np.array([0.,0.5]) # range of saturn eccentricity
# Setup plots
f, axarr = plt.subplots(2,figsize=(10,8))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
ax.set_xlim(extent[0],extent[1])
ax.set_ylim(extent[2],extent[3])
ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
ax.set_ylabel("$e_{\mathrm{Saturn}}$")
# Results are stored in this dictionary
resd = {}
# Initial parameters (2x2 grid)
parameters = [(_a, _e) for _a in a for _e in e]
# Run and plot first simulations
runSim(parameters)
updatePlot(first=True)
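# Each refinement pass inserts midpoints along one axis, simulates only the new
# (a, e) combinations, and redraws; resd caches every completed run.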
# Eight levels of refinement
for i in xrange(8):
_a = np.linspace((a[0]+a[1])/2.,a[-1],len(a))[:-1]
a = np.sort(np.concatenate((a,_a)))
parameters = [(__a, _e) for __a in _a for _e in e]
runSim(parameters)
updatePlot()
_e = np.linspace((e[0]+e[1])/2.,e[-1],len(e))[:-1]
e = np.sort(np.concatenate((e,_e)))
parameters = [(_a, __e) for _a in a for __e in _e]
runSim(parameters)
updatePlot()
raw_input('Press enter...')
| 31.953271 | 162 | 0.64434 | 543 | 3,419 | 3.959484 | 0.366483 | 0.013023 | 0.009767 | 0.02093 | 0.107907 | 0.107907 | 0.107907 | 0.107907 | 0.060465 | 0.060465 | 0 | 0.043573 | 0.194501 | 3,419 | 106 | 163 | 32.254717 | 0.73711 | 0.186312 | 0 | 0.071429 | 0 | 0 | 0.068429 | 0.015206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.071429 | 0 | 0.128571 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dacc0afe8e6fcaa1fdee48ff089bcf82ca383f60 | 1,926 | py | Python | code/eval_tsd.py | YJiangcm/COMP_5222_Project | 18afd92af6f38c9410d711fa9448b7a03507e0ac | [
"Apache-2.0"
] | null | null | null | code/eval_tsd.py | YJiangcm/COMP_5222_Project | 18afd92af6f38c9410d711fa9448b7a03507e0ac | [
"Apache-2.0"
] | null | null | null | code/eval_tsd.py | YJiangcm/COMP_5222_Project | 18afd92af6f38c9410d711fa9448b7a03507e0ac | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import sys
import argparse
def f1(predictions, gold):
    """
    F1 (a.k.a. DICE) operating on two lists of offsets (e.g., character).
    >>> assert f1([0, 1, 4, 5], [0, 1, 6]) == 0.5714285714285714
    :param predictions: a list of predicted offsets
    :param gold: a list of offsets serving as the ground truth
    :return: a score between 0 and 1
    """
    if len(gold) == 0:
        return 1. if len(predictions) == 0 else 0.
    if len(predictions) == 0:
        return 0.
    predictions_set = set(predictions)
    gold_set = set(gold)
    nom = 2 * len(predictions_set.intersection(gold_set))
    denom = len(predictions_set) + len(gold_set)
    return float(nom) / float(denom)
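
# Illustrative only (hypothetical offsets, not from any dataset): boundary
# behaviour of f1.
def _f1_sanity_check():
    assert f1([3, 4, 5], [3, 4, 5]) == 1.0  # perfect overlap
    assert f1([0, 1], [5, 6]) == 0.0        # disjoint spans
    assert f1([], []) == 1.0                # empty gold, empty prediction
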
def evaluate(pred_file, test_file):
    '''
    pred_file: path to the prediction file
    test_file: path to the test csv file
    '''
    test_df = pd.read_csv(test_file)
    # spans are stored as string literals like "[0, 1, 2]"; eval() parses
    # them (ast.literal_eval would be the safer choice for untrusted input).
    gold_spans = test_df.spans.apply(eval).to_list()
    pred_spans = [eval(line.strip()) for line in open(pred_file).readlines()]
    if len(gold_spans) != len(pred_spans):
        print('Error: the number of predictions does not match the number of test examples!')
        sys.exit(1)
    scores = []
    for pred, gold in zip(pred_spans, gold_spans):
        scores.append(f1(pred, gold))
    print('F1 score: ', np.mean(scores))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--prediction_file",
                        required=True,
                        help="path to the line-by-line file containing system predictions.")
    parser.add_argument("--test_file",
                        required=True,
                        help="path to the csv file with gold spans.")
    args = parser.parse_args()
    evaluate(args.prediction_file, args.test_file)
if __name__ == "__main__":
    main()
 | 28.746269 | 94 | 0.600727 | 260 | 1,926 | 4.307692 | 0.373077 | 0.035714 | 0.032143 | 0.030357 | 0.051786 | 0.051786 | 0.051786 | 0 | 0 | 0 | 0 | 0.028384 | 0.286604 | 1,926 | 67 | 95 | 28.746269 | 0.786754 | 0.179647 | 0 | 0.054054 | 0 | 0 | 0.14959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.108108 | 0 | 0.27027 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dad0c47ce0d0ea8d56dcd1c6ed410b390c57cb8f | 1,444 | py | Python | notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py | shitake/ml-sandbox | a23a312251d82b27582df0029c23ff4aa7792d4c | [
"MIT"
] | null | null | null | notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py | shitake/ml-sandbox | a23a312251d82b27582df0029c23ff4aa7792d4c | [
"MIT"
] | 1 | 2018-11-01T03:09:50.000Z | 2018-11-01T03:09:50.000Z | notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py | shitake/ml-sandbox | a23a312251d82b27582df0029c23ff4aa7792d4c | [
"MIT"
] | null | null | null | from net import Net
import os
import sys
import time
import torch.nn as nn
import torch.optim as optim

print(os.getcwd())
sys.path.append(os.getcwd())
import gc

# The original notebook imported this helper from the repository
# (ml-sandbox/src/utils/utils.py); the fallback below is an assumption so
# the script also runs standalone.
try:
    from src.utils.utils import display_formatted_time
except ImportError:
    def display_formatted_time(seconds):
        m, s = divmod(int(seconds), 60)
        h, m = divmod(m, 60)
        print('Elapsed time: {:d}:{:02d}:{:02d}'.format(h, m, s))

# Notebook re-run idiom: drop a stale class definition if one exists.
try:
    del Training
except NameError:
    pass
gc.collect()


class Training:
    def __init__(self, dev):
        self.device = dev

    def train(self, lr, momentum, epochs, train_loader, log_interval):
        # `epochs`, `train_loader`, and `log_interval` were free notebook
        # globals in the original; they are taken as parameters here.
        since = time.time()
        print(self.device)
        model = Net().to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(),
                              lr=lr,
                              momentum=momentum)
        for epoch in range(epochs):
            running_loss = 0.0
            for batch_idx, (inputs, labels) in enumerate(train_loader, 0):
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if batch_idx % log_interval == (log_interval - 1):
                    print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tRunningLoss: {:.3f}".format(
                        epoch, batch_idx * len(inputs), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item(), running_loss / log_interval
                    ))
                    running_loss = 0.0
        display_formatted_time(time.time() - since)
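
# Illustrative only (hypothetical loader and hyperparameters, not from the
# original notebook): how Training would be driven end to end.
def _run_training_example(train_loader):
    import torch
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Training(device).train(lr=0.001, momentum=0.9, epochs=2,
                           train_loader=train_loader, log_interval=100)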
| 27.245283 | 96 | 0.585873 | 171 | 1,444 | 4.812866 | 0.432749 | 0.060753 | 0.043742 | 0.031592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011707 | 0.290166 | 1,444 | 52 | 97 | 27.769231 | 0.79122 | 0.042244 | 0 | 0.054054 | 0 | 0 | 0.04924 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.162162 | 0 | 0.243243 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dad29b783a0cff6154b9ddd5bbb2a707070f8ace | 1,321 | py | Python | metricol/outputs/statsite.py | soutys/metricol | 56b96e80aab8a61727b48eb2f5f19e0694895b55 | [
"MIT"
] | null | null | null | metricol/outputs/statsite.py | soutys/metricol | 56b96e80aab8a61727b48eb2f5f19e0694895b55 | [
"MIT"
] | null | null | null | metricol/outputs/statsite.py | soutys/metricol | 56b96e80aab8a61727b48eb2f5f19e0694895b55 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''Statsite output plugin module
'''
from __future__ import (
    absolute_import,
    division,
    print_function,
    unicode_literals,
    with_statement,
)
import logging
from queue import Empty
from statsd.client import StatsClient
from metricol.inputs import MetricInput
from metricol.outputs import MetricOutput
LOG = logging.getLogger(__name__)
class Statsite(MetricOutput):
    '''Statsite pusher class
    '''

    options = ['host', 'port']

    def __init__(self, section, queue):
        super(Statsite, self).__init__(section, queue)
        self.client = None

    def prepare_things(self):
        super(Statsite, self).prepare_things()
        self.client = StatsClient(
            host=self.cfg['host'], port=int(self.cfg['port']), maxudpsize=1024)

    def do_things(self):
        while True:
            try:
                _key, _val, _type, _ = self.queue.get(block=False)
                if _type == MetricInput.METRIC_TYPE_GAUGE:
                    self.client.gauge(_key, _val)
                elif _type == MetricInput.METRIC_TYPE_COUNTER:
                    self.client.incr(_key, count=_val)
                elif _type == MetricInput.METRIC_TYPE_TIMER:
                    self.client.timing(_key, _val)
            except Empty:
                break
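
# Illustrative only (not part of the original plugin): do_things() drains
# 4-tuples from the queue; the fourth element is ignored, so None is fine.
def _enqueue_example(queue):
    queue.put(('cpu.load', 0.42, MetricInput.METRIC_TYPE_GAUGE, None))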
| 24.462963 | 79 | 0.613929 | 142 | 1,321 | 5.415493 | 0.5 | 0.06502 | 0.081925 | 0.097529 | 0.083225 | 0.083225 | 0 | 0 | 0 | 0 | 0 | 0.005297 | 0.28539 | 1,321 | 53 | 80 | 24.924528 | 0.809322 | 0.06056 | 0 | 0 | 0 | 0 | 0.013019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.205882 | 0 | 0.352941 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dad80e149a334d4d98779bd5f03ddaee09b35f0c | 5,300 | py | Python | quotes_generate_attribute.py | russDA/Quotes_Predictor | 6c22f141a468b304c1a5d76e43165d6f42d4e3cf | [
"MIT"
] | null | null | null | quotes_generate_attribute.py | russDA/Quotes_Predictor | 6c22f141a468b304c1a5d76e43165d6f42d4e3cf | [
"MIT"
] | null | null | null | quotes_generate_attribute.py | russDA/Quotes_Predictor | 6c22f141a468b304c1a5d76e43165d6f42d4e3cf | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
def url_list(base_url, no_of_pages):
    nos = np.linspace(1, no_of_pages, no_of_pages)
    pages = []
    for n in nos:
        page = base_url + str(int(n))
        pages.append(page)
    return pages
def quote_list(url):
    site = requests.get(url)
    soup = BeautifulSoup(site.text, features='lxml')
    quotes = []
    genres = []
    items = soup.find_all('div', {'class': 'quote mediumText'})
    for item in items:
        q = item.find('div', {'class': 'quoteText'}).text
        quote = str.split(q, '―')
        quotes.append(quote[0].strip())
        genre_box = item.find('div', {'class': 'greyText smallText left'}).find_all('a')
        genre = ''
        for g in genre_box:
            genre += (g.text + ',')
        genre = genre[:-1]
        genres.append(genre)
    df = pd.DataFrame({'Quote': quotes, 'Genre': genres})
    return df
def make_quote_df(base_url, no_of_pages):
    pages = url_list(base_url, no_of_pages)
    list_of_dfs = []
    counter = 1
    for page in pages:
        print(f'About to make dataframe: {counter}')
        counter += 1
        df = quote_list(page)
        list_of_dfs.append(df)
    final_df = pd.concat(list_of_dfs, ignore_index=True)
    return final_df
# =============================================================================
# Love category of quotes
# =============================================================================
romance_urls = 'https://www.goodreads.com/quotes/tag/romance?page='
romance_df = make_quote_df(romance_urls, 100)
love_urls = 'https://www.goodreads.com/quotes/tag/love?page='
love_df = make_quote_df(love_urls, 100)
# =============================================================================
# Wisdom category of quotes
# =============================================================================
wisdom_urls = 'https://www.goodreads.com/quotes/tag/wisdom?page='
wisdom_df = make_quote_df(wisdom_urls, 100)
truth_urls = 'https://www.goodreads.com/quotes/tag/truth?page='
truth_df = make_quote_df(truth_urls, 100)
# =============================================================================
# Religion category of quotes
# =============================================================================
god_urls = 'https://www.goodreads.com/quotes/tag/god?page='
god_df = make_quote_df(god_urls, 100)
faith_urls = 'https://www.goodreads.com/quotes/tag/faith?page='
faith_df = make_quote_df(faith_urls, 100)
# =============================================================================
# Witty and clever category of quotes
# =============================================================================
humor_urls = 'https://www.goodreads.com/quotes/tag/humor?page='
humor_df = make_quote_df(humor_urls, 100)
writing_urls = 'https://www.goodreads.com/quotes/tag/writing?page='
writing_df = make_quote_df(writing_urls, 100)
# =============================================================================
# Dark and contemplative category of quotes
# =============================================================================
death_urls = 'https://www.goodreads.com/quotes/tag/death?page='
death_df = make_quote_df(death_urls, 100)
time_urls = 'https://www.goodreads.com/quotes/tag/time?page='
time_df = make_quote_df(time_urls, 100)
# =============================================================================
# Intellectual category of quotes
# =============================================================================
knowledge_urls = 'https://www.goodreads.com/quotes/tag/knowledge?page='
knowledge_df = make_quote_df(knowledge_urls, 100)
science_urls = 'https://www.goodreads.com/quotes/tag/science?page='
science_df = make_quote_df(science_urls, 100)
# =============================================================================
# Giving each df the column category, with respective category
# =============================================================================
CATEGORY = 'Category'
romance_df[CATEGORY] = 'Romance'
love_df[CATEGORY] = 'Love'
wisdom_df[CATEGORY] = 'Wisdom'
truth_df[CATEGORY] = 'Truth'
god_df[CATEGORY] = 'God'
faith_df[CATEGORY] = 'Faith'
humor_df[CATEGORY] = 'Humor'
writing_df[CATEGORY] = 'Writing'
death_df[CATEGORY] = 'Death'
time_df[CATEGORY] = 'Time'
knowledge_df[CATEGORY] = 'Knowledge'
science_df[CATEGORY] = 'Science'
# =============================================================================
# Converting every df into a CSV for future use
# =============================================================================
romance_df.to_csv('Romance_Quotes.csv')
love_df.to_csv('Love_Quotes.csv')
wisdom_df.to_csv('Wisdom_Quotes.csv')
truth_df.to_csv('Truth_Quotes.csv')
god_df.to_csv('God_Quotes.csv')
faith_df.to_csv('Faith_Quotes.csv')
humor_df.to_csv('Humor_Quotes.csv')
writing_df.to_csv('Writing_Quotes.csv')
death_df.to_csv('Death_Quotes.csv')
time_df.to_csv('Time_Quotes.csv')
knowledge_df.to_csv('Knowledge_Quotes.csv')
science_df.to_csv('Science_Quotes.csv')
| 29.943503 | 88 | 0.508113 | 582 | 5,300 | 4.386598 | 0.189003 | 0.045828 | 0.056013 | 0.098707 | 0.179397 | 0.17313 | 0.17313 | 0 | 0 | 0 | 0 | 0.009404 | 0.157358 | 5,300 | 176 | 89 | 30.113636 | 0.562024 | 0.290943 | 0 | 0 | 0 | 0 | 0.276134 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.044944 | 0 | 0.11236 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dad8ed0484b8958b3b31e0149f5e4b0720f01f84 | 791 | py | Python | tap_tally/__init__.py | bkgreve/tap-tally | 207c64b821024003859713624034c531969f0456 | [
"MIT"
] | 1 | 2021-02-12T23:22:38.000Z | 2021-02-12T23:22:38.000Z | tap_tally/__init__.py | bkgreve/tap-tally | 207c64b821024003859713624034c531969f0456 | [
"MIT"
] | 3 | 2020-09-10T11:13:17.000Z | 2020-09-10T11:19:06.000Z | tap_tally/__init__.py | bkgreve/tap-tally | 207c64b821024003859713624034c531969f0456 | [
"MIT"
] | null | null | null | import os
import json
from flask import Flask
app = Flask(__name__)
@app.route('/api/entries')
def get_tap_entries():
    with open('/usr/src/app/data/beers.json') as f:
        beer_data = json.load(f)
    entries = []
    for entry in beer_data:
        if entry.get('visible', False):
            image_name = entry.get('image', False)
            if image_name:
                image_exists = os.path.exists(f"/usr/src/app/data/images/{image_name}")
                if not image_exists:
                    entry['image'] = None
            entries.append(entry)
    return {
        'entries': entries
    }


@app.route('/api/header-info')
def get_header_info():
    return {
        'headerInfo': {
            'breweryName': '',
            'kegeratorTemp': None
        }
    }
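
# Illustrative only: a minimal smoke test using Flask's built-in test client.
def _smoke_test():
    with app.test_client() as client:
        assert client.get('/api/header-info').status_code == 200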
| 22.6 | 87 | 0.549937 | 94 | 791 | 4.468085 | 0.446809 | 0.064286 | 0.052381 | 0.061905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.31732 | 791 | 34 | 88 | 23.264706 | 0.777778 | 0 | 0 | 0.071429 | 0 | 0 | 0.190898 | 0.082174 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0.035714 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dad97463770df090d4e73a80e6339dec1808753d | 4,133 | py | Python | DatabaseGeneration_database4_question5.py | wangpinggl/covidQA | f8b440ee27058cc1030af7cd57178ddf987462d0 | [
"MIT"
] | null | null | null | DatabaseGeneration_database4_question5.py | wangpinggl/covidQA | f8b440ee27058cc1030af7cd57178ddf987462d0 | [
"MIT"
] | null | null | null | DatabaseGeneration_database4_question5.py | wangpinggl/covidQA | f8b440ee27058cc1030af7cd57178ddf987462d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 05:31:09 2020
@author: Srikar Balusu
"""
import json
import pandas as pd
import numpy as np
import re
import random
import sqlite3
import datetime
import calendar
from dateutil.relativedelta import *
with open('lookup1.json') as json_file:
    data = json.load(json_file)
with open('uniquelookup.json') as json_file:
    data2 = json.load(json_file)
with open('state_dict.json') as json_file:
    state_dict = json.load(json_file)
conn = sqlite3.connect('testQ.db')
c = conn.cursor()
question_template = "What state will have the (Value Entity) total forecasted number of deaths (Time Entity)?"
question_template_id = 'db4q5'
output = {}
question_key = {}
entities = ['Value Entity', 'Time Entity']
time_values = ['in the next (x) days', 'in the next (x) weeks', 'in the next week', 'tomorrow', 'day after tomorrow']
count = 1
def queryEndDate(query, time_entity):
    today = datetime.date.today()
    output = time_entity
    if time_entity == 'in the next (x) days':
        num_day = random.randint(1, 20)
        future_date = today + datetime.timedelta(days=num_day)
        output = output.replace("(x)", str(num_day))
    elif time_entity == 'in the next (x) weeks':
        num_week = random.randint(2, 3)
        future_date = today + datetime.timedelta(days=num_week * 7)
        output = output.replace("(x)", str(num_week))
    elif time_entity == 'in the next week':
        future_date = today + datetime.timedelta(days=7)
    elif time_entity == 'tomorrow':
        future_date = today + datetime.timedelta(days=1)
    elif time_entity == 'day after tomorrow':
        future_date = today + datetime.timedelta(days=2)
    if future_date.weekday() == 5:
        query = query.replace("given date", str(future_date))
    elif future_date.weekday() == 6:
        query = query.replace("given date", str(future_date + datetime.timedelta(days=6)))
    else:
        query = query.replace("given date", str(future_date + datetime.timedelta(days=5 - future_date.weekday())))
    return query, output
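
# Illustrative only (the template string is hypothetical): queryEndDate pins
# the placeholder 'given date' to the Saturday ending the forecast week.
def _query_end_date_example():
    q, phrase = queryEndDate("... target_week_end_date = 'given date' ...",
                             'in the next week')
    assert 'given date' not in q
    assert phrase == 'in the next week'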
while count < 250:
    output[count] = []
    populated_entities = []
    time_entity = random.choice(time_values)
    val = random.choice(data['Value Entity'])
    if val.find("(x)") >= 0:
        order = random.randint(1, 5)
        val = val.replace("(x)", str(order))
        # NOTE: .replace() swaps every "th" in the phrase, not only the
        # ordinal suffix that follows the inserted number.
        if order == 2:
            val = val.replace("th", "nd")
        if order == 3:
            val = val.replace("th", "rd")
        if order == 1:
            val = val.replace("th", "st")
    else:
        order = 1
    if val.find("most") >= 0 or val.find("highest") >= 0 or val.find("Highest") >= 0:
        ascending = False
    else:
        ascending = True
    sql_template = "Select location_name, Max(point) from db4forecaststate WHERE target_week_end_date = 'given date' and location_name != 'National' group by location_name order by Max(point) asc/desc limit X,1"
    query = sql_template
    query, time_e = queryEndDate(query, time_entity)
    if ascending == False:
        query = query.replace("asc/desc", "desc")
        query = query.replace("X", str(order - 1))
    else:
        query = query.replace("asc/desc", "asc")
        query = query.replace("X", str(order - 1))
    real_question = question_template.replace("(Time Entity)", time_e)
    real_question = real_question.replace("(Value Entity)", val)
    populated_entities.append(val)
    populated_entities.append(time_e)
    c.execute(query)
    result = c.fetchall()
    if len(result) == 0 or result[0][0] == None:
        continue
    elif real_question in question_key.keys():
        continue
    else:
        question_key[real_question] = True
        output[count].append({'question_template_id' : question_template_id, 'question_template' : question_template,
                              'entities' : entities, 'question' : real_question,
                              'populated_entities': populated_entities, 'query_template' : sql_template, 'query' : query, 'database': 'database 4'})
        print(count)
        print(real_question)
        print(query)
        print(result)
        count = count + 1
 | 36.575221 | 212 | 0.640455 | 547 | 4,133 | 4.703839 | 0.270567 | 0.046638 | 0.057132 | 0.044695 | 0.293432 | 0.239798 | 0.147299 | 0.061796 | 0.046638 | 0.046638 | 0 | 0.017236 | 0.227922 | 4,133 | 113 | 213 | 36.575221 | 0.789094 | 0.01984 | 0 | 0.092784 | 0 | 0.010309 | 0.189911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010309 | false | 0 | 0.092784 | 0 | 0.113402 | 0.041237 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dada78f117f641ec85db243d1ba2fcd8f0d8b75a | 1,347 | py | Python | src/tutorials/hello_world_tutorial/functions/app.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | 3 | 2020-04-20T11:10:23.000Z | 2020-04-22T08:06:04.000Z | src/tutorials/hello_world_tutorial/functions/app.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | null | null | null | src/tutorials/hello_world_tutorial/functions/app.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
df = pd.read_csv(f'https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/'
f'raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')
def generate_simple_table(dataframe: pd.DataFrame, max_rows=10) -> html.Table:
    """
    Simple function to return a simple HTML table for a Dash dashboard.
    :param dataframe: A Pandas DataFrame to plot.
    :param max_rows: The maximum number of rows to plot: defaults to 10
    :return: A HTML table object.
    """
    return html.Table([
        html.Thead(
            html.Tr([html.Th(col) for col in dataframe.columns])
        ),
        html.Tbody([
            html.Tr([
                html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
            ]) for i in range(min(len(dataframe), max_rows))
        ])
    ])
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
    html.H4(children="US Agriculture Exports (2011)"),
    generate_simple_table(dataframe=df, max_rows=10)
])
if __name__ == '__main__':
    app.run_server(debug=True, host='127.0.0.1', port=8088)
| 31.325581 | 99 | 0.680772 | 180 | 1,347 | 4.933333 | 0.494444 | 0.031532 | 0.042793 | 0.063063 | 0.060811 | 0.060811 | 0 | 0 | 0 | 0 | 0 | 0.066852 | 0.200445 | 1,347 | 42 | 100 | 32.071429 | 0.75766 | 0.173719 | 0 | 0.12 | 0 | 0 | 0.223963 | 0.071889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dadc943c456eca12a123766407c6fe7d3b911d2b | 12,804 | py | Python | main_robust.py | npatel5/similarity-learning | 9052f3d966a2e14918fcaf9d3a4c93dc9143db71 | [
"MIT"
] | null | null | null | main_robust.py | npatel5/similarity-learning | 9052f3d966a2e14918fcaf9d3a4c93dc9143db71 | [
"MIT"
] | null | null | null | main_robust.py | npatel5/similarity-learning | 9052f3d966a2e14918fcaf9d3a4c93dc9143db71 | [
"MIT"
] | null | null | null | from __future__ import print_function
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import config as cf
import numpy as np
import torchvision
import torchvision.transforms as transforms
#import ipdb
import os
import sys
import time
import argparse
import datetime
import scipy.ndimage as ndimage
from networks import *
from torch.autograd import Variable
from itertools import starmap
import random
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning_rate')
parser.add_argument('--net_type', default='wide-resnet', type=str, help='model')
parser.add_argument('--depth', default=28, type=int, help='depth of model')
parser.add_argument('--widen_factor', default=10, type=int, help='width of model')
parser.add_argument('--dropout', default=0.3, type=float, help='dropout_rate')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset = [cifar10/cifar100]')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')
args = parser.parse_args()
# Hyper Parameter settings
sim_learning = False
#use_noise = True
use_cuda = torch.cuda.is_available()
best_acc = 0
#sig = 10
reg_strength = 1
start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, cf.batch_size, cf.optim_type
# Data Uplaod
print('\n[Phase 1] : Data Preparation')
torch.manual_seed(2809)
gaussian_transforms = [
    transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=0)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=1)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=2)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=5)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=10))
]
transform_train_noise = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
transform_train_clean = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])  # meanstd transformation
transform_test_noise = transforms.Compose([
    transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
transform_test = transforms.Compose([
    # transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
if (args.dataset == 'cifar10'):
    print("| Preparing CIFAR-10 dataset...")
    sys.stdout.write("| ")
    # Noise/clean dataset pairs feed the loaders below.
    trainset_noise = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train_noise)
    trainset_clean = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train_clean)
    testset_noise = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test_noise)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
    num_classes = 10
elif (args.dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset...")
    sys.stdout.write("| ")
    trainset_noise = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_noise)
    trainset_clean = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_clean)
    testset_noise = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test_noise)
    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)
    num_classes = 100
trainloader_noise = torch.utils.data.DataLoader(trainset_noise, batch_size=batch_size, shuffle=True, num_workers=2)
trainloader_clean = torch.utils.data.DataLoader(trainset_clean, batch_size=batch_size, shuffle=True, num_workers=2)
testloader_noise = torch.utils.data.DataLoader(testset_noise, batch_size=100, shuffle=False, num_workers=2)
testloader_clean = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
# Return network & file name
def getNetwork(args):
    if (args.net_type == 'lenet'):
        net = LeNet(num_classes)
        file_name = 'lenet'
    elif (args.net_type == 'vggnet'):
        net = VGG(args.depth, num_classes)
        file_name = 'vgg-'+str(args.depth)
    elif (args.net_type == 'resnet'):
        net = ResNet_2Read(args.depth, num_classes)
        file_name = 'resnet-'+str(args.depth)
    elif (args.net_type == 'wide-resnet'):
        net = Wide_ResNet(args.depth, args.widen_factor, args.dropout, num_classes)
        file_name = 'wide-resnet-'+str(args.depth)+'x'+str(args.widen_factor)
    else:
        print('Error : Network should be either [LeNet / VGGNet / ResNet / Wide_ResNet]')
        sys.exit(0)
    return net, file_name
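
# Illustrative only (not part of the original script): resolving a network
# and its checkpoint file-name stem from the parsed CLI arguments.
def _get_network_example():
    net, file_name = getNetwork(args)
    print('| Resolved %s -> checkpoint stem %s' % (args.net_type, file_name))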
if (sim_learning):
    checkpoint_gauss = torch.load("./checkpoint/cifar100/resnet-50_2readout_3.t7")
    robustNet = checkpoint_gauss['net']
    robustNet = torch.nn.DataParallel(robustNet, device_ids=range(torch.cuda.device_count()))
# Test only option
if (args.testOnly):
    print('\n[Test Phase] : Model setup')
    assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
    _, file_name = getNetwork(args)
    checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_readout_match.t7')
    net = checkpoint['net']
    if use_cuda:
        net.cuda()
        net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
        cudnn.benchmark = True
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader_noise):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs, compute_similarity=False)
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
    acc = 100.*correct/total
    #variance = batch_var.mean()
    print("| Test Result (Noise Readout)\tAcc@1: %.2f%%" %(acc))
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader_noise):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs, img_type="clean")
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
    acc = 100.*correct/total
    #variance = batch_var.mean()
    print("| Test Result (Clean Readout)\tAcc@1: %.2f%%" %(acc))
    # std = 0.
    # for images, _ in testloader:
    #     batch_samples = images.size(0)
    #     images = images.view(batch_samples, images.size(1), -1)
    #     std += images.std(2).sum(0)
    # std /= len(testloader.dataset)
    # print("| Standard Deviation of noise / Standard Deviation of Pixels: %.2f" %(sig/std))
    sys.exit(0)
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
    # Load checkpoint
    print('| Resuming from checkpoint...')
    assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
    _, file_name = getNetwork(args)
    checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_2readout_3.t7')
    net = checkpoint['net']
    best_acc = 100.0
    #start_epoch = checkpoint['epoch']
    start_epoch = 200
else:
    print('| Building net type [' + args.net_type + ']...')
    net, file_name = getNetwork(args)
    net.apply(conv_init)
if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
w_loss = nn.MSELoss()
# Similarity Loss Computation
# Training
similarities = {}
accs = []
def train(epoch):
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(net.module.linear_clean.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)
    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, ((inputs1, targets1), (inputs2, targets2)) in enumerate(zip(trainloader_noise, trainloader_clean)):
        if use_cuda:
            inputs1, targets1 = inputs1.cuda(), targets1.cuda()  # GPU settings
            inputs2, targets2 = inputs2.cuda(), targets2.cuda()
        optimizer.zero_grad()
        outputs_n = net(inputs1, img_type="noise", compute_similarity=False)
        l1 = criterion(outputs_n, targets1)
        l1.backward(retain_graph=True)
        outputs_c = net(inputs2, img_type="clean", compute_similarity=False)
        l2 = criterion(outputs_c, targets2)
        l2.backward(retain_graph=True)
        l3 = w_loss(outputs_n, outputs_c)
        l3.backward(retain_graph=True)
        optimizer.step()  # Optimizer update
        loss = l1 + l2 + l3
        train_loss += loss.item()
        _, predicted = torch.max(outputs_c.data, 1)
        total += targets2.size(0)
        correct += predicted.eq(targets2.data).cpu().sum()
        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\t Loss: %.4f Acc@1: %.3f%%'
                         %(epoch, num_epochs, batch_idx+1,
                           (len(trainset_noise)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush()
def test(epoch):
    global best_acc
    net.eval()
    test_loss = 0
    correct1 = 0
    total1 = 0
    correct2 = 0
    total2 = 0
    for batch_idx, (inputs1, targets1) in enumerate(testloader_noise):
        if use_cuda:
            inputs1, targets1 = inputs1.cuda(), targets1.cuda()
        outputs_n = net(inputs1, img_type="noise", compute_similarity=False)
        loss = criterion(outputs_n, targets1)
        test_loss += loss.item()
        _, predicted1 = torch.max(outputs_n.data, 1)
        total1 += targets1.size(0)
        correct1 += predicted1.eq(targets1.data).cpu().sum()
    acc = 100.*correct1/total1
    for batch_idx, (inputs2, targets2) in enumerate(testloader_noise):
        if use_cuda:
            inputs2, targets2 = inputs2.cuda(), targets2.cuda()
        outputs_c = net(inputs2, img_type="clean", compute_similarity=False)
        loss2 = criterion(outputs_c, targets2)
        _, predicted2 = torch.max(outputs_c.data, 1)
        total2 += targets2.size(0)
        correct2 += predicted2.eq(targets2.data).cpu().sum()
    acc2 = 100.*correct2/total2
    print("\n| Validation Epoch #%d\t\t\tLoss (Noise): %.4f Acc@1: %.2f%%" %(epoch, loss.item(), acc))
    print("\n| Validation Epoch #%d\t\t\tLoss (Clean): %.4f Acc@1: %.2f%%" %(epoch, loss2.item(), acc2))
    # Save checkpoint when best model
    if acc > best_acc:
        print('| Saving Best model...\t\t\tTop1 = %.2f%%' %(acc))
        state = {
            'net': net.module if use_cuda else net,
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        save_point = './checkpoint/'+args.dataset+os.sep
        if not os.path.isdir(save_point):
            os.mkdir(save_point)
        torch.save(state, save_point+file_name+'.t7')
        best_acc = acc
    accs.append(acc)
    #net.train()
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))
print('| Optimizer = ' + str(optim_type))
elapsed_time = 0
for epoch in range(start_epoch, start_epoch+num_epochs):
    start_time = time.time()
    train(epoch)
    test(epoch)
    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time)))
np.save('epoch_accs', accs)
print('\n[Phase 4] : Testing model')
print('* Test results : Acc@1 = %.2f%%' %(best_acc))
print('| Saving model...')
state = {
    'net': net.module if use_cuda else net,
    #'acc': acc,
    #'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
    os.mkdir('checkpoint')
save_point = './checkpoint/'+args.dataset+os.sep
if not os.path.isdir(save_point):
    os.mkdir(save_point)
torch.save(state, save_point+file_name+'robust_readout_matching_basicblock.t7')
| 38.107143 | 133 | 0.670181 | 1,664 | 12,804 | 5.016827 | 0.184495 | 0.018448 | 0.009703 | 0.013776 | 0.508984 | 0.453402 | 0.418304 | 0.389075 | 0.373742 | 0.322832 | 0 | 0.0245 | 0.187129 | 12,804 | 335 | 134 | 38.220896 | 0.777575 | 0.083958 | 0 | 0.351145 | 0 | 0.01145 | 0.133219 | 0.007012 | 0 | 0 | 0 | 0 | 0.007634 | 1 | 0.01145 | false | 0 | 0.080153 | 0 | 0.09542 | 0.087786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dadcb95e7278009dc692c238d4ccf77a3fb2e55e | 1,843 | py | Python | tools/list_exp.py | AlexMaximenko/sew | f26b45f1374f3c3ce477abcc07a2d0017859c026 | [
"MIT"
] | 49 | 2021-09-15T06:10:07.000Z | 2022-03-28T17:37:28.000Z | tools/list_exp.py | AlexMaximenko/sew | f26b45f1374f3c3ce477abcc07a2d0017859c026 | [
"MIT"
] | 3 | 2021-09-26T03:45:04.000Z | 2022-02-21T09:36:38.000Z | tools/list_exp.py | AlexMaximenko/sew | f26b45f1374f3c3ce477abcc07a2d0017859c026 | [
"MIT"
] | 9 | 2021-09-15T07:35:09.000Z | 2022-02-21T00:10:20.000Z | # Copyright (c) ASAPP Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fire
import os
import sys
import time
from tqdm.auto import tqdm
import re
import json
from colorama import Fore
def get_num_updates(folder):
num_updates = 0
try:
with open(os.path.join(folder, 'hydra_train.log')) as f:
lines = [line.strip() for line in f][-50:]
for line in lines:
try:
start = line.find('num_updates": "')
if start >= 0:
start += len('num_updates": "')
end = line[start:].find('",') + start
if end >= start:
num_updates = int(line[start:end])
except:
continue
return num_updates
except:
return 0
def print_folder(folder, total_updates):
num_updates = get_num_updates(folder)
if num_updates >= total_updates:
color = Fore.GREEN
else:
color = Fore.RED
print(f"{folder}\t{color}{num_updates}{Fore.RESET}")
def main(root="exp-bu", total_updates=100_000):
exp_dirs = set()
match_names = set()
for dirname, dirs, files in tqdm(os.walk(root)):
if 'checkpoints' in dirs:
exp_dirs.add(dirname)
exp_dirs = sorted(exp_dirs)
evaled = []
not_evaled = []
for x in exp_dirs:
if os.path.exists(f"{x}/eval.log"):
evaled.append(x)
else:
not_evaled.append(x)
print("Evaluated:")
for folder in evaled:
print_folder(folder, total_updates)
print("\nNot evaluated:")
for folder in not_evaled:
print_folder(folder, total_updates)
if __name__ == "__main__":
fire.Fire(main)
| 27.102941 | 65 | 0.571351 | 238 | 1,843 | 4.260504 | 0.394958 | 0.098619 | 0.050296 | 0.065089 | 0.097633 | 0.069034 | 0 | 0 | 0 | 0 | 0 | 0.008821 | 0.323386 | 1,843 | 67 | 66 | 27.507463 | 0.80433 | 0.078133 | 0 | 0.145455 | 0 | 0 | 0.089728 | 0.024793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.145455 | 0 | 0.236364 | 0.109091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dade5fb81eac6f03d7cd440bbd9b3041f81946b2 | 554 | py | Python | worker_zeromq/addresses.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | worker_zeromq/addresses.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | worker_zeromq/addresses.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 09:11:06 2016
@author: eikes
"""
ADD_VENTI_WORKER = 'tcp://*:4555'
ADD_VENTI_LH_WORKER = 'tcp://localhost:4555'
ADD_SINK_RECEIVE = 'tcp://*:4556'
ADD_SINK_LH_RECEIVE = 'tcp://localhost:4556'
ADD_SINK_PUBLISH = 'tcp://*:4557'
ADD_SINK_LH_PUBLISH = 'tcp://localhost:4557'
ADD_VENTI_RECEIVE = 'tcp://*:4558'
ADD_VENTI_LH_RECEIVE = 'tcp://localhost:4558'
ADD_LOG_CONTROLLER = 'tcp://*:4560'
ADD_LOG_LH_CONTROLLER = 'tcp://localhost:4560'
ADD_LOGGING = 'tcp://*:4559'
ADD_LH_LOGGING = 'tcp://localhost:4559'
| 29.157895 | 46 | 0.707581 | 84 | 554 | 4.333333 | 0.380952 | 0.197802 | 0.054945 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121756 | 0.095668 | 554 | 18 | 47 | 30.777778 | 0.60479 | 0.133574 | 0 | 0 | 0 | 0 | 0.40678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dade9648a2fc13cba868380ebc0181b3e2615f68 | 593 | py | Python | ancli/__main__.py | devforfu/ancli | 44ea37e756fb12221bfafc1ecae3b8ef46d84cce | [
"MIT"
] | 1 | 2020-10-24T16:08:14.000Z | 2020-10-24T16:08:14.000Z | ancli/__main__.py | devforfu/ancli | 44ea37e756fb12221bfafc1ecae3b8ef46d84cce | [
"MIT"
] | null | null | null | ancli/__main__.py | devforfu/ancli | 44ea37e756fb12221bfafc1ecae3b8ef46d84cce | [
"MIT"
] | null | null | null | import importlib
import sys
from ancli import make_cli
try:
    entry_point = sys.argv[1]
except IndexError:
    print('Error: no entry point name provided!')
    sys.exit(1)

try:
    module_path, function_name = entry_point.split(':')
except ValueError:
    print('Error: entry point name should have format a.b.c:function')
    sys.exit(1)

mod = importlib.import_module(module_path)

try:
    func = getattr(mod, function_name)
except AttributeError:
    print(f'Error: function \'{function_name}\' is not found')
    sys.exit(1)
sys.argv = [sys.argv[0]] + sys.argv[2:]
make_cli(func)
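
# Illustrative invocation (hypothetical module and function names):
#
#   python -m ancli my_package.tools:train --epochs 10
#
# The entry point is resolved to a module and attribute, stripped from
# sys.argv, and make_cli builds a command-line interface for the function.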
| 20.448276 | 70 | 0.701518 | 89 | 593 | 4.561798 | 0.460674 | 0.098522 | 0.059113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012245 | 0.173693 | 593 | 28 | 71 | 21.178571 | 0.816327 | 0 | 0 | 0.285714 | 0 | 0 | 0.209106 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dadeeda64957f5eeac274eff704cc39ef9a9cf4d | 31,856 | py | Python | venv/Lib/site-packages/pyramid/traversal.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyramid/traversal.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyramid/traversal.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | from functools import lru_cache
from urllib.parse import unquote_to_bytes
from zope.interface import implementer
from zope.interface.interfaces import IInterface
from pyramid.encode import url_quote
from pyramid.exceptions import URLDecodeError
from pyramid.interfaces import (
    VH_ROOT_KEY,
    IRequestFactory,
    IResourceURL,
    ITraverser,
)
from pyramid.location import lineage
from pyramid.threadlocal import get_current_registry
from pyramid.util import ascii_, is_nonstr_iter, text_
PATH_SEGMENT_SAFE = "~!$&'()*+,;=:@" # from webob
PATH_SAFE = PATH_SEGMENT_SAFE + "/"
def find_root(resource):
    """Find the root node in the resource tree to which ``resource``
    belongs. Note that ``resource`` should be :term:`location`-aware.

    Note that the root resource is available in the request object by
    accessing the ``request.root`` attribute.

    """
    for location in lineage(resource):
        if location.__parent__ is None:
            resource = location
            break
    return resource
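
# Hedged usage sketch (not part of the original module): find_root walks the
# lineage until __parent__ is None, here with a minimal location-aware class.
def _find_root_example():
    class _Resource:
        def __init__(self, name, parent):
            self.__name__ = name
            self.__parent__ = parent

    root = _Resource(None, None)
    child = _Resource('child', root)
    assert find_root(child) is root
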
def find_resource(resource, path):
    """Given a resource object and a string or tuple representing a path
    (such as the return value of :func:`pyramid.traversal.resource_path` or
    :func:`pyramid.traversal.resource_path_tuple`), return a resource in this
    application's resource tree at the specified path. The resource passed
    in *must* be :term:`location`-aware. If the path cannot be resolved (if
    the respective node in the resource tree does not exist), a
    :exc:`KeyError` will be raised.

    This function is the logical inverse of
    :func:`pyramid.traversal.resource_path` and
    :func:`pyramid.traversal.resource_path_tuple`; it can resolve any
    path string or tuple generated by either of those functions.

    Rules for passing a *string* as the ``path`` argument: if the
    first character in the path string is the ``/``
    character, the path is considered absolute and the resource tree
    traversal will start at the root resource. If the first character
    of the path string is *not* the ``/`` character, the path is
    considered relative and resource tree traversal will begin at the resource
    object supplied to the function as the ``resource`` argument. If an
    empty string is passed as ``path``, the ``resource`` passed in will
    be returned. Resource path strings must be escaped in the following
    manner: each path segment must be UTF-8 encoded and escaped via Python's
    :mod:`urllib.quote`. For example, ``/path/to%20the/La%20Pe%C3%B1a``
    (absolute) or ``to%20the/La%20Pe%C3%B1a`` (relative). The
    :func:`pyramid.traversal.resource_path` function generates strings
    which follow these rules (albeit only absolute ones). The text may not
    have any non-ASCII characters in it.

    Rules for passing a *tuple* as the ``path`` argument: if the first
    element in the path tuple is the empty string (for example ``('',
    'a', 'b', 'c')``, the path is considered absolute and the resource tree
    traversal will start at the resource tree root object. If the first
    element in the path tuple is not the empty string (for example
    ``('a', 'b', 'c')``), the path is considered relative and resource tree
    traversal will begin at the resource object supplied to the function
    as the ``resource`` argument. If an empty sequence is passed as
    ``path``, the ``resource`` passed in itself will be returned. No
    URL-quoting of individual path segments within the tuple is required
    (each segment may be any string representing a resource name). Resource
    path tuples generated by :func:`pyramid.traversal.resource_path_tuple` can
    always be resolved by ``find_resource``.

    """
    if isinstance(path, str):
        path = ascii_(path)
    D = traverse(resource, path)
    view_name = D['view_name']
    context = D['context']
    if view_name:
        raise KeyError('%r has no subelement %s' % (context, view_name))
    return context

find_model = find_resource # b/w compat (forever)
def find_interface(resource, class_or_interface):
    """
    Return the first resource found in the :term:`lineage` of ``resource``
    which, a) if ``class_or_interface`` is a Python class object, is an
    instance of the class or any subclass of that class or b) if
    ``class_or_interface`` is a :term:`interface`, provides the specified
    interface. Return ``None`` if no resource providing ``interface_or_class``
    can be found in the lineage. The ``resource`` passed in *must* be
    :term:`location`-aware.
    """
    if IInterface.providedBy(class_or_interface):
        test = class_or_interface.providedBy
    else:
        test = lambda arg: isinstance(arg, class_or_interface)
    for location in lineage(resource):
        if test(location):
            return location
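
# Hedged usage sketch: find_interface accepts plain classes as well as
# interfaces; here it locates the nearest _Folder in the lineage.
def _find_interface_example():
    class _Folder:
        __name__ = None
        __parent__ = None

    class _Doc:
        def __init__(self, parent):
            self.__name__ = 'doc'
            self.__parent__ = parent

    root = _Folder()
    doc = _Doc(root)
    assert find_interface(doc, _Folder) is root
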
def resource_path(resource, *elements):
    """Return a string object representing the absolute physical path of the
    resource object based on its position in the resource tree, e.g
    ``/foo/bar``. Any positional arguments passed in as ``elements`` will be
    appended as path segments to the end of the resource path. For instance,
    if the resource's path is ``/foo/bar`` and ``elements`` equals ``('a',
    'b')``, the returned string will be ``/foo/bar/a/b``. The first
    character in the string will always be the ``/`` character (a leading
    ``/`` character in a path string represents that the path is absolute).

    Resource path strings returned will be escaped in the following
    manner: each path segment will be encoded as UTF-8 and escaped via
    Python's :mod:`urllib.quote`.
    For example, ``/path/to%20the/La%20Pe%C3%B1a``.

    This function is a logical inverse of
    :mod:`pyramid.traversal.find_resource`: it can be used to generate
    path references that can later be resolved via that function.

    The ``resource`` passed in *must* be :term:`location`-aware.

    .. note::

       Each segment in the path string returned will use the ``__name__``
       attribute of the resource it represents within the resource tree. Each
       of these segments *should* be a string (as per the
       contract of :term:`location`-awareness). However, no conversion or
       safety checking of resource names is performed. For instance, if one of
       the resources in your tree has a ``__name__`` which (by error) is a
       dictionary, the :func:`pyramid.traversal.resource_path` function will
       attempt to append it to a string and it will cause a
       :exc:`pyramid.exceptions.URLDecodeError`.

    .. note::

       The :term:`root` resource *must* have a ``__name__`` attribute with a
       value of either ``None`` or the empty string for paths to be generated
       properly. If the root resource has a non-null ``__name__`` attribute,
       its name will be prepended to the generated path rather than a single
       leading '/' character.

    """
    # joining strings is a bit expensive so we delegate to a function
    # which caches the joined result for us
    return _join_path_tuple(resource_path_tuple(resource, *elements))

model_path = resource_path # b/w compat (forever)
def traverse(resource, path):
    """Given a resource object as ``resource`` and a string or tuple
    representing a path as ``path`` (such as the return value of
    :func:`pyramid.traversal.resource_path` or
    :func:`pyramid.traversal.resource_path_tuple` or the value of
    ``request.environ['PATH_INFO']``), return a dictionary with the
    keys ``context``, ``root``, ``view_name``, ``subpath``,
    ``traversed``, ``virtual_root``, and ``virtual_root_path``.

    A definition of each value in the returned dictionary:

    - ``context``: The :term:`context` (a :term:`resource` object) found
      via traversal or URL dispatch. If the ``path`` passed in is the
      empty string, the value of the ``resource`` argument passed to this
      function is returned.

    - ``root``: The resource object at which :term:`traversal` begins.
      If the ``resource`` passed in was found via URL dispatch or if the
      ``path`` passed in was relative (non-absolute), the value of the
      ``resource`` argument passed to this function is returned.

    - ``view_name``: The :term:`view name` found during
      :term:`traversal` or :term:`URL dispatch`; if the ``resource`` was
      found via traversal, this is usually a representation of the
      path segment which directly follows the path to the ``context``
      in the ``path``. The ``view_name`` will be a string. The
      ``view_name`` will be the empty string if
      there is no element which follows the ``context`` path. An
      example: if the path passed is ``/foo/bar``, and a resource
      object is found at ``/foo`` (but not at ``/foo/bar``), the 'view
      name' will be ``'bar'``. If the ``resource`` was found via
      URL dispatch, the ``view_name`` will be the empty string unless
      the ``traverse`` predicate was specified or the ``*traverse`` route
      pattern was used, at which point normal traversal rules dictate the
      result.

    - ``subpath``: For a ``resource`` found via :term:`traversal`, this
      is a sequence of path segments found in the ``path`` that follow
      the ``view_name`` (if any). Each of these items is a string.
      If no path segments follow the ``view_name``, the
      subpath will be the empty sequence. An example: if the path
      passed is ``/foo/bar/baz/buz``, and a resource object is found at
      ``/foo`` (but not ``/foo/bar``), the 'view name' will be
      ``'bar'`` and the :term:`subpath` will be ``['baz', 'buz']``.
      For a ``resource`` found via URL dispatch, the subpath will be a
      sequence of values discerned from ``*subpath`` in the route
      pattern matched or the empty sequence.

    - ``traversed``: The sequence of path elements traversed from the
      root to find the ``context`` object during :term:`traversal`.
      Each of these items is a string. If no path segments
      were traversed to find the ``context`` object (e.g. if the
      ``path`` provided is the empty string), the ``traversed`` value
      will be the empty sequence. If the ``resource`` is a resource found
      via :term:`URL dispatch`, traversed will be None.

    - ``virtual_root``: A resource object representing the 'virtual' root
      of the resource tree being traversed during :term:`traversal`.
      See :ref:`vhosting_chapter` for a definition of the virtual root
      object. If no virtual hosting is in effect, and the ``path``
      passed in was absolute, the ``virtual_root`` will be the
      *physical* root resource object (the object at which :term:`traversal`
      begins). If the ``resource`` passed in was found via :term:`URL
      dispatch` or if the ``path`` passed in was relative, the
      ``virtual_root`` will always equal the ``root`` object (the
      resource passed in).

    - ``virtual_root_path`` -- If :term:`traversal` was used to find
      the ``resource``, this will be the sequence of path elements
      traversed to find the ``virtual_root`` resource. Each of these
      items is a string. If no path segments were traversed
      to find the ``virtual_root`` resource (e.g. if virtual hosting is
      not in effect), the ``traversed`` value will be the empty list.
      If URL dispatch was used to find the ``resource``, this will be
      ``None``.

    If the path cannot be resolved, a :exc:`KeyError` will be raised.

    Rules for passing a *string* as the ``path`` argument: if the
    first character in the path string is the ``/``
    character, the path is considered absolute and the resource tree
    traversal will start at the root resource. If the first character
    of the path string is *not* the ``/`` character, the path is
    considered relative and resource tree traversal will begin at the resource
    object supplied to the function as the ``resource`` argument. If an
    empty string is passed as ``path``, the ``resource`` passed in will
    be returned. Resource path strings must be escaped in the following
    manner: each path segment must be encoded as UTF-8 and escaped via
    Python's :mod:`urllib.quote`. For example,
    ``/path/to%20the/La%20Pe%C3%B1a`` (absolute) or
    ``to%20the/La%20Pe%C3%B1a`` (relative). The
    :func:`pyramid.traversal.resource_path` function generates strings
    which follow these rules (albeit only absolute ones).

    Rules for passing a *tuple* as the ``path`` argument: if the first
    element in the path tuple is the empty string (for example ``('',
    'a', 'b', 'c')``, the path is considered absolute and the resource tree
    traversal will start at the resource tree root object. If the first
    element in the path tuple is not the empty string (for example
    ``('a', 'b', 'c')``), the path is considered relative and resource tree
    traversal will begin at the resource object supplied to the function
    as the ``resource`` argument. If an empty sequence is passed as
    ``path``, the ``resource`` passed in itself will be returned. No
    URL-quoting or UTF-8-encoding of individual path segments within
    the tuple is required (each segment may be any string representing
    a resource name).

    Explanation of the decoding of ``path`` segment values during traversal:
    Each segment is URL-unquoted, and UTF-8 decoded. Each segment is assumed
    to be encoded using the UTF-8 encoding (or a subset, such as ASCII); a
    :exc:`pyramid.exceptions.URLDecodeError` is raised if a segment
    cannot be decoded. If a segment name is empty or if it is ``.``,
    it is ignored. If a segment name is ``..``, the previous segment
    is deleted, and the ``..`` is ignored. As a result of this
    process, the return values ``view_name``, each element in the
    ``subpath``, each element in ``traversed``, and each element in
    the ``virtual_root_path`` will be decoded strings.

    """
    if is_nonstr_iter(path):
        # the traverser factory expects PATH_INFO to be a string and it
        # expects path segments to be utf-8 and
        # urlencoded (it's the same traverser which accepts PATH_INFO
        # from user agents; user agents always send strings).
        if path:
            path = _join_path_tuple(tuple(path))
        else:
            path = ''

    # The user is supposed to pass us a string object, never Unicode. In
    # practice, however, users indeed pass Unicode to this API. If they do
    # pass a Unicode object, its data *must* be entirely encodeable to ASCII,
    # so we encode it here as a convenience to the user and to prevent
    # second-order failures from cropping up (all failures will occur at this
    # step rather than later down the line as the result of calling
    # ``traversal_path``).
    path = ascii_(path)

    if path and path[0] == '/':
        resource = find_root(resource)

    reg = get_current_registry()

    request_factory = reg.queryUtility(IRequestFactory)
    if request_factory is None:
        from pyramid.request import Request  # avoid circdep

        request_factory = Request

    request = request_factory.blank(path)
    request.registry = reg
    traverser = reg.queryAdapter(resource, ITraverser)

    if traverser is None:
        traverser = ResourceTreeTraverser(resource)

    return traverser(request)
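
# Hedged usage sketch (assumes a registry, e.g. via pyramid.testing.setUp()):
# traverse() consumes __getitem__ lookups and reports the leftover segment
# as the view name.
def _traverse_example():
    from pyramid import testing

    testing.setUp()
    try:
        class _Resource(dict):
            def __init__(self, name=None, parent=None):
                self.__name__ = name
                self.__parent__ = parent

        root = _Resource()
        root['a'] = _Resource('a', root)
        info = traverse(root, '/a/view')
        assert info['context'] is root['a']
        assert info['view_name'] == 'view'
    finally:
        testing.tearDown()
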
def resource_path_tuple(resource, *elements):
    """
    Return a tuple representing the absolute physical path of the
    ``resource`` object based on its position in a resource tree, e.g
    ``('', 'foo', 'bar')``. Any positional arguments passed in as
    ``elements`` will be appended as elements in the tuple
    representing the resource path. For instance, if the resource's
    path is ``('', 'foo', 'bar')`` and elements equals ``('a', 'b')``,
    the returned tuple will be ``('', 'foo', 'bar', 'a', 'b')``. The
    first element of this tuple will always be the empty string (a
    leading empty string element in a path tuple represents that the
    path is absolute).

    This function is a logical inverse of
    :func:`pyramid.traversal.find_resource`: it can be used to
    generate path references that can later be resolved by that function.

    The ``resource`` passed in *must* be :term:`location`-aware.

    .. note::

       Each segment in the path tuple returned will equal the ``__name__``
       attribute of the resource it represents within the resource tree. Each
       of these segments *should* be a string (as per the
       contract of :term:`location`-awareness). However, no conversion or
       safety checking of resource names is performed. For instance, if one of
       the resources in your tree has a ``__name__`` which (by error) is a
       dictionary, that dictionary will be placed in the path tuple; no warning
       or error will be given.

    .. note::

       The :term:`root` resource *must* have a ``__name__`` attribute with a
       value of either ``None`` or the empty string for path tuples to be
       generated properly. If the root resource has a non-null ``__name__``
       attribute, its name will be the first element in the generated path
       tuple rather than the empty string.

    """
    return tuple(_resource_path_list(resource, *elements))
model_path_tuple = resource_path_tuple # b/w compat (forever)
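
# Hedged usage sketch: an absolute path tuple always begins with the empty
# string, and resource_path renders the quoted string form.
def _resource_path_tuple_example():
    class _Resource:
        def __init__(self, name, parent):
            self.__name__ = name
            self.__parent__ = parent

    root = _Resource(None, None)
    a = _Resource('a', root)
    b = _Resource('b', a)
    assert resource_path_tuple(b) == ('', 'a', 'b')
    assert resource_path(b) == '/a/b'
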
def _resource_path_list(resource, *elements):
    """Implementation detail shared by resource_path and
    resource_path_tuple"""
    path = [loc.__name__ or '' for loc in lineage(resource)]
    path.reverse()
    path.extend(elements)
    return path
_model_path_list = _resource_path_list # b/w compat, not an API
def virtual_root(resource, request):
    """
    Provided any :term:`resource` and a :term:`request` object, return
    the resource object representing the :term:`virtual root` of the
    current :term:`request`. Using a virtual root in a
    :term:`traversal` -based :app:`Pyramid` application permits
    rooting. For example, the resource at the traversal path ``/cms`` will
    be found at ``http://example.com/`` instead of rooting it at
    ``http://example.com/cms/``.

    If the ``resource`` passed in is a context obtained via
    :term:`traversal`, and if the ``HTTP_X_VHM_ROOT`` key is in the
    WSGI environment, the value of this key will be treated as a
    'virtual root path': the :func:`pyramid.traversal.find_resource`
    API will be used to find the virtual root resource using this path;
    if the resource is found, it will be returned. If the
    ``HTTP_X_VHM_ROOT`` key is not present in the WSGI environment,
    the physical :term:`root` of the resource tree will be returned instead.

    Virtual roots are not useful at all in applications that use
    :term:`URL dispatch`. Contexts obtained via URL dispatch don't
    really support being virtually rooted (each URL dispatch context
    is both its own physical and virtual root). However if this API
    is called with a ``resource`` argument which is a context obtained
    via URL dispatch, the resource passed in will be returned
    unconditionally.

    """
    try:
        reg = request.registry
    except AttributeError:
        reg = get_current_registry()
    url_adapter = reg.queryMultiAdapter((resource, request), IResourceURL)
    if url_adapter is None:
        url_adapter = ResourceURL(resource, request)
    vpath, rpath = url_adapter.virtual_path, url_adapter.physical_path
    if rpath != vpath and rpath.endswith(vpath):
        vroot_path = rpath[: -len(vpath)]
        return find_resource(resource, vroot_path)
    try:
        return request.root
    except AttributeError:
        return find_root(resource)
def traversal_path(path):
    """Variant of :func:`pyramid.traversal.traversal_path_info` suitable for
    decoding paths that are URL-encoded.

    If this function is passed a string, it *must* be directly encodeable to
    ASCII. For example, '/foo' will work but '/<unprintable unicode>' (a
    string object with characters that cannot be encoded to ASCII) will
    not. A :exc:`UnicodeEncodeError` will be raised if the string cannot be
    encoded directly to ASCII.

    """
    if isinstance(path, str):
        # must not possess characters outside ascii
        path = path.encode('ascii')
    # we unquote this path exactly like a PEP 3333 server would
    path = unquote_bytes_to_wsgi(path)  # result will be a native string
    return traversal_path_info(path)  # result will be a tuple of unicode
@lru_cache(1000)
def traversal_path_info(path):
"""Given``path``, return a tuple representing that path which can be
used to traverse a resource tree. ``path`` is assumed to be an
already-URL-decoded ``str`` type as if it had come to us from an upstream
WSGI server as the ``PATH_INFO`` environ variable.
The ``path`` is first decoded from its WSGI representation to text.
Per the :pep:`3333` spec, ``path`` is first encoded to bytes using the
Latin-1 encoding; the resulting set of bytes is subsequently decoded to
    text using the UTF-8 encoding; a :exc:`pyramid.exceptions.URLDecodeError` is
raised if the URL cannot be decoded.
The ``path`` is split on slashes, creating a list of segments. If a
segment name is empty or if it is ``.``, it is ignored. If a segment
name is ``..``, the previous segment is deleted, and the ``..`` is
ignored.
Examples:
``/``
()
``/foo/bar/baz``
('foo', 'bar', 'baz')
``foo/bar/baz``
('foo', 'bar', 'baz')
``/foo/bar/baz/``
('foo', 'bar', 'baz')
``/foo//bar//baz/``
('foo', 'bar', 'baz')
``/foo/bar/baz/..``
('foo', 'bar')
``/my%20archives/hello``
('my archives', 'hello')
``/archives/La%20Pe%C3%B1a``
('archives', '<unprintable unicode>')
.. note::
This function does not generate the same type of tuples that
:func:`pyramid.traversal.resource_path_tuple` does. In particular, the
leading empty string is not present in the tuple it returns, unlike
tuples returned by :func:`pyramid.traversal.resource_path_tuple`. As a
    result, tuples generated by ``traversal_path`` are not resolvable by
the :func:`pyramid.traversal.find_resource` API. ``traversal_path`` is
a function mostly used by the internals of :app:`Pyramid` and by people
writing their own traversal machinery, as opposed to users writing
applications in :app:`Pyramid`.
"""
try:
path = decode_path_info(path) # result will be Unicode
except UnicodeDecodeError as e:
raise URLDecodeError(e.encoding, e.object, e.start, e.end, e.reason)
return split_path_info(path) # result will be tuple of Unicode
@lru_cache(1000)
def split_path_info(path):
# suitable for splitting an already-unquoted-already-decoded (unicode)
# path value
path = path.strip('/')
clean = []
for segment in path.split('/'):
if not segment or segment == '.':
continue
elif segment == '..':
if clean:
del clean[-1]
else:
clean.append(segment)
return tuple(clean)
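# Editor's illustrative sketch (not part of Pyramid): the segment
# normalization rules of ``split_path_info`` in action.
def _example_split_path_info():  # pragma: no cover
    assert split_path_info('/foo//bar/./baz/..') == ('foo', 'bar')
    assert split_path_info('/../..') == ()  # '..' never escapes the root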
# see PEP 3333 for why we encode to latin-1 then decode to utf-8
def decode_path_info(path):
return path.encode('latin-1').decode('utf-8')
# see PEP 3333 for why we decode the path to latin-1
def unquote_bytes_to_wsgi(bytestring):
return unquote_to_bytes(bytestring).decode('latin-1')
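# Editor's illustrative sketch (not part of Pyramid): the PEP 3333 dance.
# A URL-quoted UTF-8 path is unquoted to bytes, surfaced as a latin-1
# "mojibake" native string (as a WSGI server would hand it to us), then
# recovered as text by ``decode_path_info``.
def _example_pep3333_decoding():  # pragma: no cover
    native = unquote_bytes_to_wsgi(b'/La%20Pe%C3%B1a')
    assert native == '/La Pe\xc3\xb1a'  # latin-1 view of the UTF-8 bytes
    assert decode_path_info(native) == '/La Pe\xf1a'  # i.e. '/La Pena' with n-tilde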
_segment_cache = {}
def quote_path_segment(segment, safe=PATH_SEGMENT_SAFE):
"""
Return a quoted representation of a 'path segment' (such as
the string ``__name__`` attribute of a resource) as a string. If the
``segment`` passed in is a bytes object, it is decoded as a UTF-8 string.
    The result is then URL-quoted using Python's ``urllib.parse.quote``.
If the segment passed in is not bytes nor a string, an error will be
raised. The return value of ``quote_path_segment`` is always a string.
You may pass a string of characters that need not be encoded as
the ``safe`` argument to this function. This corresponds to the
    ``safe`` argument to :func:`urllib.parse.quote`.
.. note::
The return value for each segment passed to this
function is cached in a module-scope dictionary for
speed: the cached version is returned when possible
rather than recomputing the quoted version. No cache
emptying is ever done for the lifetime of an
application, however. If you pass arbitrary
user-supplied strings to this function (as opposed to
some bounded set of values from a 'working set' known to
your application), it may become a memory leak.
"""
# The bit of this code that deals with ``_segment_cache`` is an
# optimization: we cache all the computation of URL path segments
# in this module-scope dictionary with the original string as the
# key, so we can look it up later without needing to reencode
# or re-url-quote it
try:
if segment.__class__ not in (str, bytes):
segment = str(segment)
return _segment_cache[(segment, safe)]
except KeyError:
result = url_quote(text_(segment, 'utf-8'), safe)
# we don't need a lock to mutate _segment_cache, as the below
# will generate exactly one Python bytecode (STORE_SUBSCR)
_segment_cache[(segment, safe)] = result
return result
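# Editor's illustrative sketch (not part of Pyramid): non-ASCII and unsafe
# characters are UTF-8 encoded and percent-quoted. The exact outcome depends
# on the ``PATH_SEGMENT_SAFE`` set defined earlier in this module; the space
# and the n-tilde are assumed not to be in it.
def _example_quote_path_segment():  # pragma: no cover
    assert quote_path_segment('La Pe\xf1a') == 'La%20Pe%C3%B1a'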
@implementer(ITraverser)
class ResourceTreeTraverser:
"""A resource tree traverser that should be used (for speed) when
every resource in the tree supplies a ``__name__`` and
    ``__parent__`` attribute (i.e., every resource in the tree is
    :term:`location`-aware)."""
VH_ROOT_KEY = VH_ROOT_KEY
VIEW_SELECTOR = '@@'
def __init__(self, root):
self.root = root
def __call__(self, request):
environ = request.environ
matchdict = request.matchdict
if matchdict is not None:
path = matchdict.get('traverse', '/') or '/'
if is_nonstr_iter(path):
# this is a *traverse stararg (not a {traverse})
# routing has already decoded these elements, so we just
# need to join them
path = '/' + '/'.join(path) or '/'
subpath = matchdict.get('subpath', ())
if not is_nonstr_iter(subpath):
# this is not a *subpath stararg (just a {subpath})
# routing has already decoded this string, so we just need
# to split it
subpath = split_path_info(subpath)
else:
# this request did not match a route
subpath = ()
try:
# empty if mounted under a path in mod_wsgi, for example
path = request.path_info or '/'
except KeyError:
# if environ['PATH_INFO'] is just not there
path = '/'
except UnicodeDecodeError as e:
raise URLDecodeError(
e.encoding, e.object, e.start, e.end, e.reason
)
if self.VH_ROOT_KEY in environ:
# HTTP_X_VHM_ROOT
vroot_path = decode_path_info(environ[self.VH_ROOT_KEY])
vroot_tuple = split_path_info(vroot_path)
vpath = (
vroot_path + path
) # both will (must) be unicode or asciistr
vroot_idx = len(vroot_tuple) - 1
else:
vroot_tuple = ()
vpath = path
vroot_idx = -1
root = self.root
ob = vroot = root
if vpath == '/': # invariant: vpath must not be empty
# prevent a call to traversal_path if we know it's going
# to return the empty tuple
vpath_tuple = ()
else:
# we do dead reckoning here via tuple slicing instead of
# pushing and popping temporary lists for speed purposes
# and this hurts readability; apologies
i = 0
view_selector = self.VIEW_SELECTOR
vpath_tuple = split_path_info(vpath)
for segment in vpath_tuple:
if segment[:2] == view_selector:
return {
'context': ob,
'view_name': segment[2:],
'subpath': vpath_tuple[i + 1 :],
'traversed': vpath_tuple[: vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root,
}
try:
getitem = ob.__getitem__
except AttributeError:
return {
'context': ob,
'view_name': segment,
'subpath': vpath_tuple[i + 1 :],
'traversed': vpath_tuple[: vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root,
}
try:
next = getitem(segment)
except KeyError:
return {
'context': ob,
'view_name': segment,
'subpath': vpath_tuple[i + 1 :],
'traversed': vpath_tuple[: vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root,
}
if i == vroot_idx:
vroot = next
ob = next
i += 1
return {
'context': ob,
'view_name': '',
'subpath': subpath,
'traversed': vpath_tuple,
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root,
}
ModelGraphTraverser = (
ResourceTreeTraverser # b/w compat, not API, used in wild
)
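# Editor's illustrative sketch (not part of Pyramid): driving the traverser
# by hand with a stub request against a tiny dict-based tree. ``_StubRequest``
# is hypothetical and models only the attributes the traverser reads.
def _example_traverser():  # pragma: no cover
    class _StubRequest:
        matchdict = None
        environ = {}
        path_info = '/foo/@@edit'
    root = {'foo': {}}
    info = ResourceTreeTraverser(root)(_StubRequest())
    assert info['view_name'] == 'edit'
    assert info['context'] is root['foo']
    assert info['traversed'] == ('foo',)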
@implementer(IResourceURL)
class ResourceURL:
VH_ROOT_KEY = VH_ROOT_KEY
def __init__(self, resource, request):
physical_path_tuple = resource_path_tuple(resource)
physical_path = _join_path_tuple(physical_path_tuple)
if physical_path_tuple != ('',):
physical_path_tuple = physical_path_tuple + ('',)
physical_path = physical_path + '/'
virtual_path = physical_path
virtual_path_tuple = physical_path_tuple
environ = request.environ
vroot_path = environ.get(self.VH_ROOT_KEY)
# if the physical path starts with the virtual root path, trim it out
# of the virtual path
if vroot_path is not None:
vroot_path = vroot_path.rstrip('/')
if vroot_path and physical_path.startswith(vroot_path):
vroot_path_tuple = tuple(vroot_path.split('/'))
numels = len(vroot_path_tuple)
virtual_path_tuple = ('',) + physical_path_tuple[numels:]
virtual_path = physical_path[len(vroot_path) :]
self.virtual_path = virtual_path # IResourceURL attr
self.physical_path = physical_path # IResourceURL attr
self.virtual_path_tuple = virtual_path_tuple # IResourceURL attr (1.5)
self.physical_path_tuple = (
physical_path_tuple # IResourceURL attr (1.5)
)
@lru_cache(1000)
def _join_path_tuple(tuple):
return tuple and '/'.join([quote_path_segment(x) for x in tuple]) or '/'
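# Editor's illustrative sketch (not part of Pyramid): with an
# ``HTTP_X_VHM_ROOT`` entry of ``/cms`` in the environ (assumed to match the
# module's ``VH_ROOT_KEY``), ``ResourceURL`` trims the virtual-root prefix
# from the physical path. The resource and request classes are hypothetical
# minimal stand-ins.
def _example_resource_url():  # pragma: no cover
    class _Loc:
        def __init__(self, name=None, parent=None):
            self.__name__, self.__parent__ = name, parent
    class _Req:
        environ = {'HTTP_X_VHM_ROOT': '/cms'}
    doc = _Loc('doc', _Loc('cms', _Loc()))
    url = ResourceURL(doc, _Req())
    assert url.physical_path == '/cms/doc/'
    assert url.virtual_path == '/doc/'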
class DefaultRootFactory:
__parent__ = None
__name__ = None
def __init__(self, request):
pass
| 41.915789 | 79 | 0.651588 | 4,382 | 31,856 | 4.640575 | 0.12346 | 0.02867 | 0.015736 | 0.012147 | 0.440521 | 0.387067 | 0.336661 | 0.313646 | 0.297467 | 0.288812 | 0 | 0.0042 | 0.260014 | 31,856 | 759 | 80 | 41.971014 | 0.85844 | 0.645781 | 0 | 0.255814 | 0 | 0 | 0.038244 | 0 | 0.003876 | 0 | 0 | 0 | 0 | 1 | 0.073643 | false | 0.003876 | 0.042636 | 0.011628 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dadf717f7a0bf06444bc04239a755273afb5cf77 | 3,485 | py | Python | Lib/site-packages/visual/examples/faces_heightfield.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/visual/examples/faces_heightfield.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/visual/examples/faces_heightfield.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | ## Demonstrates some techniques for working with "faces", and
## shows how to build a height field (a common feature request)
## with it.
## David Scherer July 2001
from visual import *
class Model:
def __init__(self):
self.frame = frame()
self.model = faces(frame=self.frame)
self.twoSided = true # add every face twice with opposite normals
def FacetedTriangle(self, v1, v2, v3, color=color.white):
"""Add a triangle to the model, apply faceted shading automatically"""
v1 = vector(v1)
v2 = vector(v2)
v3 = vector(v3)
try:
normal = norm( cross(v2-v1, v3-v1) )
except:
normal = vector(0,0,0)
for v in (v1,v2,v3):
self.model.append( pos=v, color=color, normal=normal )
if self.twoSided:
for v in (v1,v3,v2):
self.model.append( pos=v, color=color, normal=-normal )
def FacetedPolygon(self, *v):
"""Appends a planar polygon of any number of vertices to the model,
applying faceted shading automatically."""
for t in range(len(v)-2):
self.FacetedTriangle( v[0], v[t+1], v[t+2] )
def DoSmoothShading(self):
"""Change a faceted model to smooth shaded, by averaging normals at
coinciding vertices.
This is a very slow and simple smooth shading
implementation which has to figure out the connectivity of the
model and does not attempt to detect sharp edges.
It attempts to work even in two-sided mode where there are two
opposite normals at each vertex. It may fail somehow in pathological
cases. """
pos = self.model.pos
normal = self.model.normal
vertex_map = {} # vertex position -> vertex normal
vertex_map_backface = {}
for i in range( len(pos) ):
tp = tuple(pos[i])
old_normal = vertex_map.get( tp, (0,0,0) )
if dot(old_normal, normal[i]) >= 0:
vertex_map[tp] = normal[i] + old_normal
else:
vertex_map_backface[tp] = normal[i] + vertex_map_backface.get(tp, (0,0,0))
for i in range( len(pos) ):
tp = tuple(pos[i])
if dot(vertex_map[tp], normal[i]) >= 0:
normal[i] = vertex_map[tp] and norm( vertex_map[ tp ] )
else:
normal[i] = vertex_map_backface[tp] and norm(vertex_map_backface[tp] )
def DrawNormal(self, scale):
pos = self.model.pos
normal = self.model.normal
for i in range(len(pos)):
arrow(pos=pos[i], axis=normal[i]*scale)
class Mesh (Model):
def __init__(self, xvalues, yvalues, zvalues):
Model.__init__(self)
points = zeros( xvalues.shape + (3,), Float )
points[...,0] = xvalues
points[...,1] = yvalues
points[...,2] = zvalues
for i in range(zvalues.shape[0]-1):
for j in range(zvalues.shape[1]-1):
self.FacetedPolygon( points[i,j], points[i,j+1],
points[i+1,j+1], points[i+1,j] )
## Graph a function of two variables (a height field)
x = arange(-1,1,2./20)
y = arange(-1,1,2./20)
z = zeros( (len(x),len(y)), Float )
x,y = x[:,NewAxis]+z, y+z
m = Mesh( x, (sin(x*pi)+sin(y*pi))*0.2, y )
m.DoSmoothShading()
##m.DrawNormal(0.05)
| 35.561224 | 91 | 0.556385 | 479 | 3,485 | 3.983299 | 0.325679 | 0.051887 | 0.044549 | 0.023061 | 0.212788 | 0.129979 | 0.110063 | 0.110063 | 0.072327 | 0.02935 | 0 | 0.027554 | 0.323099 | 3,485 | 97 | 92 | 35.927835 | 0.781263 | 0.245624 | 0 | 0.177419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.016129 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dae1d10cf13014392041b42324790708bc97af7d | 766 | py | Python | tests/test_appcli_jwt.py | Carrene/restfulpy | 21472af0415fffc23f8003b6074afc2de2e0b414 | [
"MIT"
] | 25 | 2017-06-19T10:01:36.000Z | 2019-08-03T10:35:41.000Z | tests/test_appcli_jwt.py | Carrene/restfulpy | 21472af0415fffc23f8003b6074afc2de2e0b414 | [
"MIT"
] | 209 | 2017-09-27T11:59:29.000Z | 2019-08-10T11:10:48.000Z | tests/test_appcli_jwt.py | Carrene/restfulpy | 21472af0415fffc23f8003b6074afc2de2e0b414 | [
"MIT"
] | 16 | 2017-11-22T15:44:31.000Z | 2019-04-14T12:29:34.000Z | import base64
import json
from bddcli import Given, given, when, stdout, stderr, Application, status
from restfulpy import Application as RestfulpyApplication
foo = RestfulpyApplication(name='jwt')
app = Application('foo', 'tests.test_appcli_jwt:foo.cli_main')
def test_jwt():
with Given(app, 'jwt create'):
assert stderr == ''
assert status == 0
assert len(stdout) > 10
when(given + '\'{"foo": 1}\'')
assert stderr == ''
assert status == 0
header, payload, signature = stdout.encode().split(b'.')
payload = base64.urlsafe_b64decode(payload)
assert json.loads(payload) == {'foo': 1}
if __name__ == '__main__': # pragma: no cover
foo.cli_main(['jwt', 'create', '{"foo": 1}'])
| 25.533333 | 74 | 0.627937 | 91 | 766 | 5.131868 | 0.483516 | 0.025696 | 0.042827 | 0.102784 | 0.107066 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021922 | 0.225849 | 766 | 29 | 75 | 26.413793 | 0.765599 | 0.020888 | 0 | 0.210526 | 0 | 0 | 0.113788 | 0.045515 | 0 | 0 | 0 | 0 | 0.315789 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dae369d14e4029c4e7a65aeb8f3575d751e3c7bb | 2,277 | py | Python | hw4/visualizations.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | 1 | 2022-01-17T14:28:46.000Z | 2022-01-17T14:28:46.000Z | hw4/visualizations.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | null | null | null | hw4/visualizations.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | 2 | 2021-11-08T19:05:57.000Z | 2022-01-17T14:28:48.000Z | import os
import numpy as np
import matplotlib.pyplot as plt
def plot_embedding(X, y, training_mode, save_name):
"""
Reference: https://github.com/NaJaeMin92/pytorch_DANN/
    Takes the t-SNE output X and the action label encodings y and plots
    the embedding, coloring each point by its label.
"""
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure(figsize=(10, 10))
    # RGBA color for each action label 0-10; any other label gets a fallback.
    label_colors = (
        (0.0, 0.0, 1.0, 1.0), (1.0, 0.0, 0.0, 1.0), (1.0, 1.0, 0.0, 1.0),
        (1.0, 1.0, 1.0, 1.0), (1.0, 0.5, 0.0, 1.0), (1.0, 0.0, 0.5, 1.0),
        (1.0, 1.0, 0.0, 0.0), (1.0, 0.0, 1.0, 1.0), (0.5, 0.5, 0.5, 0.5),
        (0.5, 0.2, 0.2, 0.2), (1.0, 0.5, 0.2, 1.0),
    )
    for i in range(len(y)):  # X.shape[0] : 1024
        # plot colored number
        idx = int(y[i])
        if 0 <= idx < len(label_colors):
            colors = label_colors[idx]
        else:
            colors = (1.0, 0.2, 0.5, 1.0)
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=colors,
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
if save_name is not None:
plt.title(save_name)
save_folder = 'saved_plot'
if not os.path.exists(save_folder):
os.makedirs(save_folder)
fig_name = 'saved_plot/' + str(training_mode) + '_' + str(save_name) + '.png'
plt.savefig(fig_name)
print('{} is saved'.format(fig_name))
def plot_p1_train_info(training_loss, val_accuracy, save_dir = "./saved_plot/problem1_loss_acc.png"):
"""
Plots training Loss and Validation Acc
"""
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.plot(training_loss, color = 'red')
plt.title("Training Loss vs # Epochs")
plt.ylabel("Cross Entropy Loss")
plt.xlabel("Number of Epochs")
plt.subplot(1,2,2)
plt.plot(val_accuracy, color = 'blue')
plt.title("Validation Accuracy vs # Epochs")
plt.ylabel("Accuracy")
plt.xlabel("Number of Epochs")
plt.savefig(save_dir)
plt.show()
| 28.4625 | 101 | 0.527448 | 384 | 2,277 | 3.039063 | 0.278646 | 0.041131 | 0.030848 | 0.047986 | 0.186804 | 0.169666 | 0.077121 | 0.050557 | 0.022279 | 0.022279 | 0 | 0.083903 | 0.293368 | 2,277 | 79 | 102 | 28.822785 | 0.641392 | 0.084761 | 0 | 0.034483 | 0 | 0 | 0.101328 | 0.016724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.051724 | 0 | 0.086207 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dae48a50c2d41cee7a17a1643cdfb185e9a03591 | 3,274 | py | Python | experiments_histogram.py | LaGuer/DeepSphere | 5a02ac01b8fe3a5b9814ae9efd20d10e0c843209 | [
"MIT"
] | 71 | 2018-10-30T09:46:19.000Z | 2020-02-15T12:02:00.000Z | experiments_histogram.py | aosheng1996/DeepSphere | ebcf162eaa6e23c1c92dbc84e0908695bb7245d7 | [
"MIT"
] | 9 | 2018-10-31T02:32:00.000Z | 2020-02-12T08:55:43.000Z | experiments_histogram.py | aosheng1996/DeepSphere | ebcf162eaa6e23c1c92dbc84e0908695bb7245d7 | [
"MIT"
] | 16 | 2018-10-31T00:32:33.000Z | 2020-01-19T21:24:46.000Z | #!/usr/bin/env python3
# coding: utf-8
"""
Script to run the baseline experiment:
SVM classification with histogram features.
"""
import os
import sys
import numpy as np
from deepsphere import experiment_helper
from grid import pgrid
def single_experiment(sigma, order, sigma_noise, path):
"""Run as experiment.
Check the notebook `part_sphere.ipynb` to get more insides about this code.
"""
Nside = 1024
print('Solve the histogram problem for sigma {}, order {}, noise {}'.format(sigma, order, sigma_noise), flush=True)
EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma'.format(
Nside, sigma_noise, order, sigma)
x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order)
x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std)
if order==4:
augmentation = 20
else:
augmentation = 40
ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type='histogram', augmentation=augmentation)
features_train, labels_train, features_validation, labels_validation, features_test = ret
ntrain = len(features_train)//augmentation
nsamples = list(ntrain // 12 * np.linspace(1, 6, num=6).astype(np.int))
nsamples += list(ntrain // 2 * np.linspace(1, augmentation*2, num=40).astype(np.int))
err_train = np.zeros(shape=[len(nsamples)])
err_validation = np.zeros(shape=[len(nsamples)])
err_train[:] = np.nan
err_validation[:] = np.nan
for i, n in enumerate(nsamples):
print('{} Solve it for {} samples'.format(i, n), flush=True)
err_train[i], err_validation[i], _ = experiment_helper.err_svc_linear(
features_train[:n], labels_train[:n], features_validation,
labels_validation)
e_train, e_validation, C = experiment_helper.err_svc_linear(
features_train, labels_train, features_validation, labels_validation)
print('The validation error is {}%'.format(e_validation * 100), flush=True)
# Cheating in favor of SVM
e_train, e_test = experiment_helper.err_svc_linear_single(C,
features_train, labels_train, features_test, labels_test)
print('The test error is {}%'.format(e_test * 100), flush=True)
np.savez(path + EXP_NAME, [nsamples, err_train, err_validation, e_test])
return e_test
if __name__ == '__main__':
if len(sys.argv) > 1:
sigma = int(sys.argv[1])
order = int(sys.argv[2])
sigma_noise = float(sys.argv[3])
grid = [(sigma, order, sigma_noise)]
else:
grid = pgrid()
path = 'results/histogram/'
os.makedirs(path, exist_ok=True)
for sigma, order, sigma_noise in grid:
print('Launch experiment for sigma={}, order={}, noise={}'.format(sigma, order, sigma_noise))
res = single_experiment(sigma, order, sigma_noise, path)
filepath = os.path.join(path, 'histogram_results_list_sigma{}'.format(sigma))
new_data = [order, sigma_noise, res]
if os.path.isfile(filepath+'.npz'):
results = np.load(filepath+'.npz')['data'].tolist()
else:
results = []
results.append(new_data)
np.savez(filepath, data=results)
| 35.204301 | 153 | 0.678986 | 442 | 3,274 | 4.78733 | 0.29638 | 0.051985 | 0.056711 | 0.066163 | 0.245274 | 0.216919 | 0.192344 | 0.121928 | 0.041588 | 0 | 0 | 0.012576 | 0.198534 | 3,274 | 92 | 154 | 35.586957 | 0.793826 | 0.072999 | 0 | 0.050847 | 0 | 0 | 0.099003 | 0.022259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.084746 | 0 | 0.118644 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dae6cac23452498b7274b531bf02a845882db8e7 | 2,827 | py | Python | tests/deepspeed/ds_pipe.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | 1 | 2021-11-15T19:07:13.000Z | 2021-11-15T19:07:13.000Z | tests/deepspeed/ds_pipe.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | tests/deepspeed/ds_pipe.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | import os
import torch.cuda
from hfutils.model_pipe import T5DeepSpeedPipe
import deepspeed
from tqdm import tqdm
from transformers.models.t5.configuration_t5 import T5Config
from transformers import DataCollatorForSeq2Seq, default_data_collator
import argparse
from deepspeed.utils import RepeatingLoader
from hfutils.arg_parser import HfArguments
from hfutils.loader import ModelLoader, DatasetLoader
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from datasets import load_dataset, load_metric, concatenate_datasets
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
args = HfArguments()
data_args = args.data_args
dataset_loader = DatasetLoader(args)
tokenizer, _ = ModelLoader(args).load(load_model=False)
eval_dataset = dataset_loader.load(
tokenizer, partition="validation", create_dataloader=False
)
batch_size = data_args.eval_bsz
user_path = os.path.expanduser("~")
# model_path = os.path.join(user_path, "HuggingFace", "google", "t5-xl-lm-adapt")
# model_path = "/mnt/yavin/checkpoints/t5-xl-lm-adapt/sst2/checkpoint-1380/"
# model_path = "google/t5-small-lm-adapt"
model_path = args.model_args.model_name_or_path
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(tokenizer)
class PipeDataset(Dataset):
def __init__(self, dataset: Dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return {
"encoder_input_ids": self.dataset[i]['input_ids'],
"encoder_attention_mask": self.dataset[i]['attention_mask'],
}
eval_dataset = concatenate_datasets([eval_dataset]*70)
eval_dataset = PipeDataset(eval_dataset)
# print(eval_dataset[0])
def eval_generator():
eval_dataloader = DataLoader(
eval_dataset,
shuffle=True,
collate_fn=data_collator,
batch_size=batch_size,
)
for batch in tqdm(eval_dataloader, desc="eval_generator"):
shape = batch["encoder_input_ids"].shape
yield ((
batch["encoder_input_ids"],
batch["encoder_attention_mask"],
), torch.zeros(shape[0]))
# print(shape)
# yield (
# batch["encoder_input_ids"],
# batch["encoder_attention_mask"],
# )
# exit()
config = T5Config.from_pretrained(
model_path
)
deepspeed.init_distributed()
model = T5DeepSpeedPipe(config, num_stages=torch.cuda.device_count())
engine, _, _, _ = deepspeed.initialize(args.ds_args, model=model)
for step, batch in enumerate(RepeatingLoader(eval_generator())):
if step > 500: break
engine.eval_batch(iter([batch]*1), compute_loss=False)
# engine.eval_batch(RepeatingLoader(eval_generator()), compute_loss=False)
| 30.074468 | 81 | 0.723382 | 345 | 2,827 | 5.649275 | 0.344928 | 0.039507 | 0.030785 | 0.030785 | 0.056439 | 0.056439 | 0.056439 | 0.056439 | 0.056439 | 0.056439 | 0 | 0.010261 | 0.172621 | 2,827 | 93 | 82 | 30.397849 | 0.823001 | 0.140432 | 0 | 0 | 0 | 0 | 0.070306 | 0.027295 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0 | 0.222222 | 0.031746 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dae6f59082823ad960155ff941d486f9928ce8dd | 2,694 | py | Python | Polynomial_dict.py | chapman-phys227-2016s/hw-7-patti102 | 8688cbb3649ae623ec870ea380cd7470d69a2517 | [
"MIT"
] | null | null | null | Polynomial_dict.py | chapman-phys227-2016s/hw-7-patti102 | 8688cbb3649ae623ec870ea380cd7470d69a2517 | [
"MIT"
] | null | null | null | Polynomial_dict.py | chapman-phys227-2016s/hw-7-patti102 | 8688cbb3649ae623ec870ea380cd7470d69a2517 | [
"MIT"
] | null | null | null | import numpy
import copy
class Polynomial(object):
def __init__(self, coefficients):
self.coeff = coefficients
def __call__(self, x):
"""Evaluate the polynomial."""
s = 0
for degree in self.coeff:
s += self.coeff[degree]*x**degree
return s
def __add__(self, other):
"""Return self + other as Polynomial object."""
result_coeff = copy.deepcopy(self.coeff)
for key in other.coeff:
if key in self.coeff:
result_coeff[key] = result_coeff[key] + other.coeff[key]
else:
result_coeff[key] = other.coeff[key]
return Polynomial(result_coeff)
def __mul__(self, other):
result_coeff = {}
for keyself in self.coeff:
for keyother in other.coeff:
if keyself + keyother in result_coeff:
result_coeff[keyself+keyother] = result_coeff[keyself+keyother] + self.coeff[keyself] * other.coeff[keyother]
else:
result_coeff[keyself+keyother] = self.coeff[keyself] * other.coeff[keyother]
return Polynomial(result_coeff)
    def differentiate(self):
        """Differentiate this polynomial in-place."""
        # Coefficients live in a dict keyed by exponent, so shift each key
        # down by one and scale by the old exponent. (The previous list-style
        # indexing raised KeyError on sparse dicts such as {1: 1, 100: -3}.)
        self.coeff = {degree - 1: degree * coeff
                      for degree, coeff in self.coeff.items() if degree > 0}
    def derivative(self):
        """Copy this polynomial and return its derivative."""
        dpdx = Polynomial(dict(self.coeff))  # make a copy (dicts have no [:])
        dpdx.differentiate()
        return dpdx
    def __str__(self):
        s = ''
        for degree in sorted(self.coeff):  # iterate keys, not positions
            if self.coeff[degree] != 0:
                s += ' + %g*x^%d' % (self.coeff[degree], degree)
        # Fix layout
        s = s.replace('+ -', '- ')
        s = s.replace('x^0', '1')
        s = s.replace(' 1*', ' ')
        s = s.replace('x^1 ', 'x ')
        #s = s.replace('x^1', 'x')  # will replace x^100 by x^00
        if s[0:3] == ' + ':  # remove initial +
            s = s[3:]
        if s[0:3] == ' - ':  # fix spaces for initial -
            s = '-' + s[3:]
        return s
    def simplestr(self):
        s = ''
        for degree in sorted(self.coeff):
            s += ' + %g*x^%d' % (self.coeff[degree], degree)
        return s
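# Illustrative usage (added for clarity, not in the original file): dict keys
# are exponents, so sparse polynomials like 1 + 3*x^2 stay compact.
def example_usage():
    p = Polynomial({0: 1, 2: 3})  # represents 1 + 3*x^2
    assert p(2) == 13
    assert p.derivative().coeff == {1: 6}
    assert str(p) == '1 + 3*x^2'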
def test_Polynomial():
p1 = Polynomial({1:1, 100:-3})
p2 = Polynomial({20:1, 1:-1, 100:4})
assert (p1.__add__(p2)).coeff == {1:0, 20:1, 100:1}, 'Improper addition.'
assert(p1.__mul__(p2)).coeff == {2:-1, 21:1, 101:7, 120:-3, 200:-12}, 'Improper multiplication.'
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'verify':
test_Polynomial() | 32.853659 | 129 | 0.528582 | 349 | 2,694 | 3.939828 | 0.234957 | 0.117818 | 0.036364 | 0.056727 | 0.251636 | 0.208 | 0.151273 | 0.151273 | 0.129455 | 0.129455 | 0 | 0.039869 | 0.320342 | 2,694 | 82 | 130 | 32.853659 | 0.711087 | 0.101336 | 0 | 0.206349 | 0 | 0 | 0.042571 | 0 | 0 | 0 | 0 | 0 | 0.031746 | 1 | 0.142857 | false | 0 | 0.047619 | 0 | 0.301587 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
daeae031a92bdd332e807b1e1b225a48f958d13f | 4,283 | py | Python | pymultimediacompression/videomedia/compressor.py | AbdullrhmanAljasser/PyMultiMediaCompression | 0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705 | [
"MIT"
] | null | null | null | pymultimediacompression/videomedia/compressor.py | AbdullrhmanAljasser/PyMultiMediaCompression | 0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705 | [
"MIT"
] | null | null | null | pymultimediacompression/videomedia/compressor.py | AbdullrhmanAljasser/PyMultiMediaCompression | 0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705 | [
"MIT"
] | null | null | null | import os
import ffmpeg
import subprocess
import shutil
from pymultimediacompression.utilities.utils import bitrate_size_based, gb_to_bit, mb_to_bit, kb_to_bit, b_to_bit
MISSING_REQUIREMENTS = "FFmpeg required to be installed to use PyMultiMediaCompression \n Check https://github.com/AbdullrhmanAljasser/PyMultiMediaCompression"
'''
Check if required installs are satisfied
Raise an error if not
'''
def check_required():
check = subprocess.call(['which', 'ffmpeg'])
if check != 0:
raise Exception(MISSING_REQUIREMENTS)
check = subprocess.call(['which', 'ffprobe'])
if check != 0:
raise Exception(MISSING_REQUIREMENTS)
'''
Video Compression Based on given Size
Description:
A function to allow users to compressa given video to their expected video size
Input Paramters:
filepath (Required): Path of the video file being compressed
finalsize (Required): expected final size of video
size_type (Optional): Specify final size type gb, mb, kb, or b (Default mb)
output (Optional): To keep original video specify output path to stop overwriting
codec (Optional): Specify the codec used to compress (Default x264)
#TODO: More parameters to give more freedom to user
'''
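'''
Editor's note: valid_video_ext, valid_size_type and final_bit_size are called
below but missing from this file. The definitions that follow are minimal
reconstructions inferred from the call sites; the extension whitelist in
particular is an assumption, not the project's original list.
'''
VALID_VIDEO_EXTS = ('.mp4', '.mkv', '.avi', '.mov', '.webm', '.flv', '.wmv')
def valid_video_ext(ext):
    # Assumed whitelist of container extensions ffmpeg can read
    return ext in VALID_VIDEO_EXTS
def valid_size_type(size_type):
    return size_type in ('gb', 'mb', 'kb', 'b')
def final_bit_size(finalsize, size_type='mb'):
    # Map the size unit onto the corresponding converter imported from utils
    converters = {'gb': gb_to_bit, 'mb': mb_to_bit, 'kb': kb_to_bit, 'b': b_to_bit}
    return converters[size_type](finalsize)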
def video_compress_size_based(
filepath,
finalsize,
size_type='mb',
output=None,
codec='libx264'
):
# Check if the required installs are satisfied
check_required()
# Check if filepath is a file
if not os.path.isfile(filepath):
raise Exception("File path is not a valid file")
    # Check if filepath is absolute or not
    if not os.path.isabs(filepath):
        filepath = os.path.join(os.getcwd(), filepath)
# Check if asked size is a correct number ==>
try:
float(finalsize)
except Exception as e:
raise (e)
if finalsize <= 0:
raise Exception("Unable to compress to 0 or below size")
# END <==
    # Retrieve file name/extension to ensure the input is applicable ==>
    # (os.path is used instead of splitting on '\\' so this also works on
    # non-Windows paths)
    ext = os.path.splitext(filepath)[-1].lower()
    file_name_w_ext = os.path.basename(filepath)
    path_to_file = os.path.dirname(filepath)
if not valid_video_ext(ext):
raise Exception("Input file is not of valid video type")
# END <==
    # Setup output (Overwrite/None)
    if output is None:
        compressed_dir = os.path.join(os.getcwd(), 'compressed')
        os.makedirs(compressed_dir, exist_ok=True)
        os.chdir(compressed_dir)
    else:
        if not os.path.isabs(output):
            output = os.path.join(os.getcwd(), output)
        if os.path.isfile(output):
            raise Exception("Output path is a file not a directory")
        if not os.path.isdir(output):
            raise Exception("Output path is not a valid directory, maybe the directory doesn't exist?")
        os.chdir(output)
file_info = ffmpeg.probe(filepath)
file_info_size = file_info['format']['size']
file_info_duration = file_info['format']['duration']
file_info_bitrate = (float(file_info_size)) / float(file_info_duration)
if not valid_size_type(size_type):
raise Exception("Size type is not correct, must be gb, mb, kb, or b")
finalsize = final_bit_size(finalsize, size_type=size_type)
bitrate_for_compression = bitrate_size_based(finalsize, file_info_duration)
try:
ffmpeg.input(filepath)\
.output(file_name_w_ext, **{'vcodec':codec, 'video_bitrate':bitrate_for_compression})\
.overwrite_output()\
.run()
except Exception as e:
raise (e)
    path_to_compressed = os.path.join(os.getcwd(), file_name_w_ext)
    if output is None:
        # Moving to overwrite the original file
        shutil.move(path_to_compressed, filepath)
    # else: ffmpeg already wrote the compressed file into the specified
    # output directory, so no move is needed
return True
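'''
Editor's illustrative example (the path below is hypothetical): compress a
clip down to roughly 25 MB with the default libx264 codec.
'''
if __name__ == '__main__':
    video_compress_size_based('videos/clip.mp4', 25, size_type='mb')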
__all__ = [
'video_compress_size_based',
'check_required'
] | 32.694656 | 160 | 0.632267 | 541 | 4,283 | 4.855823 | 0.273567 | 0.027408 | 0.013323 | 0.020936 | 0.159878 | 0.094404 | 0.031214 | 0 | 0 | 0 | 0 | 0.004177 | 0.273406 | 4,283 | 131 | 161 | 32.694656 | 0.839974 | 0.081018 | 0 | 0.268293 | 0 | 0 | 0.176433 | 0.015287 | 0 | 0 | 0 | 0.007634 | 0 | 1 | 0.02439 | false | 0 | 0.060976 | 0 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
daf17f6b45920374fa2e9b1df4b56aef139d6271 | 5,822 | py | Python | python/interpreter/environment.py | yehzhang/x9 | 2d06767ecd3521e9255748d14ab43938d1693bbc | [
"MIT"
] | 1 | 2017-11-19T02:04:44.000Z | 2017-11-19T02:04:44.000Z | python/interpreter/environment.py | yehzhang/x9 | 2d06767ecd3521e9255748d14ab43938d1693bbc | [
"MIT"
] | null | null | null | python/interpreter/environment.py | yehzhang/x9 | 2d06767ecd3521e9255748d14ab43938d1693bbc | [
"MIT"
] | null | null | null | from collections import defaultdict
class Environment:
def __init__(self, config):
self.aliases = {}
self.registers = Registers(self.aliases, config)
self.memory = Memory(config)
# :type Dict[str, Label]: {label_name: Label}
self.labels = {}
self.execution_count = 0
# :type Dict[str, Dict[str, int]]: populated by TokenMappers in asm module
# e.g. instruction_id = luts[BranchEqual.mnemonic][immediate]
# e.g. addr = luts[LoadWord.mnemonic][immediate]
self.luts = defaultdict(dict)
self._acc_luts = defaultdict(dict)
# :type int: not a genuine register
self.pc = 0
self.cout = 0
def __repr__(self):
items = [
('Memory', repr(self.memory)),
('Registers', self.registers.as_str(self.aliases)),
('Dynamic instruction count', str(self.execution_count)),
]
str_items = []
for k, v in items:
lns = v.splitlines()
if len(lns) > 1:
v = ''.join('\n\t' + ln for ln in lns)
s = '{}: {}'.format(k, v)
str_items.append(s)
return '\n'.join(str_items)
def unalias(self, op):
return self.aliases.get(op, op)
class Registers:
def __init__(self, aliases, config):
assert len(config['reg_names']) <= 16
super().__setattr__('names', config['reg_names'])
regs = make_bytes(config['reg_default'], len(self.names))
regs = dict(zip(self.names, regs))
super().__setattr__('registers', regs)
super().__setattr__('aliases', aliases)
def __repr__(self):
return self.as_str()
    def as_str(self, aliases=None):
        # Honor the caller-supplied aliases, falling back to our own.
        aliases = self.aliases if aliases is None else aliases
        inv_aliases = {v: k for k, v in aliases.items()}
        return '\n'.join('{}: {}'.format(
            inv_aliases.get(n, n), self.registers[n]) for n in self.names)
def __getattr__(self, name):
name = self.aliases.get(name, name)
return self.registers[name].get()
def __setattr__(self, name, value):
name = self.aliases.get(name, name)
self.registers[name].set(value)
def __getitem__(self, key):
"""
:param int key:
"""
return self.__getattr__(self.names[key])
def __setitem__(self, key, value):
"""
:param int key:
"""
return self.__setattr__(self.names[key], value)
class Memory:
""" Memory is of big-endian format.
"""
def __init__(self, config):
assert 0 < config['mem_size']
self.memory = make_bytes(config['mem_default'], config['mem_size'])
def __repr__(self):
return '\n'.join(' '.join(map(str, self.memory[i:i + 8]))
for i in range(0, len(self.memory), 8))
def __getitem__(self, key):
"""
Load an unsigned byte at address 0xff: memory[0xff]
Load an unsigned word at address 0xff: memory[0xff, 4]
"""
if not isinstance(key, tuple):
key = key,
return self.load(*key)
def __setitem__(self, key, value):
"""
Store a byte at address 0xff: memory[0xff] = 0xab
Store a word at address 0xff: memory[0xff, 4] = 0xab
"""
if isinstance(key, tuple):
addr, size = key
key = addr, value, size
else:
key = key, value
self.store(*key)
def load(self, addr, size=1, signed=False):
assert 1 <= size
assert 0 <= addr
assert addr + size <= len(self.memory)
value = 0
for i in range(addr, addr + size):
byte = self.memory[i].get()
value = (value << 8) | byte
if signed:
value = convert_to_signed_integer(value, size * 8)
return value
def store(self, addr, value, size=1):
assert 1 <= size
assert 0 <= addr
assert addr + size <= len(self.memory)
value = convert_to_unsigned_integer(value, size * 8)
for i in range(size - 1, -1, -1):
self.memory[addr + i].set(value & 0xff)
value >>= 8
class Byte:
def __init__(self):
self.value = 0
def __repr__(self):
return '0x{:02x}'.format(self.value)
def set(self, value):
# Signed minimum and unsigned maximum
self.value = convert_to_unsigned_integer(value, 8)
return self
def get(self):
return self.value
def convert_to_unsigned_integer(value, size):
"""
:param int size: number of bits containing this integer
"""
upper_bound = 2 ** size
if not (-upper_bound // 2 <= value < upper_bound):
msg = '{} is out of range of {} bits'.format(value, size)
raise ValueError(msg)
all_f_mask = upper_bound - 1
return value & all_f_mask
def convert_to_signed_integer(value, size):
"""
:param int size: number of bits containing this integer
"""
upper_bound = 2 ** size
if not (-upper_bound // 2 <= value < upper_bound):
msg = '{} is out of range of {} bits'.format(value, size)
raise ValueError(msg)
if value >= 0:
msb_mask = 1 << (size - 1)
if value & msb_mask:
value -= upper_bound
return value
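def _example_twos_complement():
    """Illustrative sketch (added for clarity, not part of the original
    module): the two conversions are inverses over an 8-bit byte."""
    assert convert_to_unsigned_integer(-1, 8) == 0xff
    assert convert_to_signed_integer(0xff, 8) == -1
    assert convert_to_signed_integer(0x7f, 8) == 127  # MSB clear: unchanged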
def make_bytes(default, size=None):
"""
:param int|List[int] default:
:param int size: number of bytes in the list, if default is int
:return List[Byte]:
"""
if isinstance(default, int):
if size is None:
raise ValueError("'size' is not specified when default is int")
return [Byte().set(default) for _ in range(size)]
bytes = [Byte().set(d) for d in default]
if size is not None and len(bytes) != size:
raise ValueError("'default' and 'size' are not of the same length")
return bytes
| 29.256281 | 82 | 0.567159 | 748 | 5,822 | 4.241979 | 0.195187 | 0.031201 | 0.013867 | 0.023952 | 0.270407 | 0.243303 | 0.153167 | 0.135518 | 0.135518 | 0.135518 | 0 | 0.012389 | 0.306767 | 5,822 | 198 | 83 | 29.40404 | 0.773786 | 0.137238 | 0 | 0.224 | 0 | 0 | 0.061031 | 0 | 0 | 0 | 0.000825 | 0 | 0.064 | 1 | 0.184 | false | 0 | 0.008 | 0.04 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
daf44bfec449975c869de1732b37b6845adb00a5 | 4,225 | py | Python | WebUtils/Funcs.py | PeaceWorksTechnologySolutions/w4py | 74f5a03a63f1a93563502b908474aefaae2abda2 | [
"MIT"
] | 18 | 2016-08-01T20:15:59.000Z | 2019-12-24T16:00:03.000Z | WebUtils/Funcs.py | WebwareForPython/w4py | bba08f5974d49f5da7e88abe3eeda1037d0824a3 | [
"MIT"
] | 6 | 2016-09-13T05:48:45.000Z | 2020-01-09T18:29:12.000Z | WebUtils/Funcs.py | WebwareForPython/w4py | bba08f5974d49f5da7e88abe3eeda1037d0824a3 | [
"MIT"
] | 6 | 2016-09-16T14:32:29.000Z | 2020-01-03T18:52:16.000Z | """WebUtils.Funcs
This module provides some basic functions that are useful
in HTML and web development.
You can safely import * from WebUtils.Funcs if you like.
"""
__all__ = [
'htmlEncode', 'htmlEncodeStr', 'htmlDecode', 'urlEncode', 'urlDecode',
'htmlForDict', 'requestURI', 'normURL']
htmlForNone = '-' # used by htmlEncode
htmlCodes = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
# ['\n', '<br>'],
)
htmlCodesReversed = tuple(reversed(htmlCodes))
def htmlEncode(what, codes=htmlCodes):
"""Return the HTML encoded version of the given object.
The optional 'codes' parameter allows passing custom translations.
"""
if what is None:
return htmlForNone
if hasattr(what, 'html'):
# allow objects to specify their own translation to html
# via a method, property or attribute
ht = what.html
if callable(ht):
ht = ht()
return ht
what = str(what)
return htmlEncodeStr(what, codes)
def htmlEncodeStr(s, codes=htmlCodes):
"""Return the HTML encoded version of the given string.
This is useful to display a plain ASCII text string on a web page.
The optional 'codes' parameter allows passing custom translations.
"""
for c, e in codes:
s = s.replace(c, e)
return s
def htmlDecode(s, codes=htmlCodesReversed):
"""Return the ASCII decoded version of the given HTML string.
This does NOT remove normal HTML tags like <p>.
It is the inverse of htmlEncode().
The optional 'codes' parameter allows passing custom translations.
"""
for c, e in codes:
s = s.replace(e, c)
return s
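def _example_html_roundtrip():
    """Illustrative sketch (added for clarity): encode and decode are
    inverses for the default translation tables."""
    s = '<a href="#">Fish & Chips</a>'
    assert htmlEncode(s) == ('&lt;a href=&quot;#&quot;&gt;'
        'Fish &amp; Chips&lt;/a&gt;')
    assert htmlDecode(htmlEncode(s)) == s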
# Aliases for URL encoding and decoding functions:
from urllib import quote_plus as urlEncode, unquote_plus as urlDecode
def htmlForDict(d, addSpace=None, filterValueCallBack=None,
maxValueLength=None, topHeading=None, isEncoded=None):
"""Return an HTML string with a table where each row is a key-value pair."""
if not d:
return ''
# A really great (er, bad) example of hardcoding. :-)
html = ['<table class="NiceTable">\n']
if topHeading:
html.append('<tr class="TopHeading"><th')
html.append(('>%s</th><th>%s' if isinstance(topHeading, tuple)
else ' colspan="2">%s') % topHeading)
html.append('</th></tr>\n')
for key in sorted(d):
value = d[key]
if addSpace and key in addSpace:
target = addSpace[key]
value = (target + ' ').join(value.split(target))
if filterValueCallBack:
value = filterValueCallBack(value, key, d)
if maxValueLength and not isEncoded:
value = str(value)
if len(value) > maxValueLength:
value = value[:maxValueLength-3] + '...'
key = htmlEncode(key)
if not isEncoded:
value = htmlEncode(value)
html.append('<tr><th style="text-align:left">%s</th><td>%s</td></tr>\n'
% (key, value))
html.append('</table>')
return ''.join(html)
def requestURI(env):
"""Return the request URI for a given CGI-style dictionary.
Uses REQUEST_URI if available, otherwise constructs and returns it
from SCRIPT_URL, SCRIPT_NAME, PATH_INFO and QUERY_STRING.
"""
uri = env.get('REQUEST_URI')
if uri is None:
uri = env.get('SCRIPT_URL')
if uri is None:
uri = env.get('SCRIPT_NAME', '') + env.get('PATH_INFO', '')
query = env.get('QUERY_STRING', '')
if query != '':
uri += '?' + query
return uri
def normURL(path):
"""Normalizes a URL path, like os.path.normpath.
Acts on a URL independent of operating system environment.
"""
if not path:
return
initialslash = path[0] == '/'
lastslash = path[-1] == '/'
comps = path.split('/')
newcomps = []
for comp in comps:
if comp in ('', '.'):
continue
if comp != '..':
newcomps.append(comp)
elif newcomps:
newcomps.pop()
path = '/'.join(newcomps)
if path and lastslash:
path += '/'
if initialslash:
path = '/' + path
return path
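def _example_normURL():
    """Illustrative sketch (added for clarity): leading/trailing slashes are
    preserved while '.', '..' and empty segments are collapsed."""
    assert normURL('/a/b/../c/') == '/a/c/'
    assert normURL('a//./b') == 'a/b'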
| 28.741497 | 80 | 0.59574 | 517 | 4,225 | 4.837524 | 0.355899 | 0.019992 | 0.014394 | 0.020392 | 0.145542 | 0.145542 | 0.145542 | 0.145542 | 0.102359 | 0.102359 | 0 | 0.001307 | 0.27574 | 4,225 | 146 | 81 | 28.938356 | 0.816013 | 0.300592 | 0 | 0.068182 | 0 | 0.011364 | 0.11676 | 0.024895 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.011364 | 0 | 0.193182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
daf646afbefc8a29534d759d8a3042f9a4ff070a | 8,996 | py | Python | src/slizzy/main.py | matheushsouza/slizzy | f224b8e4621d11031315da9178202781b4a2dcef | [
"BSD-3-Clause"
] | 1 | 2019-12-24T03:08:12.000Z | 2019-12-24T03:08:12.000Z | src/slizzy/main.py | matheushsouza/slizzy | f224b8e4621d11031315da9178202781b4a2dcef | [
"BSD-3-Clause"
] | null | null | null | src/slizzy/main.py | matheushsouza/slizzy | f224b8e4621d11031315da9178202781b4a2dcef | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
import enum
import sys
import slizzy.config as config
import slizzy.version as version
from slizzy.track import Track
from slizzy.util import color, logging, time
__all__ = [
"module",
"all_modules",
"slizzy"
]
module = enum.Enum("modules", "slider mp3co zippy")
all_modules = { module.slider, module.mp3co, module.zippy }
def picky_selection(available_downloads, logger):
logger.log("\"picky\" flag: Select which entries to download by providing their " +
"(comma-separated) indexes in the list below. Alternatively, enter 'A'" +
"to download (a)ll entries.",
level = logging.level.info)
print("\nA. download all entries")
template = "{0}. {1:80} | {2:4} | {3:4.2f} MB | {4:3.2f} Kbps"
for i, entry in enumerate(available_downloads) :
print(template.format(
i,
entry.name,
time.to_str(entry.duration),
entry.size,
entry.bitrate
))
tries = 3
for i in range(tries) :
print("\nYour selection: ", end='')
try:
selection = input().strip()
if selection in ['a', 'A', 'all', 'All'] :
return available_downloads
else :
selection = list(map(int, selection.split(',')))
tracks_to_download = list(map(available_downloads.__getitem__, selection))
logger.br()
return tracks_to_download
except Exception as e:
if i < tries - 1 :
print("Error: " + str(e) + ". Was that a typo?", file = sys.stderr)
else :
print("Error: " + str(e) + ".", file = sys.stderr)
sys.exit(1)
def slizzy(track, modules, download_tracks, picky = False, fetch_limit = False):
logger = logging.Logger("slizzy")
logger.log("Slizzy magic for track '" + track.title + "'", logging.level.info)
logger.log("Query string: " + track.query_string, logging.level.info)
logger.br()
if fetch_limit :
logger.log("\"fetch_limit\" flag: a maximum of {} {} will be fetched from each provider.".format(
fetch_limit, "files" if fetch_limit > 1 else "file"
), logging.level.info)
logger.br()
if not track.duration: # Duration not supplied from command line.
try:
from slizzy.google import google
from slizzy.beatport import beatport
track.duration = next( # Extract duration from the first matching page.
duration
for page in google(track, beatport.domain)
for duration in [ beatport.get_metadata(track, page) ]
if duration
)
except StopIteration:
logger.log("Track duration unavailable", logging.level.error)
return
if module.slider in modules:
from slizzy.slider import slider
slider_downloads = slider(track, fetch_limit)
else:
slider_downloads = []
if module.mp3co in modules:
from slizzy.mp3co import mp3co
mp3co_downloads = mp3co(track, fetch_limit)
else:
mp3co_downloads = []
if module.zippy in modules:
from slizzy.google import google
from slizzy.zippy import zippy
zippy_downloads = [
dl
for page in google(track, zippy.domain, fetch_limit)
for dl in [ zippy.get_download(track, page) ]
if dl
]
else:
zippy_downloads = []
if module.slider in modules:
logger.log(
"Selected " + color.result(len(slider_downloads)) + " slider entries.",
logging.level.info
)
if module.mp3co in modules:
logger.log(
"Selected " + color.result(len(mp3co_downloads)) + " mp3co entries.",
logging.level.info
)
if module.zippy in modules:
logger.log(
"Selected " + color.result(len(zippy_downloads)) + " zippy entries.",
logging.level.info
)
available_downloads = slider_downloads + mp3co_downloads + zippy_downloads
if not available_downloads:
logger.log("No entries to download.")
return
if picky :
tracks_to_download = picky_selection(available_downloads, logger)
else:
tracks_to_download = available_downloads
if download_tracks:
from slizzy.downloader import download
download(tracks_to_download)
else:
logger.log("Selected urls:\n " + "\n ".join(
download.name + " | " + download.link
for download in tracks_to_download
))
logger.br()
logger.finish("Slizzied " + str(len(tracks_to_download)) + " files.")
def parse_args(argv):
parser = argparse.ArgumentParser(
description = "Slizzy is a tool to search for and "
"download slider.kz, mp3co.biz and zippyshare objects.",
formatter_class = argparse.RawTextHelpFormatter
)
parser.add_argument(
"--version", "-v",
action = "version",
version = "\n".join([
"%(prog)s " + version.__version__,
"Copyright (c) 2018, gahag.",
"All rights reserved."
])
)
commands = parser.add_subparsers(dest = "command", help = "commands")
dl = commands.add_parser("dl", help="download tracks")
lns = commands.add_parser("lns", help="get download links")
cfg = commands.add_parser("cfg", help="config")
for command in [ dl, lns ]:
command.add_argument(
"tracks",
help = "one or more tracks to seach, in the format: "
"A & B ft. C - ID (D vs. E Remix)",
nargs = "+"
)
command.add_argument(
"-d", "--duration",
help = "manually specify the track duration, eliding the beatport search"
)
command.add_argument(
"--fetch_limit",
help = "limits the number of entries fetched from each provider"
)
command.add_argument(
"--slider",
action = "store_true",
help = "search in slider.kz instead of all resources"
)
command.add_argument(
"--mp3co",
action = "store_true",
help = "search in mp3co.biz instead of all resources"
)
command.add_argument(
"--zippy",
action = "store_true",
help = "search only in zippyshare instead of all resources"
)
command.add_argument(
"--picky",
action = "store_true",
help = "pick which files to download instead of downloading all eligible files"
)
cfg.add_argument("--google-key", help = "set the google API key")
cfg.add_argument("--beatport-cx", help = "set the cx API key for the beatport search")
cfg.add_argument("--zippyshare-cx", help = "set the cx API key for the zippyshare search")
# add arguments for other settings, specially thresholds.
if not argv:
parser.print_usage()
sys.exit(1)
args = parser.parse_args(argv)
if args.command in [ "dl", "lns" ]:
if args.duration:
if len(args.tracks) > 1:
print(
"Error: with the duration parameter, only one track may be specified.",
file = sys.stderr
)
sys.exit(1)
try:
args.duration = time.from_str(args.duration)
except Exception as e:
print("Error: " + str(e) + ".", file = sys.stderr)
sys.exit(1)
if args.fetch_limit :
try:
args.fetch_limit = int(args.fetch_limit)
except Exception as e:
print("Error: " + str(e) + ".", file = sys.stderr)
sys.exit(1)
if args.fetch_limit <= 0:
print(
"Error: fetch limit must be an integer greater than zero.",
file = sys.stderr
)
sys.exit(1)
if args.command == "cfg":
pass # validate args
return args
def main(argv):
args = parse_args(argv)
if args.command in [ "dl", "lns" ]:
for i, track in enumerate(args.tracks):
try:
args.tracks[i] = Track(track, args.duration)
except:
print("Error: invalid track format '" + track + "'.", file = sys.stderr)
sys.exit(1)
modules = {
m
for m, arg in [
(module.slider, args.slider),
(module.mp3co, args.mp3co),
(module.zippy, args.zippy)
]
if arg
} or all_modules
download_tracks = args.command == "dl"
tracks = iter(args.tracks)
try:
slizzy(
next(tracks),
modules,
download_tracks,
fetch_limit = args.fetch_limit,
picky = args.picky
)
for track in tracks:
print(color.yellow(70 * "-"))
slizzy(track, modules, download_tracks)
except config.ConfigError as e:
print("Error (config): " + str(e), file = sys.stderr)
sys.exit(2)
if args.command == "cfg":
if args.google_key:
config.cfg["google"]["key"] = args.google_key
if args.beatport_cx:
config.cfg["beatport"]["cx"] = args.beatport_cx
if args.zippyshare_cx:
config.cfg["zippyshare"]["cx"] = args.zippyshare_cx
try:
config.update(config.cfg)
except config.ConfigError as e:
print("Error (config): " + str(e), file = sys.stderr)
sys.exit(2)
def cli():
import signal
def sigint(sig, frame):
print() # Exit progress logging
print("Slizzy: interrupted.", file = sys.stderr)
sys.exit(130)
signal.signal(signal.SIGINT, sigint)
main(sys.argv[1:])
| 25.925072 | 101 | 0.619942 | 1,130 | 8,996 | 4.841593 | 0.215044 | 0.029245 | 0.023762 | 0.026321 | 0.235971 | 0.178212 | 0.139828 | 0.104551 | 0.077682 | 0.056114 | 0 | 0.008097 | 0.258671 | 8,996 | 346 | 102 | 26 | 0.812266 | 0.022343 | 0 | 0.277154 | 0 | 0.003745 | 0.200933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0.003745 | 0.05618 | 0 | 0.097378 | 0.059925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97008b753f686abd12cd11cf56dd253fd134810a | 2,120 | py | Python | hugs/manager.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | 22 | 2017-07-20T18:02:27.000Z | 2021-06-10T13:06:22.000Z | hugs/manager.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | null | null | null | hugs/manager.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | 2 | 2019-12-11T20:44:08.000Z | 2021-02-02T04:37:04.000Z | import functools
import sqlite3
from .repository import Repository
try:
from psycopg2.extras import DictRow, RealDictRow
ROW_CLASSES = (dict, sqlite3.Row, DictRow, RealDictRow)
except ImportError: # pragma: no cover
ROW_CLASSES = (dict, sqlite3.Row)
class Manager:
"""Managers extend repositories with the ability to iterate over
queries and convert result rows to concrete data types.
"""
def __init__(self, value_factory=dict, *, repository=None):
self.repository = repository or Repository()
self.load_queries = self.repository.load_queries
self.value_factory = value_factory
def __getattr__(self, name):
fn = getattr(self.repository, name)
if getattr(fn, "is_command", False):
return command_runner(fn)
return query_iterator(fn, self.value_factory)
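# Illustrative usage (editor's sketch; the SQL file and query name are
# hypothetical, and load_queries is assumed to register one callable per
# named query on the repository):
#
#   import sqlite3
#   manager = Manager(value_factory=dict)
#   manager.load_queries('queries/users.sql')  # defines e.g. get_users
#   with sqlite3.connect('app.db') as conn:
#       for user in manager.get_users(conn.cursor()):
#           print(user)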
def command_runner(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
# Grab the cursor late so that if the user forgets to provide
# it the TypeError they get will refer to the query function,
# not wrapper() itself.
cursor = args[0]
if cursor.description:
result = cursor.fetchone()
if isinstance(result, ROW_CLASSES):
return result
return {col[0]: val for col, val in zip(cursor.description, result)}
return None
return wrapper
def query_iterator(fn, value_factory):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
# Grab the cursor late so that if the user forgets to provide
# it the TypeError they get will refer to the query function,
# not wrapper() itself.
cursor = args[0]
while True:
results = cursor.fetchmany()
if not results:
break
for result in results:
if not isinstance(result, ROW_CLASSES):
result = {col[0]: val for col, val in zip(cursor.description, result)}
yield value_factory(**result)
return wrapper
| 29.859155 | 90 | 0.628774 | 257 | 2,120 | 5.089494 | 0.357977 | 0.055046 | 0.036697 | 0.03211 | 0.365443 | 0.328746 | 0.328746 | 0.328746 | 0.328746 | 0.328746 | 0 | 0.005305 | 0.288679 | 2,120 | 70 | 91 | 30.285714 | 0.862069 | 0.197642 | 0 | 0.227273 | 0 | 0 | 0.005952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.113636 | 0 | 0.431818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
970361b69af566e027101b9098d4da56f00db470 | 3,344 | py | Python | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | [
"BSD-3-Clause"
] | null | null | null | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | [
"BSD-3-Clause"
] | null | null | null | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | [
"BSD-3-Clause"
] | null | null | null | """
SpyDrNet
========

SpyDrNet is an EDA tool for analyzing and transforming netlists.

See https://byuccl.github.io/spydrnet for more details.
"""

import importlib
import pkgutil
import pathlib
import sys
import os

discovered_plugins = {
    name: importlib.import_module(name)
    for finder, name, ispkg
    in pkgutil.iter_modules()
    if name.startswith('spydrnet_')
}
print("Installed Plugins", discovered_plugins.keys())


def get_active_plugins():
    active_plugins = {}
    config_file = os.path.join(pathlib.Path.home(), ".spydrnet")
    if os.path.isfile(config_file):
        for plugin in open(config_file, "r").read().split():
            if discovered_plugins.get(plugin, None):
                active_plugins.update({plugin: discovered_plugins[plugin]})
            else:
                print("Plugin %s is not installed" % plugin)
    else:
        with open(config_file, "w") as fp:
            fp.write("\n".join(discovered_plugins.keys()))
        active_plugins.update(discovered_plugins)
    return active_plugins


print("Active Plugins", get_active_plugins().keys())

# Release data
from spydrnet import release

__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
    (release.authors['Keller'] + release.authors['Skouson'] +
     release.authors['Wirthlin'])
__license__ = release.license
__date__ = release.date
__version__ = release.version
__release__ = release.release

from spydrnet.ir import *
from spydrnet.util.hierarchical_reference import HRef

OUT = Port.Direction.OUT
IN = Port.Direction.IN
INOUT = Port.Direction.INOUT
UNDEFINED = Port.Direction.UNDEFINED

from spydrnet.util.selection import INSIDE, OUTSIDE, BOTH, ALL
from spydrnet.testing.test import run as test
from spydrnet.parsers import parse
from spydrnet.composers import compose
from spydrnet.plugins import namespace_manager
from spydrnet.util import get_netlists, get_libraries, get_definitions, get_ports, get_cables, get_instances,\
    get_wires, get_pins
from spydrnet.util import get_hinstances, get_hports, get_hpins, get_hcables, get_hwires

import os
base_dir = os.path.dirname(os.path.abspath(__file__))

import glob
example_netlist_names = list()
for filename in glob.glob(os.path.join(base_dir, 'support_files', 'EDIF_netlists', "*")):
    basename = os.path.basename(filename)
    example_netlist_names.append(basename[:basename.index('.')])
example_netlist_names.sort()

# logger for the module
import logging
import sys
LOG_FORMAT = "%(levelname)5s %(filename)s:%(lineno)s (%(threadName)10s) - %(message)s"
logger = logging.getLogger('spydrnet_logs')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(stream_handler)


def enable_file_logging(LOG_LEVEL=None, filename=""):
    LOG_LEVEL = LOG_LEVEL or "INFO"
    file_handler = logging.FileHandler("_" + filename + "_spydrnet.log", mode='w')
    file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
    # look the level name up on the logging module rather than eval()-ing it
    file_handler.setLevel(getattr(logging, LOG_LEVEL))
    logger.addHandler(file_handler)
    return file_handler


def load_example_netlist_by_name(name):
    assert name in example_netlist_names, "Example netlist not found"
    return parse(os.path.join(base_dir, 'support_files', 'EDIF_netlists', name + ".edf.zip"))
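
# --- Illustrative usage (a sketch, not part of the package) ---
# With spydrnet installed, the bundled example netlists can be loaded by name:
#
#     import spydrnet as sdn
#
#     print(sdn.example_netlist_names[:5])
#     netlist = sdn.load_example_netlist_by_name(sdn.example_netlist_names[0])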
| 30.962963 | 110 | 0.739833 | 440 | 3,344 | 5.395455 | 0.370455 | 0.050548 | 0.026959 | 0.00337 | 0.09604 | 0.071609 | 0.034541 | 0.034541 | 0.034541 | 0 | 0 | 0.001047 | 0.142943 | 3,344 | 107 | 111 | 31.252336 | 0.827285 | 0.052632 | 0 | 0.078947 | 0 | 0.013158 | 0.106046 | 0.007281 | 0 | 0 | 0 | 0 | 0.013158 | 1 | 0.039474 | false | 0 | 0.263158 | 0 | 0.342105 | 0.039474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9703d55b61dbe9671b606d116415a69bd78ce48a | 620 | py | Python | tools/sparse_dense_size_comparison.py | 3rror/aca_pathfinding_project | d7a91b1491e8eca08849e526634bdaa9a3833fa2 | [
"MIT"
] | null | null | null | tools/sparse_dense_size_comparison.py | 3rror/aca_pathfinding_project | d7a91b1491e8eca08849e526634bdaa9a3833fa2 | [
"MIT"
] | null | null | null | tools/sparse_dense_size_comparison.py | 3rror/aca_pathfinding_project | d7a91b1491e8eca08849e526634bdaa9a3833fa2 | [
"MIT"
] | null | null | null | # Compare memory usage of a dense and a sparse adjacency matrix.
#
# Requires numpy and scipy. Install them with `pip3 install --user numpy scipy`

# Authors: Gianluca Andreotti, Aurora Lucrezia Castro

import numpy as np
from scipy.sparse import csr_matrix
import sys


def load_matrix(file):
    matrix = np.loadtxt(file, dtype=int, ndmin=2)
    print("Nodes: " + str(len(matrix)))
    # nbytes is in bytes, so divide by 1e6 to report megabytes
    print(f"Dense matrix: {matrix.nbytes / 1e6}MB")

    sparse_csr_mat = csr_matrix(matrix)
    # a CSR matrix stores the data array plus the indices/indptr index arrays
    sparse_bytes = (sparse_csr_mat.data.nbytes
                    + sparse_csr_mat.indices.nbytes
                    + sparse_csr_mat.indptr.nbytes)
    print(f"Sparse matrix: {sparse_bytes / 1e6}MB")
    print("")


if __name__ == "__main__":
    for f in sys.argv[1:]:
        load_matrix(f)
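
# --- Quick self-check (a sketch, not part of the tool) ---
# Builds a small random 0/1 adjacency matrix, saves it in the text format
# load_matrix() expects, and runs the comparison on it. The file name is
# made up for the demo; it only runs when no files are given on the CLI.
if __name__ == "__main__" and not sys.argv[1:]:
    demo = np.random.randint(0, 2, size=(100, 100))
    np.savetxt("demo_matrix.txt", demo, fmt="%d")
    load_matrix("demo_matrix.txt")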
| 25.833333 | 66 | 0.696774 | 93 | 620 | 4.473118 | 0.591398 | 0.043269 | 0.057692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021825 | 0.187097 | 620 | 23 | 67 | 26.956522 | 0.803571 | 0.282258 | 0 | 0 | 0 | 0 | 0.238636 | 0.061364 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97062759c3659ac5db7690deb35f3812cc14b685 | 5,298 | py | Python | app/crypto_algo.py | paul3bin/cryptography-api | 13145ef897dcd3e5d4850bbf9f3572556a055e22 | [
"MIT"
] | null | null | null | app/crypto_algo.py | paul3bin/cryptography-api | 13145ef897dcd3e5d4850bbf9f3572556a055e22 | [
"MIT"
] | null | null | null | app/crypto_algo.py | paul3bin/cryptography-api | 13145ef897dcd3e5d4850bbf9f3572556a055e22 | [
"MIT"
] | null | null | null | from pandas import DataFrame


class CaesarCipher:
    def __init__(self, text, key) -> None:
        self.text = text
        self.key = key

    def decrypt(self):
        decipheredText = ""
        for ch in self.text.upper():
            if ch.isalpha():
                newchar = ord(ch) - self.key
                if newchar < ord('A'):
                    newchar += 26
                newShiftedALphabet = chr(newchar)
                decipheredText += newShiftedALphabet
        return decipheredText.upper()

    def encrypt(self):
        cipherText = ""
        for ch in self.text.upper():
            if ch.isalpha():
                alphabetInString = ord(ch) + self.key
                if alphabetInString > ord('Z'):
                    alphabetInString -= 26
                shiftedAlphabet = chr(alphabetInString)
                cipherText += shiftedAlphabet
        return cipherText.upper()


class VigenereCipher:
    # Initialization function
    def __init__(self, text, key):
        # converting key and text to uppercase and removing spaces from them.
        self.text = "".join(text.upper().split(' '))
        self.key = "".join(key.upper().split(' '))

    # Function that encrypts the given plain text using the given key.
    def encrypt(self):
        cipher_text, j = [], 0
        for i in range(len(self.text)):
            if j > len(self.key) - 1:
                j = 0
            character = ((ord(self.text[i]) + ord(self.key[j])) % 26) + ord('A')
            j += 1
            cipher_text.append(chr(character))
        return ''.join(cipher_text)

    # Function that decrypts the given cipher text using the given key.
    def decrypt(self):
        plain_text, j = [], 0
        for i in range(len(self.text)):
            if j > len(self.key) - 1:
                j = 0
            character = ((ord(self.text[i]) - ord(self.key[j]) + 26) % 26) + ord('A')
            plain_text.append(chr(character))
            j += 1
        return ''.join(plain_text)


class MorseCode:
    def __init__(self, message: str) -> None:
        self.message = message.upper()

    # Function that returns value or key from morse_dict dictionary
    def getDictItems(self, val, option):
        morse_dict = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....',
                      'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.',
                      'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
                      'Y': '-.--', 'Z': '--..',
                      '0': '-----', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
                      '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.',
                      '.': '.-.-.-', ',': '--..--', '?': '..--..', '!': '-.-.--', '/': '-..-.',
                      '(': '-.--.', ')': '-.--.-', '&': '.-...', ':': '---...', ';': '-.-.-.',
                      '=': '-...-', '+': '.-.-.', '-': '-....-', '_': '..--.-', '$': '...-..-', '@': '.--.-.'}
        operation = {1: morse_dict, 2: list(morse_dict.keys())}
        if option == 1:
            return operation[option][val]
        else:
            return operation[option][list(morse_dict.values()).index(val)]

    # Function to encrypt given message
    def encrypt(self):
        return "".join(['/ ' if character == ' '
                        else f'{self.getDictItems(character, 1)} '
                        for character in self.message])

    # Function to decrypt given cipher text
    def decrypt(self):
        return "".join([' ' if character == '/'
                        else f'{self.getDictItems(character, 2)}'
                        for character in self.message.split(' ')])


class RunningKeyCipher:
    def __init__(self, plainText, key):
        # converting the plain text and key to upper case and removing spaces
        self.pt = "".join(plainText.upper().split(' '))
        self.ky = "".join(key.upper().split(' '))

        # creating a DataFrame of size 26x26 (a tabula recta)
        tab, tableau = [chr(a) for a in range(65, 91)], []
        for i in range(26):
            row = tab[i:] + tab[:i]
            tableau.append(row)
        self.tabulaRecta = DataFrame(tableau, index=tab, columns=tab)

    def encrypt(self):
        encryptedText = ''
        for i in range(len(self.pt)):
            encryptedText += self.tabulaRecta.values[ord(
                self.pt[i]) - 65][ord(self.ky[i]) - 65]
        return encryptedText

    def decrypt(self):
        decryptedText = ''
        for i in range(len(self.pt)):
            decryptedText += ''.join(
                self.tabulaRecta[self.tabulaRecta[self.ky[i]] == self.pt[i]].index.values)
        return decryptedText


class ROT13:
    def __init__(self, text: str):
        # removing spaces from the text
        self.text = "".join(text.upper().split(' '))

    # ROT13 is its own inverse, so encrypt and decrypt apply the same shift.
    def encrypt(self):
        return "".join([chr(ord(alphabet) - 13) if ord(alphabet) > ord('A') + 12
                        else chr(ord(alphabet) + 13) for alphabet in self.text])

    def decrypt(self):
        return "".join([chr(ord(alphabet) - 13) if ord(alphabet) > ord('A') + 12
                        else chr(ord(alphabet) + 13) for alphabet in self.text])
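
# --- Quick demo (a sketch, not part of the API) ---
# Exercises each cipher defined above with made-up sample text and keys.
if __name__ == "__main__":
    print(CaesarCipher("HELLO", 3).encrypt())                 # -> KHOOR
    print(CaesarCipher("KHOOR", 3).decrypt())                 # -> HELLO
    print(VigenereCipher("ATTACKATDAWN", "LEMON").encrypt())  # -> LXFOPVEFRNHR
    print(MorseCode("SOS").encrypt())                         # -> "... --- ... "
    print(ROT13("HELLO").encrypt())                           # -> URYYB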
| 35.32 | 118 | 0.466214 | 554 | 5,298 | 4.400722 | 0.236462 | 0.045939 | 0.022559 | 0.022559 | 0.337982 | 0.283019 | 0.251846 | 0.235439 | 0.208368 | 0.182937 | 0 | 0.017001 | 0.322763 | 5,298 | 149 | 119 | 35.557047 | 0.662486 | 0.090411 | 0 | 0.291262 | 0 | 0 | 0.074667 | 0.012063 | 0.009709 | 0 | 0 | 0 | 0 | 1 | 0.15534 | false | 0 | 0.009709 | 0.038835 | 0.330097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97072840b3c078e668d6a47321880a4ea8713698 | 2,638 | py | Python | MultiPManager/brokermq.py | sebastiantrianac/SoftTLON | 3b798393efbd49b4a22acf19a056d64bef1d0ddf | [
"MIT"
] | null | null | null | MultiPManager/brokermq.py | sebastiantrianac/SoftTLON | 3b798393efbd49b4a22acf19a056d64bef1d0ddf | [
"MIT"
] | null | null | null | MultiPManager/brokermq.py | sebastiantrianac/SoftTLON | 3b798393efbd49b4a22acf19a056d64bef1d0ddf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
#
# A module for creating a multi-agent system over Ad-hoc networks
# Copyright (C) 2017-2018
# Juan Sebastian Triana Correa <justrianaco@unal.edu.co>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].

__author__ = "Juan Sebastian Triana Correa"
__copyright__ = "Copyright 2015, TLON group"
__license__ = "LGPL"
__version__ = "1.2"
__email__ = "justrianaco@unal.edu.co"
__status__ = "Development"

import sys
import stomp
import dill as pickle
import MultiPManager.managerImp as managerImp
import MultiPManager.multiProc as multiProc

AUTHKEY = ''
tlon_resources = {}


def BrokerConnect(ip, port):
    conn = stomp.Connection([(ip, port)])
    conn.start()
    conn.connect(wait=True)
    return conn


class __resourceTopicListener__(stomp.ConnectionListener):
    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        global tlon_resources
        # print('Received a message {}'.format(message))
        # print('Request for resource {}'.format(message))
        if sys.version_info[0] < 3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        tlon_resources[tmp.__name__] = tmp


class __ordersTopicListener__(stomp.ConnectionListener):
    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        global tlon_resources
        if sys.version_info[0] < 3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        print("{},{},{}".format(tmp['ip'], tmp['portnum'], tmp['authkey']))
        print(tmp)
        if tmp['resourceName'] in tlon_resources:
            manager = managerImp.make_client_manager(tmp['ip'], tmp['portnum'], tmp['authkey'])
            job_q = manager.get_job_q()
            result_q = manager.get_result_q()
            multiProc.tlon_multiprocessing(job_q, result_q, tlon_resources[tmp['resourceName']])
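
# --- Illustrative usage (a sketch, not part of the module) ---
# Requires a running STOMP broker (e.g. ActiveMQ) at the given address; the
# host, port, and topic name below are assumptions for the demo, and the
# subscribe() signature matches stomp.py 4.x as used by this code.
if __name__ == "__main__":
    import time

    conn = BrokerConnect("127.0.0.1", 61613)
    conn.set_listener("resources", __resourceTopicListener__())
    conn.subscribe(destination="/topic/tlon.resources", id=1, ack="auto")
    time.sleep(60)  # keep the process alive while messages arrive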
| 33.820513 | 96 | 0.689917 | 344 | 2,638 | 5.119186 | 0.453488 | 0.044293 | 0.040886 | 0.0477 | 0.303805 | 0.289608 | 0.230551 | 0.230551 | 0.230551 | 0.230551 | 0 | 0.009497 | 0.201668 | 2,638 | 77 | 97 | 34.25974 | 0.826686 | 0.331691 | 0 | 0.363636 | 0 | 0 | 0.114237 | 0.013203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.113636 | 0 | 0.295455 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9707bc0df9d0b4b62a5f4981d1500fdead7c395d | 672 | py | Python | tests/span/span_helpers.py | dmytroxshevchuk/sonic-mgmt | b784e598a063aba71fdf9ca23fe142840926e4cf | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | tests/span/span_helpers.py | dmytroxshevchuk/sonic-mgmt | b784e598a063aba71fdf9ca23fe142840926e4cf | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | tests/span/span_helpers.py | dmytroxshevchuk/sonic-mgmt | b784e598a063aba71fdf9ca23fe142840926e4cf | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | '''
Helper functions for span tests
'''

import ptf.testutils as testutils


def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor):
    '''
    Send packet from ptf and verify it on monitor port

    Args:
        ptfadapter: ptfadapter fixture
        src_port: ptf port index, from which packet will be sent
        monitor: ptf port index, where packet will be verified on
    '''
    src_mac = ptfadapter.dataplane.get_mac(0, src_port)
    pkt = testutils.simple_icmp_packet(eth_src=src_mac, eth_dst='ff:ff:ff:ff:ff:ff')

    ptfadapter.dataplane.flush()
    testutils.send(ptfadapter, src_port, pkt)
    testutils.verify_packet(ptfadapter, pkt, monitor)
| 29.217391 | 84 | 0.71875 | 96 | 672 | 4.864583 | 0.4375 | 0.042827 | 0.051392 | 0.051392 | 0.025696 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001848 | 0.19494 | 672 | 22 | 85 | 30.545455 | 0.861368 | 0.36756 | 0 | 0 | 0 | 0 | 0.044041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97089962ed3ea0a8a6e34e92416d3e39210a2bda | 9,039 | py | Python | bonobo/nodes/basics.py | a-musing-moose/bonobo | b3e9ddd028aed4a8a4df8e4b89334951a343e6fa | [
"Apache-2.0"
] | null | null | null | bonobo/nodes/basics.py | a-musing-moose/bonobo | b3e9ddd028aed4a8a4df8e4b89334951a343e6fa | [
"Apache-2.0"
] | null | null | null | bonobo/nodes/basics.py | a-musing-moose/bonobo | b3e9ddd028aed4a8a4df8e4b89334951a343e6fa | [
"Apache-2.0"
] | null | null | null | import functools
import html
import itertools
import pprint

from bonobo import settings
from bonobo.config import Configurable, Option, Method, use_raw_input, use_context, use_no_input
from bonobo.config.functools import transformation_factory
from bonobo.config.processors import ContextProcessor, use_context_processor
from bonobo.constants import NOT_MODIFIED
from bonobo.util.objects import ValueHolder
from bonobo.util.term import CLEAR_EOL
from mondrian import term

__all__ = [
    'FixedWindow',
    'Format',
    'Limit',
    'OrderFields',
    'PrettyPrinter',
    'Rename',
    'SetFields',
    'Tee',
    'UnpackItems',
    'count',
    'identity',
    'noop',
]


def identity(x):
    return x


class Limit(Configurable):
    """
    Creates a Limit() node, that will only let go through the first n rows (defined by the `limit` option), unmodified.

    .. attribute:: limit

        Number of rows to let go through.

    TODO: simplify into a closure building factory?
    """

    limit = Option(positional=True, default=10)

    @ContextProcessor
    def counter(self, context):
        yield ValueHolder(0)

    def __call__(self, counter, *args, **kwargs):
        counter += 1
        if counter <= self.limit:
            yield NOT_MODIFIED


def Tee(f):
    from bonobo.constants import NOT_MODIFIED

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        nonlocal f
        f(*args, **kwargs)
        return NOT_MODIFIED

    return wrapped


def _shorten(s, w):
    if w and len(s) > w:
        s = s[0:w - 3] + '...'
    return s


class PrettyPrinter(Configurable):
    max_width = Option(
        int,
        default=term.get_size()[0],
        required=False,
        __doc__='''
        If set, truncates the output values longer than this to this width.
        '''
    )

    filter = Method(
        default=
        (lambda self, index, key, value: (value is not None) and (not isinstance(key, str) or not key.startswith('_'))),
        __doc__='''
        A filter that determines what to print.

        Default is to ignore any key starting with an underscore and none values.
        '''
    )

    @ContextProcessor
    def context(self, context):
        context.setdefault('_jupyter_html', None)
        yield context
        if context._jupyter_html is not None:
            from IPython.display import display, HTML
            display(HTML('\n'.join(['<table>'] + context._jupyter_html + ['</table>'])))

    def __call__(self, context, *args, **kwargs):
        if not settings.QUIET:
            if term.isjupyter:
                self.print_jupyter(context, *args, **kwargs)
                return NOT_MODIFIED
            if term.istty:
                self.print_console(context, *args, **kwargs)
                return NOT_MODIFIED
        self.print_quiet(context, *args, **kwargs)
        return NOT_MODIFIED

    def print_quiet(self, context, *args, **kwargs):
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_quiet(index, key, value, fields=context.get_input_fields()))

    def format_quiet(self, index, key, value, *, fields=None):
        # XXX should we implement argnames here ?
        return ' '.join(((' ' if index else '-'), str(key), ':', str(value).strip()))

    def print_console(self, context, *args, **kwargs):
        print('\u250c')
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_console(index, key, value, fields=context.get_input_fields()))
        print('\u2514')

    def format_console(self, index, key, value, *, fields=None):
        fields = fields or []
        if not isinstance(key, str):
            if len(fields) > key and str(key) != str(fields[key]):
                key = '{}{}'.format(fields[key], term.lightblack('[{}]'.format(key)))
            else:
                key = str(index)

        prefix = '\u2502 {} = '.format(key)
        prefix_length = len(prefix)

        def indent(text, prefix):
            for i, line in enumerate(text.splitlines()):
                yield (prefix if i else '') + line + CLEAR_EOL + '\n'

        repr_of_value = ''.join(
            indent(pprint.pformat(value, width=self.max_width - prefix_length), '\u2502' + ' ' * (len(prefix) - 1))
        ).strip()

        return '{}{}{}'.format(prefix, repr_of_value.replace('\n', CLEAR_EOL + '\n'), CLEAR_EOL)

    def print_jupyter(self, context, *args):
        if not context._jupyter_html:
            context._jupyter_html = [
                '<thead><tr>',
                *map('<th>{}</th>'.format, map(html.escape, map(str,
                                                                context.get_input_fields() or range(len(args))))),
                '</tr></thead>',
            ]

        context._jupyter_html += [
            '<tr>',
            *map('<td>{}</td>'.format, map(html.escape, map(repr, args))),
            '</tr>',
        ]


@use_no_input
def noop(*args, **kwargs):
    return NOT_MODIFIED


class FixedWindow(Configurable):
    """
    Transformation factory to create fixed windows of inputs, as lists.

    For example, if the input is successively 1, 2, 3, 4, etc. and you pass it through a ``FixedWindow(2)``, you'll get
    lists of elements 2 by 2: [1, 2], [3, 4], ...
    """

    length = Option(int, positional=True)  # type: int

    @ContextProcessor
    def buffer(self, context):
        buffer = yield ValueHolder([])
        if len(buffer):
            last_value = buffer.get()
            last_value += [None] * (self.length - len(last_value))
            context.send(*last_value)

    @use_raw_input
    def __call__(self, buffer, bag):
        buffer.append(bag)
        if len(buffer) >= self.length:
            yield tuple(buffer.get())
            buffer.set([])


@transformation_factory
def OrderFields(fields):
    """
    Transformation factory to reorder fields in a data stream.

    :param fields:
    :return: callable
    """
    fields = list(fields)

    @use_context
    @use_raw_input
    def _OrderFields(context, row):
        nonlocal fields
        context.setdefault('remaining', None)

        if not context.output_type:
            context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
            context.set_output_fields(fields + context.remaining)

        yield tuple(row.get(field) for field in context.get_output_fields())

    return _OrderFields


@transformation_factory
def SetFields(fields):
    """
    Transformation factory that sets the field names on first iteration, without touching the values.

    :param fields:
    :return: callable
    """

    @use_context
    @use_no_input
    def _SetFields(context):
        nonlocal fields
        if not context.output_type:
            context.set_output_fields(fields)
        return NOT_MODIFIED

    return _SetFields


@transformation_factory
def UnpackItems(*items, fields=None, defaults=None):
    """
    >>> UnpackItems(0)

    :param items:
    :param fields:
    :param defaults:
    :return: callable
    """
    defaults = defaults or {}

    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults

        if fields is None:
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)

        values = ()
        for item in items:
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)

        return values

    return _UnpackItems


@transformation_factory
def Rename(**translations):
    # XXX todo handle duplicated
    fields = None
    translations = {v: k for k, v in translations.items()}

    @use_context
    @use_raw_input
    def _Rename(context, bag):
        nonlocal fields, translations

        if not fields:
            fields = tuple(translations.get(field, field) for field in context.get_input_fields())
            context.set_output_fields(fields)

        return NOT_MODIFIED

    return _Rename


@transformation_factory
def Format(**formats):
    fields, newfields = None, None

    @use_context
    @use_raw_input
    def _Format(context, bag):
        nonlocal fields, newfields, formats

        if not context.output_type:
            fields = context.input_type._fields
            newfields = tuple(field for field in formats if not field in fields)
            context.set_output_fields(fields + newfields)

        return tuple(
            formats[field].format(**bag._asdict()) if field in formats else bag.get(field)
            for field in fields + newfields
        )

    return _Format


def _count(self, context):
    counter = yield ValueHolder(0)
    context.send(counter.get())


@use_no_input
@use_context_processor(_count)
def count(counter):
    counter += 1
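
# --- Illustrative usage (a sketch, not part of bonobo itself) ---
# Shows how the nodes above compose into an ETL graph. Assumes bonobo's
# public Graph/run API; the extract() generator is made up for the demo.
if __name__ == "__main__":
    import bonobo

    def extract():
        yield from range(10)

    graph = bonobo.Graph(extract, FixedWindow(2), PrettyPrinter())
    bonobo.run(graph)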
| 27.557927 | 120 | 0.605819 | 1,056 | 9,039 | 5.042614 | 0.214962 | 0.022535 | 0.021972 | 0.01784 | 0.212207 | 0.176338 | 0.073615 | 0.073615 | 0.058592 | 0.039437 | 0 | 0.005682 | 0.279566 | 9,039 | 327 | 121 | 27.642202 | 0.812039 | 0.093263 | 0 | 0.229358 | 0 | 0 | 0.060144 | 0 | 0 | 0 | 0 | 0.006116 | 0 | 1 | 0.133028 | false | 0 | 0.06422 | 0.013761 | 0.316514 | 0.059633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9709d0267ed5104b57450430822594c17f2e9707 | 1,093 | py | Python | examplesimple.py | atxarib99/kNN | 20c41a549f9f101478c1523b2ba683a8058fa6c8 | [
"MIT"
] | null | null | null | examplesimple.py | atxarib99/kNN | 20c41a549f9f101478c1523b2ba683a8058fa6c8 | [
"MIT"
] | null | null | null | examplesimple.py | atxarib99/kNN | 20c41a549f9f101478c1523b2ba683a8058fa6c8 | [
"MIT"
] | null | null | null | '''
This file serves as an example of how to use kNN. It is the simplified version with no graphs. For an example with graphs, check example.py

Change k, noise, and amount of data to see how accuracy is affected.
'''

import kNN as knn
import modeler

# setup the trainer. Tune your k parameter here.
trainer = knn.knn(k=5)

# using the basic modeler provided, we can define how many elements, and how much noise, we want.
parameters, labels = modeler.generateData(100, noiseFactor=.25)

# get parameters to test on. These should have 0 noise so we can accurately test them
testParameters, testLabel = modeler.generateData(25, noiseFactor=0)

# load the train data into the trainer
trainer.loadData(parameters, labels)

# holds the number of incorrect predictions
error = 0
# for each test element
for i in range(len(testParameters)):
    # use trainer to get a guess
    confidence, guess = trainer.predict(testParameters[i], negativeValue=0)
    # check if we were incorrect
    if guess != testLabel[i]:
        error += 1

# calculate and print accuracy
print("Accuracy", 1 - error / len(testParameters))
print("Accuracy", 1 - error / len(testParameters)) | 34.15625 | 153 | 0.743824 | 169 | 1,093 | 4.810651 | 0.544379 | 0.01968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0.180238 | 1,093 | 32 | 154 | 34.15625 | 0.891741 | 0.547118 | 0 | 0 | 0 | 0 | 0.016949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
970a946d901c0d355d2502d9d1f07264bbcb2be3 | 424 | py | Python | python/simpletest/test.py | hackin-zhao/interesting_code | 25fecb716f5c8f9143ec2824bbf8996b347ce8a9 | [
"MIT"
] | 1 | 2021-09-22T10:56:13.000Z | 2021-09-22T10:56:13.000Z | python/simpletest/test.py | hackin-zhao/interesting_code | 25fecb716f5c8f9143ec2824bbf8996b347ce8a9 | [
"MIT"
] | 1 | 2022-02-15T03:53:47.000Z | 2022-02-15T03:53:47.000Z | python/simpletest/test.py | hackin-zhao/interesting_code | 25fecb716f5c8f9143ec2824bbf8996b347ce8a9 | [
"MIT"
] | null | null | null | import unittest
from mathtest import add, minus


class TestMathFunc(unittest.TestCase):
    """ Test math functions """

    def test_add(self):
        """ Test method add(a,b) """
        self.assertEqual(3, add(1, 2))
        self.assertNotEqual(3, add(2, 2))

    def test_minus(self):
        """ Test method minus(a,b) """
        self.assertEqual(1, minus(3, 2))


if __name__ == '__main__':
    unittest.main()
| 21.2 | 43 | 0.601415 | 56 | 424 | 4.375 | 0.446429 | 0.057143 | 0.114286 | 0.138776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028213 | 0.247642 | 424 | 19 | 44 | 22.315789 | 0.739812 | 0.162736 | 0 | 0 | 0 | 0 | 0.023881 | 0 | 0 | 0 | 0 | 0 | 0.3 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
970bccc21f91dbbbb912e9ec8f46b93496941b85 | 585 | py | Python | src/data/439.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/439.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/439.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from collections import deque

N, Q = map(int, input().split())
G = [[] for _ in range(N)]
for _ in range(N - 1):
    a, b = map(lambda x: int(x) - 1, input().split())
    G[a].append(b)
    G[b].append(a)

# 2-color the tree with a BFS from vertex 0: dist alternates 0/1 by depth,
# so two vertices share a color exactly when their depth parities match.
s = 0
dist = [-1] * N
que = deque()
que.append(s)
dist[s] = 0
while que:
    i = que.popleft()
    for j in G[i]:
        if dist[j] == -1:
            dist[j] = 1 - dist[i]
            que.append(j)

for _ in range(Q):
    c, d = map(lambda x: int(x) - 1, input().split())
    if dist[c] == dist[d]:
        print("Town")
    else:
        print("Road")
| 20.172414 | 53 | 0.499145 | 101 | 585 | 2.861386 | 0.386139 | 0.103806 | 0.103806 | 0.076125 | 0.17301 | 0.17301 | 0.17301 | 0.17301 | 0 | 0 | 0 | 0.021792 | 0.294017 | 585 | 28 | 54 | 20.892857 | 0.677966 | 0.035897 | 0 | 0 | 0 | 0 | 0.01421 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
970ca64e46ad17397f8882ea7b2df30130763068 | 5,384 | py | Python | axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py | Subramani830/testrepo | 4568c628dc0731d08315ebc830dc813e0abfc60d | [
"MIT"
] | null | null | null | axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py | Subramani830/testrepo | 4568c628dc0731d08315ebc830dc813e0abfc60d | [
"MIT"
] | null | null | null | axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py | Subramani830/testrepo | 4568c628dc0731d08315ebc830dc813e0abfc60d | [
"MIT"
] | null | null | null | # Copyright (c) 2013, veena and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe import _


def execute(filters=None):
    columns = get_columns()
    data = []
    conditions = get_conditions(filters)
    data = get_data(filters, conditions)
    for project in data:
        project["total_tasks"] = frappe.db.count("Task", filters={"project": project['name']})
        project["completed_tasks"] = frappe.db.count("Task", filters={"project": project['name'], "status": "Completed"})
        project["overdue_tasks"] = frappe.db.count("Task", filters={"project": project['name'], "status": "Overdue"})
    chart = get_chart_data(data)
    report_summary = get_report_summary(data)
    return columns, data, None, chart, report_summary


def get_columns():
    return [
        {
            "fieldname": "name",
            "label": _("Project"),
            "fieldtype": "Link",
            "options": "Project",
            "width": 200
        },
        {
            "fieldname": "project_type",
            "label": _("Type"),
            "fieldtype": "Link",
            "options": "Project Type",
            "width": 120
        },
        {
            "fieldname": "status",
            "label": _("Status"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "total_tasks",
            "label": _("Total Tasks"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "completed_tasks",
            "label": _("Tasks Completed"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "overdue_tasks",
            "label": _("Tasks Overdue"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "percent_complete",
            "label": _("Completion"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "expected_start_date",
            "label": _("Start Date"),
            "fieldtype": "Date",
            "width": 120
        },
        {
            "fieldname": "expected_end_date",
            "label": _("End Date"),
            "fieldtype": "Date",
            "width": 120
        },
        {
            "fieldname": "date_percentage",
            "label": _("Duration Left%"),
            "fieldtype": "Percent",
            "width": 200
        },
        {
            "fieldname": "total_sales_amount",
            "label": _("Total Sales Amount"),
            "fieldtype": "Currency",
            "width": 150
        },
        {
            "fieldname": "total_billed_amount",
            "label": _("Total Billed Amount"),
            "fieldtype": "Currency",
            "width": 200
        },
        {
            "fieldname": "sales_percentage",
            "label": _("Sales Remaining%"),
            "fieldtype": "Percent",
            "width": 150
        }
    ]


def get_data(filters, conditions):
    query = """select p.name,p.status,p.percent_complete,p.expected_start_date,p.expected_end_date,p.project_type,(CASE WHEN p.expected_start_date > NOW() THEN 100 ELSE ( (DATEDIFF(p.expected_end_date,Now()) / DATEDIFF(p.expected_end_date, p.expected_start_date))*100)END)as date_percentage,p.total_sales_amount,p.total_billed_amount,(((p.total_sales_amount-p.total_billed_amount)/p.total_sales_amount)*100) as sales_percentage from `tabProject` p WHERE {conditions} ORDER BY p.expected_end_date ASC""".format(conditions=conditions)
    proj = frappe.db.sql(query, as_dict=True)
    return proj


def get_conditions(filters):
    # start from a tautology so every later clause can safely prepend AND
    conditions = "1=1"
    if filters.get('company'):
        conditions += " AND p.company = '{}'".format(filters.get('company'))
    conditions += " AND p.expected_end_date >= NOW()"
    if filters.get('is_active'):
        conditions += " AND p.is_active = '{}'".format(filters.get('is_active'))
    if filters.get('status'):
        conditions += " AND p.status = '{}'".format(filters.get('status'))
    if filters.get('project_type'):
        conditions += " AND p.project_type = '{}'".format(filters.get('project_type'))
    if filters.get('priority'):
        conditions += " AND p.priority = '{}'".format(filters.get('priority'))
    return conditions


def get_chart_data(data):
    labels = []
    total = []
    completed = []
    overdue = []
    date_per = []
    sales_per = []
    for project in data:
        labels.append(project.name)
        total.append(project.total_tasks)
        completed.append(project.completed_tasks)
        overdue.append(project.overdue_tasks)
        date_per.append(project.date_percentage)
        sales_per.append(project.sales_percentage)
    # labels and every dataset are truncated to the same length so the
    # chart renders with one value per label
    return {
        "data": {
            'labels': labels[:30],
            'datasets': [
                {
                    "name": "Overdue",
                    "values": overdue[:30]
                },
                {
                    "name": "Completed",
                    "values": completed[:30]
                },
                {
                    "name": "Total Tasks",
                    "values": total[:30]
                },
                {
                    "name": "Duration Left%",
                    "values": date_per[:30]
                },
                {
                    "name": "Sales Remaining%",
                    "values": sales_per[:30]
                },
            ]
        },
        "type": "bar",
        "colors": ["#fc4f51", "#ffd343", "#00FF00", "#7575ff", "#78d6ff"],
        "barOptions": {
            "stacked": False
        }
    }


def get_report_summary(data):
    if not data:
        return None
    avg_completion = sum([project.percent_complete for project in data]) / len(data)
    total = sum([project.total_tasks for project in data])
    total_overdue = sum([project.overdue_tasks for project in data])
    completed = sum([project.completed_tasks for project in data])
    return [
        {
            "value": avg_completion,
            "indicator": "Green" if avg_completion > 50 else "Red",
            "label": "Average Completion",
            "datatype": "Percent",
        },
        {
            "value": total,
            "indicator": "Blue",
            "label": "Total Tasks",
            "datatype": "Int",
        },
        {
            "value": completed,
            "indicator": "Green",
            "label": "Completed Tasks",
            "datatype": "Int",
        },
        {
            "value": total_overdue,
            "indicator": "Green" if total_overdue == 0 else "Red",
            "label": "Overdue Tasks",
            "datatype": "Int",
        }
    ]
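
# --- Illustrative usage (a sketch; requires an initialised Frappe site) ---
# Inside `bench console` or a scheduled job, the report could be invoked as:
#
#     filters = {"company": "Example Corp"}   # the company name is an assumption
#     columns, data, _, chart, summary = execute(filters)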
| 25.158879 | 527 | 0.633172 | 617 | 5,384 | 5.345219 | 0.197731 | 0.030321 | 0.041237 | 0.029109 | 0.184657 | 0.09339 | 0.072771 | 0.072771 | 0.072771 | 0.05852 | 0 | 0.019035 | 0.180349 | 5,384 | 213 | 528 | 25.276995 | 0.728303 | 0.016716 | 0 | 0.169231 | 0 | 0.005128 | 0.381024 | 0.065961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0 | 0.015385 | 0.005128 | 0.082051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
971109a2af40ebe4d09693183a658f563772f76a | 4,652 | py | Python | animate_gp.py | mzwiessele/notebook_playground | 14938eddf5491eb0356fa4f73d16047202131e4d | [
"BSD-2-Clause"
] | null | null | null | animate_gp.py | mzwiessele/notebook_playground | 14938eddf5491eb0356fa4f73d16047202131e4d | [
"BSD-2-Clause"
] | null | null | null | animate_gp.py | mzwiessele/notebook_playground | 14938eddf5491eb0356fa4f73d16047202131e4d | [
"BSD-2-Clause"
] | null | null | null | #===============================================================================
# Copyright (c) 2018, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of animate_gp nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
import GPy
from matplotlib import pyplot as plt
from matplotlib import animation

def exp_map_sphere(mu, E):
    theta = np.sqrt((E ** 2).sum(0))[None, :]
    M = mu * np.sin(theta)
    M = M + (E * (np.cos(theta)) / theta)
    M[:, np.abs(theta[0]) <= 1e-7] = mu
    return M


def exp_map(mu, E):
    theta = np.sqrt((E ** 2).sum(0))[None]
    M = mu * np.sin(theta)
    M = M + (E * (np.cos(theta)) / theta)
    M[:, np.abs(theta[0]) <= 1e-7] = mu
    return M


def animation_matrix(N, n):
    u = np.random.normal(0, 1, size=(N, 1))
    r = np.sqrt((u ** 2).sum())
    u /= r
    t = np.random.normal(0, 1, size=(N, 1))
    t = t - (t.T.dot(u)).dot(u.T).T
    t /= np.sqrt((t ** 2).sum())
    # start = np.random.uniform(0,2*np.pi)
    # T = np.linspace(start, start+2*np.pi, n)[None, :] * t
    return r * exp_map_sphere(u, np.linspace(0.001, 2 * np.pi, n)[None] * t)


def get_percs(mu, K):
    s = np.random.multivariate_normal(mu.squeeze(), K, size=(50000)).T
    return np.percentile(s, np.linspace(0.01, 99.99, 75), overwrite_input=True, axis=1)


def create_empty_ax():
    fig, ax = plt.subplots(figsize=(4.2 * (16 / 9), 4.20))
    ax.set_frame_on(False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    return fig, ax


def plot_data(ax, X, Y):
    return ax.scatter(X, Y, marker='x', color='k')


def fill_grad(ax, X, mu, K):
    from GPy.plotting.matplot_dep.plot_definitions import MatplotlibPlots
    mat_plot = MatplotlibPlots()
    mat_plot.fill_gradient(ax, X[:, 0], get_percs(mu, K), color='#687C8E', linewidth=0, alpha=1.)


def animate_kernel(fig, ax, X, mu, K, out, frames=200):
    colors = ['#f7fbff',
              '#deebf7',
              '#c6dbef',
              "#9ecae1",
              "#6baed6",
              "#4292c6",
              '#2171b5',
              '#08519c',
              '#08306b',
              ]
    L = GPy.util.linalg.pdinv(K + np.eye(K.shape[0]) * 1e-8)[1]
    lines = [ax.plot([], [], lw=.8, color=c)[0] for c in colors]
    Rs = [animation_matrix(X.shape[0], frames) for _ in lines]

    def init():
        for line in lines:
            line.set_data([], [])
        return lines

    def animate(i):
        for animatrix, line in zip(Rs, lines):
            # print y[:,i].shape, x.shape
            line.set_data(X[:, 0], mu[:, [0]] + L.dot(animatrix[:, [i]]))
        return lines

    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=frames, interval=20, blit=False,
                                   repeat=True, save_count=frames,
                                   )
    writer = animation.FFMpegFileWriter(
        fps=30,
        codec='libx264',
        extra_args=[
            '-pix_fmt', 'yuva420p',
        ],
    )
    anim.save(
        out,
        writer=writer,
        dpi=150,
        savefig_kwargs={'transparent': False, 'facecolor': 'white'},
    )
    return anim
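
# --- Illustrative usage (a sketch under stated assumptions) ---
# Animates samples from a GP prior with an RBF kernel; requires ffmpeg on the
# PATH, and the kernel choice and output file name below are made up.
if __name__ == "__main__":
    X = np.linspace(0, 10, 200)[:, None]
    kern = GPy.kern.RBF(input_dim=1, lengthscale=1.0)
    K = kern.K(X)
    mu = np.zeros((X.shape[0], 1))
    fig, ax = create_empty_ax()
    animate_kernel(fig, ax, X, mu, K, "gp_prior.mp4")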
| 36.34375 | 97 | 0.593508 | 649 | 4,652 | 4.201849 | 0.417565 | 0.003667 | 0.0033 | 0.016868 | 0.151815 | 0.134213 | 0.126146 | 0.126146 | 0.110011 | 0.110011 | 0 | 0.031945 | 0.246346 | 4,652 | 127 | 98 | 36.629921 | 0.745864 | 0.377902 | 0 | 0.126582 | 0 | 0 | 0.041929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126582 | false | 0 | 0.063291 | 0.012658 | 0.303797 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9711a13f8bc8673b475c4605a1bf57d9fc492541 | 1,190 | py | Python | M4_Eval_SDK/Source/samples/python/console_window.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | 2 | 2019-03-11T15:24:51.000Z | 2022-03-07T09:42:05.000Z | M4_Eval_SDK/Source/samples/python/console_window.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | null | null | null | M4_Eval_SDK/Source/samples/python/console_window.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | 1 | 2021-03-16T08:26:05.000Z | 2021-03-16T08:26:05.000Z | import socket, sys, time, colorama, os
from threading import *

colorama.init(autoreset=True)

serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "localhost"
port = int(sys.argv[1])

try:
    serversocket.bind((host, port))
except Exception as e:
    import traceback
    print("An error occurred while trying to open socket {}:{}: {}\n".format(host, port, str(e)))
    traceback.print_exc()
    input("Hit any key to exit")
    sys.exit(-1)


class client(Thread):
    def __init__(self, socket, address):
        Thread.__init__(self)
        self.sock = socket
        self.addr = address
        self.start()

    def run(self):
        while 1:
            try:
                data = self.sock.recv(1024).decode()
            except:
                input("The socket was broken! Hit any key to exit")
                os._exit(0)
            if not data:
                input("The client disconnected! Hit any key to exit")
                os._exit(0)
            print(data)


serversocket.listen(5)
print("Console monitor started, listening on {}:{}".format(host, port))

while 1:
    clientsocket, address = serversocket.accept()
    client(clientsocket, address)
| 28.333333 | 94 | 0.610084 | 150 | 1,190 | 4.753333 | 0.5 | 0.033661 | 0.037868 | 0.046283 | 0.082749 | 0.061711 | 0.061711 | 0.061711 | 0 | 0 | 0 | 0.012717 | 0.273109 | 1,190 | 41 | 95 | 29.02439 | 0.811561 | 0 | 0 | 0.166667 | 0 | 0 | 0.177311 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.166667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97127268ae73df5b3be209c30b4a2132ec27b166 | 1,517 | py | Python | src/day11.py | chao-mu/aoc2021 | 9cb0590a8de100f260a78f22e50d00d6acb13ae4 | [
"CC0-1.0"
] | null | null | null | src/day11.py | chao-mu/aoc2021 | 9cb0590a8de100f260a78f22e50d00d6acb13ae4 | [
"CC0-1.0"
] | null | null | null | src/day11.py | chao-mu/aoc2021 | 9cb0590a8de100f260a78f22e50d00d6acb13ae4 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from aoc2021.util import print_solutions, import_matrix
from math import inf


def part_1(dumbos):
    flashes = 0
    for _ in range(100):
        for coord in dumbos:
            dumbos[coord] += 1
        flashed = set()
        for coord, dumbo in dumbos.items():
            if coord not in flashed and dumbo > 9:
                flash(dumbos, coord, flashed)
        flashes += len(flashed)
    return flashes


def part_2(dumbos):
    step = 0
    while True:
        step += 1
        for coord in dumbos:
            dumbos[coord] += 1
        flashed = set()
        for coord, dumbo in dumbos.items():
            if coord not in flashed and dumbo > 9:
                flash(dumbos, coord, flashed)
        if len(flashed) == len(dumbos):
            return step


def flash(dumbos, coord, flashed):
    # Energize `coord`; if it passes 9 it flashes once, resets to 0, and
    # recursively energizes its eight neighbours (the cascade).
    if coord not in dumbos:
        return
    if coord in flashed:
        return
    dumbos[coord] += 1
    if dumbos[coord] <= 9:
        return
    dumbos[coord] = 0
    flashed.add(coord)
    for offset in [(0, 1), (0, -1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, 1), (1, -1)]:
        flash(dumbos, tuple(map(sum, zip(coord, offset))), flashed)


def main():
    print_solutions(
        ["resources/day11-test.txt", "resources/day11.txt"],
        import_matrix,
        part_1
    )
    print_solutions(
        ["resources/day11-test.txt", "resources/day11.txt"],
        import_matrix,
        part_2
    )


if __name__ == "__main__":
    main()
| 20.780822 | 89 | 0.544496 | 194 | 1,517 | 4.159794 | 0.262887 | 0.109046 | 0.022305 | 0.024783 | 0.506815 | 0.463445 | 0.463445 | 0.453532 | 0.453532 | 0.453532 | 0 | 0.046397 | 0.332235 | 1,517 | 72 | 90 | 21.069444 | 0.750247 | 0.013843 | 0 | 0.470588 | 0 | 0 | 0.062876 | 0.032107 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.078431 | 0 | 0.254902 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9713a29a49b88c9107c1462a864743e09d61110c | 4,221 | py | Python | bot.py | Ashkar2001/shareurlbotv1 | f61913731f0064aceb7e67ed8f2b98e0de0015bb | [
"MIT"
] | 3 | 2021-04-22T19:37:01.000Z | 2022-03-28T09:09:19.000Z | bot.py | krbishnoi46/ShareurlbotV1 | f61913731f0064aceb7e67ed8f2b98e0de0015bb | [
"MIT"
] | null | null | null | bot.py | krbishnoi46/ShareurlbotV1 | f61913731f0064aceb7e67ed8f2b98e0de0015bb | [
"MIT"
] | 2 | 2021-05-02T14:32:52.000Z | 2021-09-13T19:42:02.000Z | import urllib
from pyrogram import Client, filters
from pyrogram.types import (InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultArticle, InputTextMessageContent)
from config import Config

bot = Client(
    'shareurl-generator',
    bot_token=Config.BOT_TOKEN,
    api_id=Config.API_ID,
    api_hash=Config.API_HASH
)


@bot.on_message(filters.command(['start']))
def start(client, message):
    rep = f"**Hi {message.from_user.username}**\n\n**Am a bot to convert __text into Shareable telegram link__.**\nWorks on both **in pm and in Inline😊**\n\nClick __/help__ if needed.."
    message.reply_text(
        text=rep,
        quote=False,
        reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')], [InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""), InlineKeyboardButton("Go Inline", switch_inline_query="")], [InlineKeyboardButton('Share Me', url='https://t.me/share/url?url=%2A%2AHello%20Plox%20%F0%9F%91%8B%2A%2A%0A%0A__I%20just%20found%20a%20Bot%20to%20convert__%20%2A%2AText%20as%20a%20Shareable%20Text%20Link%2A%2A%20__format%20%F0%9F%A4%A9.%20Hope%20it%20would%20be%20very%20helpful%20for%20u%20too...%F0%9F%A4%97%F0%9F%A4%97__%0A%0A%2A%2ABot%20Link%3A%20%40ShareUrlBot%20%F0%9F%A5%B0%2A%2A')]]))


@bot.on_message(filters.command(['help']))
def help(client, message):
    message.reply_text("**Nothing Complicated..🤓**\n\n**For PM:**\n__Send your desired text to this bot to get your link.__\n\n**For Inline Method:**\n__Type__ `@ShareUrlBot your text`\n__in any chats keyboard and hit the inline result.__", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')]]))


@bot.on_message(filters.command(['about']))
def about(client, message):
    message.reply_text(f"""**• Bot Info •**
**My Name** :- `Share Url Generator`
**Creator** :- @B_woy
**Language** :- `Python3`
**Library** :- `Pyrogram 1.2.8`
**Server** :- `Heroku.com`
**Build Status** :- `V 0.2`
**• User Info •**
**Name** :- `{message.from_user.first_name} {message.from_user.last_name}`
**ID** :- `{message.from_user.id}`
**Username** :- @{message.from_user.username}
**DC ID** :- `{message.from_user.dc_id}`""", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')]]))


@bot.on_message(filters.text)
def shareurl(client, message):
    query = message.text
    url = urllib.parse.quote(query)
    rpl = f"https://t.me/share/url?url={url}"
    rslt = f"""**Click to CopY ⬇️⬇️** \n\n```{rpl}```"""
    message.reply_text(text=rslt, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This Link ⬆️⬆️', url=f'{rpl}')]]))


@bot.on_inline_query()
def inline(client, message):
    query = message.query.lower()
    if query == "":
        result = [InlineQueryResultArticle(title="Help !!",
                                           reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""), InlineKeyboardButton("Go Inline", switch_inline_query="")]]),
                                           description="How t0 usE meH !!",
                                           thumb_url="https://telegra.ph/file/99d8f16a777c2ee2781c1.jpg",
                                           input_message_content=InputTextMessageContent(message_text="**Nothing Complicated..**🤓\n\nType `@ShareUrlBot your text` \nin any chats keyboard and hit the inline result.\n\nNote: __U can also use Me in PM!__"))
                  ]
        message.answer(result)
        return
    else:
        url = urllib.parse.quote(query)
        rpl = f"https://t.me/share/url?url={url}"
        rslt = f"""**Click to CopY⬇️⬇️** \n\n```{rpl}```"""
        result = [InlineQueryResultArticle(title=f'{query}',
                                           description=f'{rpl}',
                                           reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This linK ⬆️⬆️', url=f'{rpl}')], [InlineKeyboardButton("Search Again", switch_inline_query_current_chat=""), InlineKeyboardButton("Go Inline", switch_inline_query="")]]),
                                           input_message_content=InputTextMessageContent(message_text=rslt))
                  ]
        message.answer(result)


bot.run()
| 55.539474 | 654 | 0.683487 | 548 | 4,221 | 5.144161 | 0.337591 | 0.027315 | 0.031926 | 0.108549 | 0.453352 | 0.397659 | 0.353317 | 0.353317 | 0.327066 | 0.327066 | 0 | 0.037119 | 0.144752 | 4,221 | 75 | 655 | 56.28 | 0.737396 | 0 | 0 | 0.125 | 0 | 0.0625 | 0.43331 | 0.054016 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.0625 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9714d810307fa953764fccad45ba80c5500ed6fb | 6,745 | py | Python | scripts/pybeacondump.py | pjsier/il-2020-election-precinct-data | 67a763aece2939c50cccd11fb3deeba61ca2b6e6 | [
"MIT"
] | null | null | null | scripts/pybeacondump.py | pjsier/il-2020-election-precinct-data | 67a763aece2939c50cccd11fb3deeba61ca2b6e6 | [
"MIT"
] | null | null | null | scripts/pybeacondump.py | pjsier/il-2020-election-precinct-data | 67a763aece2939c50cccd11fb3deeba61ca2b6e6 | [
"MIT"
] | null | null | null | import collections
import copy
import http.client
import json
import re
import sys
import urllib.parse

import bs4
import requests
from geomet import wkt

"""
Based on conversation and code from https://github.com/openaddresses/machine/issues/580
"""

BEACON_HEADERS = {
    "Content-Type": "application/json",
    "User-Agent": "OA",
}

BODY_TEMPLATE = {
    "layerId": None,
    "useSelection": False,
    "ext": {"minx": 0, "miny": 0, "maxx": 40000000, "maxy": 40000000},
    "wkt": None,
    "spatialRelation": 1,
    "featureLimit": 1,
}

name_value_pattern = re.compile(r"^(\w+) = (.*)$", re.M)
coordinate_pattern = re.compile(r"(?P<x>-?\d+(\.\d+)?)\s+(?P<y>-?\d+(\.\d+)?)")


def get_query_url(start_url):
    """Create a query URL including a dynamically assigned QPS value"""
    res = requests.get(start_url)
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    config_script = soup.find_all("script", attrs={"type": "text/javascript"})[-1]
    script_content = config_script.contents[0]
    script_data_str = re.search(r"(?<=\= )\{.*\}(?=;)", script_content).group()
    script_data = json.loads(script_data_str)
    return (
        "https://beacon.schneidercorp.com/api/beaconCore/GetVectorLayer?QPS="
        + script_data["QPS"]
    )


def get_connection(raw_url):
    """ Return an HTTPConnection and URL path for a starting Beacon URL.

        Expects a raw URL similar to:
        https://beacon.schneidercorp.com/api/beaconCore/GetVectorLayer?QPS=xxxx
    """
    # Safari developer tools sneaks in some zero-width spaces:
    # http://www.fileformat.info/info/unicode/char/200B/index.htm
    url = raw_url.replace("\u200b", "")

    scheme, host, path, _, query, _ = urllib.parse.urlparse(url)
    layer_path = urllib.parse.urlunparse(("", "", path, None, query, None))

    if scheme == "https":
        return http.client.HTTPSConnection(host), layer_path
    elif scheme == "http":
        return http.client.HTTPConnection(host), layer_path


def get_starting_bbox(conn, layer_path, layer_id, radius_km=200):
    """ Retrieves a bounding box tuple for a Beacon layer and radius in km.

        This is meant to be an overly-large, generous bbox that should
        encompass any reasonable county or city data source.
    """
    body = copy.deepcopy(BODY_TEMPLATE)
    body["layerId"] = int(layer_id)

    conn.request("POST", url=layer_path, body=json.dumps(body), headers=BEACON_HEADERS)
    resp = conn.getresponse()

    if resp.status not in range(200, 299):
        raise RuntimeError("Bad status in get_starting_bbox")

    results = json.load(resp)
    wkt = results.get("d", [{}])[0].get("WktGeometry", None)

    if not wkt:
        raise RuntimeError("Missing WktGeometry in get_starting_bbox")

    match = coordinate_pattern.search(wkt)

    if not match:
        raise RuntimeError("Unparseable WktGeometry in get_starting_bbox")

    x, y = float(match.group("x")), float(match.group("y"))
    xmin, ymin = x - radius_km * 1000, y - radius_km * 1000
    xmax, ymax = x + radius_km * 1000, y + radius_km * 1000

    return xmin, ymin, xmax, ymax


def partition_bbox(xmin, ymin, xmax, ymax):
    """ Cut a bounding box into four smaller bounding boxes.
    """
    xmid, ymid = xmin / 2 + xmax / 2, ymin / 2 + ymax / 2
    return [
        (xmin, ymin, xmid, ymid),
        (xmin, ymid, xmid, ymax),
        (xmid, ymin, xmax, ymid),
        (xmid, ymid, xmax, ymax),
    ]


def get_features(conn, layer_path, layer_id, bbox, limit=0, depth=0):
    """ Return a list of features after geographically searching a layer.
    """
    body = copy.deepcopy(BODY_TEMPLATE)
    body["layerId"], body["featureLimit"] = int(layer_id), limit
    body["ext"] = dict(minx=bbox[0], miny=bbox[1], maxx=bbox[2], maxy=bbox[3])

    conn.request("POST", url=layer_path, body=json.dumps(body), headers=BEACON_HEADERS)
    resp = conn.getresponse()

    if resp.status not in range(200, 299):
        raise RuntimeError("Bad status in get_features")

    records = json.load(resp).get("d", [])

    if limit == 0:
        # This is our first time through and we don't actually know how many
        # things there are. Assume that the current count is the limit.
        limit = len(records)

    if len(records) >= limit:
        # There are too many records, recurse!
        # This also happens the first time through before we know anything.
        bbox1, bbox2, bbox3, bbox4 = partition_bbox(*bbox)
        return (
            get_features(conn, layer_path, layer_id, bbox1, limit, depth + 1)
            + get_features(conn, layer_path, layer_id, bbox2, limit, depth + 1)
            + get_features(conn, layer_path, layer_id, bbox3, limit, depth + 1)
            + get_features(conn, layer_path, layer_id, bbox4, limit, depth + 1)
        )

    # We are good, make some GeoJSON.
    print(" " * depth, "found", len(records), "in", bbox, file=sys.stderr)
    return [make_feature(record) for record in records]


def extract_properties(record):
    """ Get a dictionary of GeoJSON feature properties for a record.
    """
    properties = collections.OrderedDict()
    html1 = record.get("TipHtml", "").replace("\r\n", "\n")
    html2 = record.get("ResultHtml", "").replace("\r\n", "\n")
    soup1 = bs4.BeautifulSoup(html1, "html.parser")
    soup2 = bs4.BeautifulSoup(html2, "html.parser")

    for text in soup1.find_all(text=name_value_pattern):
        properties.update({k: v for (k, v) in name_value_pattern.findall(text)})

    for text in soup2.find_all(text=name_value_pattern):
        properties.update({k: v for (k, v) in name_value_pattern.findall(text)})

    return properties


def extract_geometry(record):
    """ Get a GeoJSON geometry object for a record.
    """
    prop = extract_properties(record)
    try:
        geom = dict(type="Point", coordinates=[float(prop["Long"]), float(prop["Lat"])])
    except (KeyError, ValueError):
        # a missing Long/Lat property raises KeyError, a bad value ValueError
        geom = None
    return geom


def make_feature(record):
    """ Get a complete GeoJSON feature object for a record.
    """
    return dict(
        type="Feature",
        id=record.get("Key"),
        geometry=wkt.loads(record.get("WktGeometry")),
        properties=extract_properties(record),
    )


if __name__ == "__main__":
    _, start_url, layer_id, filename = sys.argv
    query_url = get_query_url(start_url)
    conn, layer_path = get_connection(query_url)
    bbox = get_starting_bbox(conn, layer_path, layer_id)
    print(bbox, file=sys.stderr)

    features = get_features(conn, layer_path, layer_id, bbox)
    geojson = dict(type="FeatureCollection", features=list(features))

    if filename == "-":
        json.dump(geojson, sys.stdout)
    else:
        with open(filename, "w") as f:
            json.dump(geojson, f)
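
# --- Example invocation (a sketch; the URL and layer id are placeholders) ---
#
#     python pybeacondump.py \
#         "https://beacon.schneidercorp.com/Application.aspx?AppID=0000" \
#         12345 parcels.geojson
#
# Pass "-" as the filename to stream the GeoJSON to stdout instead.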
| 31.666667 | 88 | 0.648184 | 905 | 6,745 | 4.708287 | 0.316022 | 0.029571 | 0.027458 | 0.033795 | 0.239615 | 0.230697 | 0.230697 | 0.205116 | 0.133302 | 0.133302 | 0 | 0.018487 | 0.214085 | 6,745 | 212 | 89 | 31.816038 | 0.785324 | 0.162639 | 0 | 0.09375 | 0 | 0 | 0.113048 | 0.007891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.078125 | 0 | 0.21875 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97176fec2bfad5af9c57f1ea37398060856e772d | 22,033 | py | Python | Lib/site-packages/pylint/checkers/base/basic_error_checker.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/checkers/base/basic_error_checker.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/checkers/base/basic_error_checker.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
"""Basic Error checker from the basic checker."""
from __future__ import annotations
import itertools
from collections.abc import Iterator
from typing import Any
import astroid
from astroid import nodes
from pylint.checkers import utils
from pylint.checkers.base.basic_checker import _BasicChecker
from pylint.checkers.utils import infer_all
from pylint.interfaces import HIGH
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _get_break_loop_node(break_node: nodes.Break) -> nodes.For | nodes.While | None:
"""Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (nodes.For, nodes.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
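# Illustrative example (assumed source under analysis): given
#     while cond():          # <- node returned for the break below
#         for x in seq:
#             pass
#         else:
#             break
# the walk above skips the `for`, because a break placed in a loop's `else`
# clause terminates the enclosing loop, not that loop itself.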
def _loop_exits_early(loop: nodes.For | nodes.While) -> bool:
"""Returns true if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (nodes.For, nodes.While)
definition_nodes = (nodes.FunctionDef, nodes.ClassDef)
inner_loop_nodes: list[nodes.For | nodes.While] = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
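# Illustrative example (assumed input): a loop such as
#     for x in seq:
#         if found(x):
#             break
# returns True here, whereas a loop whose only `break` sits inside a nested
# inner loop returns False, since that break cannot end the outer loop.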
def _has_abstract_methods(node):
"""Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def redefined_by_decorator(node: nodes.FunctionDef) -> bool:
"""Return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, nodes.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement, remove the else and"
" de-indent all the code inside it",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.only_required_for_messages("function-redefined")
def visit_classdef(self, node: nodes.ClassDef) -> None:
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple: nodes.Tuple) -> bool:
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, nodes.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, nodes.Starred):
starred_count += 1
return starred_count > 1
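# Illustrative example: `*a, *b = values` makes this helper return True
# (too-many-star-expressions), while `a, *b = values` is fine; nested targets
# such as `(x, (*y, *z)) = ...` are handled by the recursive call above.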
@utils.only_required_for_messages(
"too-many-star-expressions", "invalid-star-assignment-target"
)
def visit_assign(self, node: nodes.Assign) -> None:
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], nodes.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, nodes.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.only_required_for_messages("star-needs-assignment-target")
def visit_starred(self, node: nodes.Starred) -> None:
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, nodes.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(node.parent, (nodes.List, nodes.Tuple, nodes.Set, nodes.Dict)):
# PEP 448 unpacking.
return
stmt = node.statement(future=True)
if not isinstance(stmt, nodes.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.only_required_for_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition("method" if node.is_method() else "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
nodes.Return, skip_klass=(nodes.FunctionDef, nodes.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = {}
arguments: Iterator[Any] = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
if arg.name in arg_clusters:
self.add_message(
"duplicate-argument-name",
node=arg,
args=(arg.name,),
confidence=HIGH,
)
else:
arg_clusters[arg.name] = arg
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node: nodes.FunctionDef) -> None:
scope_globals = {
name: child
for child in node.nodes_of_class(nodes.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(nodes.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node: nodes.FunctionDef) -> None:
"""Check that a name is both nonlocal and global."""
def same_scope(current: nodes.Global | nodes.Nonlocal) -> bool:
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(nodes.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(nodes.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.only_required_for_messages("return-outside-function")
def visit_return(self, node: nodes.Return) -> None:
if not isinstance(node.frame(future=True), nodes.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.only_required_for_messages("yield-outside-function")
def visit_yield(self, node: nodes.Yield) -> None:
self._check_yield_outside_func(node)
@utils.only_required_for_messages("yield-outside-function")
def visit_yieldfrom(self, node: nodes.YieldFrom) -> None:
self._check_yield_outside_func(node)
@utils.only_required_for_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node: nodes.Continue) -> None:
self._check_in_loop(node, "continue")
@utils.only_required_for_messages("not-in-loop")
def visit_break(self, node: nodes.Break) -> None:
self._check_in_loop(node, "break")
@utils.only_required_for_messages("useless-else-on-loop")
def visit_for(self, node: nodes.For) -> None:
self._check_else_on_loop(node)
@utils.only_required_for_messages("useless-else-on-loop")
def visit_while(self, node: nodes.While) -> None:
self._check_else_on_loop(node)
@utils.only_required_for_messages("nonexistent-operator")
def visit_unaryop(self, node: nodes.UnaryOp) -> None:
"""Check use of the non-existent ++ and -- operators."""
if (
(node.op in "+-")
and isinstance(node.operand, nodes.UnaryOp)
and (node.operand.op == node.op)
and (node.col_offset + 1 == node.operand.col_offset)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node: nodes.Nonlocal, name: str) -> None:
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (nodes.ClassDef, nodes.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, nodes.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.only_required_for_messages("nonlocal-without-binding")
def visit_nonlocal(self, node: nodes.Nonlocal) -> None:
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.only_required_for_messages("abstract-class-instantiated")
def visit_call(self, node: nodes.Call) -> None:
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
for inferred in infer_all(node.func):
self._check_inferred_class_is_abstract(inferred, node)
def _check_inferred_class_is_abstract(self, inferred, node: nodes.Call):
if not isinstance(inferred, nodes.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node: nodes.Yield) -> None:
if not isinstance(node.frame(future=True), (nodes.FunctionDef, nodes.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node: nodes.For | nodes.While) -> None:
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(
self, node: nodes.Continue | nodes.Break, node_name: str
) -> None:
"""Check that a node is inside a for or while loop."""
for parent in node.node_ancestors():
if isinstance(parent, (nodes.For, nodes.While)):
if node not in parent.orelse:
return
if isinstance(parent, (nodes.ClassDef, nodes.FunctionDef)):
break
if (
isinstance(parent, nodes.TryFinally)
and node in parent.finalbody
and isinstance(node, nodes.Continue)
):
self.add_message("continue-in-finally", node=node)
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(
self, redeftype: str, node: nodes.Call | nodes.FunctionDef
) -> None:
"""Check for redefinition of a function / method / class name."""
parent_frame = node.parent.frame(future=True)
# Ignore function stubs created for type information
redefinitions = [
i
for i in parent_frame.locals[node.name]
if not (isinstance(i.parent, nodes.AnnAssign) and i.parent.simple)
]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, nodes.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, nodes.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, nodes.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, nodes.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, nodes.Compare)
and isinstance(node.parent.test.left, nodes.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], nodes.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = self.linter.config.dummy_variables_rgx
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
| 38.318261 | 89 | 0.591113 | 2,568 | 22,033 | 4.92757 | 0.159657 | 0.021337 | 0.020547 | 0.022127 | 0.234945 | 0.168721 | 0.130947 | 0.099494 | 0.077999 | 0.068911 | 0 | 0.006959 | 0.315209 | 22,033 | 574 | 90 | 38.385017 | 0.831721 | 0.118459 | 0 | 0.268519 | 0 | 0 | 0.199033 | 0.042219 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064815 | false | 0.002315 | 0.023148 | 0.002315 | 0.157407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
971bccd2014298cba9295d6dd03cd7d5f6c9e5d5 | 12,539 | py | Python | archived/main.py | joyjeni/detr-fine | dfc0f4abc2579a2b3ef4527904af3345c7a9de4d | [
"Apache-2.0"
] | null | null | null | archived/main.py | joyjeni/detr-fine | dfc0f4abc2579a2b3ef4527904af3345c7a9de4d | [
"Apache-2.0"
] | null | null | null | archived/main.py | joyjeni/detr-fine | dfc0f4abc2579a2b3ef4527904af3345c7a9de4d | [
"Apache-2.0"
] | null | null | null | """
Train and eval functions used in main.py
"""
import os
import torch
from torch.utils.data import DataLoader, DistributedSampler
import math
import sys
import time
import datetime
from typing import Iterable
from pathlib import Path
import json
import random
import numpy as np
from dataset.evaluator import SmoothedValue, MetricLogger
from model.detr import build_model
from dataset.construction_dataset import build_dataset
from dataset.evaluator import collate_fn, evaluate, save_on_master
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(False) # some ops lack deterministic implementations
device = torch.device("cuda:0")
class Args:
pass
args = Args()
# Positional encoding
args.position_embedding = "sine"
# CNN Backbone
args.backbone = "resnet50"
args.dilation = None
# Hungarian matcher
args.set_cost_class = 1
args.set_cost_bbox = 5
args.set_cost_giou = 2
# Transformer
args.hidden_dim = 256
args.dropout = 0.1
args.nheads = 8
args.dim_feedforward = 2048
args.enc_layers = 6
args.dec_layers = 6
args.pre_norm = None
# DETR
args.num_queries = 100
args.aux_loss = True # calculate loss at each decoder layer
args.masks = True
args.frozen_weights = None
args.bbox_loss_coef = 5
args.mask_loss_coef = 1
args.dice_loss_coef = 1
args.giou_loss_coef = 2
args.eos_coef = 0.1
# Dataset
args.dataset_file = "coco_panoptic" # construction
args.coco_path = "./data"
args.coco_panoptic_path = "./data"
# Training
args.lr = 1e-4
args.weight_decay = 1e-4
args.lr_backbone = 0 # 0 means frozen backbone
args.batch_size = 3
args.epochs = 2
args.lr_drop = 200
args.clip_max_norm = 0.1
args.output_dir = "out_dir"
args.eval = False
# create the panoptic evaluation output directory (replaces the notebook-style `!mkdir -p`)
os.makedirs("out_dir/panoptic_eval", exist_ok=True)
# set if you plan to log on wandb
ENABLE_WANDB = True
# if set, do not train from scratch (start from DETR pretrained on COCO)
used_artifact = None # "2_2_attentionfreeze_aux:latest"
# set if starting a new run
wandb_experiment_name = "2_2_1_transf_unfreeze_aux"
# set to None if starting a new run
run_id = None
if ENABLE_WANDB:
import wandb
if run_id is not None:
wandb.init(project="detr", id=run_id, resume="allow")
else:
wandb.init(project="detr", name=wandb_experiment_name)
wandb.config.position_embedding = args.position_embedding
wandb.config.backbone = args.backbone
wandb.config.dilation = args.dilation
wandb.config.set_cost_class = args.set_cost_class
wandb.config.set_cost_bbox = args.set_cost_bbox
wandb.config.set_cost_giou = args.set_cost_giou
wandb.config.hidden_dim = args.hidden_dim
wandb.config.dropout = args.dropout
wandb.config.nheads = args.nheads
wandb.config.dim_feedforward = args.dim_feedforward
wandb.config.enc_layers = args.enc_layers
wandb.config.dec_layers = args.dec_layers
wandb.config.pre_norm = args.pre_norm
wandb.config.num_queries = args.num_queries
wandb.config.aux_loss = args.aux_loss
wandb.config.masks = args.masks
wandb.config.frozen_weights = args.frozen_weights
wandb.config.bbox_loss_coef = args.bbox_loss_coef
wandb.config.mask_loss_coef = args.mask_loss_coef
wandb.config.dice_loss_coef = args.dice_loss_coef
wandb.config.giou_loss_coef = args.giou_loss_coef
wandb.config.eos_coef = args.eos_coef
wandb.config.lr = args.lr
wandb.config.weight_decay = args.weight_decay
wandb.config.lr_backbone = args.lr_backbone
wandb.config.batch_size = args.batch_size
wandb.config.epochs = args.epochs
wandb.config.lr_drop = args.lr_drop
wandb.config.clip_max_norm = args.clip_max_norm
def freeze_attn(model, args):
for i in range(args.dec_layers):
for param in model.detr.transformer.decoder.layers[i].self_attn.parameters():
param.requires_grad = False
for param in model.detr.transformer.decoder.layers[
i
].multihead_attn.parameters():
param.requires_grad = False
for i in range(args.enc_layers):
for param in model.detr.transformer.encoder.layers[i].self_attn.parameters():
param.requires_grad = False
def freeze_decoder(model, args):
for param in model.detr.transformer.decoder.parameters():
param.requires_grad = False
def freeze_first_layers(model, args):
for i in range(args.enc_layers // 2):
for param in model.detr.transformer.encoder.layers[i].parameters():
param.requires_grad = False
for i in range(args.dec_layers // 2):
for param in model.detr.transformer.decoder.layers[i].parameters():
param.requires_grad = False
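# Note on the freezing pattern above: parameters with requires_grad = False
# stay in the state_dict and the forward pass but receive no gradient
# updates; the optimizer setup below also filters on p.requires_grad when
# building its parameter groups.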
def build_pretrained_model(args):
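# Build a fresh model sized for this dataset and copy every weight group
# except the classification head from the COCO-pretrained panoptic DETR,
# leaving that head randomly initialized for fine-tuning; the attention
# blocks are then frozen.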
pre_trained = torch.hub.load(
"facebookresearch/detr",
"detr_resnet50_panoptic",
pretrained=True,
return_postprocessor=False,
num_classes=250,
)
model, criterion, postprocessors = build_model(args)
model.detr.backbone.load_state_dict(pre_trained.detr.backbone.state_dict())
model.detr.bbox_embed.load_state_dict(pre_trained.detr.bbox_embed.state_dict())
model.detr.query_embed.load_state_dict(pre_trained.detr.query_embed.state_dict())
model.detr.input_proj.load_state_dict(pre_trained.detr.input_proj.state_dict())
model.detr.transformer.load_state_dict(pre_trained.detr.transformer.state_dict())
model.bbox_attention.load_state_dict(pre_trained.bbox_attention.state_dict())
model.mask_head.load_state_dict(pre_trained.mask_head.state_dict())
freeze_attn(model, args)
return model, criterion, postprocessors
def train_one_epoch(
model: torch.nn.Module,
criterion: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
max_norm: float = 0,
):
model.train()
criterion.train()
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter(
"class_error", SmoothedValue(window_size=1, fmt="{value:.2f}")
)
header = "Epoch: [{}]".format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(
loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict
)
# single-process run: no cross-GPU reduction is performed, the loss dict
# is used as-is for logging
loss_dict_reduced = loss_dict
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(
loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
if ENABLE_WANDB:
wandb.log(loss_dict_reduced)
wandb.log({"loss": loss_value})
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def train():
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
model, criterion, postprocessors = build_pretrained_model(args)
model.to(device)
if ENABLE_WANDB:
wandb.watch(model)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("number of params:", n_parameters)
param_dicts = [
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" not in n and p.requires_grad
]
},
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" in n and p.requires_grad
],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(
param_dicts, lr=args.lr, weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
if ENABLE_WANDB and used_artifact is not None:
artifact = wandb.use_artifact(used_artifact)
artifact_dir = artifact.download()
checkpoint = torch.load(artifact_dir + "/checkpoint.pth")
model.load_state_dict(checkpoint["model"])
if run_id is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
# lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
start_epoch = checkpoint["epoch"]
else:
start_epoch = 0
dataset_train = build_dataset(image_set="train", args=args)
dataset_val = build_dataset(image_set="val", args=args)
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True
)
data_loader_train = DataLoader(
dataset_train,
batch_sampler=batch_sampler_train,
collate_fn=collate_fn,
num_workers=4,
)
data_loader_val = DataLoader(
dataset_val,
args.batch_size,
sampler=sampler_val,
drop_last=False,
collate_fn=collate_fn,
num_workers=4,
)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location="cpu")
model_without_ddp.detr.load_state_dict(checkpoint["model"])
output_dir = Path(args.output_dir)
if args.eval:
test_stats = evaluate(
model, criterion, postprocessors, data_loader_val, device, args.output_dir
)
print(test_stats)
return
print("Start training")
start_time = time.time()
for epoch in range(start_epoch + 1, args.epochs):
train_stats = train_one_epoch(
model,
criterion,
data_loader_train,
optimizer,
device,
epoch,
args.clip_max_norm,
)
lr_scheduler.step()
if args.output_dir:
checkpoint_path = output_dir / "checkpoint.pth"
save_on_master(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
if ENABLE_WANDB:
artifact = wandb.Artifact(wandb_experiment_name, type="model")
artifact.add_file(checkpoint_path)
wandb.log_artifact(artifact)
test_stats = evaluate(
model, criterion, postprocessors, data_loader_val, device, args.output_dir
)
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if ENABLE_WANDB:
wandb.log(test_stats)
if args.output_dir:
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
| 31.037129 | 87 | 0.670229 | 1,668 | 12,539 | 4.788969 | 0.185252 | 0.039935 | 0.017902 | 0.014021 | 0.220831 | 0.166875 | 0.137331 | 0.090636 | 0.082123 | 0.044567 | 0 | 0.00708 | 0.23407 | 12,539 | 403 | 88 | 31.114144 | 0.824656 | 0.044182 | 0 | 0.121875 | 0 | 0 | 0.045489 | 0.007442 | 0 | 0 | 0 | 0 | 0.003125 | 1 | 0.01875 | false | 0.00625 | 0.059375 | 0 | 0.090625 | 0.028125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9721be068a8518541b5cace614456bd0b705d2e4 | 1,231 | py | Python | motion_optimizer.py | sehoonha/pydart_green_stair | a644355c6ef8068aea5cd0c807b74454f2f9ce4d | [
"MIT"
] | null | null | null | motion_optimizer.py | sehoonha/pydart_green_stair | a644355c6ef8068aea5cd0c807b74454f2f9ce4d | [
"MIT"
] | null | null | null | motion_optimizer.py | sehoonha/pydart_green_stair | a644355c6ef8068aea5cd0c807b74454f2f9ce4d | [
"MIT"
] | null | null | null | import logging
import scipy.optimize
class MotionOptimizer(object):
def __init__(self, motion, evaluator):
self.logger = logging.getLogger(__name__)
self.motion = motion
self.evaluator = evaluator
def obj(self, x):
self.counter += 1
self.motion.set_params(x)
cost = self.evaluator.cost()
# self.logger.debug('params = %s' % x)
if self.counter % 100 == 1:
self.logger.debug('%d: cost = %.10f' % (self.counter, cost))
return cost
# debug alternative: a simple quadratic objective for sanity-checking the optimizer
# return x[0] ** 2 + (x[1] - 1.3) ** 2
def solve(self):
self.counter = 0
logging.info('start to solve optimization')
logger = self.logger
x0 = self.motion.params()
# x0 = [10.0, 10.0]
logger.info('x0 = %s' % x0)
# note: SLSQP honours 'maxiter' and 'ftol'; 'maxfev' and 'xtol' apply to
# other minimizers (e.g. Nelder-Mead) and scipy only warns about them here
options = {'maxiter': 100000, 'maxfev': 100000,
'xtol': 10e-10, 'ftol': 10e-10}
logger.info('options = %s' % options)
res = scipy.optimize.minimize(self.obj, x0,
method='SLSQP',
options=options)
logger.info('result = %s' % res)
logger.info('finished to solve optimization')
logger.info('OK')
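# Usage sketch (hypothetical motion/evaluator objects exposing params(),
# set_params() and cost(), as assumed by the class above):
#     optimizer = MotionOptimizer(motion, evaluator)
#     optimizer.solve()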
| 33.27027 | 72 | 0.523964 | 141 | 1,231 | 4.510638 | 0.375887 | 0.078616 | 0.04717 | 0.078616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055215 | 0.337937 | 1,231 | 36 | 73 | 34.194444 | 0.725153 | 0.073924 | 0 | 0 | 0 | 0 | 0.115317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.068966 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97223e918d7ed3c66e5db364f3b9dcfe0630ba1d | 64,626 | py | Python | crystalparser/crystal_parser.py | nomad-coe/nomad-parser-crystal | b9e2e6d0973372a803f9caa2b719a91f6b9e4139 | [
"Apache-2.0"
] | null | null | null | crystalparser/crystal_parser.py | nomad-coe/nomad-parser-crystal | b9e2e6d0973372a803f9caa2b719a91f6b9e4139 | [
"Apache-2.0"
] | null | null | null | crystalparser/crystal_parser.py | nomad-coe/nomad-parser-crystal | b9e2e6d0973372a803f9caa2b719a91f6b9e4139 | [
"Apache-2.0"
] | null | null | null | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import textwrap
import datetime
import ase
import numpy as np
from nomad.units import ureg
from nomad import atomutils
from nomad.parsing.parser import FairdiParser
from nomad.parsing.file_parser import TextParser, Quantity
from nomad.datamodel.metainfo.public import (
section_run,
section_method,
section_system,
section_XC_functionals,
section_scf_iteration,
section_single_configuration_calculation,
section_sampling_method,
section_frame_sequence,
section_dos,
section_k_band,
section_k_band_segment,
section_basis_set_atom_centered
)
from crystalparser.metainfo.crystal import x_crystal_section_shell
def capture(regex):
return r'(' + regex + r')'
flt = r'-?(?:\d+\.?\d*|\d*\.?\d+)(?:E[\+-]?\d+)?' # Floating point number
flt_c = capture(flt) # Captures a floating point number
flt_crystal_c = r'(-?\d+(?:\.\d+)?\*\*-?\s*\d+)' # Crystal-specific power-of-ten syntax, e.g. "10**- 6"
ws = r'\s+' # Series of white-space characters
integer = r'-?\d+' # Integer number
integer_c = capture(integer) # Captures integer number
word = r'[a-zA-Z]+' # A single alphanumeric word
word_c = capture(word) # Captures a single alphanumeric word
br = r'\r?\n' # Newline that works for both Windows and Unix. Crystal can be run on a Windows machine as well.
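# Illustrative example (not part of the parser): the fragments above compose
# into full patterns, e.g.
#     re.search(fr' TOTAL ENERGY\s+{flt_c}', ' TOTAL ENERGY -1.2345E+02')
# captures '-1.2345E+02' as group 1.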
class CrystalParser(FairdiParser):
"""NOMAD-lab parser for Crystal.
"""
def __init__(self):
super().__init__(
name='parsers/crystal',
code_name='Crystal',
code_homepage='https://www.crystal.unito.it/',
mainfile_contents_re=(
fr'({br} \*\s+CRYSTAL[\d]+\s+\*{br} \*\s*{word} : \d+[\.\d+]*)'
)
)
def parse_output(self, filepath):
"""Reads the calculation output.
"""
outputparser = TextParser(
filepath,
quantities=[
# Header
Quantity("datetime", fr'(?:Date\:|date)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("hostname", fr'(?:Running on\:|hostname)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("os", fr'(?:system)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("user", fr'user\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("input_path", fr'(?:Input data|input data in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("output_path", fr'(?:Output\:|output data in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("executable_path", fr'(?:Executable\:|crystal executable in)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("tmpdir", fr'(?:Temporary directory\:|temporary directory)\s+(.*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("system_type", fr'(CRYSTAL|SLAB|POLYMER|HELIX|MOLECULE|EXTERNAL|DLVINPUT)', repeats=False),
Quantity("calculation_type", fr'(OPTGEOM|FREQCALC|ANHARM)', repeats=False),
# Input
Quantity(
"dftd3",
fr'(DFTD3{br}[\s\S]*?END{br})',
sub_parser=TextParser(quantities=[
Quantity(
"version",
r'(VERSION \d)',
str_operation=lambda x: x,
repeats=False,
),
]),
repeats=False,
),
Quantity(
"grimme",
fr'(GRIMME{br}[\s\S]*?END{br})',
repeats=False,
),
Quantity(
"dft",
fr'(DFT{br}[\w\s]*?END{br})',
sub_parser=TextParser(quantities=[
Quantity(
"exchange",
fr'EXCHANGE{br}(LDA|VBH|BECKE|PBE|PBESOL|mPW91|PWGGA|SOGGA|WCGGA)',
repeats=False,
),
Quantity(
"correlation",
fr'CORRELAT{br}(PZ|VBH|VWN|LYP|P86|PBE|PBESOL|PWGGA|PWLSD|WL)',
repeats=False,
),
Quantity(
"exchange_correlation",
fr'(SVWN|BLYP|PBEXC|PBESOLXC|SOGGAXC|B3PW|B3LYP|PBE0|PBESOL0|B1WC|WCILYP|B97H|PBE0-13|HYBRID|NONLOCAL|HSE06|HSESOL|HISS|RSHXLDA|wB97|wB97X|LC-WPBE|LC-WPBESOL|LC-WBLYP|M05-2X|M05|M062X|M06HF|M06L|M06|B2PLYP|B2GPPLYP|mPW2PLYP|DHYBRID)',
repeats=False,
),
]),
repeats=False,
),
Quantity("program_version", fr'{br} \*\s+CRYSTAL([\d]+)\s+\*', repeats=False, dtype=str),
Quantity("distribution", fr'{br} \*\s*({word} : \d+[\.\d+]*)', str_operation=lambda x: x, repeats=False),
Quantity("start_timestamp", fr' EEEEEEEEEE STARTING DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False),
Quantity("title", fr' EEEEEEEEEE STARTING DATE.*?{br}\s*(.*?){br}{br}', str_operation=lambda x: x, repeats=False),
Quantity("hamiltonian_type", fr' (KOHN-SHAM HAMILTONIAN|HARTREE-FOCK HAMILTONIAN)', str_operation=lambda x: x, repeats=False),
Quantity("xc_out", fr' \(EXCHANGE\)\[CORRELATION\] FUNCTIONAL:(\([\s\S]+?\)\[[\s\S]+?\])', str_operation=lambda x: x, repeats=False),
Quantity("hybrid_out", fr' HYBRID EXCHANGE - PERCENTAGE OF FOCK EXCHANGE\s+{flt_c}', repeats=False),
# Geometry optimization settings
Quantity('initial_trust_radius', fr' INITIAL TRUST RADIUS\s+{flt_c}', repeats=False),
Quantity('maximum_trust_radius', fr' MAXIMUM TRUST RADIUS\s+{flt_c}', repeats=False),
Quantity('maximum_gradient_component', fr' MAXIMUM GRADIENT COMPONENT\s+{flt_c}', repeats=False),
Quantity('rms_gradient_component', fr' R\.M\.S\. OF GRADIENT COMPONENT\s+{flt_c}', repeats=False),
Quantity('rms_displacement_component', fr' R\.M\.S\. OF DISPLACEMENT COMPONENTS\s+{flt_c}', repeats=False),
Quantity('geometry_change', fr' MAXIMUM DISPLACEMENT COMPONENT\s+{flt_c}', unit=ureg.bohr, repeats=False),
Quantity('energy_change', fr' THRESHOLD ON ENERGY CHANGE\s+{flt_c}', unit=ureg.hartree, repeats=False),
Quantity('extrapolating_polynomial_order', fr' EXTRAPOLATING POLYNOMIAL ORDER{ws}{integer_c}', repeats=False),
Quantity('max_steps', fr' MAXIMUM ALLOWED NUMBER OF STEPS\s+{integer_c}', repeats=False),
Quantity('sorting_of_energy_points', fr'SORTING OF ENERGY POINTS\:\s+{word_c}', repeats=False),
# System
Quantity("material_type", fr' ((?:MOLECULAR|SLAB) CALCULATION){br}', str_operation=lambda x: x, repeats=False),
Quantity("crystal_family", fr' CRYSTAL FAMILY\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False),
Quantity("crystal_class", fr' CRYSTAL CLASS \(GROTH - 1921\)\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False),
Quantity("space_group", fr' SPACE GROUP \(CENTROSYMMETRIC\)\s*:\s*([\s\S]+?)\s*{br}', str_operation=lambda x: x, repeats=False),
Quantity("dimensionality", fr' GEOMETRY FOR WAVE FUNCTION - DIMENSIONALITY OF THE SYSTEM\s+(\d)', repeats=False),
Quantity(
'lattice_parameters',
fr' (?:PRIMITIVE CELL - CENTRING CODE\s*[\s\S]*?\s*VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3{br}|PRIMITIVE CELL{br})' +\
fr' A B C ALPHA BETA GAMMA\s*' +\
fr'{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}',
shape=(6),
dtype=np.float64,
repeats=False,
),
Quantity(
"labels_positions",
fr' ATOMS IN THE ASYMMETRIC UNIT\s+{integer} - ATOMS IN THE UNIT CELL:\s+{integer}{br}' +\
fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 7),
dtype=str,
repeats=False,
),
# Used to capture an edited geometry. Can contain
# substitutions, supercells, deformations etc. in any order.
Quantity(
'system_edited',
fr' \*\s+GEOMETRY EDITING[\S\s]*?' +\
re.escape(' *******************************************************************************') + fr'{br}' +\
fr' LATTICE PARAMETERS \(ANGSTROMS AND DEGREES\) - BOHR =\s*0?\.\d+ ANGSTROM{br}' +\
fr' (?:PRIMITIVE CELL - CENTRING CODE [\s\S]*?VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3|PRIMITIVE CELL){br}' +\
fr'\s+A\s+B\s+C\s+ALPHA\s+BETA\s+GAMMA\s*{br}' +\
fr'(\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br}' +\
re.escape(' *******************************************************************************') + fr'{br}' +\
fr' ATOMS IN THE ASYMMETRIC UNIT\s+{integer} - ATOMS IN THE UNIT CELL:\s+{integer}{br}' +\
fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))(?:\s+R\(ANGS\))?\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'(?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}(?:\s+{flt})?{br})+)' +\
fr'{br}' +\
fr' T = ATOM BELONGING TO THE ASYMMETRIC UNIT',
sub_parser=TextParser(quantities=[
Quantity(
"lattice_parameters",
fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}',
shape=(6),
dtype=np.float64,
repeats=False,
),
Quantity(
"labels_positions",
fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 7),
dtype=str,
repeats=False,
),
Quantity(
"labels_positions_nanotube",
fr'\s+ATOM\s+X/A\s+Y\(ANGSTROM\)\s+Z\(ANGSTROM\)\s+R\(ANGS\)\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 8),
dtype=str,
repeats=False,
),
]),
repeats=False,
),
Quantity(
'lattice_vectors_restart',
fr' DIRECT LATTICE VECTOR COMPONENTS \(ANGSTROM\){br}' +\
fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}' +\
fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}' +\
fr'\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}',
shape=(3, 3),
dtype=np.float64,
repeats=False,
),
Quantity(
"labels_positions_restart",
fr' ATOM N\.AT\. SHELL X\(A\) Y\(A\) Z\(A\) EXAD N\.ELECT\.{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+{integer}\s+{word}\s+{integer}\s+{flt}\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 9),
dtype=str,
repeats=False,
),
Quantity("symmops", fr' NUMBER OF SYMMETRY OPERATORS\s*:\s*(\d){br}', repeats=False),
# Method
Quantity(
'basis_set',
re.escape(r' *******************************************************************************') +\
fr'{br} LOCAL ATOMIC FUNCTIONS BASIS SET{br}' +\
re.escape(r' *******************************************************************************') +\
fr'{br} ATOM X\(AU\) Y\(AU\) Z\(AU\) N. TYPE EXPONENT S COEF P COEF D/F/G COEF{br}' +\
fr'([\s\S]*?){br} INFORMATION',
sub_parser=TextParser(quantities=[
Quantity(
"basis_sets",
fr'({ws}{integer}{ws}{word}{ws}{flt}{ws}{flt}{ws}{flt}{br}(?:(?:\s+(?:\d+-\s+)?\d+\s+(?:S|P|SP|D|F|G)\s*{br}[\s\S]*?(?:{ws}{flt}(?:{ws})?{flt}(?:{ws})?{flt}(?:{ws})?{flt}{br})+)+)?)',
sub_parser=TextParser(quantities=[
Quantity(
"species",
fr'({ws}{integer}{ws}{word}{ws}{flt}{ws}{flt}{ws}{flt}{br})',
repeats=False,
),
Quantity(
"shells",
fr'(\s+(?:\d+-\s+)?\d+\s+(?:S|P|SP|D|F|G)\s*{br}[\s\S]*?(?:{ws}{flt}(?:{ws})?{flt}(?:{ws})?{flt}(?:{ws})?{flt}{br})+)',
sub_parser=TextParser(quantities=[
Quantity(
"shell_range",
r'(\s+(?:\d+-\s+)?\d+)',
str_operation=lambda x: "".join(x.split()),
repeats=False,
),
Quantity(
"shell_type",
fr'((?:S|P|SP|D|F|G))\s*{br}',
str_operation=lambda x: x.strip(),
repeats=False,
),
Quantity(
"shell_coefficients",
fr'{ws}({flt})(?:{ws})?({flt})(?:{ws})?({flt})(?:{ws})?({flt}){br}',
repeats=True,
dtype=np.float64,
shape=(4)
),
]),
repeats=True,
),
]),
repeats=True,
),
]),
repeats=False,
),
Quantity("fock_ks_matrix_mixing", fr' INFORMATION \*+.*?\*+.*?\:\s+FOCK/KS MATRIX MIXING SET TO\s+{integer_c}\s+\%{br}', repeats=False),
Quantity("coulomb_bipolar_buffer", fr' INFORMATION \*+.*?\*+.*?\:\s+COULOMB BIPOLAR BUFFER SET TO\s+{flt_c} Mb{br}', repeats=False),
Quantity("exchange_bipolar_buffer", fr' INFORMATION \*+.*?\*+.*?\:\s+EXCHANGE BIPOLAR BUFFER SET TO\s+{flt_c} Mb{br}', repeats=False),
Quantity("toldee", fr' INFORMATION \*+ TOLDEE \*+\s*\*+ SCF TOL ON TOTAL ENERGY SET TO\s+{flt_c}{br}', repeats=False),
Quantity("n_atoms_per_cell", r' N\. OF ATOMS PER CELL\s+' + integer_c, repeats=False),
Quantity("n_shells", r' NUMBER OF SHELLS\s+' + integer_c, repeats=False),
Quantity("n_ao", r' NUMBER OF AO\s+' + integer_c, repeats=False),
Quantity("n_electrons", r' N\. OF ELECTRONS PER CELL\s+' + integer_c, repeats=False),
Quantity("n_core_electrons", r' CORE ELECTRONS PER CELL\s+' + integer_c, repeats=False),
Quantity("n_symmops", r' N\. OF SYMMETRY OPERATORS\s+' + integer_c, repeats=False),
Quantity("tol_coulomb_overlap", r' COULOMB OVERLAP TOL\s+\(T1\) ' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity("tol_coulomb_penetration", r' COULOMB PENETRATION TOL\s+\(T2\) ' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity("tol_exchange_overlap", r' EXCHANGE OVERLAP TOL\s+\(T3\) ' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity("tol_pseudo_overlap_f", r' EXCHANGE PSEUDO OVP \(F\(G\)\)\s+\(T4\) ' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity("tol_pseudo_overlap_p", r' EXCHANGE PSEUDO OVP \(P\(G\)\)\s+\(T5\) ' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity("pole_order", r' POLE ORDER IN MONO ZONE\s+' + integer_c, repeats=False),
Quantity("calculation_type", fr' TYPE OF CALCULATION \:\s+(.*?{br}\s+.*?){br}', str_operation=lambda x: " ".join(x.split()), repeats=False),
Quantity('xc_functional', fr' \(EXCHANGE\)\[CORRELATION\] FUNCTIONAL:(\(.+\)\[.+\]){br}', str_operation=lambda x: x, repeats=False,),
Quantity("cappa", fr'CAPPA:IS1\s+{integer_c};IS2\s+{integer_c};IS3\s+{integer_c}; K PTS MONK NET\s+{integer_c}; SYMMOPS:\s*K SPACE\s+{integer_c};G SPACE\s+{integer_c}', repeats=False),
Quantity('scf_max_iteration', r' MAX NUMBER OF SCF CYCLES\s+' + integer_c, repeats=False),
Quantity('convergence_deltap', r'CONVERGENCE ON DELTAP\s+' + flt_crystal_c, str_operation=to_float, repeats=False),
Quantity('weight_f', r'WEIGHT OF F\(I\) IN F\(I\+1\)\s+' + integer_c, repeats=False),
Quantity('scf_threshold_energy_change', r'CONVERGENCE ON ENERGY\s+' + flt_crystal_c, str_operation=to_float, repeats=False, unit=ureg.hartree),
Quantity('shrink', r'SHRINK\. FACT\.\(MONKH\.\)\s+(' + integer + ws + integer + ws + integer + r')', repeats=False),
Quantity('n_k_points_ibz', r'NUMBER OF K POINTS IN THE IBZ\s+' + integer_c, repeats=False),
Quantity('shrink_gilat', r'SHRINKING FACTOR\(GILAT NET\)\s+' + integer_c, repeats=False),
Quantity('n_k_points_gilat', r'NUMBER OF K POINTS\(GILAT NET\)\s+' + integer_c, repeats=False),
# SCF
Quantity(
"scf_block",
r' CHARGE NORMALIZATION FACTOR([\s\S]*?) == SCF ENDED',
sub_parser=TextParser(quantities=[
Quantity(
'scf_iterations',
r'( CHARGE NORMALIZATION FACTOR[\s\S]*? (?:TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT PDIG|TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT MPP_KSPA|== SCF ENDED))',
sub_parser=TextParser(quantities=[
Quantity('charge_normalization_factor', fr' CHARGE NORMALIZATION FACTOR{ws}{flt}{br}', repeats=False),
Quantity('total_atomic_charges', fr' TOTAL ATOMIC CHARGES:{br}(?:{ws}{flt})+{br}', repeats=False),
Quantity('QGAM', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT QGAM TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False),
Quantity('BIEL2', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT BIEL2 TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False),
Quantity('energy_kinetic', fr' ::: KINETIC ENERGY\s+{flt_c}{br}', unit=ureg.hartree, repeats=False),
Quantity('energy_ee', fr' ::: TOTAL E-E\s+{flt_c}{br}', unit=ureg.hartree, repeats=False),
Quantity('energy_en_ne', fr' ::: TOTAL E-N \+ N-E\s+{flt_c}{br}', unit=ureg.hartree, repeats=False),
Quantity('energy_nn', fr' ::: TOTAL N-N\s+{flt_c}{br}', unit=ureg.hartree, repeats=False),
Quantity('virial_coefficient', fr' ::: VIRIAL COEFFICIENT\s+{flt_c}{br}', repeats=False),
Quantity('TOTENY', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT TOTENY TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False),
Quantity('integrated_density', fr' NUMERICALLY INTEGRATED DENSITY{ws}{flt}{br}', repeats=False),
Quantity('NUMDFT', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT NUMDFT TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False),
Quantity('energies', fr' CYC{ws}{integer}{ws}ETOT\(AU\){ws}{flt_c}{ws}DETOT{ws}{flt_c}{ws}tst{ws}{flt}{ws}PX{ws}{flt}{br}', repeats=False, dtype=np.float64, unit=ureg.hartree),
Quantity('FDIK', fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT FDIK TELAPSE{ws}{flt}{ws}TCPU{ws}{flt}{br}', repeats=False),
]),
repeats=True,
),
]),
repeats=False,
),
Quantity('number_of_scf_iterations', fr' == SCF ENDED - CONVERGENCE ON (?:ENERGY|TESTER)\s+E\(AU\)\s*{flt}\s*CYCLES\s+{integer_c}', repeats=False),
Quantity(
'energy_total',
fr' TOTAL ENERGY\((?:DFT|HF)\)\(AU\)\(\s*{integer}\)\s*{flt_c} DE\s*{flt} (?:tester|tst)\s*{flt}',
unit=ureg.hartree,
repeats=False,
),
# Geometry optimization steps
Quantity(
"geo_opt",
fr'( (?:COORDINATE AND CELL OPTIMIZATION|COORDINATE OPTIMIZATION) - POINT\s+1{br}' +\
r'[\s\S]*?' +\
re.escape(r' ******************************************************************') + fr'{br}' +\
fr'\s*\* OPT END - CONVERGED \* E\(AU\)\:\s+{flt}\s+POINTS\s+{integer})\s+\*{br}',
sub_parser=TextParser(quantities=[
Quantity(
'geo_opt_step',
fr' (?:COORDINATE AND CELL OPTIMIZATION|COORDINATE OPTIMIZATION) - POINT\s+{integer}{br}' +\
fr'([\s\S]*?)' +\
fr' (?:TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT OPTI|\* OPT END)',
sub_parser=TextParser(quantities=[
Quantity(
'lattice_parameters',
fr' (?:PRIMITIVE CELL - CENTRING CODE [\s\S]*?VOLUME=\s*{flt} - DENSITY\s*{flt} g/cm\^3{br}|PRIMITIVE CELL{br})' +\
fr' A B C ALPHA BETA GAMMA\s*' +\
fr'{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}\s+{flt_c}{br}',
shape=(6),
dtype=np.float64,
repeats=False,
),
Quantity(
"labels_positions",
fr'\s+ATOM\s+X(?:/A|\(ANGSTROM\))\s+Y(?:/B|\(ANGSTROM\))\s+Z(?:/C|\(ANGSTROM\))\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 7),
dtype=str,
repeats=False,
),
Quantity(
"labels_positions_nanotube",
fr'\s+ATOM\s+X/A\s+Y\(ANGSTROM\)\s+Z\(ANGSTROM\)\s+R\(ANGS\)\s*{br}' +\
re.escape(' *******************************************************************************') +\
fr'((?:\s+{integer}\s+(?:T|F)\s+{integer}\s+[\s\S]*?\s+{flt}\s+{flt}\s+{flt}\s+{flt}{br})+)',
shape=(-1, 8),
dtype=str,
repeats=False,
),
Quantity('energy', fr' TOTAL ENERGY\({word}\)\(AU\)\(\s*{integer}\)\s*{flt_c}', unit=ureg.hartree, repeats=False),
]),
repeats=True,
),
Quantity('converged', fr' \* OPT END - ([\s\S]*?) \* E\(AU\)\:\s+{flt}\s+POINTS\s+{integer}', repeats=False),
]),
repeats=False,
),
# Band structure
Quantity(
"band_structure",
re.escape(fr' *******************************************************************************') + fr'{br}' +\
fr' \* \*{br}' +\
fr' \* BAND STRUCTURE \*{br}' +\
fr'[\s\S]*?' +\
fr' \* FROM BAND\s+{integer} TO BAND\s+{integer}\s+\*{br}' +\
fr' \* TOTAL OF\s+{integer} K-POINTS ALONG THE PATH\s+\*{br}' +\
fr' \* \*{br}' +\
re.escape(r' *******************************************************************************') + fr'{br}' +\
fr'([\s\S]*?' +\
fr' ENERGY RANGE \(A\.U\.\)\s*{flt} - \s*{flt} EFERMI\s*{flt_c}{br})',
sub_parser=TextParser(quantities=[
Quantity(
'segments',
fr' (LINE\s+{integer} \( {flt} {flt} {flt}: {flt} {flt} {flt}\) IN TERMS OF PRIMITIVE LATTICE VECTORS{br}' +\
fr'\s+{integer} POINTS - SHRINKING_FACTOR\s*{integer}{br}' +\
fr' CARTESIAN COORD\.\s+\( {flt} {flt} {flt}\):\( {flt} {flt} {flt}\) STEP\s+{flt}{br}{br}{br})',
sub_parser=TextParser(quantities=[
Quantity(
'start_end',
fr'LINE\s+{integer} \( {flt_c} {flt_c} {flt_c}: {flt_c} {flt_c} {flt_c}\) IN TERMS OF PRIMITIVE LATTICE VECTORS{br}',
type=np.float64,
shape=(2, 3),
repeats=False,
),
Quantity(
'n_steps',
fr'\s+{integer_c} POINTS - ',
repeats=False,
),
Quantity(
'shrinking_factor',
fr'SHRINKING_FACTOR\s*{integer_c}{br}',
repeats=False,
),
]),
repeats=True,
),
Quantity("fermi_energy", fr' ENERGY RANGE \(A\.U\.\)\s*{flt} - \s*{flt} EFERMI\s*{flt_c}', repeats=False),
]),
repeats=False,
),
# DOS
Quantity(
'dos',
fr' RESTART WITH NEW K POINTS NET{br}' +\
fr'([\s\S]+?' +\
fr' TOTAL AND PROJECTED DENSITY OF STATES - FOURIER LEGENDRE METHOD{br}' +\
fr'[\s\S]+?)' +\
fr' TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT DOSS TELAPSE',
sub_parser=TextParser(quantities=[
Quantity(
'k_points',
fr' \*\*\* K POINTS COORDINATES \(OBLIQUE COORDINATES IN UNITS OF IS =\s*{integer_c}\){br}',
repeats=False,
),
Quantity(
'highest_occupied',
fr' TOP OF VALENCE BANDS - BAND\s*{integer}; K\s*{integer}; EIG {flt_c}\s*AU',
unit=ureg.hartree,
repeats=False,
),
Quantity(
'lowest_unoccupied',
fr' BOTTOM OF VIRTUAL BANDS - BAND\s*{integer}; K\s*{integer}; EIG\s*{flt_c}\s*AU',
unit=ureg.hartree,
repeats=False,
),
]),
repeats=False,
),
Quantity("end_timestamp", fr' EEEEEEEEEE TERMINATION DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False),
# Forces
Quantity(
'forces',
fr' CARTESIAN FORCES IN HARTREE/BOHR \(ANALYTICAL\){br}'
fr' ATOM X Y Z{br}' +\
fr'((?:' + ws + integer + ws + integer + ws + flt + ws + flt + ws + flt + fr'{br})*)',
shape=(-1, 5),
dtype=str,
repeats=False,
),
Quantity("end_timestamp", fr' EEEEEEEEEE TERMINATION DATE\s+(.*? TIME .*?){br}', str_operation=lambda x: x, repeats=False),
# Filepaths
Quantity("f25_filepath1", fr'file fort\.25 saved as ([\s\S]+?){br}', str_operation=lambda x: x, repeats=False),
Quantity("f25_filepath2", fr'BAND/MAPS/DOSS data for plotting fort.25 saved as ([\s\S]+?){br}', str_operation=lambda x: x, repeats=False),
]
)
return outputparser
def parse_f25(self, filepath):
"""Parses the f25 file containing e.g. the band structure energies."
"""
f25parser = TextParser(
filepath,
quantities=[
# Band structure energies
Quantity(
'segments',
fr'(-\%-0BAND\s*{integer}\s*{integer}\s?{flt}\s?{flt}\s?{flt}{br}' +\
fr'\s*{flt}\s*{flt}{br}' +\
fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\
fr'(?:\s*{flt})+)',
sub_parser=TextParser(quantities=[
Quantity(
'first_row',
fr'-\%-0BAND\s*{integer_c}\s*{integer_c}\s?{flt_c}\s?{flt_c}\s?{flt_c}{br}',
repeats=False,
),
Quantity(
'second_row',
fr'\s?{flt_c}\s?{flt_c}{br}',
repeats=False,
),
Quantity(
'energies',
fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\
fr'((?:{flt}\s?)+)',
str_operation=lambda x: x,
repeats=False,
),
]),
repeats=True,
),
# DOS values
Quantity(
"dos",
fr'(-\%-0DOSS\s*{integer}\s*{integer}\s?{flt}\s?{flt}\s?{flt}{br}' +\
fr'\s*{flt}\s?{flt}{br}' +\
fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\
fr'(?:\s*{flt})+)',
sub_parser=TextParser(quantities=[
Quantity(
'first_row',
fr'-\%-0DOSS\s*{integer_c}\s*{integer_c}\s?{flt_c}\s?{flt_c}\s?{flt_c}{br}',
repeats=False,
),
Quantity(
'second_row',
fr'\s?{flt_c}\s?{flt_c}{br}',
repeats=False,
),
Quantity(
'values',
fr'\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}\s*{integer}{br}' +\
fr'((?:\s*{flt})+)',
str_operation=lambda x: x,
repeats=False,
),
]),
repeats=False,
),
]
)
return f25parser
def parse(self, filepath, archive, logger):
# Read files
out = self.parse_output(filepath)
wrkdir, _ = os.path.split(filepath)
f25_filepath1 = out["f25_filepath1"]
f25_filepath2 = out["f25_filepath2"]
f25_filepath_original = f25_filepath1 if f25_filepath1 else f25_filepath2
f25 = None
if f25_filepath_original is not None:
_, f25_filename = os.path.split(f25_filepath_original)
f25_filepath = os.path.join(wrkdir, f25_filename)
if os.path.exists(f25_filepath):
f25 = self.parse_f25(f25_filepath)
# Run
run = archive.m_create(section_run)
run.program_name = 'Crystal'
run.program_version = out["program_version"]
run.program_basis_set_type = 'gaussians'
run.x_crystal_datetime = out["datetime"]
run.x_crystal_hostname = out["hostname"]
run.x_crystal_user = out["user"]
run.x_crystal_os = out["os"]
run.x_crystal_input_path = out["input_path"]
run.x_crystal_output_path = out["output_path"]
run.x_crystal_tmpdir = out["tmpdir"]
run.x_crystal_executable_path = out["executable_path"]
distribution = out["distribution"]
if distribution is not None:
dist, minor = distribution.split(" : ", 1)
run.x_crystal_distribution = dist
run.x_crystal_version_minor = minor
title = out["title"]
if title is not None:
run.x_crystal_run_title = title.strip()
run.time_run_date_start = to_unix_time(out["start_timestamp"])
run.time_run_date_end = to_unix_time(out["end_timestamp"])
# System. There are several alternative sources for this information
# depending on the run type.
system = run.m_create(section_system)
material_type = out["material_type"]
system_edited = out["system_edited"]
labels_positions = out["labels_positions"]
lattice_vectors_restart = out["lattice_vectors_restart"]
pbc = None if material_type == "MOLECULAR CALCULATION" else np.array([True, True, True])
# By default the system is read from the configuration at the beginning
# of the file: it may come from restart or clean start
pos_type = {
"MOLECULAR CALCULATION": "cartesian",
"SLAB CALCULATION": "slab",
None: "scaled",
}.get(material_type)
if labels_positions is not None:
atomic_numbers = labels_positions[:, 2]
atom_labels = labels_positions[:, 3]
atom_pos = labels_positions[:, 4:7]
lattice = out["lattice_parameters"]
elif lattice_vectors_restart is not None:
labels_positions = out["labels_positions_restart"]
atomic_numbers = labels_positions[:, 1]
atom_labels = labels_positions[:, 2]
atom_pos = labels_positions[:, 4:7]
lattice = lattice_vectors_restart
pos_type = "cartesian"
# If any geometry edits (supercells, substitutions, displacements,
# deformations, nanotube construction, etc.) are done on top of the
# original system, they override the original system.
if system_edited is not None:
if system_edited["labels_positions_nanotube"] is not None:
pos_type = "nanotube"
labels_positions = system_edited["labels_positions_nanotube"]
else:
labels_positions = system_edited["labels_positions"]
atomic_numbers = labels_positions[:, 2]
atom_labels = labels_positions[:, 3]
atom_pos = labels_positions[:, 4:7]
lattice = system_edited["lattice_parameters"]
cart_pos, atomic_numbers, atom_labels, lattice_vectors = to_system(
atomic_numbers,
atom_labels,
atom_pos,
lattice,
pos_type=pos_type,
)
system.lattice_vectors = lattice_vectors
system.configuration_periodic_dimensions = pbc
system.atom_positions = cart_pos
system.atom_species = atomic_numbers
system.atom_labels = atom_labels
dimensionality = out["dimensionality"]
system.x_crystal_dimensionality = dimensionality
crystal_family = out["crystal_family"]
system.x_crystal_family = crystal_family
crystal_class = out["crystal_class"]
system.x_crystal_class = crystal_class
n_symmops = out["n_symmops"]
system.x_crystal_n_symmops = n_symmops
space_group = out["space_group"]
system.x_crystal_space_group = space_group
# Method
method = run.m_create(section_method)
method.electronic_structure_method = 'DFT'
method.scf_max_iteration = out["scf_max_iteration"]
method.scf_threshold_energy_change = out["scf_threshold_energy_change"]
dftd3 = out["dftd3"]
if dftd3:
if dftd3["version"] == "VERSION 2":
method.van_der_Waals_method = "G06"
else:
method.van_der_Waals_method = "DFT-D3"
if out["grimme"]:
method.van_der_Waals_method = "G06"
# Try to primarily read the methodology from input
dft = out["dft"]
if dft:
exchange = dft["exchange"]
correlation = dft["correlation"]
exchange_correlation = dft["exchange_correlation"]
functionals = to_libxc(exchange, correlation, exchange_correlation)
if functionals:
for xc in functionals:
method.m_add_sub_section(section_method.section_XC_functionals, xc)
method.XC_functional = to_libxc_name(functionals)
# If methodology not reported in input, try to read from output
if dft is None or not functionals:
hamiltonian_type = out["hamiltonian_type"]
if hamiltonian_type == "HARTREE-FOCK HAMILTONIAN":
xc = section_XC_functionals()
xc.XC_functional_name = "HF_X"
xc.XC_functional_weight = 1.0
method.m_add_sub_section(section_method.section_XC_functionals, xc)
method.XC_functional = to_libxc_name([xc])
elif hamiltonian_type == "KOHN-SHAM HAMILTONIAN":
xc_output = out["xc_out"]
hybrid = out["hybrid_out"]
functionals = to_libxc_out(xc_output, hybrid)
if functionals:
for xc in functionals:
method.m_add_sub_section(section_method.section_XC_functionals, xc)
method.XC_functional = to_libxc_name(functionals)
method.x_crystal_fock_ks_matrix_mixing = out["fock_ks_matrix_mixing"]
method.x_crystal_coulomb_bipolar_buffer = out["coulomb_bipolar_buffer"]
method.x_crystal_exchange_bipolar_buffer = out["exchange_bipolar_buffer"]
method.x_crystal_toldee = out["toldee"]
method.x_crystal_n_atoms = out["n_atoms_per_cell"]
method.x_crystal_n_shells = out["n_shells"]
method.x_crystal_n_orbitals = out["n_ao"]
method.x_crystal_n_electrons = out["n_electrons"]
method.x_crystal_n_core_electrons = out["n_core_electrons"]
method.x_crystal_n_symmops = out["n_symmops"]
method.x_crystal_tol_coulomb_overlap = out["tol_coulomb_overlap"]
method.x_crystal_tol_coulomb_penetration = out["tol_coulomb_penetration"]
method.x_crystal_tol_exchange_overlap = out["tol_exchange_overlap"]
method.x_crystal_tol_pseudo_overlap_f = out["tol_pseudo_overlap_f"]
method.x_crystal_tol_pseudo_overlap_p = out["tol_pseudo_overlap_p"]
method.x_crystal_pole_order = out["pole_order"]
method.x_crystal_type_of_calculation = out["calculation_type"]
cappa = out["cappa"]
if cappa is not None:
method.x_crystal_is1 = cappa[0]
method.x_crystal_is2 = cappa[1]
method.x_crystal_is3 = cappa[2]
method.x_crystal_k_pts_monk_net = cappa[3]
method.x_crystal_symmops_k = cappa[4]
method.x_crystal_symmops_g = cappa[5]
method.x_crystal_weight_f = out["weight_f"]
method.x_crystal_shrink = out["shrink"]
method.x_crystal_shrink_gilat = out["shrink_gilat"]
method.x_crystal_convergence_deltap = out["convergenge_deltap"]
method.x_crystal_n_k_points_ibz = out["n_k_points_ibz"]
method.x_crystal_n_k_points_gilat = out["n_k_points_gilat"]
basis_set = out["basis_set"]
covered_species = set()
if basis_set is not None:
for bs in basis_set["basis_sets"]:
atomic_number = label_to_atomic_number(bs["species"][1])
shells = bs["shells"]
if atomic_number not in covered_species and shells is not None:
section_basis_set = section_basis_set_atom_centered()
section_basis_set.basis_set_atom_number = atomic_number
run.m_add_sub_section(section_run.section_basis_set_atom_centered, section_basis_set)
covered_species.add(atomic_number)
for shell in shells:
section_shell = x_crystal_section_shell()
section_shell.x_crystal_shell_range = str(shell["shell_range"])
section_shell.x_crystal_shell_type = shell["shell_type"]
section_shell.x_crystal_shell_coefficients = np.array(shell["shell_coefficients"])
section_basis_set.m_add_sub_section(section_basis_set_atom_centered.x_crystal_section_shell, section_shell)
# SCC
scc = run.m_create(section_single_configuration_calculation)
scf_block = out["scf_block"]
if scf_block is not None:
number_of_scf_iterations = out["number_of_scf_iterations"]
scc.single_configuration_calculation_converged = number_of_scf_iterations is not None
for scf in scf_block["scf_iterations"]:
energies = scf["energies"]
section_scf = section_scf_iteration()
section_scf.energy_total_scf_iteration = energies[0]
section_scf.energy_change_scf_iteration = energies[1]
energy_kinetic = scf["energy_kinetic"]
section_scf.electronic_kinetic_energy_scf_iteration = energy_kinetic
energy_ee = scf["energy_ee"]
section_scf.x_crystal_scf_energy_ee = energy_ee
energy_en_ne = scf["energy_en_ne"]
section_scf.x_crystal_scf_energy_en_ne = energy_en_ne
energy_nn = scf["energy_nn"]
section_scf.x_crystal_scf_energy_nn = energy_nn
virial_coefficient = scf["virial_coefficient"]
section_scf.x_crystal_scf_virial_coefficient = virial_coefficient
scc.m_add_sub_section(section_single_configuration_calculation.section_scf_iteration, section_scf)
scc.number_of_scf_iterations = len(scc.section_scf_iteration)
if out["energy_total"] is not None:
# If the final energy is found, replace the final SCF step energy
# with it, as it is more accurate.
if scc.section_scf_iteration:
scc.section_scf_iteration[-1].energy_total_scf_iteration = out["energy_total"]
scc.energy_total = out["energy_total"]
forces = out["forces"]
if forces is not None:
scc.atom_forces = forces[:, 2:].astype(float) * ureg.hartree / ureg.bohr
scc.single_configuration_calculation_to_system_ref = system
scc.single_configuration_to_calculation_method_ref = method
# Band structure
band_structure = out["band_structure"]
if band_structure is not None:
section_band = section_k_band()
section_band.band_structure_kind = "electronic"
section_band.reciprocal_cell = atomutils.reciprocal_cell(system.lattice_vectors.magnitude) * 1 / ureg.meter
segments = band_structure["segments"]
k_points = to_k_points(segments)
for i_seg, segment in enumerate(segments):
section_segment = section_k_band_segment()
start_end = segment["start_end"]
section_segment.band_k_points = k_points[i_seg]
section_segment.band_segm_start_end = start_end
section_segment.number_of_k_points_per_segment = k_points[i_seg].shape[0]
section_band.m_add_sub_section(section_k_band.section_k_band_segment, section_segment)
# Read energies from the f25-file. If the file is not found, the
# band structure is not written in the archive. The meaning of the
# values is given in an appendix of the Crystal manual.
if f25 is not None:
segments = f25["segments"]
prev_energy = None
prev_k_point = None
first_row = segments[0]["first_row"]
fermi_energy = first_row[4]
scc.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree
for i_seg, segment in enumerate(segments):
first_row = segment["first_row"]
cols = int(first_row[0])
rows = int(first_row[1])
energies = segment["energies"]
energies = to_array(cols, rows, energies)
# If a segment starts from the previous point, then
# re-report the energy. This way segments get the same
# treatment in the metainfo whether they are continuous
# or not.
start_k_point = section_band.section_k_band_segment[i_seg].band_k_points[0]
end_k_point = section_band.section_k_band_segment[i_seg].band_k_points[-1]
if prev_k_point is not None and np.allclose(prev_k_point, start_k_point):
energies = np.concatenate(([prev_energy], energies), axis=0)
section_band.section_k_band_segment[i_seg].band_energies = energies[None, :] * ureg.hartree
prev_energy = energies[-1]
prev_k_point = end_k_point
scc.m_add_sub_section(section_single_configuration_calculation.section_k_band, section_band)
# DOS
dos = out["dos"]
if dos is not None:
# Read values and energies from the f25-file. If the file is not
# found, the dos is not written in the archive. The meaning of the
# values is given in an appendix of the Crystal manual.
if f25 is not None:
dos_f25 = f25["dos"]
if dos_f25 is not None:
scc_dos = section_single_configuration_calculation()
scc_dos.single_configuration_calculation_to_system_ref = system
scc_dos.single_configuration_to_calculation_method_ref = method
sec_dos = section_dos()
first_row = dos_f25["first_row"]
cols = int(first_row[0])
rows = int(first_row[1])
de = first_row[3]
fermi_energy = first_row[4]
scc_dos.energy_reference_fermi = np.array([fermi_energy]) * ureg.hartree
second_row = dos_f25["second_row"]
start_energy = second_row[1]
sec_dos.dos_energies = (start_energy + np.arange(rows) * de) * ureg.hartree
dos_values = dos_f25["values"]
dos_values = to_array(cols, rows, dos_values)
sec_dos.dos_values = dos_values.T
sec_dos.dos_kind = "electronical"
sec_dos.number_of_dos_values = sec_dos.dos_values.shape[1]
scc_dos.m_add_sub_section(section_single_configuration_calculation.section_dos, sec_dos)
run.m_add_sub_section(section_run.section_single_configuration_calculation, scc_dos)
# Sampling
geo_opt = out["geo_opt"]
if geo_opt is not None:
steps = geo_opt["geo_opt_step"]
if steps is not None:
sampling_method = section_sampling_method()
sampling_method.sampling_method = "geometry_optimization"
sampling_method.geometry_optimization_energy_change = out["energy_change"]
sampling_method.geometry_optimization_geometry_change = out["geometry_change"]
run.m_add_sub_section(section_run.section_sampling_method, sampling_method)
fs = section_frame_sequence()
run.m_add_sub_section(section_run.section_frame_sequence, fs)
# First step is special: it refers to the initial system which
# was printed before entering the geometry optimization loop.
i_system = system
i_energy = steps[0]["energy"]
scc.energy_total = i_energy
frames = []
for step in steps[1:]:
i_scc = section_single_configuration_calculation()
i_system = section_system()
i_energy = step["energy"]
if step["labels_positions_nanotube"] is not None:
i_labels_positions = step["labels_positions_nanotube"]
else:
i_labels_positions = step["labels_positions"]
i_atomic_numbers = i_labels_positions[:, 2]
i_atom_labels = i_labels_positions[:, 3]
i_atom_pos = i_labels_positions[:, 4:7]
i_lattice_parameters = step["lattice_parameters"]
i_cart_pos, i_atomic_numbers, i_atom_labels, i_lattice_vectors = to_system(
i_atomic_numbers,
i_atom_labels,
i_atom_pos,
i_lattice_parameters,
pos_type,
)
i_system.atom_species = i_atomic_numbers
i_system.atom_labels = i_atom_labels
i_system.atom_positions = i_cart_pos
i_system.lattice_vectors = i_lattice_vectors
i_system.configuration_periodic_dimensions = pbc
i_scc.energy_total = i_energy
i_scc.single_configuration_calculation_to_system_ref = i_system
i_scc.single_configuration_to_calculation_method_ref = method
run.m_add_sub_section(section_run.section_system, i_system)
run.m_add_sub_section(section_run.section_single_configuration_calculation, i_scc)
frames.append(i_scc)
fs.frame_sequence_local_frames_ref = frames
fs.number_of_frames_in_sequence = len(fs.frame_sequence_local_frames_ref)
fs.frame_sequence_to_sampling_ref = sampling_method
fs.geometry_optimization_converged = geo_opt["converged"] == "CONVERGED"
# Remove ghost atom information. The metainfo does not provide a very
# good way to deal with them currently so they are simply removed.
remove_ghosts(run)
def to_k_points(segments):
"""Converts the given start and end points, the shrinking factor and the
number of steps into a list of concrete sampling points in k-space. The
shrinking factor tells into how many portions one reciprocal basis vector
is divided. This needs to be done manually, as the k-points are sometimes
not reported in the output.
"""
all_k_points = []
prev_point = None
for segment in segments:
start = segment["start_end"][0, :]
end = segment["start_end"][1, :]
shrinking_factor = segment["shrinking_factor"]
n_steps = segment["n_steps"]
# Segments that do not start from a previous segment get special
# treatment.
end_idx = n_steps + 1
if prev_point is None or not np.allclose(prev_point, start):
end_idx = n_steps
n_steps = n_steps - 1
delta = end - start
start_step = (shrinking_factor * start).astype(int)
step_size = (shrinking_factor * delta / n_steps).astype(int)
steps = (start_step + step_size * np.arange(0, end_idx)[:, None])
k_points = steps / shrinking_factor
all_k_points.append(k_points)
prev_point = end
return all_k_points
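# Illustrative example (our reading of the code above, not from the Crystal
# docs): a first segment from (0, 0, 0) to (1/2, 0, 0) with shrinking_factor=8
# and n_steps=4 yields the fractions 0, 1/8, 2/8 and 3/8; a following segment
# that starts where the previous one ended also includes its own end point.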
def to_system(atomic_numbers, labels, positions, lattice, pos_type="scaled", wrap=False):
"""Converts a Crystal-specific structure format into cartesian positions
and lattice vectors (if present). The conversion depends on the material
type.
"""
atomic_numbers = std_atomic_number(atomic_numbers.astype(int))
atom_labels = std_label(labels)
positions = positions.astype(np.float64)
# Get the lattice vectors
lattice_vectors = None
if lattice is not None:
if lattice.shape == (6,):
lattice_vectors = atomutils.cellpar_to_cell(lattice, degrees=True)
elif lattice.shape == (3, 3):
lattice_vectors = lattice
else:
lattice_vectors = None
# Convert positions based on the given type
if pos_type == "cartesian":
if lattice_vectors is not None and wrap:
cart_pos = atomutils.wrap_positions(positions, lattice_vectors)
else:
cart_pos = positions
elif pos_type == "slab":
n_atoms = atomic_numbers.shape[0]
scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64)
scaled_pos[:, 0:2] = positions[:, 0:2]
if wrap:
wrapped_pos = atomutils.wrap_positions(scaled_pos)
else:
wrapped_pos = scaled_pos
cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors)
cart_pos[:, 2:3] = positions[:, 2:3]
elif pos_type == "nanotube":
n_atoms = atomic_numbers.shape[0]
scaled_pos = np.zeros((n_atoms, 3), dtype=np.float64)
scaled_pos[:, 0:1] = positions[:, 0:1]
if wrap:
wrapped_pos = atomutils.wrap_positions(scaled_pos)
else:
wrapped_pos = scaled_pos
cart_pos = atomutils.to_cartesian(wrapped_pos, lattice_vectors)
cart_pos[:, 1:3] = positions[:, 1:3]
elif pos_type == "scaled":
scaled_pos = atomutils.wrap_positions(positions) if wrap else positions
cart_pos = atomutils.to_cartesian(scaled_pos, lattice_vectors)
if lattice_vectors is not None:
lattice_vectors *= ureg.angstrom
return cart_pos * ureg.angstrom, atomic_numbers, atom_labels, lattice_vectors
def to_float(value):
"""Transforms the Crystal-specific float notation into a floating point
number.
"""
base, exponent = value.split("**")
base = int(base)
exponent = int("".join(exponent.split()))
return pow(base, exponent)
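# For example, to_float("10**-6") returns 1e-06; whitespace inside the
# exponent is tolerated, so "10** - 6" parses the same way.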
def to_array(cols, rows, values):
"""Transforms the Crystal-specific f25 array syntax into a numpy array.
"""
values = values.replace("\n", "")
values = textwrap.wrap(values, 12)
values = np.array(values, dtype=np.float64)
values = values.reshape((rows, cols))
return values
def std_atomic_number(value):
"""Given an atomic numer in the NAT form (conventional atomic number, where
the real atomic number is the remainder when divided by 100), return the
actual atomic number.
"""
return value % 100
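# For example, the conventional numbers 8, 208 and 1008 all encode oxygen:
# std_atomic_number(np.array([8, 208, 1008])) -> array([8, 8, 8]).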
def remove_ghosts(run):
"""Removes ghost atoms from the given section_system. In Crystal ghost
atoms are indicated by the atomic number 0.
"""
for system in run.section_system:
ghosts_mask = system.atom_species == 0
if np.any(ghosts_mask):
system.atom_species = np.delete(system.atom_species, ghosts_mask)
system.atom_labels = np.delete(system.atom_labels, ghosts_mask)
system.atom_positions = np.delete(system.atom_positions.magnitude, ghosts_mask, axis=0)
def label_to_atomic_number(value):
"""Given a Crystal specific uppercase species name, returns the
corresponding atomic number.
"""
symbol = value.lower().capitalize()
atomic_number = ase.data.atomic_numbers[symbol]
return atomic_number
def atomic_numbers_to_labels(value):
"""Given a NAT atomic number, returns the
corresponding label.
"""
atomic_numbers = std_atomic_number(value)
labels = np.array(ase.data.chemical_symbols)[atomic_numbers]
return labels
def std_label(value):
"""Given Crystal specific uppercase species names, returns the capitalized
versions.
"""
labels = []
for label in value:
labels.append(label.lower().capitalize())
return labels
def to_unix_time(value):
"""Transforms the Crystal-specific float notation into a floating point
number.
"""
if value is None:
return None
value = value.strip()
date_time_obj = datetime.datetime.strptime(value, '%d %m %Y TIME %H:%M:%S.%f')
return date_time_obj.timestamp()
def to_libxc(exchange, correlation, exchange_correlation):
"""Transforms the Crystal-specific XC naming into a list of
section_XC_functionals.
"""
xc_list = []
# Handle the XC's defined with single shortcut
if exchange_correlation:
exchange_correlation = exchange_correlation.upper()
shortcut_map = {
"PBEXC": ["GGA_C_PBE", "GGA_X_PBE"],
"PBE0": ["HYB_GGA_XC_PBEH"],
"B3LYP": ["HYB_GGA_XC_B3LYP"],
"HSE06": ["HYB_GGA_XC_HSE06"],
"M06": ["HYB_MGGA_XC_M06"],
"M05-2X": ["HYB_MGGA_XC_M05_2X"],
"LC-WPBE": ["HYB_GGA_XC_LRC_WPBE"],
}
norm_xc = shortcut_map.get(exchange_correlation)
if norm_xc:
xc_list.extend(norm_xc)
# Handle the exchange part
if exchange:
exchange = exchange.upper()
exchange_map = {
"PBE": "GGA_X_PBE",
"PBESOL": "GGA_X_PBE_SOL",
"BECKE": "GGA_X_B88",
"LDA": "LDA_X",
"PWGGA": "GGA_X_PW91",
}
norm_x = exchange_map.get(exchange)
if norm_x:
xc_list.append(norm_x)
# Handle the correlation part
if correlation:
correlation = correlation.upper()
correlation_map = {
"PBE": "GGA_C_PBE",
"PBESOL": "GGA_C_PBE_SOL",
"PZ": "LDA_C_PZ",
"WFN": "LDA_C_VWN",
"PWGGA": "GGA_C_PW91",
}
norm_c = correlation_map.get(correlation)
if norm_c:
xc_list.append(norm_c)
# Go through the XC list, adding the sections and gathering a summary
functionals = []
for xc in xc_list:
section = section_XC_functionals()
weight = 1.0
section.XC_functional_name = xc
section.XC_functional_weight = weight
functionals.append(section)
return functionals
def to_libxc_out(xc, hybridization):
"""Transforms the Crystal-specific XC naming in the output into a list of
section_XC_functionals.
"""
xc_list = []
norm_x = norm_c = None
exchange, correlation = xc[1:-1].split(")[")
# Handle the exchange part
if exchange:
exchange = exchange.upper()
exchange_map = {
"PERDEW-BURKE-ERNZERHOF": "GGA_X_PBE",
"PERDEW-WANG GGA": "GGA_X_PW91",
"WU-COHEN GGA": "GGA_X_WC",
}
norm_x = exchange_map.get(exchange)
if norm_x:
xc_list.append(norm_x)
# Handle the correlation part
if correlation:
correlation = correlation.upper()
correlation_map = {
"PERDEW-BURKE-ERNZERHOF": "GGA_C_PBE",
"PERDEW-WANG GGA": "GGA_C_PW91",
"LEE-YANG-PARR": "GGA_C_LYP",
}
norm_c = correlation_map.get(correlation)
if norm_c:
xc_list.append(norm_c)
# Shortcuts
if norm_x == "GGA_X_PBE" and norm_c == "GGA_C_PBE" and hybridization == 25.00:
section = section_XC_functionals()
section.XC_functional_name = "HYB_GGA_XC_PBEH"
section.XC_functional_weight = 1
return [section]
# Go through the XC list, adding the sections and gathering a summary
functionals = []
if hybridization:
section = section_XC_functionals()
section.XC_functional_name = "HF_X"
section.XC_functional_weight = float(hybridization) / 100
functionals.append(section)
for xc in xc_list:
section = section_XC_functionals()
weight = 1.0
if hybridization and "_X_" in xc:
weight = 1.0 - float(hybridization) / 100
section.XC_functional_name = xc
section.XC_functional_weight = weight
functionals.append(section)
return functionals
def to_libxc_name(functionals):
"""Given a list of section_XC_functionals, returns the single string that
represents them all.
"""
return "+".join("{}*{}".format(x.XC_functional_weight, x.XC_functional_name) for x in sorted(functionals, key=lambda x: x.XC_functional_name))
| 52.033816 | 262 | 0.512781 | 7,016 | 64,626 | 4.502281 | 0.105616 | 0.046727 | 0.060783 | 0.008104 | 0.428834 | 0.372515 | 0.31474 | 0.289762 | 0.252818 | 0.225307 | 0 | 0.008146 | 0.350339 | 64,626 | 1,241 | 263 | 52.075745 | 0.744218 | 0.07302 | 0 | 0.34476 | 0 | 0.05191 | 0.254424 | 0.115596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01763 | false | 0 | 0.011753 | 0.000979 | 0.047013 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9723dfd188ebc676983837f7b0d623a43502f121 | 3,540 | py | Python | eval/img_mask_util.py | zhxl0903/Apple-Segmentation-FCN | 0a072e2a6dd3b68b8a95c78b14091d33549fb4a2 | [
"MIT"
] | 1 | 2022-01-22T14:40:22.000Z | 2022-01-22T14:40:22.000Z | eval/img_mask_util.py | zhxl0903/Apple-Segmentation-FCN | 0a072e2a6dd3b68b8a95c78b14091d33549fb4a2 | [
"MIT"
] | null | null | null | eval/img_mask_util.py | zhxl0903/Apple-Segmentation-FCN | 0a072e2a6dd3b68b8a95c78b14091d33549fb4a2 | [
"MIT"
] | null | null | null | import os
import numpy as np
from PIL import Image
import cv2
class ImageMaskUtil:
# RGB Color Maps for labelling
label_color_map = {
'background': (0, 0, 0), # background
'apple': (224, 0, 224), # apple
}
def __init__(self, img_dir=None, mask_dir=None, transforms=None):
print("***", img_dir, '\n', mask_dir)
if not img_dir or not os.path.exists(img_dir):
raise FileNotFoundError("Image path does not exist")
if not mask_dir or not os.path.exists(mask_dir):
raise FileNotFoundError("Mask path does not exist")
self.img_dir = img_dir
self.mask_dir = mask_dir
self.transforms = transforms
# Load all image and mask files, sorting them to ensure they are aligned
file_types = ("png", "jpg", "jpeg")
self.imgs = list(sorted(os.listdir(img_dir)))
self.imgs = [i for i in self.imgs if i.endswith(file_types)]
self.masks = list(sorted(os.listdir(mask_dir)))
self.masks = [i for i in self.masks if i.endswith(file_types)]
print("***", self.imgs, '\n', self.masks)
if len(self.imgs) != len(self.masks):
raise ValueError("Number of images must be equal to number of masks")
@staticmethod
def overlay_mask_on_image(image: np.ndarray, mask: np.ndarray, alpha=1.0, beta=0.9, gamma=0.0) -> np.ndarray:
"""
alpha = 1 # transparency for the original image
beta = 0.9 # transparency for the segmentation map
gamma = 0 # scalar added to each sum
"""
# mask = cv2.cvtColor(mask, cv2.COLOR_RGB2BGR)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return cv2.addWeighted(image, alpha, mask, beta, gamma)
def __getitem__(self, idx):
pass
def get_img_mask_overlay(self, idx, n_channels=3, cls="apple"):
if idx >= self.__len__():
raise IndexError(f"requested image index {idx} must be less than {self.__len__()}")
# Load image and mask
img_path = os.path.join(self.img_dir, self.imgs[idx])
mask_path = os.path.join(self.mask_dir, self.masks[idx])
img: Image.Image = Image.open(img_path).convert("RGB")
img_res = np.array(img)
mask: Image.Image = Image.open(mask_path)
# Convert the PIL image to np array
mask_np = np.array(mask)
# Convert non-zero values to constant 1, retains 0 (background)
mask_np = np.minimum(1, mask_np)
mask_col = []
for i in range(0, n_channels):
res = np.multiply(mask_np, np.full_like(mask_np, self.label_color_map[cls][i], dtype=np.uint8))
mask_col.append(res)
mask_res = np.stack(mask_col, axis=2)
return Image.fromarray(ImageMaskUtil.overlay_mask_on_image(img_res, mask_res))
def __len__(self):
return len(self.imgs)
def get_img_name(self, idx):
return self.imgs[idx]
def show_image(self, idx):
if idx >= self.__len__():
raise IndexError(f"requested image index {idx} must be less than {self.__len__()}")
img_path = os.path.join(self.img_dir, self.imgs[idx])
img: Image.Image = Image.open(img_path).convert("RGB")
img.show()
def show_mask(self, idx):
if idx >= self.__len__():
raise IndexError(f"requested mask index {idx} must be less than {self.__len__()}")
mask_path = os.path.join(self.mask_dir, self.masks[idx])
mask: Image.Image = Image.open(mask_path)
mask.show()
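# Example usage (illustrative paths, not part of the class):
#
#   util = ImageMaskUtil(img_dir="data/images", mask_dir="data/masks")
#   overlay = util.get_img_mask_overlay(0)  # PIL image with the apple mask blended in
#   overlay.show()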
| 37.263158 | 107 | 0.620339 | 515 | 3,540 | 4.071845 | 0.254369 | 0.025751 | 0.019075 | 0.026705 | 0.299475 | 0.269909 | 0.250835 | 0.221268 | 0.207439 | 0.207439 | 0 | 0.013354 | 0.259605 | 3,540 | 94 | 108 | 37.659574 | 0.786723 | 0.131073 | 0 | 0.206349 | 0 | 0 | 0.108653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0.015873 | 0.063492 | 0.031746 | 0.285714 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97265f76f9295dbfffcf1f1e54d2fcd3f6836b66 | 4,340 | py | Python | deploy.py | f5devcentral/volterra-management | 8f10cad2f87a50766f85db6dbb33e1eedcebc969 | [
"Apache-2.0"
] | null | null | null | deploy.py | f5devcentral/volterra-management | 8f10cad2f87a50766f85db6dbb33e1eedcebc969 | [
"Apache-2.0"
] | 7 | 2021-08-10T22:56:47.000Z | 2022-02-16T13:56:47.000Z | deploy.py | f5devcentral/volterra-management | 8f10cad2f87a50766f85db6dbb33e1eedcebc969 | [
"Apache-2.0"
] | null | null | null | from az.cli import az
import os, configparser
def checkVars(config):
required_vars = {
'AADclientID': False,
'AADtenantID': False,
'AADsecret': False,
'AADGroupName': False,
'VoltTenantApiToken': False,
'VoltTenantTokenName': False,
'VoltTenantName': False,
'Region': False,
'ResourceGroupName': False,
'StorageName': False,
'KeyVaultName': False,
'FunctionAppName': False,
'TeamsWebhookUrl': False
}
for s in config.sections():
for v in required_vars:
required_vars[v] = config.has_option(s, v)
if required_vars[v] == False:
raise ValueError("A value must be provided for: {0} in section: {1}".format(v, s))
def kvSecret(vault: str, name: str, value: str):
return azCommand("keyvault secret set --vault-name {0} --name {1} --value {2}".format(vault, name, value))
def appSetting(name: str, vault: str, function: str, resourceGroup: str):
settingURI = azCommand("keyvault secret show --vault-name {0} --name {1} --query id".format(vault, name))
return azCommand('functionapp config appsettings set --name {0} --resource-group {1} --settings "{2}=@Microsoft.KeyVault(SecretUri={3})"'.format(function, resourceGroup, name, settingURI))
def azCommand(command: str):
res = az(command)
if res[0]:
raise RuntimeError(res[2])
return res[1]
def azCmdNoError(command: str):
res = az(command)
#NOTE:: Intentionally returning the entire dict response (in case we need to do something else with it)
return res
def deployBase(section):
secrets = {
"VoltTenantName" : section['VoltTenantName'],
"VoltTenantApiToken" : section['VoltTenantApiToken'],
"VoltTenantTokenName" : section['VoltTenantTokenName'],
"AADclientID" : section['AADclientID'],
"AADtenantID" : section['AADtenantID'],
"AADsecret" : section['AADsecret'],
"AADGroupName" : section['AADGroupName'],
"TeamsWebhookUrl" : section['TeamsWebhookUrl']
}
createRG = "group create --name {0} --location {1}" \
.format(section['ResourceGroupName'], section['Region'])
azCommand(createRG)
createSA = "storage account create --name {0} --location {1} --resource-group {2} --sku Standard_LRS" \
.format(section['StorageName'], section['Region'], section['ResourceGroupName'])
azCommand(createSA)
#KeyVaults are, evidently, **not** idempotent in the Azure CLI. We need treat them differently.
createKV = "keyvault create --name {0} --resource-group {1} --location {2}" \
.format(section['KeyVaultName'], section['ResourceGroupName'], section['Region'])
try:
azCommand(createKV)
except:
print("KeyVault likely already exists. Skipping creation.")
pass
for s in secrets:
kvSecret(section['KeyVaultName'], s, secrets[s])
createApp = "functionapp create --name {0} --storage-account {1} --consumption-plan-location {2} --resource-group {3} --os-type linux --functions-version 3 --runtime python" \
.format(section['FunctionAppName'], section['StorageName'], section['Region'], section['ResourceGroupName'])
azCommand(createApp)
appId = "functionapp identity assign --resource-group {0} --name {1}" \
.format(section['ResourceGroupName'], section['FunctionAppName'])
azCommand(appId)
principalId = azCommand("functionapp identity show --resource-group {0} --name {1} --query principalId".format(section['ResourceGroupName'], section['FunctionAppName']))
kvPolicy = "keyvault set-policy --name {0} --resource-group {1} --object-id {2} --secret-permission get list" \
.format(section['KeyVaultName'], section['ResourceGroupName'], principalId)
azCommand(kvPolicy)
for a in secrets:
appSetting(a, section['KeyVaultName'], section['FunctionAppName'], section['ResourceGroupName'])
def main():
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'funcConfig.ini'))
checkVars(config)
for section in config.sections():
deployBase(config[section])
print("Deployment for {0} complete.".format(section))
print("All Deployments Complete.")
if __name__ == "__main__":
main()
| 40.560748 | 192 | 0.654608 | 453 | 4,340 | 6.231788 | 0.331126 | 0.014169 | 0.008502 | 0.019129 | 0.204038 | 0.045342 | 0.045342 | 0 | 0 | 0 | 0 | 0.010089 | 0.200691 | 4,340 | 106 | 193 | 40.943396 | 0.80369 | 0.045161 | 0 | 0.02381 | 0 | 0.047619 | 0.402852 | 0.016191 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.011905 | 0.02381 | 0.011905 | 0.154762 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9726974206475436bf6ec6fcd8ca28dbc44033c4 | 710 | py | Python | Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/109.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/109.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/109.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | from pacotes import ex109
print('\033[36;40mExercício Python #109 - Formatando Moedas em Python\033[m\n')
v = float(input('Digite o valor: '))
escolhaMoeda = False
while True:
escolha = str(input('Ele devera ser formatado como moeda? [s/n]: ')).strip().lower()[0]
if escolha == 's' or escolha == 'n':
if escolha == 's':
escolhaMoeda = True
break
else:
print('\nVALOR INVÁLIDO. TENTE NOVAMENTE!\n')
print(f'\nA metade de {v} é {ex109.metade(v, escolhaMoeda)}')
print(f'O dobro de {v} é {ex109.dobro(v, escolhaMoeda)}')
print(f'Aumentando 10%, temos {ex109.aumentar(v, 10, escolhaMoeda)}')
print(f'Reduzindo 13%, temos {ex109.diminuir(v, 13, escolhaMoeda)}') | 33.809524 | 91 | 0.647887 | 102 | 710 | 4.509804 | 0.568627 | 0.052174 | 0.117391 | 0.03913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064014 | 0.185915 | 710 | 21 | 92 | 33.809524 | 0.731834 | 0 | 0 | 0 | 0 | 0.0625 | 0.541491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9728ac0573f8175100d55ffa0754e092810cb584 | 662 | py | Python | loadbalanceRL/lib/environment/cellular/dev/apis.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 11 | 2018-10-29T06:50:43.000Z | 2022-03-28T14:26:09.000Z | loadbalanceRL/lib/environment/cellular/dev/apis.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 1 | 2022-03-01T13:46:25.000Z | 2022-03-01T13:46:25.000Z | loadbalanceRL/lib/environment/cellular/dev/apis.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 6 | 2019-02-05T20:01:53.000Z | 2020-09-04T12:30:00.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
""" Contains list of APIs for Development model """
__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
# Index
INDEX = '/index'
# Environment params
NUM_UES = '/num_ues'
NUM_APS = '/num_aps'
AP_LIST = '/ap_list'
AP_INFO = '/ap_info/'
BR_LIST = '/br_list'
BR_INFO = '/br_info/'
UE_LIST = '/ue_list'
UE_INFO = '/ue_info/'
RESET_NETWORK = '/reset_network'
RESET_NETWORK_AFTER_MOVE = '/reset_network_after_move'
NEIGHBORING_APS = '/neighboring_aps/'
UE_THROUGHPUT = '/ue_throughput/'
UE_SLA = '/ue_sla/'
UE_SIGNAL_POWER = '/ue_signal_power/'
AP_SLAS = '/ap_slas/'
HANDOFF = '/handoff/'
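# Example (illustrative): these fragments are joined onto a base address to
# form request URLs; the base below is a hypothetical development server.
#
#   BASE = 'http://localhost:8000'
#   url = BASE + AP_INFO + str(ap_id)  # e.g. http://localhost:8000/ap_info/3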
| 22.066667 | 76 | 0.702417 | 98 | 662 | 4.336735 | 0.469388 | 0.112941 | 0.042353 | 0.112941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010417 | 0.129909 | 662 | 29 | 77 | 22.827586 | 0.727431 | 0.172205 | 0 | 0 | 0 | 0 | 0.460967 | 0.124535 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
972a2b2e21605641fa0b11bb08a11cacd4d680ed | 9,349 | py | Python | src/cli.py | terraspanner/terraspanner-cli | 0538e8d17fdd1bd03b999fe4386e95c896c9611c | [
"Apache-2.0"
] | null | null | null | src/cli.py | terraspanner/terraspanner-cli | 0538e8d17fdd1bd03b999fe4386e95c896c9611c | [
"Apache-2.0"
] | null | null | null | src/cli.py | terraspanner/terraspanner-cli | 0538e8d17fdd1bd03b999fe4386e95c896c9611c | [
"Apache-2.0"
] | null | null | null | from optparse import OptionParser
import os
import tempfile
from git import Repo
from python_terraform import *
import logging
import json
import re
import copy
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
tf = Terraform()
default_git_user=os.getenv('GIT_USER')
default_git_token=os.getenv('GIT_TOKEN')
default_git_repo=os.getenv('GIT_REPO')
default_git_domain=os.getenv('GIT_DOMAIN')
def tf_plan(arguments, options):
path=arguments[0]
return_code, stdout, stderr = tf.plan(path,
compact_warnings = options.compact_warnings,
destroy = options.destroy,
detailed_exitcode = options.detailed_exitcode,
lock = options.lock,
no_color = options.no_color,
lock_timeout = options.lock_timeout,
out = options.out,
state = options.state,
parallelism = options.parallelism,
refresh = options.refresh,
target = options.target,
var_file = options.var_file,
var={var.split()[0] : var.split()[1] for var in options.var} if options.var is not None else None
)
if stdout is not None:
logging.info(stdout)
if return_code != 0:
raise Exception(stderr)
def tf_apply(arguments, options):
path=arguments[0]
return_code, stdout, stderr = tf.apply(path,
auto_approve = options.auto_approve,
backup = options.backup,
state_out = options.state_out,
compact_warnings = options.compact_warnings,
lock = options.lock,
no_color = options.no_color,
lock_timeout = options.lock_timeout,
state = options.state,
parallelism = options.parallelism,
refresh = options.refresh,
target = options.target,
var_file = options.var_file,
var={var.split()[0] : var.split()[1] for var in options.var} if options.var is not None else None
)
if stdout is not None:
logging.info(stdout)
if return_code != 0:
raise Exception(stderr)
def try_get_target_from_commit(local_repo_path):
try:
repo = Repo(local_repo_path)
last_commit_message = repo.head.commit.message
if "[terraspanner]" not in last_commit_message:
return None
parameters=json.loads(last_commit_message[len("[terraspanner]")+1:len(last_commit_message)-2])
return parameters['target']
except Exception as ex:
logging.debug(ex)
return None
def try_get_var_from_commit(local_repo_path):
try:
repo = Repo(local_repo_path)
last_commit_message = repo.head.commit.message
if "[terraspanner]" not in last_commit_message:
return None
parameters=json.loads(last_commit_message[len("[terraspanner]")+1:len(last_commit_message)-2])
return parameters['var']
except Exception as ex:
logging.debug(ex)
return None
tf_commands = {
'plan': tf_plan,
'apply': tf_apply
}
def run_tf_command(arguments, options):
if options.target is None:
options.target = try_get_target_from_commit(options.local_repo_path)
if options.var is None:
options.var = try_get_var_from_commit(options.local_repo_path)
command=arguments[0]
command_arguments=arguments[1:len(arguments)]
tf_commands[command](command_arguments, options)
def validate_trigger_terraform_repo(options):
if options.token is None or options.domain is None or options.repo is None:
raise Exception('some parameters are missing')
def trigger_terraform_repo(_, options):
validate_trigger_terraform_repo(options)
with tempfile.TemporaryDirectory() as temp_repo_dir:
git_url=f'https://{options.token}@{options.domain}/{options.repo}.git'
repo = Repo.clone_from(git_url, temp_repo_dir)
repo.git.commit('--allow-empty','-m', f'"[terraspanner]{json.dumps({ "target": options.target, "var": options.var })}"')
repo.git.push()
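# The empty commit above carries its parameters in the message, e.g.
#   "[terraspanner]{"target": ["module.app"], "var": ["region us-east-1"]}"
# (values illustrative); try_get_target_from_commit and
# try_get_var_from_commit later parse them back out of the commit log.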
commands = {
'trigger': trigger_terraform_repo,
'tf': run_tf_command
}
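# Example invocations (illustrative paths and values):
#   python cli.py trigger -t $GIT_TOKEN -d github.com -r org/infra --target module.app
#   python cli.py tf plan ./infra --var "region us-east-1"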
def main():
parser = OptionParser()
#trigger options
parser.add_option('-t','--git-token', dest='token', help='git token', metavar='GIT_TOKEN', default=default_git_token)
parser.add_option('-d','--git-domain', dest='domain', help='git domain (ex. github.com)', metavar='GIT_DOMAIN', default=default_git_domain)
parser.add_option('-r','--git-repo', dest='repo', help='git repo (ex. )', metavar='GIT_DOMAIN', default=default_git_domain)
#repo finder
parser.add_option('-l','--local-repo-path', dest='local_repo_path', help='local repository path (defaults to local folder)', metavar='GIT_DOMAIN', default=os.getcwd())
#terraform plan options
parser.add_option('--compact-warnings', dest='compact_warnings', action='store_true', help='If Terraform produces any warnings that are not accompanied by errors, show them in a more compact form that includes only the summary messages.')
parser.add_option('--destroy', dest='destroy', action='store_true', help='If set, a plan will be generated to destroy all resources managed by the given configuration and state.')
parser.add_option('--detailed-exitcode', dest='detailed_exitcode', action='store_true', help='return detailed exit codes when the command exits.')
parser.add_option('--lock', dest='lock', action='store_true', help='Lock the state file when locking is supported.')
parser.add_option('--no-color', dest='no_color', action='store_true', help='If specified, output won\'t contain any color.')
parser.add_option('--lock-timeout', dest='lock_timeout', help='Duration to retry a state lock.', metavar='TF_LOCK_TIMEOUT')
parser.add_option('--out', dest='out', help='Write a plan file to the given path. This can be used as input to the "apply" command.', metavar='TF_OUT')
parser.add_option('--state', dest='state', help='Path to a Terraform state file to use to look up Terraform-managed resources. By default it will use the state "terraform.tfstate" if it exists.', metavar='TF_STATE')
parser.add_option('--parallelism', dest='parallelism', help='Limit the number of concurrent operations. Defaults to 10.', metavar='TF_PARALLELISM')
parser.add_option('--refresh', dest='refresh', action='store_true', help='Update state prior to checking for differences.', metavar='TF_REFRESH')
parser.add_option('--target', dest='target', action='append', help='Resource to target. Operation will be limited to this resource and its dependencies. This flag can be used multiple times.', metavar='TF_TARGET')
parser.add_option('--var', dest='var', action='append', help='Set a variable in the Terraform configuration. This flag can be set multiple times.', metavar='TF_VAR')
parser.add_option('--var-file', dest='var_file', help='Set variables in the Terraform configuration from a file.', metavar='TF_VAR_FILE')
#terraform apply options
parser.add_option('--auto-approve', dest='auto_approve', action='store_true', help='Skip interactive approval of plan before applying.', default=True)
parser.add_option('--backup', dest='backup', help='Path to backup the existing state file before modifying. Defaults to the "-state-out" path with ".backup" extension. Set to "-" to disable backup.')
parser.add_option('--state-out', dest='state_out', help='Path to write state to that is different than "-state". This can be used to preserve the old state.')
# The remaining apply options (compact-warnings, lock, no-color, lock-timeout,
# state, parallelism, refresh, target, var and var-file) are shared with
# "plan" and are already registered above, so they are not re-added here.
(options, arguments) = parser.parse_args()
command=arguments[0]
command_args=arguments[1:len(arguments)]
try:
commands[command](command_args,options)
except Exception as ex:
logging.error('command does not exist or failed with: %s', ex)
if __name__ == '__main__':
main()
| 57.355828 | 244 | 0.708525 | 1,301 | 9,349 | 4.939277 | 0.166026 | 0.042017 | 0.070028 | 0.032524 | 0.619203 | 0.582944 | 0.573607 | 0.561469 | 0.561469 | 0.537193 | 0 | 0.002436 | 0.165793 | 9,349 | 163 | 245 | 57.355828 | 0.821516 | 0.182479 | 0 | 0.37037 | 0 | 0.014815 | 0.306422 | 0.006946 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059259 | false | 0 | 0.066667 | 0 | 0.17037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
972cb8d5d6817e9274b9009a061e7f62e70b9c3c | 543 | py | Python | 07. Python-Fundamentals-Strings-and-Text-Processing/07 Serialise.py | Bugzey/Softuni-Python-Fundamentals | 6f66e143809988398896cfc771cce1db9220df27 | [
"MIT"
] | 1 | 2021-01-31T00:14:08.000Z | 2021-01-31T00:14:08.000Z | 07. Python-Fundamentals-Strings-and-Text-Processing/07 Serialise.py | Bugzey/Softuni-Python-Fundamentals | 6f66e143809988398896cfc771cce1db9220df27 | [
"MIT"
] | null | null | null | 07. Python-Fundamentals-Strings-and-Text-Processing/07 Serialise.py | Bugzey/Softuni-Python-Fundamentals | 6f66e143809988398896cfc771cce1db9220df27 | [
"MIT"
] | null | null | null | # Get a string, print a list of chars and the indices of those chars
user_input = input()
str_order = list(user_input)
str_dict = {key: [] for key in str_order}
for key in str_dict.keys():
start_index = 0
while True:
try:
start_index = user_input.index(key, start_index)
str_dict[key].append(str(start_index))
start_index += 1
except ValueError:
break
result = [key + ':' + '/'.join(str_dict[key]) for key in str_dict.keys()]
print(*result, sep = '\n')
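# Example (illustrative): for the input "hello" this prints
#   h:0
#   e:1
#   l:2/3
#   o:4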
| 28.578947 | 73 | 0.61326 | 81 | 543 | 3.91358 | 0.432099 | 0.132492 | 0.094637 | 0.104101 | 0.217666 | 0.217666 | 0.132492 | 0 | 0 | 0 | 0 | 0.005013 | 0.265193 | 543 | 18 | 74 | 30.166667 | 0.789474 | 0.121547 | 0 | 0 | 0 | 0 | 0.008457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
972d6292a88fb5c55480bbdced96a910e37ff845 | 1,323 | py | Python | 2algo/results and data complilation/time_complexity.py | allengrr/deadlock_project | 933878077c45a7df04daa087407bb2620c064617 | [
"MIT"
] | null | null | null | 2algo/results and data complilation/time_complexity.py | allengrr/deadlock_project | 933878077c45a7df04daa087407bb2620c064617 | [
"MIT"
] | null | null | null | 2algo/results and data complilation/time_complexity.py | allengrr/deadlock_project | 933878077c45a7df04daa087407bb2620c064617 | [
"MIT"
] | 1 | 2021-03-21T17:54:26.000Z | 2021-03-21T17:54:26.000Z | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
# marker size
n = 10
x = np.arange(1, 100, 10)
# x = np.arange(1, 7)
algo_dict = {'RMS+Bankers': r'$ALG_1$',
'EDF+Bankers': r'$ALG_2$',
'RMS+wound wait': r'$ALG_3$',
'RMS+wait die': r'$ALG_4$',
'EDF+wound wait': r'$ALG_5$',
'EDF+wait die': r'$ALG_6$'}
y1 = x ** 2
y2 = x * (2 ** (1 / x) - 1)
y3 = x * np.log2(x)
y4 = x
y5 = x
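# The terms below sketch the cost of each scheduler/deadlock pairing (our
# reading of the code, not stated in the source): y2 is the Liu-Layland RMS
# schedulability bound n*(2^(1/n) - 1), y3 the n*log2(n) EDF term, y1 the
# quadratic Banker's-style safety check, and y4/y5 linear wound-wait and
# wait-die checks.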
case1 = y1 + y2
case2 = y3 + y1
case3 = y2 + y4
case4 = y2 + y5
case5 = y3 + y4
case6 = y3 + y5
plt.grid(True)
plt.yscale('log')
plt.plot(x, case1, 'r--+', label=algo_dict['RMS+Bankers'], markersize=n)
plt.plot(x, case2, 'g-->', label=algo_dict['EDF+Bankers'], markersize=n)
plt.plot(x, case3, 'y--o', label=algo_dict['RMS+wound wait'], markersize=n)
plt.plot(x, case4, 'b--*', label=algo_dict['RMS+wait die'], markersize=n)
plt.plot(x, case5, 'c--s', label=algo_dict['EDF+wound wait'], markersize=n)
plt.plot(x, case6, 'k--^', label=algo_dict['EDF+wait die'], markersize=n)
plt.ylabel('No of Process')
plt.xlabel('No of Resources')
plt.title('Time Complexity Analysis')
# ax_dl.set_title('Deadlock Prevention/Avoidence Algorithms')
plt.legend()
# ax_rt.legend()
plt.show()
# https://matplotlib.org/3.1.1/tutorials/text/mathtext.html
| 26.46 | 75 | 0.617536 | 226 | 1,323 | 3.544248 | 0.384956 | 0.069913 | 0.059925 | 0.11236 | 0.223471 | 0.134831 | 0.069913 | 0 | 0 | 0 | 0 | 0.050274 | 0.173091 | 1,323 | 50 | 76 | 26.46 | 0.681901 | 0.123205 | 0 | 0 | 0 | 0 | 0.240693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
972f8b6751c5b2afa662f260119fde2fc1802242 | 3,633 | py | Python | ykdl/extractors/pps.py | Fearyncess/ykdl | d12a7b4dbad936e6312e084848a19378ed47bbd7 | [
"MIT"
] | 1,153 | 2016-06-02T09:57:22.000Z | 2021-05-14T13:02:35.000Z | ykdl/extractors/pps.py | Fearyncess/ykdl | d12a7b4dbad936e6312e084848a19378ed47bbd7 | [
"MIT"
] | 515 | 2016-06-02T10:03:10.000Z | 2021-05-14T14:37:08.000Z | ykdl/extractors/pps.py | Fearyncess/ykdl | d12a7b4dbad936e6312e084848a19378ed47bbd7 | [
"MIT"
] | 274 | 2016-06-13T12:31:20.000Z | 2021-04-13T02:20:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.util.html import get_content, add_header
from ykdl.util.match import match1
from ykdl.compact import urlencode
from .iqiyi.util import get_macid
import json
import time
import random
import hashlib
def gsign(params):
s = []
for key in sorted(params.keys()):
s.append('{}:{}'.format(key, params[key]))
s.append('w!ytDgy#lEXWoJmN4HPf')
s = ''.join(s)
return hashlib.sha1(s.encode('utf8')).hexdigest()
def getlive(uid, rate='source'):
tm = int(time.time())
api = 'https://m-glider-xiu.pps.tv/v2/stream/get.json'
params = {
'type_id': 1,
'vid': 1,
'anchor_id': uid,
'app_key': 'show_web_h5',
'version': '1.0.0',
'platform': '1_10_101',
'time': tm,
'netstat': 'wifi',
'device_id': get_macid(),
'bit_rate_type': rate,
'protocol': 5,
}
params['sign'] = gsign(params)
data = urlencode(params)
if not isinstance(data, bytes):
data = data.encode()
html = get_content(api, data=data)
return json.loads(html)
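# Note: the request above is signed by gsign(), which joins the sorted
# parameters as "key:value" strings, appends the fixed salt and takes the
# SHA-1 hex digest; the result is sent as the "sign" field.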
class PPS(VideoExtractor):
name = u"奇秀(Qixiu)"
ids = ['TD', 'HD', 'SD']
rate_2_id = {
'source': 'TD',
'high': 'HD',
'smooth': 'SD'
}
rate_2_profile = {
'source': u'超清',
'high': u'高清',
'smooth': u'标清'
}
def prepare(self):
info = VideoInfo(self.name, True)
html = get_content(self.url)
self.vid = match1(html, '"user_id":"([^"]+)",')
title = json.loads(match1(html, '"room_name":("[^"]*"),'))
artist = json.loads(match1(html, '"nick_name":("[^"]+"),'))
info.title = u'{} - {}'.format(title, artist)
info.artist = artist
def get_live_info(rate='source'):
data = getlive(self.vid, rate)
self.logger.debug('data:\n' + str(data))
if data['code'] != 'A00000':
return data.get('msg')
data = data['data']
url = data.get('https_flv') or data.get('flv') or data.get('rtmp')
if url:
url = url.replace('rtmp://', 'http://')
ran = random.randrange(10000)
if '?' in url:
url = '{}&ran={}'.format(url, ran)
else:
url = '{}?ran={}'.format(url, ran)
stream_profile = self.rate_2_profile[rate]
stream_id = self.rate_2_id[rate]
info.stream_types.append(stream_id)
info.streams[stream_id] = {
'video_profile': stream_profile,
'container': 'flv',
'src' : [url],
'size': float('inf')
}
error_msges = []
if rate == 'source':
rate_list = data['rate_list']
if 'source' in rate_list:
rate_list.remove('source')
for rate in rate_list:
error_msg = get_live_info(rate)
if error_msg:
error_msges.append(error_msg)
if error_msges:
return ', '.join(error_msges)
error_msg = get_live_info()
if error_msg:
self.logger.debug('error_msg:\n' + error_msg)
assert len(info.stream_types), error_msg or 'can\'t play this live video!!'
info.stream_types = sorted(info.stream_types, key=self.ids.index)
return info
site = PPS()
| 30.529412 | 83 | 0.512524 | 429 | 3,633 | 4.198135 | 0.358974 | 0.035536 | 0.033315 | 0.021099 | 0.041088 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013636 | 0.333884 | 3,633 | 118 | 84 | 30.788136 | 0.730579 | 0.011561 | 0 | 0.019802 | 0 | 0 | 0.134857 | 0.01226 | 0 | 0 | 0 | 0 | 0.009901 | 1 | 0.039604 | false | 0 | 0.09901 | 0 | 0.237624 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97325c37bd3759407fc051583513adc0103085dc | 784 | py | Python | python/mathematics/mat_exp.py | jtpio/algo-toolbox | d0f675889889ad52d853d948b0191bbd14c1e9cd | [
"MIT"
] | null | null | null | python/mathematics/mat_exp.py | jtpio/algo-toolbox | d0f675889889ad52d853d948b0191bbd14c1e9cd | [
"MIT"
] | null | null | null | python/mathematics/mat_exp.py | jtpio/algo-toolbox | d0f675889889ad52d853d948b0191bbd14c1e9cd | [
"MIT"
] | null | null | null | def mat_exp(mat, p, mod):
"""
Fast Matrix Exponentiation with modulo
Parameters
----------
mat: numpy matrix
The matrix to exponentiate. It must be a matrix that supports the *
operator.
p: int
The power to raise the matrix to.
mod: int
The modulo value used to calculate elements of the matrix
Returns
-------
out: numpy matrix
Exponentiated matrix
"""
if p < 0:
mat = mat ** (-1)
p = -p
if p == 0:
# p == 0 should give the identity matrix, not mat itself
# (mat ** 0 yields the identity for a square numpy matrix)
return mat ** 0
mat2 = 1
while p > 1:
if p % 2 == 0:
mat = (mat * mat) % mod
p //= 2
else:
mat2 = (mat2 * mat) % mod
mat = (mat * mat) % mod
p = (p - 1) // 2
return (mat * mat2) % mod
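# A quick illustrative check (not part of the module): Fibonacci numbers via
# matrix powers, assuming numpy's matrix type so that * means matrix product.
#
#   >>> import numpy as np
#   >>> F = np.matrix([[1, 1], [1, 0]])
#   >>> mat_exp(F, 10, 1000)[0, 1]   # fib(10) % 1000
#   55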
| 23.058824 | 75 | 0.468112 | 100 | 784 | 3.66 | 0.41 | 0.081967 | 0.060109 | 0.065574 | 0.071038 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030973 | 0.423469 | 784 | 33 | 76 | 23.757576 | 0.778761 | 0.433673 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9733165a2c2800de7fed4fd918055591257301d4 | 3,276 | py | Python | python/01_italable_class/iter_advanced.py | sunao11/eGov_hourei | 0731cfa43ac701ca17bec178f00f68f393efe7c8 | [
"Apache-2.0"
] | 2 | 2021-05-28T10:02:10.000Z | 2021-06-10T09:02:08.000Z | python/01_italable_class/iter_advanced.py | sunao11/eGov_hourei | 0731cfa43ac701ca17bec178f00f68f393efe7c8 | [
"Apache-2.0"
] | 1 | 2020-09-20T06:12:06.000Z | 2020-09-20T06:12:06.000Z | python/01_italable_class/iter_advanced.py | sunao11/eGov_hourei | 0731cfa43ac701ca17bec178f00f68f393efe7c8 | [
"Apache-2.0"
] | 2 | 2021-06-12T08:54:49.000Z | 2022-01-06T23:28:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Unit(object):
"""
The smallest unit.
"""
def __init__(self, value):
if not isinstance(value, (float, int)):
raise TypeError(
f"@value must be integer or float value, but {value} was applied.")
self._value = value
self._enabled = True
def __bool__(self):
return self._enabled
@property
def value(self):
"""
float: value of the unit
"""
return self._value
def enable(self):
"""
Enable the unit.
"""
self._enabled = True
def disable(self):
"""
Disable the unit.
"""
self._enabled = False
class Series(object):
"""
A series of units.
"""
def __init__(self):
self._units = []
def __iter__(self):
yield from self._units
def add(self, unit):
"""
Append a unit.
Args:
unit (Unit): the smallest unit
Raises:
TypeError: unit is not an instance of Unit
"""
if not isinstance(unit, Unit):
raise TypeError("@unit must be a instance of Unit")
self._units.append(unit)
def _validate_index(self, num):
"""
Validate the index number.
Args:
num (int): index number of a unit
Raises:
TypeError: @num is not an integer
IndexError: @num is not a valid index number
"""
if not isinstance(num, int):
raise TypeError(
f"@num must be integer, but {num} was applied.")
try:
self._units[num]
except IndexError:
raise IndexError(f"@num must be under {len(self._units)}")
def enable(self, num):
"""
Enable a unit.
Args:
num (int): index of the unit to be enabled
Raises:
TypeError: @num is not an integer
IndexError: @num is not a valid index number
"""
self._validate_index(num)
self._units[num].enable()
def disable(self, num):
"""
Disable a unit.
Args:
num (int): index of the unit to be disabled
Raises:
TypeError: @num is not an integer
IndexError: @num is not a valid index number
"""
self._validate_index(num)
self._units[num].disable()
def show_enabled(series):
"""
Show the values of enabled units.
"""
if not isinstance(series, Series):
raise TypeError("@unit must be a instance of Series")
print([unit.value for unit in series if unit])
if __name__ == "__main__":
# Unit class
unit1 = Unit(value=1.0)
if unit1:
print(unit1.value)
unit1.disable()
if not unit1:
print("Disabled")
unit1.enable()
if unit1:
print("Enabled")
print(unit1.value)
# Create a series of units
series = Series()
[series.add(Unit(i)) for i in range(6)]
show_enabled(series)
# Disable two units
series.disable(4)
series.disable(5)
show_enabled(series)
# Enable one disabled unit
series.enable(4)
show_enabled(series)
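# Expected output of the three show_enabled calls above:
#   [0, 1, 2, 3, 4, 5]
#   [0, 1, 2, 3]
#   [0, 1, 2, 3, 4]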
| 22.438356 | 83 | 0.537851 | 389 | 3,276 | 4.40874 | 0.213368 | 0.036735 | 0.027988 | 0.026239 | 0.233819 | 0.233819 | 0.233819 | 0.233819 | 0.193003 | 0.193003 | 0 | 0.007623 | 0.35928 | 3,276 | 145 | 84 | 22.593103 | 0.809433 | 0.264652 | 0 | 0.238095 | 0 | 0 | 0.113936 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0 | 0.015873 | 0.253968 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97340d2d28590597d1a93c772426eaebb769394a | 2,101 | py | Python | textinator/image.py | ijks/textinator | 26c6cd3f7e4ee48e23fd66d809ebe0656d41a6b9 | [
"MIT"
] | 1 | 2015-03-08T18:20:30.000Z | 2015-03-08T18:20:30.000Z | textinator/image.py | ijks/textinator | 26c6cd3f7e4ee48e23fd66d809ebe0656d41a6b9 | [
"MIT"
] | null | null | null | textinator/image.py | ijks/textinator | 26c6cd3f7e4ee48e23fd66d809ebe0656d41a6b9 | [
"MIT"
] | null | null | null | from ansi.colour.rgb import rgb256
from ansi.colour.fx import reset
class TextImage():
def __init__(self, original, palette):
self.original = original
self.palette = palette
self.lines = self._build_lines()
self.text = self._build_text()
def _build_text(self):
result = str()
for line in self.lines:
result += line + '\n'
return result
def _build_lines(self):
result = list()
width, height = self.original.size
for y in range(height):
line = str()
for x in range(width):
value = self.original.getpixel((x, y))
char = self._value_to_char(value)
line += char
result.append(line)
return result
def _value_to_char(self, value):
if type(value) is int or len(value) != 3:
raise TypeError("Value should be a 3-tuple")
r, g, b = value
luminosity = 0.2 * r + 0.72 * g + 0.07 * b
palette_range = (0, len(self.palette))
mapped = int(_scale(luminosity, (0, 256), palette_range))
char = self.palette[mapped]
return char
class ColourImage(TextImage):
def __init__(self, *args, background=False):
self.background = background
super().__init__(*args)
def _value_to_char(self, value):
char = super()._value_to_char(value)
colour = rgb256(*value)
if self.background:
colour = colour.replace('38', '48', 1)
# Modify the ANSI escape code to use bg instead of fg
return colour + char + str(reset)
class Animation():
def __init__(self, frames, speed):
# store the frames and playback speed for later use
self.frames = frames
self.speed = speed
def _scale(value, source, destination):
"""
Linear map a value from a source to a destination range.
:param int value: original value
:param tuple source: source range
:param tuple destination: destination range
:rtype: float
"""
return (
((value - source[0]) / (source[1]-source[0]))
* (destination[1]-destination[0])
+ destination[0]
)
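# Example usage (illustrative; assumes Pillow for image loading):
#
#   from PIL import Image
#   img = Image.open("photo.png").convert("RGB")
#   art = ColourImage(img, " .:-=+*#%@")
#   print(art.text)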
| 25.011905 | 65 | 0.576392 | 258 | 2,101 | 4.53876 | 0.348837 | 0.040991 | 0.037575 | 0.034159 | 0.039283 | 0.039283 | 0 | 0 | 0 | 0 | 0 | 0.022238 | 0.315088 | 2,101 | 83 | 66 | 25.313253 | 0.791522 | 0.111852 | 0 | 0.076923 | 0 | 0 | 0.016903 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.019231 | 0.038462 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97344ad23f706ec259299413e63f1f8749dc6a20 | 14,126 | py | Python | test/test_server.py | Gandalf-/apocrypha | 0d524e9e292acb8744f31b82f0f9a21e6dece5a7 | [
"MIT"
] | 1 | 2018-06-25T01:08:52.000Z | 2018-06-25T01:08:52.000Z | test/test_server.py | Gandalf-/apocrypha | 0d524e9e292acb8744f31b82f0f9a21e6dece5a7 | [
"MIT"
] | 6 | 2018-06-20T20:13:25.000Z | 2019-01-13T00:08:08.000Z | test/test_server.py | Gandalf-/apocrypha | 0d524e9e292acb8744f31b82f0f9a21e6dece5a7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# pylint: disable=protected-access
# pylint: disable=no-self-use
# pylint: disable=missing-docstring
# pylint: disable=too-many-public-methods
import time
import threading
import unittest
import apocrypha.client
from apocrypha.exceptions import DatabaseError
from apocrypha.server import ServerDatabase, ServerHandler, Server
from test_node import random_query
PORT = 49999
client = apocrypha.client.Client(port=PORT)
def query(args, raw=False):
''' list of string -> string
'''
return client.query(args, interpret=raw)
class TestServerBase(unittest.TestCase):
database = None
server = None
server_thread = None
@classmethod
def setUpClass(cls):
'''
create an Apocrypha instance and server to handle connections
run the server in a thread so test cases may run
'''
# create the ServerDatabase instance, which inherits from Apocrypha
TestServerBase.database = ServerDatabase(
'test/test-db.json',
stateless=True)
# Create the tcp server
host, port = '0.0.0.0', PORT
TestServerBase.server = Server(
(host, port), ServerHandler,
TestServerBase.database, quiet=True)
# start the server
TestServerBase.server_thread = threading.Thread(
target=TestServerBase.server.serve_forever)
TestServerBase.server_thread.start()
TestServerBase.db = apocrypha.client.Client(port=PORT)
@classmethod
def tearDownClass(cls):
'''
shutdown the server
'''
TestServerBase.server.teardown()
TestServerBase.server.socket.close()
TestServerBase.server_thread.join(1)
class TestServer(TestServerBase):
# server tests
# caching
def test_cache_hit(self):
# write operations don't update the cache
query(['pizza', '=', 'sauce'])
self.assertNotIn(('pizza',), TestServer.database.cache)
# get operations do
query(['pizza'])
self.assertIn(('pizza',), TestServer.database.cache)
result = query(['pizza'])
self.assertEqual(result, ['sauce'])
self.assertIn(('pizza',), TestServer.database.cache)
def test_cache_deep_hit(self):
query(['a', '-d'])
query(['a', 'b', 'c', 'd', 'e', '=', 'f'])
query(['a', 'b', 'c', 'd', 'e'])
self.assertIn(
('a', 'b', 'c', 'd', 'e'),
TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate(self):
query(['pizza', '=', 'sauce'])
query(['pizza'])
query([])
self.assertIn(('pizza',), TestServer.database.cache)
self.assertIn((), TestServer.database.cache)
query(['pizza', '-d'])
self.assertNotIn(('pizza',), TestServer.database.cache)
self.assertNotIn((), TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate_parent(self):
'''
changing a child key invalidates all of its parents
'''
query(['one layer', 'two layer', '=', 'cake'])
query(['one layer', 'two layer'])
self.assertIn(('one layer', 'two layer'), TestServer.database.cache)
query(['one layer'])
self.assertIn(('one layer',), TestServer.database.cache)
# both parent and child are in cache, now change the child and make
# sure the parent is also invalidated
query(['one layer', 'two layer', '=', 'goop'])
self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
self.assertNotIn(('one layer',), TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate_child(self):
'''
changing a parent key invalidates all of its direct children
'''
query(['one layer', 'two layer', '=', 'cake'])
query(['one layer', 'two layer'])
self.assertIn(('one layer', 'two layer'), TestServer.database.cache)
query(['one layer'])
self.assertIn(('one layer',), TestServer.database.cache)
# both parent and child are in cache, now change the parent and make
# sure the child is also invalidated
query(['one layer', '-d'])
self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
self.assertNotIn(('one layer',), TestServer.database.cache)
@unittest.skip('unknown issue')
def test_cache_doesnt_effect_sibling(self):
client.delete('one layer')
client.set('one layer', 'two layer', value='cake')
client.set('one layer', 'apple layer', value='sauce')
print(TestServer.database.data)
self.assertEqual(
client.get('one layer', 'two layer'), 'cake')
self.assertEqual(
client.get('one layer', 'apple layer'), 'sauce')
self.assertEqual(
client.get('one layer'),
{'two layer': 'cake', 'apple layer': 'sauce'})
print(TestServer.database.cache)
self.assertIn(('one layer',), TestServer.database.cache)
self.assertIn(('one layer', 'two layer',), TestServer.database.cache)
self.assertIn(('one layer', 'apple layer',), TestServer.database.cache)
def test_cache_top_level_read_operators(self):
'''
make sure --keys, --edit on root are invalidated correctly
'''
pass
def test_cache_top_level_write_operators(self):
'''
writing to root clears the entire cache
'''
pass
def test_cache_write_ops_not_cached(self):
pass
def test_cache_read_ops_are_cached(self):
query(['pizza', '=', 'sauce'])
value = query(['pizza', '--edit'])
self.assertIn(('pizza', '--edit',), TestServer.database.cache)
self.assertEqual(value, ['"sauce"'])
# timing
@unittest.skip('timing not currently supported')
def test_timing(self):
result = query(['-t', 'wolf', 'legs'])
self.assertEqual(result, ['0'])
query(['wolf', 'legs', '=', '4'])
result = query(['-t', 'wolf', 'legs'])
self.assertNotEqual(result, ['0'])
# client tests - query
def test_assign(self):
query(['apple', '=', 'sauce'])
result = query(['apple'])
self.assertEqual(result, ['sauce'])
def test_strict(self):
with self.assertRaises(DatabaseError):
query(['-s', 'gadzooks'])
def test_context(self):
result = query(['-c', '@', 'red'])
self.assertEqual(result, ['sub apple = red'])
def test_query_json_dict(self):
result = query(['octopus'], raw=True)
self.assertEqual(result, {'legs': 8})
self.assertTrue(isinstance(result, dict))
def test_query_json_list(self):
result = query(['colors'], raw=True)
self.assertTrue(isinstance(result, list))
def test_query_json_string(self):
result = query(['apple'], raw=True)
self.assertTrue(isinstance(result, str))
# client tests - Client
def test_get_string(self):
self.assertEqual(
TestServer.db.get('green'), 'nice')
self.assertEqual(
TestServer.db.get('octopus', 'legs'), 8)
# get
def test_get_list(self):
self.assertEqual(
TestServer.db.get('animals'),
['wolf', 'octopus', 'bird'])
def test_get_dict(self):
self.assertEqual(
TestServer.db.get('octopus'),
{'legs': 8})
def test_get_non_existant(self):
self.assertEqual(
TestServer.db.get('yahoo', 'foobar'),
None)
def test_get_default(self):
'''
when a key doesn't exist, default=<something> determines what to
respond with
'''
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default={}),
{})
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default=[]),
[])
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default='abc'),
'abc')
def test_get_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.get('animals', 'octopus')
def test_get_cast_to_list(self):
self.assertEqual(
TestServer.db.get('green', cast=list),
['nice'])
def test_get_cast_to_str(self):
self.assertEqual(
TestServer.db.get('animals', cast=str),
"['wolf', 'octopus', 'bird']")
def test_get_cast_to_set(self):
self.assertEqual(
TestServer.db.get('animals', cast=set),
{'wolf', 'octopus', 'bird'})
def test_get_cast_to_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.get('animals', cast=dict)
# keys
def test_keys(self):
self.assertEqual(
TestServer.db.keys('octopus'), ['legs'])
def test_keys_non_existant(self):
self.assertEqual(
TestServer.db.keys('does not exist', 'foobar'), [])
def test_keys_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.keys('animals', 'octopus')
# remove
def test_remove(self):
TestServer.db.set('test list', value=['a', 'b', 'c'])
TestServer.db.remove('test list', value='a')
self.assertEqual(
TestServer.db.get('test list'),
['b', 'c'])
def test_remove_list(self):
TestServer.db.set('test list', value=['a', 'b', 'c'])
TestServer.db.remove('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
'c')
def test_remove_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.remove('octopus', value='sandwich')
def test_remove_type_error(self):
TestServer.db.set('octopus', value={1: 2, 3: 4})
with self.assertRaises(DatabaseError):
TestServer.db.remove('octopus', value='sandwich')
def test_remove_error_top_level(self):
with self.assertRaises(DatabaseError):
TestServer.db.remove(value='key that does not exist')
# append
def test_append(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value='apple')
self.assertEqual(
TestServer.db.get('test list'),
'apple')
TestServer.db.append('test list', value='blue')
self.assertEqual(
TestServer.db.get('test list'),
['apple', 'blue'])
def test_append_list(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b'])
TestServer.db.append('test list', value=['c', 'd'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b', 'c', 'd'])
def test_append_non_existant(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b'])
def test_append_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.append('octopus', value='sandwich')
def test_append_type_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.append('octopus', value={'a': 1})
# set
def test_set(self):
TestServer.db.set('test item', value='hello')
value = TestServer.db.get('test item')
self.assertEqual(value, 'hello')
def test_set_list(self):
TestServer.db.set('test list', value=['hello', 'there'])
self.assertEqual(
TestServer.db.get('test list'),
['hello', 'there'])
def test_set_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.set('hello', value=set())
# delete
def test_delete(self):
TestServer.db.set('test item', value='hello')
self.assertEqual(
TestServer.db.get('test item'),
'hello')
TestServer.db.delete('test item')
self.assertEqual(
TestServer.db.get('test item'),
None)
# pop
def test_pop_cast(self):
TestServer.db.set('item', value='hello')
result = TestServer.db.pop('item', cast=list)
self.assertEqual(
result, list('hello'))
def test_pop_bad_cast(self):
TestServer.db.set('item', value='hello')
with self.assertRaises(DatabaseError):
TestServer.db.pop('item', cast=dict)
# apply
def test_apply(self):
TestServer.db.set('list', value=['a', 'a', 'b', 'c'])
TestServer.db.apply('list', func=lambda xs: list(set(xs)))
self.assertEqual(
sorted(TestServer.db.get('list')),
sorted(['a', 'b', 'c']))
# raw query
def test_query(self):
self.assertEqual(
apocrypha.client.query(
['non', 'existant', '--keys'], port=PORT),
[])
def test_fuzz(self):
''' throw a ton of junk at the server and see if it crashes
'''
for _ in range(0, 1000):
random_query(client, debug=False)
def test_lock_stress(self):
''' make a ton of junk queries from several threads
not interested in what the queries do, just that they don't crash the
server
'''
num_requests = 500
num_workers = 10
def worker():
time.sleep(0.1)
for _ in range(0, num_requests):
random_query(client, debug=False)
threads = []
for _ in range(0, num_workers):
threads += [
threading.Thread(target=worker)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
unittest.main()
| 29.864693 | 79 | 0.580348 | 1,584 | 14,126 | 5.082071 | 0.162879 | 0.084969 | 0.046584 | 0.077143 | 0.530435 | 0.478758 | 0.391056 | 0.339503 | 0.24882 | 0.226584 | 0 | 0.003509 | 0.273751 | 14,126 | 472 | 80 | 29.927966 | 0.781168 | 0.091675 | 0 | 0.361842 | 0 | 0 | 0.121275 | 0 | 0 | 0 | 0 | 0 | 0.233553 | 1 | 0.177632 | false | 0.009868 | 0.023026 | 0 | 0.220395 | 0.006579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9734571150ae79e21863d08655f64ceb72d55ed2 | 4,556 | py | Python | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/tests/test_xconnect.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/tests/test_xconnect.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/tests/test_xconnect.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | #!/usr/bin/env python
import unittest
from unittest.mock import Mock
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Link, Interface
from genie.libs.conf.l2vpn import Xconnect
from genie.libs.conf.bgp import RouteTarget
class test_xconnect(unittest.TestCase):
def test_init(self):
testbed = Genie.testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxr')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/0/1')
intf2 = Interface(device=dev1, name='GigabitEthernet0/0/0/2')
dev2 = Device(testbed=testbed, name='PE2', os='iosxr')
intf3 = Interface(device=dev2, name='GigabitEthernet0/0/0/3')
intf4 = Interface(device=dev2, name='GigabitEthernet0/0/0/4')
link1 = Link(testbed=testbed, name='link1', interfaces=(intf1, intf3))
link2 = Link(testbed=testbed, name='link2', interfaces=(intf2, intf4))
with self.assertRaises(TypeError):
xc1 = Xconnect()
with self.assertRaises(TypeError):
xc1 = Xconnect(group_name='bg1')
xc1 = Xconnect(name='xc1', group_name='bg1')
self.assertIs(xc1.xconnect_type, Xconnect.Type.p2p)
self.assertEqual(xc1.name, 'xc1')
self.assertEqual(xc1.group_name, 'bg1')
xc1 = Xconnect(name='xc1')
self.assertEqual(xc1.name, 'xc1')
self.assertEqual(xc1.group_name, 'xc1g')
self.assertCountEqual(xc1.devices, [])
self.assertCountEqual(xc1.interfaces, [])
self.assertCountEqual(xc1.segments, [])
self.assertCountEqual(xc1.link.interfaces, [])
dev1.add_feature(xc1)
self.assertCountEqual(xc1.devices, [dev1])
self.assertCountEqual(xc1.interfaces, [])
self.assertCountEqual(xc1.segments, [])
self.assertCountEqual(xc1.link.interfaces, [])
cfgs = xc1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join([
'l2vpn',
' xconnect group xc1g',
' p2p xc1',
' exit',
' exit',
' exit',
]))
#xc1.add_interface(intf1)
intf1.l2transport.enabled = True
#self.assertCountEqual(xc1.interfaces, [intf1])
#self.assertCountEqual(xc1.devices, [dev1])
#self.assertCountEqual(xc1.segments, [intf1])
#self.assertCountEqual(xc1.link.interfaces, [intf3])
#self.assertCountEqual(xc1.device_attr[dev1].interfaces, [intf1])
#self.assertCountEqual(xc1.device_attr[dev2].interfaces, [])
#self.assertCountEqual(xc1.device_attr[dev1].segments, [intf1])
self.assertCountEqual(xc1.device_attr[dev2].segments, [])
cfgs = xc1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
if False:
self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join([
'l2vpn',
' xconnect group xc1g',
' p2p xc1',
' interface GigabitEthernet0/0/0/1',
' exit',
' exit',
' exit',
]))
dev2.add_feature(xc1)
xc1.xconnect_type = Xconnect.Type.mp2mp
xc1.autodiscovery_bgp.enabled = True
xc1.autodiscovery_bgp.signaling_protocol_bgp.enabled = True
xc1.autodiscovery_bgp.export_route_targets = [RouteTarget.ImportExport('1.1.1.1:1')]
xc1.autodiscovery_bgp.import_route_targets = [RouteTarget.ImportExport('1.1.1.1:1')]
xc1.autodiscovery_bgp.rd = '1000:1'
xc1.device_attr[dev1].vpn_id = 100
xc1.device_attr[dev2].vpn_id = 101
ce_id1 = 1001
xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.add_ce_id(ce_id1)
xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id1].add_interface(intf1)
ce_id2 = 1000
xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.add_ce_id(ce_id2)
xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id2].add_interface(intf2)
xc1.device_attr[dev1].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id1].interface_attr[intf1].remote_ce_id = ce_id2
xc1.device_attr[dev2].autodiscovery_bgp.signaling_protocol_bgp.ce_attr[ce_id2].interface_attr[intf2].remote_ce_id = ce_id1
cfgs = xc1.build_config(apply=False)
# TODO print(cfgs)
if __name__ == '__main__':
unittest.main()
| 40.318584 | 130 | 0.643986 | 532 | 4,556 | 5.343985 | 0.18797 | 0.126627 | 0.129441 | 0.081252 | 0.659163 | 0.604995 | 0.517763 | 0.412592 | 0.380584 | 0.368625 | 0 | 0.049758 | 0.228051 | 4,556 | 112 | 131 | 40.678571 | 0.758601 | 0.094162 | 0 | 0.382716 | 0 | 0 | 0.076288 | 0.026725 | 0 | 0 | 0 | 0.008929 | 0.246914 | 1 | 0.012346 | false | 0 | 0.098765 | 0 | 0.123457 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
973ab068516510d55017d4d9e5b92353475eda35 | 36,287 | py | Python | srttools/io.py | matteobachetti/srt-single-dish-tools | db02eaa1bb958945a88c36c1be847d43bf6b10b4 | [
"BSD-3-Clause"
] | 2 | 2018-01-29T18:50:25.000Z | 2018-06-18T23:41:43.000Z | srttools/io.py | matteobachetti/srt-single-dish-tools | db02eaa1bb958945a88c36c1be847d43bf6b10b4 | [
"BSD-3-Clause"
] | 95 | 2017-05-25T13:12:14.000Z | 2018-09-24T10:33:11.000Z | srttools/io.py | matteobachetti/srt-single-dish-tools | db02eaa1bb958945a88c36c1be847d43bf6b10b4 | [
"BSD-3-Clause"
] | 3 | 2017-06-13T10:10:50.000Z | 2018-06-08T12:35:57.000Z | """Input/output functions."""
import astropy.io.fits as fits
from astropy.table import Table
import numpy as np
import astropy.units as u
from astropy.coordinates import (
EarthLocation,
AltAz,
Angle,
ICRS,
GCRS,
SkyCoord,
get_sun,
)
import os
from astropy.time import Time
import warnings
from astropy import log
import copy
import re
import glob
from collections.abc import Iterable
from scipy.interpolate import interp1d
from .utils import force_move_file
try:
from sunpy.coordinates import frames, sun
DEFAULT_SUN_FRAME = frames.Helioprojective
except ImportError:
DEFAULT_SUN_FRAME = None
__all__ = [
"mkdir_p",
"detect_data_kind",
"correct_offsets",
"observing_angle",
"get_rest_angle",
"print_obs_info_fitszilla",
"read_data_fitszilla",
"read_data",
"root_name",
"get_chan_columns",
]
chan_re = re.compile(
r"^Ch([0-9]+)$"
r"|^Feed([0-9]+)_([a-zA-Z]+)$"
r"|^Feed([0-9]+)_([a-zA-Z]+)_([0-9]+)$"
)
# 'srt': EarthLocation(4865182.7660, 791922.6890, 4035137.1740,
# unit=u.m)
# EarthLocation(Angle("9:14:42.5764", u.deg),
# Angle("39:29:34.93742", u.deg),
# 600 * u.meter) # not precise enough
locations = {
"srt": EarthLocation(4865182.7660, 791922.6890, 4035137.1740, unit=u.m),
"medicina": EarthLocation(
Angle("11:38:49", u.deg), Angle("44:31:15", u.deg), 25 * u.meter
),
"greenwich": EarthLocation(lat=51.477 * u.deg, lon=0 * u.deg),
}
def interpret_chan_name(chan_name):
"""Get feed, polarization and baseband info from chan name.
Examples
--------
>>> feed, polar, baseband = interpret_chan_name('blablabal')
>>> feed # None
>>> polar # None
>>> baseband # None
>>> feed, polar, baseband = interpret_chan_name('Ch0')
>>> feed
0
>>> polar # None
>>> baseband # None
>>> feed, polar, baseband = interpret_chan_name('Feed1_LCP')
>>> feed
1
>>> polar
'LCP'
>>> baseband # None
>>> feed, polar, baseband = interpret_chan_name('Feed2_LCP_3')
>>> feed
2
>>> polar
'LCP'
>>> baseband
3
"""
matchobj = chan_re.match(chan_name)
if not matchobj:
return None, None, None
matches = [matchobj.group(i) for i in range(7)]
polar, baseband = None, None
if matches[6] is not None:
baseband = int(matchobj.group(6))
polar = matchobj.group(5)
feed = int(matchobj.group(4))
elif matches[3] is not None:
polar = matchobj.group(3)
feed = int(matchobj.group(2))
else:
feed = int(matchobj.group(1))
return feed, polar, baseband
def classify_chan_columns(chans):
"""Classify the name of channels per feed, polarization, baseband.
Examples
--------
>>> chans = ['Feed0_LCP_3', 'Feed0_RCP_3']
>>> classif = classify_chan_columns(chans)
>>> classif[0][3]['LCP']
'Feed0_LCP_3'
>>> classif[0][3]['RCP']
'Feed0_RCP_3'
>>> chans = ['Ch0']
>>> classif = classify_chan_columns(chans)
>>> classif[0][1]['N']
'Ch0'
>>> chans = ['Feed0_LCP']
>>> classif = classify_chan_columns(chans)
>>> classif[0][1]['LCP']
'Feed0_LCP'
"""
combinations = {}
for ch in chans:
feed, polar, baseband = interpret_chan_name(ch)
if baseband is None:
baseband = 1
if polar is None:
polar = "N"
if feed not in combinations:
combinations[feed] = {}
if baseband not in combinations[feed]:
combinations[feed][baseband] = {}
combinations[feed][baseband][polar] = ch
return combinations
def get_chan_columns(table):
return np.array([i for i in table.columns if chan_re.match(i)])
def get_channel_feed(ch):
if re.search("Feed?", ch):
return int(ch[4])
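# A minimal usage sketch: the feed number is read from the fifth character,
# so this assumes single-digit feed indices in the channel name.
# >>> get_channel_feed('Feed2_LCP')
# 2
# >>> get_channel_feed('Ch0')  # no 'Feed' prefix: falls through to None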
def mkdir_p(path):
"""Safe mkdir function.
Parameters
----------
path : str
Name of the directory/ies to create
Notes
-----
Found at
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _check_derotator(derot_angle):
# Reject derotator angles outside any plausible range (beyond two turns)
if np.any(np.abs(derot_angle) > 2 * 360):
return False
return True
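# A minimal sanity-check sketch: angles beyond two full turns (720) are
# flagged as implausible.
# >>> _check_derotator(np.array([100.0, 719.0]))
# True
# >>> _check_derotator(np.array([100.0, 1000.0]))
# False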
def detect_data_kind(fname):
"""Placeholder for function that recognizes data format."""
if fname.endswith(".hdf5"):
return "hdf5"
elif "fits" in fname:
return "fitszilla"
else:
warnings.warn("File {} is not in a known format".format(fname))
return None
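# A minimal usage sketch with hypothetical file names:
# >>> detect_data_kind('scan0.hdf5')
# 'hdf5'
# >>> detect_data_kind('scan0.fits')
# 'fitszilla'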
def correct_offsets(obs_angle, xoffset, yoffset):
"""Correct feed offsets for derotation angle.
All angles are in radians.
Examples
--------
>>> x = 2 ** 0.5
>>> y = 2 ** 0.5
>>> angle = np.pi / 4
>>> xoff, yoff = correct_offsets(angle, x, y)
>>> np.allclose([xoff, yoff], 2 ** 0.5)
True
"""
sep = np.sqrt(xoffset ** 2.0 + yoffset ** 2.0)
new_xoff = sep * np.cos(obs_angle)
new_yoff = sep * np.sin(obs_angle)
return new_xoff, new_yoff
def observing_angle(rest_angle, derot_angle):
"""Calculate the observing angle of the multifeed.
If values have no units, they are assumed in radians
Parameters
----------
rest_angle : float or Astropy quantity, angle
rest angle of the feeds
derot_angle : float or Astropy quantity, angle
derotator angle
Examples
--------
>>> observing_angle(0 * u.rad, 2 * np.pi * u.rad).to(u.rad).value
0.0
>>> observing_angle(0, 2 * np.pi).to(u.rad).value
0.0
"""
if not hasattr(rest_angle, "unit"):
rest_angle *= u.rad
if not hasattr(derot_angle, "unit"):
derot_angle *= u.rad
return rest_angle + (2 * np.pi * u.rad - derot_angle)
def _rest_angle_default(n_lat_feeds):
"""Default rest angles for a multifeed, in units of a circle
Assumes uniform coverage.
Examples
--------
>>> np.allclose(_rest_angle_default(5),
... np.array([1., 0.8, 0.6, 0.4, 0.2]))
True
>>> np.allclose(_rest_angle_default(6) * 360,
... np.array([360., 300., 240., 180., 120., 60.]))
True
"""
return np.arange(1, 0, -1 / n_lat_feeds)
def get_rest_angle(xoffsets, yoffsets):
"""Calculate the rest angle for multifeed.
The first feed is assumed to be at position 0; for it, the return value is 0.
Examples
--------
>>> xoffsets = [0.0, -0.0382222, -0.0191226, 0.0191226, 0.0382222,
... 0.0191226, -0.0191226]
>>> yoffsets = [0.0, 0.0, 0.0331014, 0.0331014, 0.0, -0.0331014,
... -0.0331014]
>>> np.allclose(get_rest_angle(xoffsets, yoffsets).to(u.deg).value,
... np.array([0., 180., 120., 60., 360., 300., 240.]))
True
"""
if len(xoffsets) <= 2:
return np.array([0] * len(xoffsets))
xoffsets = np.asarray(xoffsets)
yoffsets = np.asarray(yoffsets)
n_lat_feeds = len(xoffsets) - 1
rest_angle_default = _rest_angle_default(n_lat_feeds) * 2 * np.pi * u.rad
w_0 = np.where((xoffsets[1:] > 0) & (yoffsets[1:] == 0.0))[0][0]
return (
np.concatenate(([0], np.roll(rest_angle_default.to(u.rad).value, w_0)))
* u.rad
)
def infer_skydip_from_elevation(elevation, azimuth=None):
if azimuth is None:
azimuth = np.array([0, 0])
el_condition = np.max(elevation) - np.min(elevation) > np.pi / 3.0
az_condition = np.max(azimuth) - np.min(azimuth) < 0.1 / 180.0 * np.pi
return az_condition & el_condition
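# A minimal sketch (angles in radians): a wide elevation sweep at constant
# azimuth is classified as a skydip, a narrow one is not.
# >>> bool(infer_skydip_from_elevation(np.array([0.1, 1.4])))
# True
# >>> bool(infer_skydip_from_elevation(np.array([0.1, 0.2])))
# False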
def get_sun_coords_from_radec(obstimes, ra, dec, sun_frame=None):
if sun_frame is None: # pragma: no cover
sun_frame = DEFAULT_SUN_FRAME
coords = GCRS(
ra=Angle(ra),
dec=Angle(dec),
obstime=obstimes,
distance=sun.earth_distance(obstimes),
)
coords_asec = coords.transform_to(
sun_frame(obstime=obstimes, observer="earth")
)
lon = [ca.Tx.value for ca in coords_asec] * coords_asec.Tx.unit
lat = [ca.Ty.value for ca in coords_asec] * coords_asec.Ty.unit
dist = [
ca.distance.value for ca in coords_asec
] * coords_asec.distance.unit
return lon.to(u.radian), lat.to(u.radian), dist.to(u.m).value
def update_table_with_sun_coords(new_table, sun_frame=None):
lon_str, lat_str = "hpln", "hplt"
if not ("dsun" in new_table.colnames):
new_table[lon_str] = np.zeros_like(new_table["el"])
new_table[lat_str] = np.zeros_like(new_table["az"])
new_table["dsun"] = np.zeros(len(new_table["az"]))
for i in range(0, new_table["el"].shape[1]):
obstimes = Time(new_table["time"] * u.day, format="mjd", scale="utc")
lon, lat, dist = get_sun_coords_from_radec(
obstimes,
new_table["ra"][:, i],
new_table["dec"][:, i],
sun_frame=sun_frame,
)
new_table[lon_str][:, i] = lon
new_table[lat_str][:, i] = lat
if i == 0:
new_table["dsun"][:] = dist
return new_table
def get_coords_from_altaz_offset(
obstimes, el, az, xoffs, yoffs, location, inplace=False
):
""""""
# Calculate observing angle
if not inplace:
el = copy.deepcopy(el)
az = copy.deepcopy(az)
el += yoffs.to(u.rad).value
az += xoffs.to(u.rad).value / np.cos(el)
coords = AltAz(
az=Angle(az), alt=Angle(el), location=location, obstime=obstimes
)
# According to line_profiler, coords.icrs is *by far* the longest
# operation in this function, taking between 80 and 90% of the
# execution time. Need to study a way to avoid this.
coords_deg = coords.transform_to(ICRS())
ra = np.radians(coords_deg.ra)
dec = np.radians(coords_deg.dec)
return ra, dec
def is_close_to_sun(ra, dec, obstime, tolerance=3 * u.deg):
"""Test if current source is close to the Sun.
Examples
--------
>>> ra, dec = 131.13535699 * u.deg, 18.08202663 * u.deg
>>> obstime = Time("2017-08-01")
>>> is_close_to_sun(ra, dec, obstime, tolerance=3 * u.deg)
True
>>> is_close_to_sun(ra, dec + 4 * u.deg, obstime, tolerance=3 * u.deg)
False
"""
coords = SkyCoord(ra=ra, dec=dec, frame=GCRS(obstime=obstime))
sun_position = get_sun(obstime).transform_to(GCRS(obstime=obstime))
return (coords.separation(sun_position)).to(u.deg).value < tolerance.value
def update_table_with_offsets(new_table, xoffsets, yoffsets, inplace=False):
rest_angles = get_rest_angle(xoffsets, yoffsets)
if not inplace:
new_table = copy.deepcopy(new_table)
lon_str, lat_str = "ra", "dec"
if not (lon_str in new_table.colnames):
new_table[lon_str] = np.zeros_like(new_table["el"])
new_table[lat_str] = np.zeros_like(new_table["az"])
for i in range(0, new_table["el"].shape[1]):
obs_angle = observing_angle(rest_angles[i], new_table["derot_angle"])
# offsets < 0.001 arcminutes: don't correct (usually feed 0)
if (
np.abs(xoffsets[i]) < np.radians(0.001 / 60.0) * u.rad
and np.abs(yoffsets[i]) < np.radians(0.001 / 60.0) * u.rad
):
continue
xoffs, yoffs = correct_offsets(obs_angle, xoffsets[i], yoffsets[i])
obstimes = Time(new_table["time"] * u.day, format="mjd", scale="utc")
location = locations[new_table.meta["site"]]
lon, lat = get_coords_from_altaz_offset(
obstimes,
new_table["el"][:, i],
new_table["az"][:, i],
xoffs,
yoffs,
location=location,
inplace=inplace,
)
new_table[lon_str][:, i] = lon
new_table[lat_str][:, i] = lat
return new_table
def print_obs_info_fitszilla(fname):
"""Placeholder for function that prints out oberving information."""
with fits.open(fname, memmap=False) as lchdulist:
section_table_data = lchdulist["SECTION TABLE"].data
sample_rates = get_value_with_units(section_table_data, "sampleRate")
print("Sample rates:", sample_rates)
rf_input_data = lchdulist["RF INPUTS"].data
print("Feeds :", get_value_with_units(rf_input_data, "feed"))
print(
"IFs :", get_value_with_units(rf_input_data, "ifChain")
)
print(
"Polarizations :",
get_value_with_units(rf_input_data, "polarization"),
)
print(
"Frequencies :",
get_value_with_units(rf_input_data, "frequency"),
)
print(
"Bandwidths :",
get_value_with_units(rf_input_data, "bandWidth"),
)
def _chan_name(f, p, c=None):
if c is not None:
return "Feed{}_{}_{}".format(f, p, c)
else:
return "Feed{}_{}".format(f, p)
def read_data_fitszilla(fname):
with fits.open(fname, memmap=False) as lchdulist:
retval = _read_data_fitszilla(lchdulist)
return retval
def get_value_with_units(fitsext, keyword, default=""):
if isinstance(fitsext, fits.BinTableHDU):
fitsext = fitsext.data
unitstr = fitsext.columns[keyword].unit
if unitstr is None:
if default not in ["", None]:
unit = u.Unit(default)
else:
unit = 1
else:
unit = u.Unit(unitstr)
value = fitsext[keyword]
is_string = isinstance(value, str)
is_iterable = isinstance(value, Iterable)
if is_string or (is_iterable and isinstance(value[0], str)):
return value
else:
return value * unit
def adjust_temperature_size_rough(temp, comparison_array):
"""Adjust the size of the temperature array.
Examples
--------
>>> temp = [1, 2, 3, 4]
>>> adjust_temperature_size_rough(temp, [5, 6, 7])
array([1, 2, 3])
>>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4])
array([1, 2, 3, 4, 4])
>>> adjust_temperature_size_rough(temp, [5, 6])
array([2, 3])
>>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4, 6])
array([1, 1, 2, 3, 4, 4])
"""
import copy
temp = np.asarray(temp)
comparison_array = np.asarray(comparison_array)
temp_save = copy.deepcopy(temp)
sizediff = temp.size - comparison_array.size
if sizediff > 0:
temp = temp[sizediff // 2 : sizediff // 2 + comparison_array.size]
elif sizediff < 0:
# make it positive
sizediff = -sizediff
temp = np.zeros_like(comparison_array)
temp[sizediff // 2 : sizediff // 2 + temp_save.size] = temp_save
temp[: sizediff // 2] = temp_save[0]
temp[sizediff // 2 + temp_save.size - 1 :] = temp_save[-1]
return temp
def adjust_temperature_size(temp, comparison_array):
"""Adjust the size of the temperature array.
Examples
--------
>>> temp = [1, 2, 3, 4]
>>> np.allclose(adjust_temperature_size(temp, [5, 6]), [1.0, 4.0])
True
>>> temp = [1, 2, 3, 4]
>>> np.allclose(adjust_temperature_size(temp, [5, 6, 4, 5]), temp)
True
"""
temp = np.asarray(temp)
comparison_array = np.asarray(comparison_array)
Ntemp = temp.shape[0]
Ndata = comparison_array.shape[0]
if Ntemp == Ndata:
return temp
temp_func = interp1d(np.linspace(0, 1, Ntemp), temp)
newtemp = temp_func(np.linspace(0, 1, Ndata))
return newtemp
# from memory_profiler import profile
# @profile
def _read_data_fitszilla(lchdulist):
"""Open a fitszilla FITS file and read all relevant information."""
is_new_fitszilla = np.any(["coord" in i.name.lower() for i in lchdulist])
# ----------- Extract generic observation information ------------------
headerdict = dict(lchdulist[0].header.items())
source = lchdulist[0].header["SOURCE"]
site = lchdulist[0].header["ANTENNA"].lower()
receiver = lchdulist[0].header["RECEIVER CODE"]
ra = lchdulist[0].header["RIGHTASCENSION"] * u.rad
dec = lchdulist[0].header["DECLINATION"] * u.rad
ra_offset = dec_offset = az_offset = el_offset = 0 * u.rad
if "RightAscension Offset" in lchdulist[0].header:
ra_offset = lchdulist[0].header["RightAscension Offset"] * u.rad
if "Declination Offset" in lchdulist[0].header:
dec_offset = lchdulist[0].header["Declination Offset"] * u.rad
if "Azimuth Offset" in lchdulist[0].header:
az_offset = lchdulist[0].header["Azimuth Offset"] * u.rad
if "Elevation Offset" in lchdulist[0].header:
el_offset = lchdulist[0].header["Elevation Offset"] * u.rad
# ----------- Read the list of channel ids ------------------
section_table_data = lchdulist["SECTION TABLE"].data
chan_ids = get_value_with_units(section_table_data, "id")
nbin_per_chan = get_value_with_units(section_table_data, "bins")
sample_rate = get_value_with_units(section_table_data, "sampleRate")
try:
bw_section = get_value_with_units(section_table_data, "bandWidth")
fr_section = get_value_with_units(section_table_data, "frequency")
except KeyError:
bw_section = None
fr_section = None
integration_time = lchdulist["SECTION TABLE"].header["Integration"] * u.ms
if len(list(set(nbin_per_chan))) > 1:
raise ValueError(
"Only datasets with the same nbin per channel are "
"supported at the moment"
)
nbin_per_chan = list(set(nbin_per_chan))[0]
types = get_value_with_units(section_table_data, "type")
if "stokes" in types:
is_polarized = True
else:
is_polarized = False
# Check. If backend is not specified, use Total Power
try:
backend = lchdulist[0].header["BACKEND NAME"]
except Exception:
if "stokes" in types:
if nbin_per_chan == 2048:
backend = "XARCOS"
else:
backend = "SARDARA"
elif "spectra" in types:
backend = "SARDARA"
else:
backend = "TP"
# ----------- Read the list of RF inputs, feeds, polarization, etc. --
rf_input_data = lchdulist["RF INPUTS"].data
feeds = get_value_with_units(rf_input_data, "feed")
IFs = get_value_with_units(rf_input_data, "ifChain")
polarizations = get_value_with_units(rf_input_data, "polarization")
sections = get_value_with_units(rf_input_data, "section")
frequencies_rf = get_value_with_units(rf_input_data, "frequency")
bandwidths_rf = get_value_with_units(rf_input_data, "bandWidth")
local_oscillator = get_value_with_units(rf_input_data, "localOscillator")
try:
cal_mark_temp = get_value_with_units(rf_input_data, "calibrationMark")
except KeyError:
# Old, stupid typo
cal_mark_temp = get_value_with_units(rf_input_data, "calibratonMark")
if bw_section is not None:
bandwidths_section = [bw_section[i] for i in sections]
frequencies_section = [fr_section[i] for i in sections]
frequencies_section = [
f + l for (f, l) in zip(frequencies_section, local_oscillator)
]
if backend == "TP" or bw_section is None:
frequencies, bandwidths = frequencies_rf, bandwidths_rf
else:
frequencies, bandwidths = frequencies_section, bandwidths_section
combinations = list(zip(frequencies, bandwidths))
combination_idx = np.arange(len(combinations))
# Solve stupid problem with old CCB data
if receiver.lower() == "ccb":
feeds[:] = 0
if len(set(combinations)) > 1:
chan_names = [
_chan_name(f, p, c)
for f, p, c in zip(feeds, polarizations, combination_idx)
]
else:
chan_names = [_chan_name(f, p) for f, p in zip(feeds, polarizations)]
# ----- Read the offsets of different feeds (nonzero only if multifeed)--
feed_input_data = lchdulist["FEED TABLE"].data
# Add management of historical offsets.
# Note that we need to add the units by hand in this case.
xoffsets = get_value_with_units(feed_input_data, "xOffset", default="rad")
yoffsets = get_value_with_units(feed_input_data, "yOffset", default="rad")
relpowers = get_value_with_units(feed_input_data, "relativePower")
# -------------- Read data!-----------------------------------------
datahdu = lchdulist["DATA TABLE"]
# N.B.: there is an increase in memory usage here. This is just because
# data are being read from the file at this point, not before.
data_table_data = Table(datahdu.data)
tempdata = Table(lchdulist["ANTENNA TEMP TABLE"].data)
for col in data_table_data.colnames:
if col == col.lower():
continue
data_table_data.rename_column(col, col.lower())
for col in tempdata.colnames:
if col == col.lower():
continue
tempdata.rename_column(col, col.lower())
is_old_spectrum = "SPECTRUM" in list(datahdu.header.values())
if is_old_spectrum:
data_table_data.rename_column("spectrum", "ch0")
sections = np.array([0, 0])
unsupported_temperature = False
if len(tempdata[tempdata.colnames[0]].shape) == 2:
try:
tempdata_new = Table()
for i, (feed, ifnum) in enumerate(zip(feeds, IFs)):
tempdata_new[f"ch{i}"] = tempdata[f"ch{feed}"][:, ifnum]
tempdata = tempdata_new
except Exception: # pragma: no cover
warnings.warn("Temperature format not supported", UserWarning)
unsupported_temperature = True
pass
existing_columns = [
chn for chn in data_table_data.colnames if chn.startswith("ch")
]
if existing_columns == []:
raise ValueError("Invalid data")
is_spectrum = nbin_per_chan > 1
is_single_channel = len(set(combinations)) == 1
good = np.ones(len(feeds), dtype=bool)
for i, s in enumerate(sections):
section_name = "ch{}".format(s)
if section_name not in existing_columns:
good[i] = False
allfeeds = feeds
feeds = allfeeds[good]
IFs = IFs[good]
polarizations = polarizations[good]
sections = sections[good]
if is_spectrum:
nchan = len(chan_ids)
sample_channel = existing_columns[0]
_, nbins = data_table_data[sample_channel].shape
# Development version of SARDARA -- will it remain the same?
if nbin_per_chan == nbins:
IFs = np.zeros_like(IFs)
if nbin_per_chan * nchan * 2 == nbins and not is_polarized:
warnings.warn(
"Data appear to contain polarization information "
"but are classified as simple, not stokes, in the "
"Section table."
)
is_polarized = True
if (
nbin_per_chan != nbins
and nbin_per_chan * nchan != nbins
and nbin_per_chan * nchan * 2 != nbins
and not is_polarized
):
raise ValueError(
"Something wrong with channel subdivision: "
"{} bins/channel, {} channels, "
"{} total bins".format(nbin_per_chan, nchan, nbins)
)
for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
c = s
if is_single_channel:
c = None
section_name = "ch{}".format(s)
ch = _chan_name(f, p, c)
start, end = ic * nbin_per_chan, (ic + 1) * nbin_per_chan
data_table_data[ch] = data_table_data[section_name][:, start:end]
if is_polarized:
# for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
for s in list(set(sections)):
f = feeds[sections == s][0]
c = s
if is_single_channel:
c = None
section_name = "ch{}".format(s)
qname, uname = _chan_name(f, "Q", c), _chan_name(f, "U", c)
qstart, qend = 2 * nbin_per_chan, 3 * nbin_per_chan
ustart, uend = 3 * nbin_per_chan, 4 * nbin_per_chan
data_table_data[qname] = data_table_data[section_name][
:, qstart:qend
]
data_table_data[uname] = data_table_data[section_name][
:, ustart:uend
]
chan_names += [qname, uname]
for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
section_name = "ch{}".format(s)
if section_name in data_table_data.colnames:
data_table_data.remove_column(section_name)
else:
for ic, ch in enumerate(chan_names):
data_table_data[ch] = data_table_data["ch{}".format(chan_ids[ic])]
# ----------- Read temperature data, if possible ----------------
for ic, ch in enumerate(chan_names):
data_table_data[ch + "-Temp"] = 0.0
if unsupported_temperature:
continue
if len(chan_ids) <= ic:
continue
ch_string = f"ch{chan_ids[ic]}"
if ch_string not in tempdata.colnames:
continue
td = np.asarray(tempdata[ch_string])
data_table_data[ch + "-Temp"] = adjust_temperature_size(
td, data_table_data[ch + "-Temp"]
)
info_to_retrieve = [
"time",
"derot_angle",
"weather",
"par_angle",
"flag_track",
"flag_cal",
] + [ch + "-Temp" for ch in chan_names]
new_table = Table()
new_table.meta.update(headerdict)
new_table.meta["SOURCE"] = source
new_table.meta["site"] = site
new_table.meta["backend"] = backend
new_table.meta["receiver"] = receiver
new_table.meta["RA"] = ra
new_table.meta["Dec"] = dec
new_table.meta["channels"] = nbin_per_chan
new_table.meta["VLSR"] = new_table.meta["VLSR"] * u.Unit("km/s")
for i, off in zip(
"ra,dec,el,az".split(","),
[ra_offset, dec_offset, el_offset, az_offset],
):
new_table.meta[i + "_offset"] = off
for info in info_to_retrieve:
new_table[info] = data_table_data[info]
if not _check_derotator(new_table["derot_angle"]):
log.debug("Derotator angle looks weird. Setting to 0")
new_table["derot_angle"][:] = 0
# Duplicate raj and decj columns (in order to be corrected later)
Nfeeds = np.max(allfeeds) + 1
new_table["ra"] = np.tile(
data_table_data["raj2000"], (Nfeeds, 1)
).transpose()
new_table["dec"] = np.tile(
data_table_data["decj2000"], (Nfeeds, 1)
).transpose()
new_table["el"] = np.tile(data_table_data["el"], (Nfeeds, 1)).transpose()
new_table["az"] = np.tile(data_table_data["az"], (Nfeeds, 1)).transpose()
new_table.meta["is_skydip"] = infer_skydip_from_elevation(
data_table_data["el"], data_table_data["az"]
)
for info in ["ra", "dec", "az", "el", "derot_angle"]:
new_table[info].unit = u.radian
if not is_new_fitszilla:
update_table_with_offsets(new_table, xoffsets, yoffsets, inplace=True)
else:
for i in range(len(xoffsets)):
try:
ext = lchdulist["Coord{}".format(i)]
extdata = ext.data
ra, dec = extdata["raj2000"], extdata["decj2000"]
el, az = extdata["el"], extdata["az"]
except KeyError:
ra, dec = new_table["ra"][:, 0], new_table["dec"][:, 0]
el, az = new_table["el"][:, 0], new_table["az"][:, 0]
new_table["ra"][:, i] = ra
new_table["dec"][:, i] = dec
new_table["el"][:, i] = el
new_table["az"][:, i] = az
# Don't know if better heuristics are needed
obstime = Time(
np.mean(new_table["time"]) * u.day, format="mjd", scale="utc"
)
if is_close_to_sun(
new_table.meta["RA"],
new_table.meta["Dec"],
obstime,
tolerance=3 * u.deg,
):
if DEFAULT_SUN_FRAME is None:
raise ValueError("You need Sunpy to process Sun observations.")
update_table_with_sun_coords(
new_table, sun_frame=DEFAULT_SUN_FRAME,
)
lchdulist.close()
# So ugly. But it works
filtered_frequencies = [f for (f, g) in zip(frequencies, good) if g]
for i, fr in enumerate(filtered_frequencies):
f = feeds[i]
s = sections[i]
ic = IFs[i]
p = polarizations[i]
b = bandwidths[i]
lo = local_oscillator[i]
cal = cal_mark_temp[i]
c = s
if is_single_channel:
c = None
chan_name = _chan_name(f, p, c)
if bandwidths[ic] < 0:
frequencies[ic] -= bandwidths[ic]
bandwidths[ic] *= -1
# reverse each spectrum row so frequencies run in increasing order
for row in range(data_table_data[chan_name].shape[0]):
    data_table_data[chan_name][row, :] = \
        data_table_data[chan_name][row, ::-1]
new_table[chan_name] = (
data_table_data[chan_name] * relpowers[feeds[ic]]
)
new_table[chan_name + "-filt"] = np.ones(
len(data_table_data[chan_name]), dtype=bool
)
data_table_data.remove_column(chan_name)
newmeta = {
"polarization": polarizations[ic],
"feed": int(f),
"IF": int(ic),
"frequency": fr.to("MHz"),
"bandwidth": b.to("MHz"),
"sample_rate": sample_rate[s],
"sample_time": (1 / (sample_rate[s].to(u.Hz))).to("s"),
"local_oscillator": lo.to("MHz"),
"cal_mark_temp": cal.to("K"),
"integration_time": integration_time.to("s"),
"xoffset": xoffsets[f].to(u.rad),
"yoffset": yoffsets[f].to(u.rad),
"relpower": float(relpowers[f]),
}
new_table[chan_name].meta.update(headerdict)
new_table[chan_name].meta.update(new_table.meta)
new_table[chan_name].meta.update(newmeta)
if is_polarized:
for s in list(set(sections)):
feed = feeds[sections == s][0]
c = s
if is_single_channel:
c = None
for stokes_par in "QU":
chan_name = _chan_name(feed, stokes_par, c)
try:
new_table[chan_name] = data_table_data[chan_name]
except KeyError:
continue
sample_time = 1 / (sample_rate[s].to(u.Hz))
newmeta = {
"polarization": stokes_par,
"feed": int(feed),
"IF": -1,
# There are two IFs for each section
"frequency": frequencies[2 * s].to("MHz"),
"bandwidth": bandwidths[2 * s].to("MHz"),
"sample_rate": sample_rate[s],
"sample_time": sample_time.to("s"),
"local_oscillator": local_oscillator[2 * s].to("MHz"),
"cal_mark_temp": cal_mark_temp[2 * s].to("K"),
"integration_time": integration_time.to("s"),
"xoffset": xoffsets[feed].to(u.rad),
"yoffset": yoffsets[feed].to(u.rad),
"relpower": 1.0,
}
new_table[chan_name].meta.update(headerdict)
new_table[chan_name].meta.update(new_table.meta)
new_table[chan_name].meta.update(newmeta)
new_table[chan_name + "-filt"] = np.ones(
len(data_table_data[chan_name]), dtype=bool
)
data_table_data.remove_column(chan_name)
return new_table
def read_data(fname):
"""Read the data, whatever the format, and return them."""
kind = detect_data_kind(fname)
if kind == "fitszilla":
return read_data_fitszilla(fname)
elif kind == "hdf5":
return Table.read(fname)
else:
return None
def root_name(fname):
"""Return the file name without extension."""
fn, ext = os.path.splitext(fname)
if "fits" in ext and not ext.endswith("fits"):
fn += ext.replace("fits", "").replace(".", "")
return fn
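# A minimal usage sketch with hypothetical file names: plain extensions are
# dropped, while fits-like suffixes keep their trailing characters.
# >>> root_name('scan0.hdf5')
# 'scan0'
# >>> root_name('scan0.fits7')
# 'scan07'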
def _try_type(value, dtype):
"""
Examples
--------
>>> _try_type("1", int)
1
>>> _try_type(1.0, int)
1
>>> _try_type("ab", float)
'ab'
"""
try:
return dtype(value)
except ValueError:
return value
def label_from_chan_name(ch):
"""
Examples
--------
>>> label_from_chan_name('Feed0_LCP_1')
'LL'
>>> label_from_chan_name('Feed0_Q_2')
'LR'
>>> label_from_chan_name('Feed3_RCP_1')
'RR'
>>> label_from_chan_name('Feed2_U_3')
'RL'
"""
_, polar, _ = interpret_chan_name(ch)
if polar.startswith("L"):
return "LL"
elif polar.startswith("R"):
return "RR"
elif polar.startswith("Q"):
return "LR"
elif polar.startswith("U"):
return "RL"
else:
raise ValueError("Unrecognized polarization")
def bulk_change(file, path, value):
"""Bulk change keyword or column values in FITS file.
Parameters
----------
file : str
Input file
path : str
it has to be formatted as EXT,data,COLUMN or EXT,header,KEY depending
on what is being changed (a data column or a header key resp.). Ex.
"1,data,TIME" to change the values of column TIME in ext. n. 1
value : any acceptable type
Value to be filled in
"""
with fits.open(file, memmap=False) as hdul:
ext, attr, key = path.split(",")
ext = _try_type(ext, int)
data = getattr(hdul[ext], attr)
data[key] = value
setattr(hdul[ext], attr, data)
hdul.writeto("tmp.fits", overwrite=True)
force_move_file("tmp.fits", file)
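# A minimal usage sketch (hypothetical file and column): set every value of
# the flag_cal column in the data of extension 1 to 1, in place.
# >>> bulk_change('scan0.fits', '1,data,flag_cal', 1)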
def main_bulk_change(args=None):
"""Preprocess the data."""
import argparse
description = (
"Change all values of a given column or header keyword in "
"fits files"
)
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"files",
nargs="*",
help="Single files to preprocess",
default=None,
type=str,
)
parser.add_argument(
"-k",
"--key",
type=str,
default=None,
help="Path to key or data column. E.g. "
'"EXT,header,KEY" to change key KEY in the header'
"in extension EXT; EXT,data,COL to change column"
"COL in the data of extension EXT",
)
parser.add_argument(
"-v", "--value", default=None, type=str, help="Value to be written"
)
parser.add_argument(
"--apply-cal-mark",
action="store_true",
default=False,
help='Short for -k "DATA TABLE,data,flag_cal" -v 1',
)
parser.add_argument(
"--recursive",
action="store_true",
default=False,
help="Look for file in up to two subdirectories",
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Plot stuff and be verbose",
)
args = parser.parse_args(args)
if args.apply_cal_mark:
args.key = "DATA TABLE,data,flag_cal"
args.value = 1
if args.key is None:
raise ValueError(
"What should I do? Please specify either key and "
"value, or apply-cal-mark"
)
fnames = []
for fname in args.files:
if args.recursive:
if not fname == os.path.basename(fname):
raise ValueError(
"Options recursive requires a file name, not "
"a full path: {}".format(fname)
)
fs = glob.glob(os.path.join("**", fname), recursive=True)
fnames.extend(fs)
else:
fnames.append(fname)
for fname in fnames:
print("Updating", fname, "...", end="")
bulk_change(fname, args.key, args.value)
print(fname, " Done.")
| 30.988044 | 79 | 0.584534 | 4,700 | 36,287 | 4.325319 | 0.140213 | 0.031876 | 0.023661 | 0.020906 | 0.313641 | 0.248364 | 0.205322 | 0.181071 | 0.142309 | 0.103891 | 0 | 0.022886 | 0.283518 | 36,287 | 1,170 | 80 | 31.01453 | 0.759029 | 0.171329 | 0 | 0.211286 | 0 | 0.001312 | 0.097449 | 0.002971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03937 | false | 0.002625 | 0.026247 | 0.001312 | 0.11811 | 0.013123 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
973cf34dab4ee39dd0d909e99162d9d5963e92b5 | 3,946 | py | Python | src/main/evaluation/evals/evaluate_paper_latency_cdf.py | gaow0007/hire-cluster-simulator | 2c3d466df4f694ca97392b01e4f2339845007645 | [
"Apache-2.0"
] | 10 | 2020-12-14T21:36:03.000Z | 2021-11-24T10:59:14.000Z | src/main/evaluation/evals/evaluate_paper_latency_cdf.py | gaow0007/hire-cluster-simulator | 2c3d466df4f694ca97392b01e4f2339845007645 | [
"Apache-2.0"
] | 2 | 2021-01-06T09:57:27.000Z | 2021-12-16T06:41:18.000Z | src/main/evaluation/evals/evaluate_paper_latency_cdf.py | gaow0007/hire-cluster-simulator | 2c3d466df4f694ca97392b01e4f2339845007645 | [
"Apache-2.0"
] | 2 | 2021-11-24T08:34:32.000Z | 2021-12-02T03:15:52.000Z | #!/usr/bin/env python3
# coding=utf-8
import gc
import logging
import os
import sys
sys.path.append(
# Figure out the path of the evaluation directory
os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
)
from evals.base_evaluate import experiments_directory, get_evaluation_params, finish_evaluation, EvalParams, \
foreach_sweep_constellation
from helpers.data_table_mappers import eval_after_24h_mapper, remove_withdrawn_jobs_from_tenant_statistics, \
eval_tasks_not_started_mapper, \
summarize_cdf
from helpers.experiment_loader import load_multiple_experiments
from helpers.visualization_helpers import set_paper_aesthetics, plot_rel_multiple
logging.basicConfig(level=logging.INFO, format='%(levelname)8s: %(message)s -- in %(filename)s:%(lineno)d')
def str_to_safe_filename(s):
return "".join([c for c in s if c.isalpha() or c.isdigit() or c == ' ']).rstrip()
label_x_inp = "Jobs with INC, µ [%]"
if __name__ == '__main__':
# Load the experiment name and output directory from program args
params: EvalParams = get_evaluation_params(
tmp_directory_suffix="paper-latency-cdf",
name="paper-latency-cdf"
)
if 'mu-inp' not in params.filter:
logging.warning("strictly enforce filter of mu-inp=1.0")
params.filter['mu-inp'] = ["1.0"]
# We are interested in the cell and scheduler dumps
types = [
"taskgroups"
]
mappers = {
"taskgroups": [eval_after_24h_mapper,
remove_withdrawn_jobs_from_tenant_statistics,
eval_tasks_not_started_mapper(params),
summarize_cdf(params, 'PlacementLatency'),
],
}
# Which columns should be kept for each type
columns = {
"taskgroups": ["scheduler",
"JobStatus",
"TaskGroupID",
"TaskGroupType",
"ValidForJob",
"Duration",
"run",
"SubmissionTime",
"TotalTasks",
"TasksStarted",
"PlacementLatency",
"PlacementLatency_cdf_bucket",
"PlacementLatency_cdf_value",
"PlacementLatency_cdf_cvalue",
params.sweep_column]
}
# Load the evaluation experiment
(data, config) = load_multiple_experiments(
directory=experiments_directory,
names=[params.experiment_name] + params.additional_experiments,
types=types,
run_data_mappers=mappers,
run_data_columns=columns,
filter_data=params.filter,
ignore_cols=params.ignore_cols,
done_required=params.load_running,
keep_sweep_columns={params.sweep_column} if params.drop_unused_sweep_cols else {"*"},
sweep_column=params.sweep_column)
# set_paper_aesthetics(font_scale=2, line_width=2.5)
fig_width = 7.5
fig_height = 6.5
linewidth = 2.2
show_legend = True
gc.collect()
set_paper_aesthetics()
def evaluate(dataframes: dict, i):
df = dataframes["taskgroups"]
for mu in df["mu-inp"].unique():
plot_rel_multiple(
x="PlacementLatency_cdf_bucket",
y="PlacementLatency_cdf_cvalue",
x_label=f'Placement latency ' + r' [ms; $log_{10}$]',
y_label=r"Probability ($> x$)",
hue="scheduler",
data=df[df["mu-inp"] == mu],
name=f"latency-{mu}-ccdf",
params=params,
reverse_hue_order=True
)
foreach_sweep_constellation(
data=data,
sweeps=config['sweeps'],
config=config,
params=params,
sink=evaluate
)
finish_evaluation(
params=params
)
| 31.31746 | 110 | 0.601368 | 427 | 3,946 | 5.285714 | 0.437939 | 0.011077 | 0.023926 | 0.01595 | 0.072663 | 0.072663 | 0.072663 | 0.072663 | 0.072663 | 0.072663 | 0 | 0.00798 | 0.301318 | 3,946 | 125 | 111 | 31.568 | 0.810664 | 0.081348 | 0 | 0.021277 | 0 | 0 | 0.159524 | 0.043406 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.085106 | 0.010638 | 0.117021 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
973ea41ad870c8e44dccf1cf26f6c10aba40fa1a | 6,814 | py | Python | nerds_osmnx/overpass_query.py | anerv/NERDS_osmnx | c243317b3ac518269c63d2fcec0e51f9a7af8327 | [
"MIT"
] | 2 | 2022-03-02T15:56:49.000Z | 2022-03-09T16:15:37.000Z | nerds_osmnx/overpass_query.py | anerv/NERDS_osmnx | c243317b3ac518269c63d2fcec0e51f9a7af8327 | [
"MIT"
] | null | null | null | nerds_osmnx/overpass_query.py | anerv/NERDS_osmnx | c243317b3ac518269c63d2fcec0e51f9a7af8327 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functions in order to make complex overpass query through osmnx. Use
get_filter_graphs to get graph for specific filter, then create the union
with compose_graph. Is a proxy of the lack of "or" syntax between keys,
only able to make "or" inside keys or "and" between keys.
"""
import osmnx as ox
import networkx as nx
import shapely
from tqdm import tqdm
def get_filtered_graphs(polygon, filter_dict):
"""
Get every filtered graph coming from a polygon, based on value given as
a dictionary.
Parameters
----------
polygon : shapely.MultiPolygon
Polygon where we will search for with osmnx.graph_from_polygon.
filter_dict : dict
Dictionary composed of dictionaries with the following structure :
{'name1' : {'custom_filter' : filter1},
'name2' : {'network_type'} : type2}
The dictionary for every key has either 'custom_filter' or
'network_type' as a key, and the following filter or type as a value.
Returns
-------
graph_dict : dict
Dictionary of every graph.
"""
graph_dict = dict()
for filter_info in filter_dict.values(): # add useful tag way into
if 'custom_filter' in filter_info: # the osmnx settings to get them
_add_tag_way(filter_info['custom_filter'])
for filter_id, filter_info in tqdm(filter_dict.items(), #tqdm show progress
desc='Networks', leave=True):
for i in range(0, 10): # retry
try: # either custom_filter or network_type, get graph with osmnx
if 'custom_filter' in filter_info:
graph_dict[filter_id] = ox.graph_from_polygon(
polygon,
custom_filter=(filter_info['custom_filter']),
retain_all=True,
simplify=False
)
elif 'network_type' in filter_info:
graph_dict[filter_id] = ox.graph_from_polygon(
polygon,
network_type = (filter_info['network_type']),
retain_all=True,
simplify=False
)
except ValueError: # for empty graph because of the filter used
graph_dict[filter_id] = nx.empty_graph(
create_using=nx.MultiDiGraph
)
break
except:
continue
break
return graph_dict
def _add_tag_way(filter_string):
"""Add way's tag if not present in the osmnx settings"""
split_string = filter_string.split(']') # mark end of one attribute
for i in range(len(split_string) - 1): # avoid last void value
split_string[i] = split_string[i].split('"')
split_string[i] = split_string[i][1] # avoid first [ string
for tag_name in split_string[:-1]: # avoid last void string
if tag_name not in ox.settings.useful_tags_way:
ox.settings.useful_tags_way += [tag_name]
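# A minimal usage sketch: tag keys parsed from the filter string are
# registered globally so osmnx keeps them as edge attributes. Note this
# mutates ox.settings in place.
# >>> _add_tag_way('["cycleway"~"track"]["bicycle"~"designated"]')
# >>> 'cycleway' in ox.settings.useful_tags_way
# True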
def compose_graph(graph_dict, composed_name, name_list):
"""
Compose multiple graph together coming from a dictionary under a new
entry of the dictionary.
Parameters
----------
graph_dict : dict
Dictionary of every graph.
composed_name : str
Name of the new key of graph_dict with the composed graph.
name_list : list of str
Keys of the graph we want to merge.
Raises
------
ValueError
If the number of key is inferior to 2, can't merge one graph.
Returns
-------
graph_dict : dict
Dictionary of every graph.
"""
if len(name_list) < 2: # can't compose
raise ValueError('Not enough subgraph to compose, need at least 2')
elif len(name_list) == 2: # if exactly 2 use networkx.compose
graph_dict[composed_name] = nx.compose(graph_dict[name_list[0]],
graph_dict[name_list[1]])
else: # more than 2 use networkx.compose_all
subgraph_list = []
for name in name_list:
subgraph_list.append(graph_dict[name])
graph_dict[composed_name] = nx.compose_all(subgraph_list)
return graph_dict
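# A minimal sketch of compose_graph on two toy MultiDiGraphs:
# >>> gd = {'a': nx.MultiDiGraph([(1, 2)]), 'b': nx.MultiDiGraph([(2, 3)])}
# >>> gd = compose_graph(gd, 'ab', ['a', 'b'])
# >>> sorted(gd['ab'].edges())
# [(1, 2), (2, 3)]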
if __name__ == "__main__":
cop = ox.geocode_to_gdf("Copenhagen Municipality")
fre = ox.geocode_to_gdf("Frederiksberg Municipality")
location = shapely.ops.unary_union([cop['geometry'][0],
fre['geometry'][0]])
osmnxparameters = {'car30': {'custom_filter':
'["maxspeed"~"^30$|^20$|^15$|^10$|^5$|^20 mph|^15 mph|^10 mph|^5 mph"]'},
'carall': {'network_type':
'drive'},
'bike_cyclewaytrack': {'custom_filter':
'["cycleway"~"track"]'},
'bike_highwaycycleway': {'custom_filter':
'["highway"~"cycleway"]'},
'bike_designatedpath': {'custom_filter':
'["highway"~"path"]["bicycle"~"designated"]'},
'bike_cyclewayrighttrack': {'custom_filter':
'["cycleway:right"~"track"]'},
'bike_cyclewaylefttrack': {'custom_filter':
'["cycleway:left"~"track"]'},
'bike_cyclestreet': {'custom_filter':
'["cyclestreet"]'},
'bike_bicycleroad': {'custom_filter':
'["bicycle_road"]'},
'bike_livingstreet': {'custom_filter':
'["highway"~"living_street"]'}
}
Gs = get_filtered_graphs(location, osmnxparameters)
Gs = compose_graph(Gs, 'biketrack', ['bike_cyclewaylefttrack',
'bike_cyclewaytrack',
'bike_highwaycycleway',
'bike_bicycleroad',
'bike_cyclewayrighttrack',
'bike_designatedpath',
'bike_cyclestreet'])
Gs = compose_graph(Gs, 'bikeable', ['biketrack',
'car30',
'bike_livingstreet'])
Gs = compose_graph(Gs, 'biketrackcarall', ['biketrack',
'carall'])
#nx.write_graphml(Gs['biketrack'], 'copenhagen_biketrack.graphml')
nx.write_gpickle(Gs['biketrack'], 'copenhagen_biketrack.pickle')
| 41.803681 | 106 | 0.533607 | 709 | 6,814 | 4.916784 | 0.297602 | 0.05852 | 0.020654 | 0.019793 | 0.167814 | 0.139702 | 0.065118 | 0.055077 | 0.030981 | 0.030981 | 0 | 0.009539 | 0.36924 | 6,814 | 162 | 107 | 42.061728 | 0.801536 | 0.269299 | 0 | 0.145833 | 0 | 0.010417 | 0.211185 | 0.062396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.041667 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
973fb9aea59fde514b3cd0e87df8930e02f5134e | 340 | py | Python | platzigram/posts/urls.py | dirias/Curso-de-Django | e02878e2419c334c3a84e242cb9422b693f92227 | [
"MIT"
] | null | null | null | platzigram/posts/urls.py | dirias/Curso-de-Django | e02878e2419c334c3a84e242cb9422b693f92227 | [
"MIT"
] | null | null | null | platzigram/posts/urls.py | dirias/Curso-de-Django | e02878e2419c334c3a84e242cb9422b693f92227 | [
"MIT"
] | null | null | null | """Posts URLs."""
from django.urls import path
# Views
from posts import views
urlpatterns = [
path(route='', view=views.PostsFeedView.as_view(), name='feed'),
path(route='posts/new/', view=views.PostCreateView.as_view(), name='create'),
path(route='posts/<int:pk>/', view=views.PostDetailView.as_view(), name='detail'),
]
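# Reversal sketch (illustrative assumption: these patterns are mounted at the
# project root with no URL namespace):
#
#     from django.urls import reverse
#     reverse('detail', kwargs={'pk': 42})  # -> '/posts/42/'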
| 24.285714 | 85 | 0.676471 | 45 | 340 | 5.044444 | 0.488889 | 0.118943 | 0.132159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138235 | 340 | 13 | 86 | 26.153846 | 0.774744 | 0.047059 | 0 | 0 | 0 | 0 | 0.122642 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97420ee00a60c2531a89c2990102430b05c0989b | 1,168 | py | Python | src/pybel/parser/modifiers/variant.py | tehw0lf/pybel | 6f67f8cce15052cc3c42ef87374e3b9ee45e6519 | [
"Apache-2.0"
] | null | null | null | src/pybel/parser/modifiers/variant.py | tehw0lf/pybel | 6f67f8cce15052cc3c42ef87374e3b9ee45e6519 | [
"Apache-2.0"
] | null | null | null | src/pybel/parser/modifiers/variant.py | tehw0lf/pybel | 6f67f8cce15052cc3c42ef87374e3b9ee45e6519 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
HGVS Variants
~~~~~~~~~~~~~
For example, the node :code:`p(HGNC:GSK3B, var(p.Gly123Arg))` is represented with the following:
.. code::
{
FUNCTION: PROTEIN,
NAMESPACE: 'HGNC',
NAME: 'GSK3B',
VARIANTS: [
{
KIND: HGVS,
IDENTIFIER: 'p.Gly123Arg'
}
]
}
.. seealso::
- BEL 2.0 specification on `variants <http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#_variant_var>`_
- HVGS `conventions <http://www.hgvs.org/mutnomen/recs.html>`_
- PyBEL module :py:class:`pybel.parser.modifiers.get_hgvs_language`
"""
from pyparsing import Word, alphanums
from ..utils import nest, one_of_tags, quote
from ...constants import HGVS, IDENTIFIER, KIND
__all__ = [
'get_hgvs_language',
]
variant_tags = one_of_tags(tags=['var', 'variant'], canonical_tag=HGVS, name=KIND)
variant_characters = Word(alphanums + '._*=?>')
def get_hgvs_language():
"""
:rtype: pyparsing.ParseElement
"""
hgvs = (variant_characters | quote)(IDENTIFIER)
language = variant_tags + nest(hgvs)
return language
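# Illustrative usage (assumption, not part of the original module):
#
#     language = get_hgvs_language()
#     result = language.parseString('var("p.Gly123Arg")')
#     # expected: result[KIND] == HGVS and result[IDENTIFIER] == 'p.Gly123Arg'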
| 22.901961 | 132 | 0.626712 | 134 | 1,168 | 5.253731 | 0.522388 | 0.008523 | 0.06392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016538 | 0.223459 | 1,168 | 50 | 133 | 23.36 | 0.759647 | 0.59161 | 0 | 0 | 0 | 0 | 0.072848 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9744fdec5f804af7a4c49433b6de9937927e1e26 | 1,044 | py | Python | NumPy_Training/img_histogram_2D.py | jpenrici/Computer_Graphics | 5ba268e9e75de0d7ad733a503400e52b66edc78b | [
"MIT"
] | null | null | null | NumPy_Training/img_histogram_2D.py | jpenrici/Computer_Graphics | 5ba268e9e75de0d7ad733a503400e52b66edc78b | [
"MIT"
] | null | null | null | NumPy_Training/img_histogram_2D.py | jpenrici/Computer_Graphics | 5ba268e9e75de0d7ad733a503400e52b66edc78b | [
"MIT"
] | null | null | null | # -*- Mode: Python3; coding: utf-8; indent-tabs-mpythoode: nil; tab-width: 4 -*-
import os
import numpy as np
from matplotlib import pyplot as plt, colors as colors
PATH = "../Images/"
RED = 0
GREEN = 1
BLUE = 2
def view(data, X, Y, title="2D histogram"):
c = ["RED", "GREEN", "BLUE"]
dataX = data[:, :, X].flatten()
dataY = data[:, :, Y].flatten()
bins = np.arange(0, 256)
# plot
plt.hist2d(dataX, dataY, bins, norm=colors.LogNorm())
plt.title(title)
plt.xlabel(c[X])
plt.ylabel(c[Y])
plt.xlim([0, 255])
plt.ylim([0, 255])
plt.colorbar()
plt.show()
def test(filename):
img_np = PATH + filename + ".npy"
print("Data: ", img_np)
if not os.path.exists(img_np):
print("File not found!")
return
data = np.load(img_np)
h, w, c = data.shape
if c > 3:
data = data[:, :, :3]
view(data, RED, GREEN)
view(data, RED, BLUE)
view(data, GREEN, BLUE)
if __name__ == '__main__':
# ndarray (image)
test("folha_croton")
| 18.642857 | 80 | 0.564176 | 152 | 1,044 | 3.789474 | 0.513158 | 0.055556 | 0.024306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028497 | 0.260536 | 1,044 | 55 | 81 | 18.981818 | 0.717617 | 0.095785 | 0 | 0 | 0 | 0 | 0.084402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.085714 | 0 | 0.171429 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9745bee63f21db8a2f15dbf9443e575995c9a601 | 4,931 | py | Python | custom_components/weatherflow/config_flow.py | kocour/hass-weatherflow | 4c6d08e7e6468aa008e656b3763d55b6ee2fbb88 | [
"MIT"
] | 30 | 2021-11-29T19:09:09.000Z | 2022-03-15T01:59:36.000Z | custom_components/weatherflow/config_flow.py | kocour/hass-weatherflow | 4c6d08e7e6468aa008e656b3763d55b6ee2fbb88 | [
"MIT"
] | 31 | 2021-11-18T09:42:27.000Z | 2022-03-28T06:21:30.000Z | custom_components/weatherflow/config_flow.py | kocour/hass-weatherflow | 4c6d08e7e6468aa008e656b3763d55b6ee2fbb88 | [
"MIT"
] | 6 | 2021-12-05T22:00:00.000Z | 2022-02-18T13:31:54.000Z | """Config Flow to configure WeatherFlow Integration."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_TOKEN, CONF_ID
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from pyweatherflowrest import (
BadRequest,
Invalid,
NotAuthorized,
WeatherFlowApiClient,
WrongStationID,
)
from pyweatherflowrest.data import StationDescription
from .const import (
CONF_FORECAST_HOURS,
CONF_INTERVAL_FORECAST,
CONF_INTERVAL_OBSERVATION,
CONF_STATION_ID,
DEFAULT_FORECAST_HOURS,
DEFAULT_FORECAST_INTERVAL,
DEFAULT_OBSERVATION_INTERVAL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class WeatherFlowFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a WeatherFlow config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
if user_input is None:
return await self._show_setup_form(user_input)
errors = {}
session = async_create_clientsession(self.hass)
weatherflow = WeatherFlowApiClient(
user_input[CONF_STATION_ID], user_input[CONF_API_TOKEN], session=session
)
try:
await weatherflow.initialize()
station_data: StationDescription = weatherflow.station_data
except WrongStationID as err:
_LOGGER.debug(err)
errors["base"] = "wrong_station_id"
return await self._show_setup_form(errors)
except Invalid as err:
_LOGGER.debug(err)
errors["base"] = "invalid_data"
return await self._show_setup_form(errors)
except NotAuthorized as err:
_LOGGER.debug(err)
errors["base"] = "wrong_token"
return await self._show_setup_form(errors)
except BadRequest as err:
_LOGGER.debug(err)
errors["base"] = "bad_request"
return await self._show_setup_form(errors)
unique_id = str(station_data.key)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=station_data.name,
data={
CONF_ID: station_data.name,
CONF_STATION_ID: user_input[CONF_STATION_ID],
CONF_API_TOKEN: user_input[CONF_API_TOKEN],
},
options={
CONF_FORECAST_HOURS: DEFAULT_FORECAST_HOURS,
CONF_INTERVAL_OBSERVATION: DEFAULT_OBSERVATION_INTERVAL,
CONF_INTERVAL_FORECAST: DEFAULT_FORECAST_INTERVAL,
},
)
async def _show_setup_form(self, errors=None):
"""Show the setup form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_STATION_ID): int,
vol.Required(CONF_API_TOKEN): str,
}
),
errors=errors or {},
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_INTERVAL_OBSERVATION,
default=self.config_entry.options.get(
CONF_INTERVAL_OBSERVATION, DEFAULT_OBSERVATION_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Range(min=1, max=30)),
vol.Optional(
CONF_FORECAST_HOURS,
default=self.config_entry.options.get(
CONF_FORECAST_HOURS, DEFAULT_FORECAST_HOURS
),
): vol.All(vol.Coerce(int), vol.Range(min=24, max=240)),
vol.Optional(
CONF_INTERVAL_FORECAST,
default=self.config_entry.options.get(
CONF_INTERVAL_FORECAST, DEFAULT_FORECAST_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Range(min=15, max=120)),
}
),
)
| 32.873333 | 84 | 0.601095 | 508 | 4,931 | 5.511811 | 0.228346 | 0.032143 | 0.027857 | 0.033929 | 0.381429 | 0.348929 | 0.201786 | 0.135357 | 0.026429 | 0 | 0 | 0.004187 | 0.321841 | 4,931 | 149 | 85 | 33.09396 | 0.833134 | 0.033056 | 0 | 0.247863 | 0 | 0 | 0.015959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017094 | false | 0 | 0.08547 | 0 | 0.213675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9749b44259b93b94209924f8e12c6a9a0ca07292 | 4,436 | py | Python | Model/manifest.py | llmay98/WSI-online-analysis-framework | 1f2ee084b7ed87d22f9fbf041a6101c600104bfc | [
"Apache-2.0"
] | 2 | 2020-01-07T04:22:04.000Z | 2021-05-06T11:10:15.000Z | Model/manifest.py | llmay98/WSI-online-analysis-framework | 1f2ee084b7ed87d22f9fbf041a6101c600104bfc | [
"Apache-2.0"
] | 4 | 2021-06-02T13:19:46.000Z | 2022-03-12T00:11:11.000Z | Model/manifest.py | llmay98/WSI-online-analysis-framework | 1f2ee084b7ed87d22f9fbf041a6101c600104bfc | [
"Apache-2.0"
] | 1 | 2021-06-03T04:48:00.000Z | 2021-06-03T04:48:00.000Z | import mysql.connector
import configparser
import os
import uuid
class Manifest:
def __init__(self):
config_file = os.getcwd() + '/config.ini'
self.config = configparser.ConfigParser()
self.config.read(config_file, encoding='utf-8')
db = self.db_connect()
db.close()
def db_connect(self):
return mysql.connector.connect(
host=self.config['db']['host'],
user=self.config['db']['user'],
passwd=self.config['db']['passwd'],
database=self.config['db']['database']
)
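# Expected config.ini layout (illustrative assumption; all names and values
# below are hypothetical):
#
#     [db]
#     host = localhost
#     user = manifest_user
#     passwd = secret
#     database = wsi_manifest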
def insert(self, slide_uuid=None, svs_file=None, smaller_image=None, background_mask=None):
db = self.db_connect()
cursor = db.cursor()
if slide_uuid is None:
slide_uuid = str(uuid.uuid4())
cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file = %s)", (svs_file,))
if len(cursor.fetchall()) == 0:
sql = "INSERT INTO MANIFEST( UUID, SVS_file, Smaller_image, Background_mask) VALUES (%s, %s, %s, %s)"
val = (slide_uuid, svs_file, smaller_image, background_mask)
cursor.execute(sql, val)
cursor.close()
db.commit()
db.close()
def get_projects(self):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("SELECT * FROM MANIFEST")
result = cursor.fetchall()
db.close()
return result
def get_project_by_id(self, slide_id):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("SELECT * FROM MANIFEST WHERE (ID = %s)", (slide_id,))
result = cursor.fetchall()[0]
cursor.close()
db.close()
return result
def get_project_by_uuid(self, slide_uuid):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("SELECT * FROM MANIFEST WHERE (UUID = %s)", (slide_uuid,))
result = cursor.fetchall()[0]
cursor.close()
db.close()
return result
def get_project_by_svs_file(self, svs_file):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file = %s)", (svs_file,))
result = cursor.fetchall()[0]
cursor.close()
db.close()
return result
def get_project_by_similar_svs_file(self, svs_file):
db = self.db_connect()
cursor = db.cursor()
svs_file = '%' + svs_file + '%'
cursor.execute("SELECT * FROM MANIFEST WHERE (SVS_file like %s)", (svs_file,))
result = cursor.fetchall()
cursor.close()
db.close()
return result
def update_svs_file_by_id(self, slide_id, svs_file):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("UPDATE MANIFEST SET SVS_file= %s WHERE (ID = %s)", (svs_file, slide_id))
cursor.close()
db.commit()
db.close()
def update_smaller_image_by_id(self, slide_id, smaller_image):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("UPDATE MANIFEST SET Smaller_image = %s WHERE (ID = %s)", (smaller_image, slide_id))
cursor.close()
db.commit()
db.close()
def update_background_mask_by_id(self, slide_id, background_mask):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("UPDATE MANIFEST SET Background_mask = %s WHERE (ID = %s)", (background_mask, slide_id))
cursor.close()
db.commit()
db.close()
def delete_project_by_id(self, slide_id):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("DELETE FROM MANIFEST WHERE (ID = %s)", (slide_id,))
cursor.close()
db.commit()
db.close()
def delete_all_projects(self):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("TRUNCATE TABLE MANIFEST")
cursor.close()
db.commit()
db.close()
def continue_id(self):
db = self.db_connect()
cursor = db.cursor()
cursor.execute("SELECT * FROM MANIFEST")
result = cursor.fetchall()
slide_id = 0
for wsi in result:
slide_id = slide_id + 1
if wsi[0] != slide_id:
cursor.execute("UPDATE MANIFEST SET ID = %s WHERE (ID = %s)", (slide_id, wsi[0]))
cursor.close()
db.commit()
db.close()
| 32.144928 | 113 | 0.578449 | 548 | 4,436 | 4.494526 | 0.131387 | 0.053999 | 0.042225 | 0.079172 | 0.660983 | 0.624442 | 0.602517 | 0.511571 | 0.483963 | 0.483963 | 0 | 0.003182 | 0.291479 | 4,436 | 137 | 114 | 32.379562 | 0.780465 | 0 | 0 | 0.596639 | 0 | 0.008403 | 0.148332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.008403 | 0.033613 | 0.008403 | 0.210084 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
974ba423ea4fb5779929e4b07f704b1d42077fa3 | 678 | py | Python | giocomo_lab_to_nwb/conversion_tools/nwb_gui.py | jordanhickman/giocomo-lab-to-nwb | 1d0eb0efc2f3b6d881405b08d7bc0232e0cd79d5 | [
"BSD-3-Clause"
] | null | null | null | giocomo_lab_to_nwb/conversion_tools/nwb_gui.py | jordanhickman/giocomo-lab-to-nwb | 1d0eb0efc2f3b6d881405b08d7bc0232e0cd79d5 | [
"BSD-3-Clause"
] | 4 | 2019-07-30T00:57:41.000Z | 2019-11-03T20:14:03.000Z | giocomo_lab_to_nwb/conversion_tools/nwb_gui.py | ben-dichter-consulting/giocomo-lab-to-nwb | a096cdea15291f77793ca334de1eaaa762685c58 | [
"BSD-3-Clause"
] | 2 | 2020-06-12T20:44:11.000Z | 2020-08-06T02:02:12.000Z | # Opens the NWB conversion GUI
# authors: Luiz Tauffer and Ben Dichter
# written for Giocomo Lab
# ------------------------------------------------------------------------------
from nwbn_conversion_tools.gui.nwbn_conversion_gui import nwbn_conversion_gui
metafile = 'metafile.yml'
conversion_module = 'conversion_module.py'
source_paths = {}
source_paths['spikeglx data'] = {'type': 'file', 'path': ''}
source_paths['processed data'] = {'type': 'file', 'path': ''}
# Other options
kwargs = {'spikeglx': True, 'processed': False}
nwbn_conversion_gui(
metafile=metafile,
conversion_module=conversion_module,
source_paths=source_paths,
kwargs_fields=kwargs,
)
| 29.478261 | 80 | 0.653392 | 74 | 678 | 5.743243 | 0.5 | 0.129412 | 0.12 | 0.117647 | 0.155294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120944 | 678 | 22 | 81 | 30.818182 | 0.713087 | 0.269912 | 0 | 0 | 0 | 0 | 0.204499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
974c1455e15872002bcd7d434f9a4adca9b3db79 | 6,276 | py | Python | tests/cosrlibtests/document/html/test_base.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 141 | 2016-02-17T14:27:57.000Z | 2021-12-27T02:56:48.000Z | tests/cosrlibtests/document/html/test_base.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 69 | 2016-02-20T02:06:59.000Z | 2017-01-29T22:23:46.000Z | tests/cosrlibtests/document/html/test_base.py | commonsearch/cosr-back | 28ca0c1b938dde52bf4f59a835c98dd5ab22cad6 | [
"Apache-2.0"
] | 38 | 2016-02-25T04:40:07.000Z | 2020-06-11T07:22:44.000Z | from cosrlib.document.html import HTMLDocument
def test_get_title():
assert HTMLDocument(
"""<html><head><title>Test title</title></head><body>x</body></html>"""
).parse().get_title() == "Test title"
assert HTMLDocument(
"""<html><title>Test title</title>XX</html>"""
).parse().get_title() == "Test title"
assert HTMLDocument(
"""<html><head><title>Test title</title></head><body><title>x</title></body></html>"""
).parse().get_title() == "Test title"
def test_get_url_words():
doc = HTMLDocument("", url="http://www.nytimes.com/2011/10/06/arts/music/maceo-parker.html?print=true#hash").parse()
assert doc.get_url_words() == [
"nytimes", "com", "2011", "10", "06", "arts", "music", "maceo", "parker", "html"
]
doc = HTMLDocument("", url="https://en.wikipedia.org/wiki/Nine_Inch_Nails").parse()
assert doc.get_url_words() == [
"en", "wikipedia", "org", "wiki", "nine", "inch", "nails"
]
def test_get_domain_paid_words():
doc = HTMLDocument("", url="http://www.bbc.co.uk/2011/10/06/arts/music/maceo-parker.html?print=true")
assert doc.get_domain_paid_words() == ["bbc"]
def test_get_url():
# When none is given, we take the URL
html = """<html><head></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.get_url().url == "http://example.com/page.html"
# But when a tag is present, it has precedence
html = """<html><head><link rel="canonical" href="http://example.com/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.get_url().url == "http://example.com/page2.html"
# Including with strange caps
html = """<htmL><heaD><linK reL="CANonical" hreF="http://example.com/Page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.get_url().url == "http://example.com/Page2.html"
def test_get_canonical_url():
html = """<html><head></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.parse_canonical_url() is None
html = """<html><head><link rel="canonical" href="" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.parse_canonical_url() is None
html = """<html><head><link rel="canonical" href="http://example.com/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.parse_canonical_url().url == "http://example.com/page2.html"
html = """<html><head><linK reL="caNonical" hreF="http://example.com/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.parse_canonical_url().url == "http://example.com/page2.html"
# Cross domain blocked for now
html = """<html><head><linK reL="caNonical" hreF="http://example2.com/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
assert page.parse_canonical_url() is None
# Relative URLs
html = """<html><head><linK reL="caNonical" hreF="/dir2/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
assert page.parse_canonical_url().url == "http://example.com/dir2/page2.html"
html = """<html><head><linK reL="caNonical" hreF="dir2/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
assert page.parse_canonical_url().url == "http://example.com/dir/dir2/page2.html"
html = """<html><head><linK reL="caNonical" hreF="//example.com/dir2/page2.html" /></head><body>x</body></html>"""
page = HTMLDocument(html, url="http://example.com/dir/page.html").parse()
assert page.parse_canonical_url().url == "http://example.com/dir2/page2.html"
def test_hidden_text():
html = """<html><head></head><body>
<script> hello(); </script>
<style> style { good } </style>
<!-- comment -->
text
<p>p</p>
<div style='display: none;'>hidden by display</div>
<div hidden>hidden by html5 attribute</div>
<div aria-hidden="true">hidden by aria</div>
<div aria-hidden="false">not_aria</div>
<div style='visibility: hidden;'>hidden by visibility</div>
</body></html>"""
page = HTMLDocument(html).parse()
assert page.get_all_words() == set(["text", "p", "not_aria"])
def test_get_hyperlinks():
html = """<html><head></head><body>
before
<a href="http://example.com/page1">link text</a>
after
<a href="/page2">relative2</a>
<a href="page3?q=1#d">relative3</a>
<a href="http://other.example.com/page4">absolute4</a>
<a href="//other.example.com/page5?q=1#d">absolute5</a>
<a href="https://other.example.com/page6?q=1#d">absolute6</a>
<a href="javascript:func()">js1</a>
</body></html>"""
page = HTMLDocument(html, url="http://example.com/page.html").parse()
links = page.get_external_hyperlinks()
assert len(links) == 3
assert links[0]["href"].url == "http://other.example.com/page4"
assert links[0]["text"] == "absolute4"
assert links[1]["href"].url == "http://other.example.com/page5?q=1#d"
assert links[1]["text"] == "absolute5"
assert links[2]["href"].url == "https://other.example.com/page6?q=1#d"
assert links[2]["text"] == "absolute6"
# This doesn't return URLs, it returns strings (they are paths)
links = page.get_internal_hyperlinks()
assert len(links) == 3
assert links[0]["path"] == "/page1"
assert links[0]["text"] == "link text"
assert links[1]["path"] == "/page2"
assert links[1]["text"] == "relative2"
assert links[2]["path"] == "page3?q=1#d"
assert links[2]["text"] == "relative3"
# All links in absolute
links = page.get_hyperlinks()
assert len(links) == 6
assert links[2]["href"].url == "http://example.com/page3?q=1#d"
| 40.490323 | 120 | 0.6203 | 872 | 6,276 | 4.397936 | 0.159404 | 0.08605 | 0.094915 | 0.09309 | 0.709518 | 0.663103 | 0.622686 | 0.576532 | 0.533246 | 0.515254 | 0 | 0.017773 | 0.157266 | 6,276 | 154 | 121 | 40.753247 | 0.707317 | 0.037444 | 0 | 0.362745 | 0 | 0.127451 | 0.518525 | 0.117153 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.068627 | false | 0 | 0.009804 | 0 | 0.078431 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
974d884bdfa3addd83d2c9e41abdc6a7827099cf | 2,975 | py | Python | server/workers/tests/test_helpers.py | chreman/Headstart | 5d8b956faac4389c649f3072b5ac55aaa01644c6 | [
"MIT"
] | 111 | 2016-12-10T17:27:46.000Z | 2022-03-29T02:57:19.000Z | server/workers/tests/test_helpers.py | chreman/Headstart | 5d8b956faac4389c649f3072b5ac55aaa01644c6 | [
"MIT"
] | 338 | 2016-12-04T17:43:28.000Z | 2022-03-04T15:50:33.000Z | server/workers/tests/test_helpers.py | chreman/Headstart | 5d8b956faac4389c649f3072b5ac55aaa01644c6 | [
"MIT"
] | 32 | 2016-12-19T12:48:00.000Z | 2022-02-12T17:47:47.000Z | import os
import json
import uuid
import pathlib
import redis
import pandas as pd
from nltk.corpus import stopwords
from tqdm import tqdm
from .conftest import RANDOM
from ..api.src.apis.utils import get_key
# connect via nginx to APIs and submit tests
def get_stopwords(lang):
try:
loc = pathlib.Path(__path__).absolute().parent.parent.parent
except NameError:
loc = pathlib.Path.cwd().absolute().parent.parent
assert lang in ["english", "german"]
resourcedir = os.path.join(loc, "server", "preprocessing", "resources")
stops = set(stopwords.words('english'))
with open(os.path.join(resourcedir, "%s.stop" % lang), "r") as infile:
add_stops = set(infile.read().splitlines())
return stops.union(add_stops)
def get_cases(folder):
try:
loc = pathlib.Path(__path__).parent
except NameError:
loc = pathlib.Path.cwd()
testdatadir = os.path.join(loc, "tests", folder)
casefiles = [f for f in os.listdir(testdatadir) if f.startswith("testcase")]
casefiles.sort()
cases = []
for casefile in casefiles:
with open(os.path.join(testdatadir, casefile)) as infile:
testcase_ = json.load(infile)
casename, _ = os.path.splitext(casefile)
cases.append({"caseid": casename, "casedata": testcase_})
return cases
def retrieve_results(casedata):
k = str(uuid.uuid4())
casedata["params"]["raw"] = True
service = casedata["params"]["service"]
d = {"id": k, "params": casedata["params"],
"endpoint": "search"}
redis_store.rpush(service, json.dumps(d))
result = get_key(redis_store, k)
return result
def get_dataprocessing_result(testcase_):
k = str(uuid.uuid4())
params = testcase_["params"]
input_data = testcase_["input_data"]
res = {}
res["id"] = k
res["params"] = params
res["input_data"] = input_data
redis_store.rpush("input_data", json.dumps(res).encode('utf8'))
result = get_key(redis_store, k)
return pd.DataFrame.from_records(json.loads(result))
def data_generation(KNOWNCASES, RANDOMCASES):
CASENAMES = []
CASEDATA = {}
print("collecting known test cases")
for c in tqdm(KNOWNCASES):
CASENAMES.append(c["caseid"])
CASEDATA[c["caseid"]] = c["casedata"]
if RANDOM:
print("collecting random test cases")
for c in tqdm(RANDOMCASES):
CASENAMES.append(c["caseid"])
CASEDATA[c["caseid"]] = {
"input_data": retrieve_results(c["casedata"])["input_data"],
"params": c["casedata"]["params"]}
return CASENAMES, CASEDATA
KNOWNCASES = get_cases("knowncases")
RANDOMCASES = get_cases("randomcases")
#TRIPLE = get_cases("triple")
CASENAMES, CASEDATA = data_generation(KNOWNCASES, RANDOMCASES)
CASENAMES.sort()
RESULTS = {}
print("collecting dataprocessing results")
for c in tqdm(CASEDATA):
RESULTS[c] = get_dataprocessing_result(CASEDATA[c])
| 30.670103 | 84 | 0.655798 | 363 | 2,975 | 5.253444 | 0.319559 | 0.033036 | 0.029366 | 0.015732 | 0.216046 | 0.128998 | 0.109072 | 0 | 0 | 0 | 0 | 0.001272 | 0.207059 | 2,975 | 96 | 85 | 30.989583 | 0.807122 | 0.023529 | 0 | 0.126582 | 0 | 0 | 0.127498 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 1 | 0.063291 | false | 0 | 0.126582 | 0 | 0.253165 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
974e0906f60952202f09db80d093bc5842202e9c | 982 | py | Python | tests/test_loader.py | vkubyshko/cloud-custodian | e5e3a0f8b5c85adcbec212d780b453047fb6f4d1 | [
"Apache-2.0"
] | 2,415 | 2018-12-04T00:37:58.000Z | 2022-03-31T12:28:56.000Z | tests/test_loader.py | vkubyshko/cloud-custodian | e5e3a0f8b5c85adcbec212d780b453047fb6f4d1 | [
"Apache-2.0"
] | 3,272 | 2018-12-03T23:58:17.000Z | 2022-03-31T21:15:32.000Z | tests/test_loader.py | vkubyshko/cloud-custodian | e5e3a0f8b5c85adcbec212d780b453047fb6f4d1 | [
"Apache-2.0"
] | 773 | 2018-12-06T09:43:23.000Z | 2022-03-30T20:44:43.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from os import path
import tempfile
from textwrap import dedent
from c7n import loader
from .common import BaseTest
class TestSourceLocator(BaseTest):
def test_yaml_file(self):
with tempfile.TemporaryDirectory() as tmpdirname:
filename = path.join(tmpdirname, "testfile.yaml")
with open(filename, "w") as f:
f.write(dedent("""\
policies:
- name: foo
resource: s3
# One where name isn't the first element.
- resource: ec2
name: bar
"""))
locator = loader.SourceLocator(filename)
self.assertEqual(locator.find("foo"), "testfile.yaml:2")
self.assertEqual(locator.find("bar"), "testfile.yaml:7")
self.assertEqual(locator.find("non-existent"), "")
| 31.677419 | 68 | 0.5611 | 100 | 982 | 5.49 | 0.6 | 0.065574 | 0.120219 | 0.142077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010802 | 0.340122 | 982 | 30 | 69 | 32.733333 | 0.83642 | 0.075356 | 0 | 0 | 0 | 0 | 0.355801 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.045455 | false | 0 | 0.227273 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
974eb9da087e6259717b8a906569a4f8425cdd9a | 2,635 | py | Python | Sensor_Hat/src/mod_thread.py | carlodones/thread_SenseHat_2 | 0824a718542b9a42159073190e7498288dc57c3a | [
"Apache-2.0"
] | null | null | null | Sensor_Hat/src/mod_thread.py | carlodones/thread_SenseHat_2 | 0824a718542b9a42159073190e7498288dc57c3a | [
"Apache-2.0"
] | null | null | null | Sensor_Hat/src/mod_thread.py | carlodones/thread_SenseHat_2 | 0824a718542b9a42159073190e7498288dc57c3a | [
"Apache-2.0"
] | 1 | 2018-07-13T08:44:50.000Z | 2018-07-13T08:44:50.000Z | import threading
import time
import mod_measure_list
import mod_sense_hat
# Class for starting acquisition threads
class ThreadManager(threading.Thread):
def __init__(self, channel, delay, source, measure_list):
threading.Thread.__init__(self)
self.channel = channel # acquisition channel
self.delay = delay # acquisition period (seconds, used by time.sleep)
self.source = source # acquisition source/mode
self.measure_list = measure_list # reference to the measure list
self.exit_flag = False # flag used to terminate the thread
# Thread loop for reading the sensors
def acquisition_thread(self):
while not self.exit_flag:
# Take the timestamp
ts = time.time()
# Append the reading to the measure list
self.measure_list.add_details(self.channel, self.source.read_channel(self.channel), ts)
time.sleep(self.delay)
def stop_thread(self):
self.exit_flag = True
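# Usage sketch (illustrative, not part of the original module). Since run()
# is not overridden, the acquisition loop has to be started explicitly:
#
#     t = ThreadManager(channel=1, delay=0.5, source=sensor, measure_list=ml)
#     threading.Thread(target=t.acquisition_thread).start()
#     ...
#     t.stop_thread()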
# # Thread for processing the measures
# def parse_measures(self, exit_flag, measure_list):
# while self.counter:
# # If the button was pressed, exit and show
# # the green check mark
# if (self.exit_flag == 1):
# counter = 0
# # Compute the averages for the measured quantities
# for ch in channels:
# meas = self.measure_list.avg_by_channel(ch)
# # Print the average value
# print("TS:<" + str(meas.timestamp) + ">; NUM:<" + str(meas.count)+ ">; AVG:<" + str(meas.value)+ ">")
# # Update the channel code and append the average to the measure list
# meas.channel = meas.channel + 10
# self.measure_list.add_measure(meas)
# # For temperature, color the display according to the measured average
# if (meas.channel == 11):
# self.show_temperature(meas.value)
# # Build the JSON payload
# main_dic = {}
# main_dic[mkc.key_timestamp] = time.time()
# main_dic[mkc.key_qos] = "good"
# main_dic[mkc.key_values] = self.measure_list.json_dictionary()
# self.measure_list.clear_list()
# print("")
# print("************************")
# print(str(json.dumps(main_dic,
# indent=4, sort_keys=True,
# separators=(',', ': '), ensure_ascii=False)))
# time.sleep(self.delay)
# counter -= 1
| 35.133333 | 119 | 0.554459 | 294 | 2,635 | 4.809524 | 0.408163 | 0.077793 | 0.063649 | 0.027581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004608 | 0.341176 | 2,635 | 74 | 120 | 35.608108 | 0.809908 | 0.62315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.210526 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
975623b461dce10bee7524337b62f5692bc20bae | 770 | py | Python | 136. Single Number(XOR-ed).py | viewv/leetcode | b31e643846bb38978746342e3e3a94991178565a | [
"MIT"
] | 2 | 2018-02-26T09:12:19.000Z | 2019-06-07T13:38:10.000Z | 136. Single Number(XOR-ed).py | viewv/leetcode | b31e643846bb38978746342e3e3a94991178565a | [
"MIT"
] | 1 | 2018-12-24T07:03:34.000Z | 2018-12-24T07:03:34.000Z | 136. Single Number(XOR-ed).py | viewv/leetcode | b31e643846bb38978746342e3e3a94991178565a | [
"MIT"
] | 2 | 2018-12-24T07:01:03.000Z | 2019-06-07T13:38:07.000Z | # copy from Leetcode, really a good way!
class Solution:
def singleNumber(self, nums):
# using XOR method
single_num = 0
for n in nums:
single_num ^= n
return single_num
'''
if two number is different like 1010 and 1010
1010 xor 1010 = 0 so when a group of number
is contered one number for many time use xor
can make them to 0, but if the 1010 face 0
like this problem single_num=0 meet a single num
it will be a plus, 0000 xor 1010 is 1010 so finally
the single_num will remember the single number
really good way to solve this problem
also you need to know a^b^c^d... can can be
(a^b)^(c^d)... so you don't need to worry about
the problem of sequence, really good, great!
'''
| 32.083333 | 55 | 0.658442 | 136 | 770 | 3.691176 | 0.522059 | 0.10757 | 0.039841 | 0.015936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067273 | 0.285714 | 770 | 23 | 56 | 33.478261 | 0.845455 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
975aceff3bfddebb1d1b1672bcd24556fb99e1af | 630 | py | Python | galaxy_selenium/__init__.py | jmchilton/galaxy-selenium | fa1c4d0d71001399efc54ae432aea04947c47df1 | [
"CC-BY-3.0"
] | null | null | null | galaxy_selenium/__init__.py | jmchilton/galaxy-selenium | fa1c4d0d71001399efc54ae432aea04947c47df1 | [
"CC-BY-3.0"
] | null | null | null | galaxy_selenium/__init__.py | jmchilton/galaxy-selenium | fa1c4d0d71001399efc54ae432aea04947c47df1 | [
"CC-BY-3.0"
] | null | null | null | """Top-level of library designed to ease use of Selenium targetting Galaxy.
galaxy_selenium is purposely designed to depend on Python selenium,
six, pyyaml, and optionally pyvirtualdisplay, but not on galaxy-lib (or any of
Galaxy or Galaxy's test stack) currently.
"""
__version__ = '17.9.0.dev0'
PROJECT_NAME = "galaxy-selenium"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy-selenium"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = 'jmchilton@gmail.com'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
| 35 | 79 | 0.773016 | 89 | 630 | 5.303371 | 0.595506 | 0.088983 | 0.088983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009042 | 0.122222 | 630 | 17 | 80 | 37.058824 | 0.844485 | 0.414286 | 0 | 0 | 0 | 0 | 0.469613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
975cf0d24d9f18d9395f61092ba84bd2deba8dbb | 1,570 | py | Python | src/regression_handler.py | bruggerl/gdp-height | e14e9ab737b1f04f3a0c3fff271f0dfe6831c25d | [
"MIT"
] | null | null | null | src/regression_handler.py | bruggerl/gdp-height | e14e9ab737b1f04f3a0c3fff271f0dfe6831c25d | [
"MIT"
] | null | null | null | src/regression_handler.py | bruggerl/gdp-height | e14e9ab737b1f04f3a0c3fff271f0dfe6831c25d | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import constants as c
class RegressionHandler:
"""
This class encapsulates the logic for calculating and plotting a linear regression.
"""
@staticmethod
def plot_regression(dataframe, sex=c.Sex.MALE):
"""
Plot a linear regression with the GDP per capita being the X-variable and the mean height the Y-variable.
:param dataframe: dataframe containing the data to plot
:param sex: either MALE (0) or FEMALE (1) - specifies the sex for which the given dataframe contains data
:return: nothing
"""
# sort by GDP/capita so that plot can use logarithmic scale
dataframe = dataframe.sort_values(by=[c.GDP], axis=0)
sex_label = 'males' if sex == c.Sex.MALE else 'females'
X = dataframe.loc[:, c.GDP].values.reshape(-1, 1) # values converts it into a numpy array
Y = dataframe.loc[:, c.AVG_HEIGHT].values.reshape(-1, 1) # -1 means that calculate the dimension of rows, but have 1 column
linear_regressor = LinearRegression() # create object for the class
linear_regressor.fit(X, Y) # perform linear regression
Y_pred = linear_regressor.predict(X) # make predictions
plt.scatter(X, Y)
plt.plot(X, Y_pred, color='red')
plt.xscale('log')
plt.xlabel('GDP per capita [USD]')
plt.ylabel('average height of {0} aged 19 [cm]'.format(sex_label))
plt.savefig('out/regression_{0}.png'.format(sex_label))
plt.show()
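# Usage sketch (illustrative; the CSV path and its contents are hypothetical,
# actual column names come from constants.py):
#
#     import pandas as pd
#     df = pd.read_csv('data/gdp_height_males.csv')
#     RegressionHandler.plot_regression(df, sex=c.Sex.MALE)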
| 40.25641 | 132 | 0.663057 | 219 | 1,570 | 4.694064 | 0.506849 | 0.046693 | 0.033074 | 0.021401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010897 | 0.240127 | 1,570 | 38 | 133 | 41.315789 | 0.850796 | 0.383439 | 0 | 0 | 0 | 0 | 0.104444 | 0.024444 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
975e0b8ba97e0a156a85f8001f92c9651a62a699 | 561 | py | Python | server/utils/load_utils.py | caiodeberaldini/intelligent-systems-project | f97d480a7e51c76b0d6335d4ae8d5e6451321fb0 | [
"MIT"
] | null | null | null | server/utils/load_utils.py | caiodeberaldini/intelligent-systems-project | f97d480a7e51c76b0d6335d4ae8d5e6451321fb0 | [
"MIT"
] | null | null | null | server/utils/load_utils.py | caiodeberaldini/intelligent-systems-project | f97d480a7e51c76b0d6335d4ae8d5e6451321fb0 | [
"MIT"
] | null | null | null | import os
import sys
import pickle
from dotenv import load_dotenv
_ = load_dotenv()
def load_utils():
MODEL_PATH = os.getenv("MODEL_PATH")
VECTORIZER_PATH = os.getenv("VECTORIZER_PATH")
LABEL_ENC_PATH = os.getenv("LABEL_ENC_PATH")
with open(MODEL_PATH, 'rb') as f:
model = pickle.load(f)
with open(VECTORIZER_PATH, 'rb') as f:
vectorizer = pickle.load(f)
with open(LABEL_ENC_PATH, 'rb') as f:
le = pickle.load(f)
return (model, vectorizer, le)
| 19.344828 | 50 | 0.627451 | 80 | 561 | 4.2 | 0.2875 | 0.080357 | 0.107143 | 0.080357 | 0.199405 | 0.14881 | 0.14881 | 0 | 0 | 0 | 0 | 0 | 0.253119 | 561 | 28 | 51 | 20.035714 | 0.801909 | 0 | 0 | 0.157895 | 0 | 0 | 0.080214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9760ac4b7c9df5edcac3c9facafdb4273c11312d | 1,297 | py | Python | server.py | kiduyu-klaus/k-books | 1909100aa03e14d5f60e6bc4500b6226a3ce2552 | [
"MIT"
] | 2 | 2019-08-21T08:35:57.000Z | 2021-07-03T19:35:58.000Z | server.py | kiduyu-klaus/k-books | 1909100aa03e14d5f60e6bc4500b6226a3ce2552 | [
"MIT"
] | 1 | 2019-08-21T08:42:24.000Z | 2019-08-21T08:42:24.000Z | server.py | kiduyu-klaus/k-books | 1909100aa03e14d5f60e6bc4500b6226a3ce2552 | [
"MIT"
] | null | null | null | import telegram
import tornado.ioloop
import tornado.web
from tornado.options import define, options
from settings import WEBHOOK_URL, TELEGRAM_ACCESS_TOKEN
from core import bot_handler
define("port", default=5000, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.write('wink, wink')
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('wink, wink')
def post(self):
data = tornado.escape.json_decode(self.request.body)
self.set_status(200)
return bot_handler(data=data)
class WebHookHandler(tornado.web.RequestHandler):
def get(self):
# one time only operation
bot = telegram.Bot(token=TELEGRAM_ACCESS_TOKEN)
response = bot.setWebhook(WEBHOOK_URL)
if not response:
return self.write('Setting up webhook has failed')
return self.write('Webhook has been successfully set')
def make_app():
return tornado.web.Application([
(r'/', IndexHandler),
(r'/duh', MainHandler),
(r'/setwebhook', WebHookHandler)
])
if __name__ == "__main__":
tornado.options.parse_command_line()
app = make_app()
app.listen(options.port)
tornado.ioloop.IOLoop.current().start()
| 25.94 | 68 | 0.683115 | 161 | 1,297 | 5.36646 | 0.459627 | 0.05787 | 0.083333 | 0.09375 | 0.157407 | 0.157407 | 0.118056 | 0.118056 | 0.118056 | 0.118056 | 0 | 0.006803 | 0.206631 | 1,297 | 49 | 69 | 26.469388 | 0.832847 | 0.017733 | 0 | 0.142857 | 0 | 0 | 0.102987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.171429 | 0.028571 | 0.514286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9764e3ad21b80a26ed1998bb62b14b203c54a3ac | 516 | py | Python | utility/i2cutil.py | zjcers/ecohawks-battery | dd214584fc141aca64702d1093f218a3238ab22a | [
"MIT"
] | 2 | 2016-11-16T05:53:03.000Z | 2017-09-27T15:12:20.000Z | utility/i2cutil.py | zjcers/ecohawks-battery | dd214584fc141aca64702d1093f218a3238ab22a | [
"MIT"
] | 2 | 2016-04-29T01:38:30.000Z | 2016-09-21T15:41:20.000Z | utility/i2cutil.py | zjcers/ecohawks-battery | dd214584fc141aca64702d1093f218a3238ab22a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#Original Author: Zane J Cersovsky
#Original Date: Mar 6 2016
#Last Modified By: Zane J Cersovsky
#Last Modified On: Mar 7 2016
import bitstring
def reverse_word(word):
r"""
Take a 16-bit unsigned int and return it with its two bytes swapped.
"""
assert type(word) == int
assert word >= 0 and word < 65536
msb = word >> 8
lsb = word & 255
return (lsb << 8) | msb
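# Worked example (illustrative): reverse_word(0x1234) == 0x3412
#   msb = 0x1234 >> 8   -> 0x12
#   lsb = 0x1234 & 255  -> 0x34
#   (0x34 << 8) | 0x12  -> 0x3412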
def twocomplement(word):
r"""
Convert a 16-bit two's-complement word to a Python signed integer.
"""
b = bitstring.Bits(uint=word, length=16)
return b.int
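# Worked examples (illustrative):
#   twocomplement(0x7FFF) ==  32767
#   twocomplement(0x8000) == -32768
#   twocomplement(0xFFFF) == -1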
| 23.454545 | 66 | 0.70155 | 84 | 516 | 4.297619 | 0.630952 | 0.027701 | 0.077562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057279 | 0.187985 | 516 | 21 | 67 | 24.571429 | 0.804296 | 0.463178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9765225a4e13a87c2f5b956717cfeffe36f729ff | 6,454 | py | Python | certbot_azure/dns_azure_test.py | vladwing/certbot-azure | 85e0da499d313431bcd47ae39e0e4c88bf2c2561 | [
"MIT"
] | 24 | 2019-01-17T20:32:02.000Z | 2021-07-06T15:34:55.000Z | certbot_azure/dns_azure_test.py | vladwing/certbot-azure | 85e0da499d313431bcd47ae39e0e4c88bf2c2561 | [
"MIT"
] | 5 | 2020-04-27T19:55:45.000Z | 2021-12-03T10:26:15.000Z | certbot_azure/dns_azure_test.py | vladwing/certbot-azure | 85e0da499d313431bcd47ae39e0e4c88bf2c2561 | [
"MIT"
] | 11 | 2020-10-28T16:02:26.000Z | 2021-10-17T20:46:09.000Z | """Tests for certbot_azure.authenticator."""
import os
import unittest
import mock
import json
from certbot import errors
from certbot.plugins import dns_test_common_lexicon
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
from requests import Response
from msrestazure.azure_exceptions import CloudError
RESOURCE_GROUP = 'test-test-1'
class AuthenticatorTest(test_util.TempDirTestCase,
dns_test_common_lexicon.BaseLexiconAuthenticatorTest):
def setUp(self):
from certbot_azure.dns_azure import Authenticator
super(AuthenticatorTest, self).setUp()
config_path = AzureClientConfigDummy.build_config(self.tempdir)
self.config = mock.MagicMock(azure_credentials=config_path,
azure_resource_group=RESOURCE_GROUP)
self.auth = Authenticator(self.config, "azure")
self.mock_client = mock.MagicMock()
# pylint: disable=protected-access
self.auth._get_azure_client = mock.MagicMock(return_value=self.mock_client)
def test_perform(self):
self.auth.perform([self.achall])
expected = [mock.call.add_txt_record('_acme-challenge.'+DOMAIN, mock.ANY, mock.ANY)]
self.assertEqual(expected, self.mock_client.mock_calls)
def test_cleanup(self):
# _attempt_cleanup | pylint: disable=protected-access
self.auth._attempt_cleanup = True
self.auth.cleanup([self.achall])
expected = [mock.call.del_txt_record('_acme-challenge.'+DOMAIN)]
self.assertEqual(expected, self.mock_client.mock_calls)
class AzureClientTest(test_util.TempDirTestCase):
zone = "foo.com"
record_name = "bar"
record_content = "baz"
record_ttl = 42
def _getCloudError(self):
response = Response()
response.status_code = 500
return CloudError(response)
def setUp(self):
from certbot_azure.dns_azure import _AzureClient
super(AzureClientTest, self).setUp()
config_path = AzureClientConfigDummy.build_config(self.tempdir)
self.azure_client = _AzureClient(RESOURCE_GROUP, config_path)
self.dns_client = mock.MagicMock()
self.azure_client.dns_client = self.dns_client
# pylint: disable=protected-access
self.azure_client._find_managed_zone = mock.MagicMock()
def test_add_txt_record(self):
# pylint: disable=protected-access
self.azure_client._find_managed_zone.return_value = self.zone
self.azure_client.add_txt_record(self.record_name + "." + self.zone,
self.record_content,
self.record_ttl)
self.dns_client.record_sets.create_or_update.assert_called_with(
self.azure_client.resource_group,
self.zone,
self.record_name,
'TXT',
mock.ANY)
record = self.dns_client.record_sets.create_or_update.call_args[0][4]
self.assertEqual(self.record_ttl, record.ttl)
self.assertEqual([self.record_content], record.txt_records[0].value)
def test_add_txt_record_error(self):
# pylint: disable=protected-access
self.azure_client._find_managed_zone.return_value = self.zone
self.dns_client.record_sets.create_or_update.side_effect = self._getCloudError()
with self.assertRaises(errors.PluginError):
self.azure_client.add_txt_record(self.record_name + "." + self.zone,
self.record_content,
self.record_ttl)
def test_add_txt_record_zone_not_found(self):
# pylint: disable=protected-access
self.azure_client._find_managed_zone.return_value = None
# pylint: disable=protected-access
self.azure_client._find_managed_zone.side_effect = self._getCloudError()
with self.assertRaises(errors.PluginError):
self.azure_client.add_txt_record(self.record_name + "." + self.zone,
self.record_content,
self.record_ttl)
def test_del_txt_record(self):
# pylint: disable=protected-access
self.azure_client._find_managed_zone.return_value = self.zone
self.azure_client.del_txt_record(self.record_name + "." + self.zone)
self.dns_client.record_sets.delete.assert_called_with(self.azure_client.resource_group,
self.zone,
self.record_name,
'TXT')
def test_del_txt_record_no_zone(self):
# pylint: disable=protected-access
self.azure_client._find_managed_zone.return_value = None
# pylint: disable=protected-access
self.azure_client._find_managed_zone.side_effect = self._getCloudError()
self.azure_client.del_txt_record(self.record_name + "." + self.zone)
self.dns_client.record_sets.delete.assert_not_called()
class AzureClientConfigDummy(object):
"""Helper class to create dummy Azure configuration"""
@classmethod
def build_config(cls, tempdir):
"""Helper method to create dummy Azure configuration"""
config_path = os.path.join(tempdir, 'azurecreds.json')
with open(config_path, 'w') as outfile:
json.dump({
"clientId": "uuid",
"clientSecret": "uuid",
"subscriptionId": "uuid",
"tenantId": "uuid",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}, outfile)
os.chmod(config_path, 0o600)
return config_path
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 37.964706 | 95 | 0.627053 | 685 | 6,454 | 5.618978 | 0.214599 | 0.051442 | 0.066251 | 0.072746 | 0.54014 | 0.464536 | 0.44583 | 0.44583 | 0.394128 | 0.372304 | 0 | 0.00367 | 0.282306 | 6,454 | 169 | 96 | 38.189349 | 0.827288 | 0.078091 | 0 | 0.275229 | 0 | 0 | 0.082714 | 0.021438 | 0 | 0 | 0 | 0 | 0.082569 | 1 | 0.100917 | false | 0 | 0.110092 | 0 | 0.293578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97688b09bfb61fdf6e74155a50fefad44a0d8884 | 306 | py | Python | Python/Introduction/Python Print/solution.py | aesavas/HackerRank | c1ffecc92e3e3db923f94594b9115f650dc2983a | [
"MIT"
] | null | null | null | Python/Introduction/Python Print/solution.py | aesavas/HackerRank | c1ffecc92e3e3db923f94594b9115f650dc2983a | [
"MIT"
] | null | null | null | Python/Introduction/Python Print/solution.py | aesavas/HackerRank | c1ffecc92e3e3db923f94594b9115f650dc2983a | [
"MIT"
] | null | null | null | """
author : Ali Emre SAVAS
Link : https://www.hackerrank.com/challenges/python-print/problem
"""
if __name__ == '__main__':
n = int(input())
number = ""
# n+1 -> because range() excludes its stop value.
for i in range(1, n+1):
number += str(i)
print(number)
| 25.5 | 69 | 0.588235 | 45 | 306 | 3.822222 | 0.688889 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 0.254902 | 306 | 11 | 70 | 27.818182 | 0.736842 | 0.486928 | 0 | 0 | 0 | 0 | 0.056738 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
976907f333c9c096d9fd70e01fdfc99447224539 | 2,532 | py | Python | YatzyPy/data.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | YatzyPy/data.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | YatzyPy/data.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | # data.py
import numpy as np
from itertools import product, combinations
from . func import *
from . const import *
# 31 hold options in total, stored in a two-dimensional array
all_hold_options = np.array([list(combinations([0,1,2,3,4], r=repeat)) for repeat in range(1, 6)])
# plus one empty option, makes it 32 options
all_hold_options[0].append(())
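# Count check (illustrative): C(5,1)+C(5,2)+C(5,3)+C(5,4)+C(5,5) = 5+10+10+5+1 = 31,
# plus the empty hold gives all 2**5 = 32 subsets of the five dice.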
# numbers from 1 to 6
numbers = range(1, 7)
# all unique ordered hands
hands = []
for hand in product(numbers, repeat=5):
hand = list(hand)
hand.sort(reverse=True)
if hand not in hands:
hands.append(hand)
file = 'yatzy_probabilities.csv'
# data table
data = {
CHANGE: {'name': 'change', 'func': isChange, 'target': 17, 'order': 9, 'score': change},
SMALLSTRAIGHT: {'name': 'smallstraight', 'func': isSmallStraight, 'target': SMALL_STRAIGHT_BONUS, 'order': 8, 'score': smallStraight},
LARGESTRAIGHT: {'name': 'largestraight', 'func': isLargeStraight, 'target': LARGE_STRAIGHT_BONUS, 'order': 7, 'score': largeStraight},
DOUBLE: {'name': 'double', 'func': isKindNumber2, 'target': 8, 'order': 6, 'score': kindNumber2},
TRIPLE: {'name': 'triple', 'func': isKindNumber3, 'target': 12, 'order': 5, 'score': kindNumber3},
PAIR: {'name': 'pair', 'func': isPair, 'target': 14, 'order': 4, 'score': pair},
QUADRUPLE: {'name': 'quadruple', 'func': isKindNumber4, 'target': 14, 'order': 1, 'score': kindNumber4},
FULLHOUSE: {'name': 'fullhouse', 'func': isFullHouse, 'target': (FULL_HOUSE_BONUS if FULL_HOUSE_BONUS else 20), 'order': 1, 'score': fullHouse},
ONE: {'name': 'one', 'func': isNumberKind1, 'target': 3, 'order': 1, 'score': numberKind1},
TWO: {'name': 'two', 'func': isNumberKind2, 'target': 6, 'order': 1, 'score': numberKind2},
THREE: {'name': 'three', 'func': isNumberKind3, 'target': 9, 'order': 1, 'score': numberKind3},
FOUR: {'name': 'four', 'func': isNumberKind4, 'target': 12, 'order': 1, 'score': numberKind4},
FIVE: {'name': 'five', 'func': isNumberKind5, 'target': 15, 'order': 1, 'score': numberKind5},
SIX: {'name': 'six', 'func': isNumberKind6, 'target': 18, 'order': 1, 'score': numberKind6},
YATZY: {'name': 'yatzy', 'func': isYatzy, 'target': YATZY_BONUS, 'order': 1, 'score': yatzy}
}
categories = {k:v['name'] for k,v in data.items()}
functions = {k:v['func'] for k,v in data.items()}
targets = {k:v['target'] for k,v in data.items()}
scoring = {k:v['score'] for k,v in data.items()}
#order = [v['order'] for k,v in data.items()]
order = {k:v['order'] for k,v in data.items()}
| 45.214286 | 148 | 0.639021 | 338 | 2,532 | 4.745562 | 0.35503 | 0.013716 | 0.061721 | 0.026185 | 0.073566 | 0.073566 | 0.043641 | 0.027431 | 0 | 0 | 0 | 0.032756 | 0.156003 | 2,532 | 55 | 149 | 46.036364 | 0.717829 | 0.078594 | 0 | 0 | 0 | 0 | 0.215054 | 0.009892 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97691d01b0dc17a421afe81d680dd368bc4cb43f | 2,841 | py | Python | 17_logging_and_monitoring/start_17_blue_yellow_app_monitoring/blue_yellow_app/services/albums_service.py | g2gcio/course-demo | b0d00a6ac7a6a6a17af963cee67cf13dc5941e95 | [
"MIT"
] | 276 | 2016-04-04T20:57:36.000Z | 2022-03-12T02:42:46.000Z | 17_logging_and_monitoring/start_17_blue_yellow_app_monitoring/blue_yellow_app/services/albums_service.py | g2gcio/course-demo | b0d00a6ac7a6a6a17af963cee67cf13dc5941e95 | [
"MIT"
] | 37 | 2016-10-13T12:04:27.000Z | 2020-11-22T10:36:53.000Z | 17_logging_and_monitoring/start_17_blue_yellow_app_monitoring/blue_yellow_app/services/albums_service.py | g2gcio/course-demo | b0d00a6ac7a6a6a17af963cee67cf13dc5941e95 | [
"MIT"
] | 163 | 2016-10-03T02:10:00.000Z | 2022-03-25T03:43:01.000Z | from sqlalchemy.orm import joinedload
from blue_yellow_app.data.album import Album
from blue_yellow_app.data.dbsession import DbSessionFactory
from blue_yellow_app.data.track import Track
class AlbumsService:
@staticmethod
def get_albums():
session = DbSessionFactory.create_session()
albums = session.query(Album) \
.options(joinedload('tracks')) \
.filter(Album.is_published) \
.order_by(Album.year.desc()) \
.all()
return albums
@staticmethod
def old_get_albums():
return [
{
'title': 'Digital age boys and girls',
'year': 2001,
'has_preview': True,
'image': '/static/img/albums/digital_album.jpg',
'tracks': [
{'duration': '0:48', 'title': 'Welcome to the millennium'},
{'duration': '4:20', 'title': 'Renegade coders'},
{'duration': '5:01', 'title': 'Cyberpunks unite!'},
{'duration': '3:21', 'title': "We're all moving the Silicon Valley"},
{'duration': '2:22', 'title': "Tomorrow's people"},
{'duration': '4:24', 'title': 'I thought you were a robot'}
],
'url': 'digital-age-boys-and-girls'
},
{
'title': 'Year of the snake',
'year': 1991,
'has_preview': False,
'image': '/static/img/albums/snake_album.jpg',
'tracks': [
{'duration': '3:02', 'title': "Code like it's 1999"},
{'duration': '2:40', 'title': "Dawn of the iterators"},
{'duration': '5:21', 'title': "Running with descriptors"},
{'duration': '2:01', 'title': "Rage against the compilers"},
{'duration': '4:41', 'title': "Another line in the program"}
],
'url': 'year-of-the-snake'
}
]
@classmethod
def create_album(cls, title: str, year: int, album_image: str,
price: float, url: str, track_titles: []):
session = DbSessionFactory.create_session()
album = Album(name=title, year=year, album_image=album_image, price=price,
url=url, is_published=True)
session.add(album)
for idx, title in enumerate(track_titles):
track = Track(name=title, length=60, display_order=idx + 1)
album.tracks.append(track)
session.commit()
return album
@classmethod
def get_album_by_id(cls, album_id):
session = DbSessionFactory.create_session()
return session.query(Album). \
filter(Album.id == album_id) \
.first()
| 35.962025 | 89 | 0.515312 | 291 | 2,841 | 4.924399 | 0.43299 | 0.016748 | 0.029309 | 0.03559 | 0.074669 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025974 | 0.349525 | 2,841 | 78 | 90 | 36.423077 | 0.749459 | 0 | 0 | 0.169231 | 0 | 0 | 0.23548 | 0.033791 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061538 | false | 0 | 0.061538 | 0.015385 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
976fcbf14dd7896daf4f8d49416555bf79ddb30f | 1,250 | py | Python | utils/distance_functions.py | inesnolas/Rank-based-loss_ICASSP22 | 3ebe7345dc26b8fa74543725a51b43b7170c58cc | [
"MIT"
] | null | null | null | utils/distance_functions.py | inesnolas/Rank-based-loss_ICASSP22 | 3ebe7345dc26b8fa74543725a51b43b7170c58cc | [
"MIT"
] | 1 | 2022-03-10T04:08:49.000Z | 2022-03-10T04:08:49.000Z | utils/distance_functions.py | inesnolas/Rank-based-loss_ICASSP22 | 3ebe7345dc26b8fa74543725a51b43b7170c58cc | [
"MIT"
] | null | null | null | import torch
def compute_pairwise_cosine_distances(minibatch_embeddings, full_matrix=False):
# cosine_distance = 1 - cosine_similarity
# cosine similarity (A, B) = cos(theta) = (A . B) / (||A|| * ||B||)
# constraining embeddings onto a hypersphere (unit sphere) so all norms are 1 reduces this to a matrix multiplication (A . B)
D = 1 - torch.mm(minibatch_embeddings, torch.transpose(minibatch_embeddings, 0, 1))
if not full_matrix:
tri_idx = torch.triu_indices(minibatch_embeddings.shape[0],minibatch_embeddings.shape[0],1)
pairwise_dist_vector = D[tri_idx[0],tri_idx[1]]
return pairwise_dist_vector
else:
return D
def compute_pairwise_euclidean_distances(minibatch_embeddings, d, n, full_matrix=False ):
# as per https://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf alg.1
X_view1 = minibatch_embeddings.reshape(d, n, 1)
X_view2 = minibatch_embeddings.reshape(d,1,n)
diff_mat = X_view1-X_view2
D = torch.sum(diff_mat**2,dim=0)
if not full_matrix:
tri_idx = torch.triu_indices(n,n,1)
pairwise_dist_vector = D[tri_idx[0],tri_idx[1]]
return torch.sqrt(pairwise_dist_vector)
else :
return torch.sqrt(D)
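# --- minimal usage sketch (added for illustration, not in the original file) ---
# Assumes n embeddings of dimension d. The cosine version expects unit-norm
# rows; the euclidean version expects the matrix laid out as (d, n), matching
# the reshape calls above.
if __name__ == "__main__":
    n, d = 4, 8
    emb = torch.randn(n, d)
    emb_unit = emb / emb.norm(dim=1, keepdim=True)  # project onto the unit sphere
    cos_vec = compute_pairwise_cosine_distances(emb_unit)        # n*(n-1)/2 values
    euc_vec = compute_pairwise_euclidean_distances(emb.T, d, n)  # same length
    print(cos_vec.shape, euc_vec.shape)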
| 39.0625 | 129 | 0.7008 | 185 | 1,250 | 4.497297 | 0.389189 | 0.182692 | 0.086538 | 0.036058 | 0.252404 | 0.185096 | 0.185096 | 0.185096 | 0.185096 | 0.096154 | 0 | 0.021718 | 0.1896 | 1,250 | 31 | 130 | 40.322581 | 0.799605 | 0.252 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97719823db82abe3944497ea3132f94495350607 | 14,309 | py | Python | python/model.py | jakelong0509/Song_Detector | 57ac4b206b985f2c9e40423a9f79deacbc88f64e | [
"MIT"
] | 3 | 2018-11-09T17:56:58.000Z | 2019-05-15T19:43:58.000Z | python/model.py | jakelong0509/Song_Detector | 57ac4b206b985f2c9e40423a9f79deacbc88f64e | [
"MIT"
] | 1 | 2018-11-09T17:45:12.000Z | 2018-11-09T23:12:21.000Z | python/model.py | jakelong0509/Song_Detector | 57ac4b206b985f2c9e40423a9f79deacbc88f64e | [
"MIT"
] | null | null | null | import numpy as np
import os
import sys
import progressbar
import gc
import pickle
import matplotlib.pyplot as plt
from LSTM import LSTM
from wrapper import Bidirectional
from Regularization import regularization
from attention_model import attention_model
from data_preprocessing import song_preprocessing
from functions import activations as act, helper_func as func
from sklearn.preprocessing import normalize, minmax_scale
class model:
def __init__(self, X, Y, S, Tx, Ty, lr = 0.005, n_a = 64, n_s = 32, jump_step = 100, epoch = 100, sec = 5, optimizer = None):
self.X = X
self.Y = Y
self.S = S
self.Tx = Tx
self.Ty = Ty
self.lr = lr
self.m = X.shape[0]
self.n_x = X.shape[2]
self.n_y = Y.shape[2]
self.n_a = n_a
self.n_s = n_s
self.n_c = self.n_a * 2 # *2 when using bidirectional
self.hidden_dimension = [10]
self.jump_step = jump_step
self.epoch = epoch
self.sec = sec
self.last_layer_hidden_state = None
self.Att_As = []
self.Att_caches = []
self.Att_alphas = []
# Wy shape = (n_s,n_y)
self.Wy = func.xavier((self.n_s, self.n_y))
self.by = np.zeros((1, self.n_y))
self.optimizer = optimizer
self.s_weight = 0
self.s_bias = 0
self.v_weight = 0
self.v_bias = 0
self.TRAINING_THRESHOLD = 0
self.non_random_circle = 10
self._params = {"Wy": self.Wy, "by": self.by}
self.pre_LSTM = LSTM("pre_LSTM", (self.Tx, self.n_x), (self.Tx, self.n_a), optimizer = optimizer)
self.pre_bi_LSTM = Bidirectional("pre_bi_LSTM", self.pre_LSTM, is_dropout = True)
self.attention = attention_model("attention", self.n_c, self.S, self.n_s, self.n_c, self.hidden_dimension, optimizer = optimizer)
self.post_LSTM = LSTM("post_LSTM", (self.Ty, self.n_c), (self.Ty, self.n_s), is_attention = True, optimizer = optimizer)
def forward_propagation_one_ex(self, i, e):
"""
description:
forward propagation for one training example; data x label y
---parameter---
i: index
"""
# self.gradient_checking()
# X = minmax_scale(self.X[i,:,:], feature_range = (0, 1), axis = 0)
X = normalize(self.X[i,:,:], axis=1)
A = self.pre_bi_LSTM.concatLSTM(X) # shape = (Tx, 2 * n_a)
# A = self.pre_LSTM.forward_propagation(X)
self.attention._A = A
# attention and post_LSTM
start = 0
end = self.S
prev_s = np.zeros((1, self.n_s))
prev_a = np.zeros((1, self.n_s))
lstm_S = []
for t in range(self.Ty):
alphas, c, _energies, _caches_t, current_A = self.attention.nn_forward_propagation(prev_s, start, end)
start = start + self.jump_step
end = end + self.jump_step
# kept for backpropagation ***** this step takes ~30% of total RAM *******
self.Att_As.append(current_A)
self.Att_caches.append(_caches_t)
self.Att_alphas.append(alphas)
st, at, cache = self.post_LSTM.cell_forward(prev_s, prev_a, c)
lstm_S.append(st)
prev_s = st
prev_a = at
# convert lstm_S(list) to lstm_S(np array)
lstm_S = np.array(lstm_S).reshape((self.Ty, self.n_s))
self.last_layer_hidden_state = lstm_S
del lstm_S
# TODO: dropout lstm_S
# lstm_S = act.dropout(lstm_S, level = 0.5)
# initialize last layer Wy
# st shape = (1,n_s)
Y_hat = []
for t in range(self.Ty): # st shape = (1, n_s)
Zy = np.matmul(np.atleast_2d(self.last_layer_hidden_state[t,:]), self._params["Wy"]) + self._params["by"] # shape = (1, n_y)
yt_hat = act.softmax(Zy)
Y_hat.append(yt_hat.reshape(-1)) # yt_hat after reshape = (n_y,)
# Y_hat shape = (Ty, n_y)
Y_true = np.array(self.Y[i,:,:]) # (Ty, n_y)
Y_hat = np.array(Y_hat)
total_lost = 0
for t in range(self.Ty):
lost = func.t_lost(Y_true[t,:], Y_hat[t,:])
total_lost = total_lost + lost
total_lost = (total_lost/self.Ty)
return total_lost, Y_hat, Y_true
def backward_propagation_one_ex(self, Y_hat, Y_true, i, e, lr):
"""
Description:
backward propagation for one training example; data x label y
----parameter---
Y_hat: predicted value given training data X
Y_true: True label value of training data X
"""
# dL = (1/self.Ty)
# shape (Ty, n_y)
dZ = (Y_hat - Y_true)
assert(dZ.shape == (self.Ty, self.n_y))
# calculate dWy and dby
dWy = np.matmul(np.transpose(self.last_layer_hidden_state.reshape(self.Ty, self.n_s)), dZ)
dby = np.atleast_2d(np.sum(dZ, axis = 0))
self.update_weight(dWy, dby, e, lr, optimizer = self.optimizer)
assert(dWy.shape == (self.n_s, self.n_y) and dby.shape == (1, self.n_y))
#shape = (Ty, n_s)
dS = np.matmul(dZ, np.transpose(self.Wy))
d_AS_list = self.post_LSTM.backward_propagation(dS, self.Att_As, self.Att_caches, self.Att_alphas, self.attention)
self.post_LSTM.update_weight(lr, e)
self.attention.update_weight(lr, e)
self.Att_As = []
self.Att_caches = []
self.Att_alphas = []
self.pre_bi_LSTM.cell_backpropagation(d_AS_list, self.jump_step, self.Ty, self.Tx)
self.pre_bi_LSTM.update_weight(lr, e)
def update_weight(self, dWy, dby, i ,lr=0.005, optimizer = None, beta1 = 0.9, beta2 = 0.999, eps = 1e-8):
i = i + 1
lr = lr * np.sqrt(1-beta2**i)/(1-beta1**i)
s_corrected_weight = None
s_corrected_bias = None
v_corrected_weight = None
v_corrected_bias = None
if optimizer == "Adam":
self.s_weight = beta2 * self.s_weight + (1 - beta2) * (dWy ** 2)
s_corrected_weight = self.s_weight / (1 - beta2**i)
self.s_bias = beta2 * self.s_bias + (1 - beta2) * (dby ** 2)
s_corrected_bias = self.s_bias / (1 - beta2**i)
self.v_weight = beta1 * self.v_weight + (1 - beta1) * dWy
v_corrected_weight = self.v_weight / (1 - beta1**i)
self.v_bias = beta1 * self.v_bias + (1 - beta1) * dby
v_corrected_bias = self.v_bias / (1 - beta1**i)
self.Wy = self.Wy - lr*(v_corrected_weight/(np.sqrt(s_corrected_weight) + eps))
self.by = self.by - lr*(v_corrected_bias/(np.sqrt(s_corrected_bias) + eps))
else:
self.Wy = self.Wy - lr*dWy
self.by = self.by - lr*dby
self._params["Wy"] = self.Wy
self._params["by"] = self.by
self.save_weights()
def train(self, songs):
lr = self.lr
loss = []
print("Starting to train Detector..........")
for e in range(self.epoch):
print("Epoch {}/{}".format(e, self.epoch))
lost = 0
for i in progressbar.progressbar(range(self.m)):
total_lost, Y_hat, Y_true = self.forward_propagation_one_ex(i, e)
lost = lost + total_lost
self.backward_propagation_one_ex(Y_hat, Y_true, i, e, lr)
self.predict(self.X[i,:,:], songs, "weights")
loss.append(lost/self.m)
if e % 100 == 0:
print(loss)
print("Total Loss: ", lost/self.m)
def save_weights(self):
with open("weights/predict_layer.pickle", "wb") as f:
pickle.dump(self._params, f, protocol = pickle.HIGHEST_PROTOCOL)
def predict(self, data, songs, folder):
Tx, n_x = data.shape
assert(Tx >= self.S)
pre_LSTM = LSTM("pre_LSTM", (Tx, n_x), (Tx, self.n_a), optimizer = self.optimizer)
pre_bi_LSTM = Bidirectional("pre_bi_LSTM", pre_LSTM)
attention = attention_model("attention", self.n_c, self.S, self.n_s, self.n_c, self.hidden_dimension, optimizer = self.optimizer)
post_LSTM = LSTM("post_LSTM", (self.Ty, self.n_c), (self.Ty, self.n_s), is_attention = True, optimizer = self.optimizer)
LSTM_forward_params = pickle.load(open(folder + "/biDirectional_pre_LSTM_forward.pickle", "rb"))
LSTM_backward_params = pickle.load(open(folder +"/biDirectional_pre_LSTM_backward.pickle", "rb"))
attention_params = pickle.load(open(folder + "/attention.pickle", "rb"))
post_LSTM_params = pickle.load(open(folder + "/post_LSTM.pickle", "rb"))
params = pickle.load(open(folder + "/predict_layer.pickle", "rb"))
pre_bi_LSTM.forward._params = LSTM_forward_params
pre_bi_LSTM.backward._params = LSTM_backward_params
attention._params = attention_params
post_LSTM._params = post_LSTM_params
Ty = song_preprocessing.get_Ty(Tx, self.S, self.jump_step)
# data = minmax_scale(data, feature_range=(0, 1), axis=0)
data = normalize(data)
A = pre_bi_LSTM.concatLSTM(data)
attention._A = A
start = 0
end = self.S
prev_s = np.zeros((1, self.n_s))
prev_a = np.zeros((1, self.n_s))
lstm_S = []
for t in range(Ty):
alphas, c, _energies, _caches_t, current_A = attention.nn_forward_propagation(prev_s, start, end)
start = start + self.jump_step
end = end + self.jump_step
st, at, cache = post_LSTM.cell_forward(prev_s, prev_a, c)
lstm_S.append(st)
prev_s = st
prev_a = at
lstm_S = np.array(lstm_S)
# TODO: dropout lstm_S
# lstm_S = act.dropout(lstm_S, level = 0.5)
# initialize last layer Wy
# st shape = (1,n_s)
y_predict = 0
for t in range(Ty): # st shape = (1, n_s)
Zy = np.matmul(np.atleast_2d(lstm_S[t,:]), params["Wy"]) + params["by"] # shape = (1, n_y)
yt_hat = act.softmax(Zy)
y_predict = y_predict + yt_hat
y_predict = y_predict / Ty
print(y_predict)
index = np.argmax(y_predict)
print("Song predict: ", songs[index])
return songs[index]
def gradient_checking(self, dby, dWy, i, eps = 1e-4):
model_vec, model_keys_shape = func.dictionary_to_vector(self._params)
LSTM_forward_vec, LSTM_forward_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.forward._params)
LSTM_backward_vec, LSTM_backward_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.backward._params)
attention_vec, attention_keys_shape = func.dictionary_to_vector(self.attention._params)
post_LSTM_vec, post_LSTM_keys_shape = func.dictionary_to_vector(self.post_LSTM._params)
params_vector = np.concatenate([model_vec, LSTM_forward_vec, LSTM_backward_vec, attention_vec, post_LSTM_vec])
remain_vector = None
model_dict = {"dby": dby, "dWy": dWy}
model_grads, model_grads_keys_shape = func.dictionary_to_vector(model_dict)
LSTM_forward_grads, LSTM_forward_grads_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.forward.gradients)
LSTM_backward_grads, LSTM_backward_grads_keys_shape = func.dictionary_to_vector(self.pre_bi_LSTM.backward.gradients)
attention_grads, attention_grads_keys_shape = func.dictionary_to_vector(self.attention.gradients_layer)
post_LSTM_grads, post_LSTM_grads_keys_shape = func.dictionary_to_vector(self.post_LSTM.gradients)
grads_vector = np.concatenate([model_grads, LSTM_forward_grads, LSTM_backward_grads, attention_grads, post_LSTM_grads])
num_parameters = params_vector.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
for n in range(num_parameters):
print("{}/{}".format(n,num_parameters))
thetaplus = np.copy(params_vector)
thetaplus[n] = thetaplus[n] + eps
remain_vector, model_params = func.vector_to_dictionary(thetaplus, model_keys_shape)
remain_vector, LSTM_forward_params = func.vector_to_dictionary(remain_vector, LSTM_forward_keys_shape)
remain_vector, LSTM_backward_params = func.vector_to_dictionary(remain_vector, LSTM_backward_keys_shape)
remain_vector, attention_params = func.vector_to_dictionary(remain_vector, attention_keys_shape)
remain_vector, post_LSTM_params = func.vector_to_dictionary(remain_vector, post_LSTM_keys_shape)
self._params = model_params
self.pre_bi_LSTM.forward._params = LSTM_forward_params
self.pre_bi_LSTM.backward._params = LSTM_backward_params
self.attention._params = attention_params
self.post_LSTM._params = post_LSTM_params
J_plus[n], _, _ = self.forward_propagation_one_ex(i, 0)  # epoch arg is unused in the forward pass
thetaminus = np.copy(params_vector)
thetaminus[n] = thetaminus[n] - eps  # central difference needs the minus side
remain_vector, model_params = func.vector_to_dictionary(thetaminus, model_keys_shape)
remain_vector, LSTM_forward_params = func.vector_to_dictionary(remain_vector, LSTM_forward_keys_shape)
remain_vector, LSTM_backward_params = func.vector_to_dictionary(remain_vector, LSTM_backward_keys_shape)
remain_vector, attention_params = func.vector_to_dictionary(remain_vector, attention_keys_shape)
remain_vector, post_LSTM_params = func.vector_to_dictionary(remain_vector, post_LSTM_keys_shape)
self._params = model_params
self.pre_bi_LSTM.forward._params = LSTM_forward_params
self.pre_bi_LSTM.backward._params = LSTM_backward_params
self.attention._params = attention_params
self.post_LSTM._params = post_LSTM_params
J_minus[n], _, _ = self.forward_propagation_one_ex(i, 0)  # epoch arg is unused in the forward pass
gradapprox[n] = (J_plus[n] - J_minus[n]) / (2 * eps)
numerator = np.linalg.norm(grads_vector - gradapprox)
denominator = np.linalg.norm(grads_vector) + np.linalg.norm(gradapprox)
difference = numerator / denominator
if difference > 1e-7:
print("Wrong")
else:
print("Right")
| 42.20944 | 137 | 0.624991 | 2,027 | 14,309 | 4.118895 | 0.111001 | 0.019164 | 0.019404 | 0.018685 | 0.498143 | 0.419571 | 0.385196 | 0.358007 | 0.318002 | 0.303509 | 0 | 0.011368 | 0.262282 | 14,309 | 338 | 138 | 42.33432 | 0.779557 | 0.077504 | 0 | 0.217213 | 0 | 0 | 0.028082 | 0.009641 | 0 | 0 | 0 | 0.005917 | 0.012295 | 1 | 0.032787 | false | 0 | 0.057377 | 0 | 0.102459 | 0.036885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97742d434f756859171175e1ee026361918f8086 | 3,151 | py | Python | blueprints/portfolio/build.py | andrenasturas/hausse | 58e7cb71d5105cf1d6ec7d294e85668855bf8336 | [
"MIT"
] | null | null | null | blueprints/portfolio/build.py | andrenasturas/hausse | 58e7cb71d5105cf1d6ec7d294e85668855bf8336 | [
"MIT"
] | 1 | 2021-08-30T21:41:46.000Z | 2021-08-30T21:41:46.000Z | blueprints/portfolio/build.py | andrenasturas/hausse | 58e7cb71d5105cf1d6ec7d294e85668855bf8336 | [
"MIT"
] | 1 | 2021-08-31T19:27:32.000Z | 2021-08-31T19:27:32.000Z | from hausse import Hausse
from hausse.plugins import (
Assets,
DiscoverPartials,
Drop,
Handlebars,
Markdown,
MetadataMarkdown,
Relations,
Collection,
Collections
)
# Collections preparations
# By default, all files in the "src/links" folder will be grouped in this collection
Links = Collection("links")
# Using `indexBy` enables indexation, which is useful for building relations
Projects = Collection("projects", indexBy="title")
Skills = Collection("skills", indexBy="name")
h = Hausse("examples/portfolio")
h.clean()
h.use(
# `use()` method register plugins into the Hausse project
# It is possible to call `use()` once or multiple times, with one or a list of Plugins
# In any case, Plugins will be called in order.
[
# Assets plugin is used to simply dump static files, like stylesheets or icons
# As it bypasses all other plugins by copying files directly into the "dist" folder,
# it does not retrieve files from "src/assets" but directly from "assets"
Assets("assets"),
# Markdown parses all markdown files found in "src"
# Note that this plugin will also load as metadata all key-values present in headers
Markdown(),
# MetadataMarkdown parses markdown string found in files metadata
MetadataMarkdown("summary"),
# Collections (with a s) auto-creates collections based on files' "collections" metadata
Collections(),
# Each of the following defines a Collection and fills it with the matching files
Links,
Skills,
Projects,
# Relations helps make links between files in different collections
# That's why Collections have been defined before Hausse() call
# Another solution is to use CollectionSelector(collection_name) instead of the Collection
Relations(Projects, Skills),
# DiscoverPartials registers partials templates for Handlebars layout processing
DiscoverPartials("templates"),
# Handlebars does the actual layout processing to html files
Handlebars("layouts", "layout.hbs", "index.md"),
# Drop removes useless files from the project, before writing them in "dist"
# Note that it does not remove the actual files from "src" folder
# Here, it is used because we build a single page from multiple markdown files
# Once the layout plugin processed them, used markdown files are no longer wanted
Drop("*.md"),
]
)
# And here the magic happens. When `build()` is called, Hausse project generation begins
# Files from "src" directory are loaded and stored in a elements structure
# Every registered Plugin is called in order on the same set of elements, metadata and settings
# When all Plugins have been called, all files from elements are written in "dist" directory
h.build()
# Save will store the Hausse project configuration into a `hausse.json` file,
# which can be used later by Hausse in CLI mode: `python -m hausse
# hausse.json`. It is useful for simplifying the project setup once development is
# done and it goes to production.
h.save() | 41.460526 | 96 | 0.70676 | 418 | 3,151 | 5.325359 | 0.435407 | 0.020216 | 0.016173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228499 | 3,151 | 76 | 97 | 41.460526 | 0.915673 | 0.701999 | 0 | 0 | 0 | 0 | 0.106828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
977547a86441dd2f34137210aaba3cb7c65a05a6 | 716 | py | Python | yahoo_fin/ex01.py | stormabq/python-examples | 1c1acfc3863a2cf1a9e202c9299f3a526858eb34 | [
"MIT"
] | 1 | 2021-12-12T04:39:53.000Z | 2021-12-12T04:39:53.000Z | yahoo_fin/ex01.py | stormabq/python-examples | 1c1acfc3863a2cf1a9e202c9299f3a526858eb34 | [
"MIT"
] | null | null | null | yahoo_fin/ex01.py | stormabq/python-examples | 1c1acfc3863a2cf1a9e202c9299f3a526858eb34 | [
"MIT"
] | 6 | 2020-05-05T17:21:28.000Z | 2021-12-12T04:39:57.000Z | from yahoo_fin.stock_info import (
get_data,
tickers_sp500,
tickers_nasdaq,
tickers_other,
get_quote_table,
)
""" pull historical data for Netflix (NFLX) """
# nflx = get_data("NFLX")
""" pull data for Apple (AAPL) """
"""case sensitivity does not matter"""
# aapl = get_data("aapl")
""" get list of all stocks currently traded
on NASDAQ exchange """
# nasdaq_ticker_list = tickers_nasdaq()
""" get list of all stocks currently in the S&P 500 """
sp500_ticker_list = tickers_sp500()
print(sp500_ticker_list)
""" get other tickers not in NASDAQ (based off nasdaq.com)"""
# other_tickers = tickers_other()
""" get information on stock from quote page """
# info = get_quote_table("amzn")
| 24.689655 | 61 | 0.698324 | 102 | 716 | 4.686275 | 0.460784 | 0.043933 | 0.062762 | 0.050209 | 0.112971 | 0.112971 | 0 | 0 | 0 | 0 | 0 | 0.025467 | 0.177374 | 716 | 28 | 62 | 25.571429 | 0.786078 | 0.206704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97777e3bdfdec7925db3bd47283e2b2cd170c9f4 | 3,657 | py | Python | arenarobot/video_call/__init__.py | DaAwesomeP/ARENA-robot | ff771c4f5f587fda1bcc79ae165301129a6b1458 | [
"BSD-3-Clause"
] | null | null | null | arenarobot/video_call/__init__.py | DaAwesomeP/ARENA-robot | ff771c4f5f587fda1bcc79ae165301129a6b1458 | [
"BSD-3-Clause"
] | null | null | null | arenarobot/video_call/__init__.py | DaAwesomeP/ARENA-robot | ff771c4f5f587fda1bcc79ae165301129a6b1458 | [
"BSD-3-Clause"
] | null | null | null | """
__init__.py: Definitions for the video call client.
Created by Perry Naseck on 7/1/21.
Copyright (c) 2021, The CONIX Research Center
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
"""
# Some code originally from:
# https://github.com/peppelinux/videodrone/blob/97f867bd39d9dfa4c4335487074e77a855858cd1/src/videodrone/drones/__init__.py#L20
import subprocess # nosec B404
from functools import partial
from os import setpgrp
from arena import Scene
from selenium.webdriver import common
from selenium.webdriver.remote.webdriver import WebDriver
_selenium_orig_start = common.service.Service.start
class VideoCall():
"""Video Call class for the ARENA."""
_selenium_start_orig = staticmethod(common.service.Service.start)
_subprocess_popen_orig = staticmethod(subprocess.Popen)
def __init__(self, scene: Scene, browser: WebDriver, options=None):
"""Initialize the video call class."""
self.scene = scene
self.browser = browser
self.options = options
self.instance = None
def __exit__(self, exception_type, exception_value, traceback):
"""Safely exit 'with' statements."""
self.close()
def __del__(self):
"""Safely exit if class deleted."""
self.close()
def is_open(self) -> bool:
"""Return if the browser is running."""
return self.instance is not None
@staticmethod
def _selenium_start(*args, **kwargs):
"""
Start Selenium but ignore handlers like SIGINT.
Modified from https://stackoverflow.com/a/62430234
This allows for manually closing Selenium on CTRL+C so that the Jitsi
call can be properly hung up.
"""
try:
subprocess.Popen = partial(subprocess.Popen,
preexec_fn=setpgrp)
VideoCall._selenium_start_orig(*args, **kwargs)
finally:
subprocess.Popen = VideoCall._subprocess_popen_orig
def open(self):
"""Start the browser."""
token = self.scene.remote_auth_token['token']
url = 'https://jitsi0.andrew.cmu.edu:8443/'
url += f'{self.scene.namespace}_{self.scene.scene}'
print(f"arena-robot VideoCall: opening {url}")
url += f'?jwt={token}'
url += '#config.channelLastN=0&config.resolution=1080'
# Temporarily override the start function to not pass SIGINT
try:
common.service.Service.start = self._selenium_start
self.instance = self.browser(options=self.options)
finally:
common.service.Service.start = self._selenium_start_orig
self.instance.get(url)
def set_name(self, name: str):
"""Set the Jitsi display name."""
script = f"APP.conference.changeLocalDisplayName('{name}');"
self.instance.execute_script(script)
def video_mute(self, mute: bool = True):
"""Set the Jitsi video mute state."""
script = f"APP.conference.muteVideo({str(mute).lower()});"
self.instance.execute_script(script)
def audio_mute(self, mute: bool = True):
"""Set the Jitsi audio mute state."""
script = f"APP.conference.muteAudio({str(mute).lower()});"
self.instance.execute_script(script)
def close(self):
"""Close and quit the browser."""
print("arena-robot VideoCall: closing")
if self.instance is not None:
self.instance.execute_script('APP.conference.hangup();')
self.instance.quit()
self.instance = None
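# --- standalone sketch (added for illustration, POSIX-only) ---
# The setpgrp/partial trick used in _selenium_start: a child launched in its
# own process group does not receive the terminal's SIGINT, so CTRL+C reaches
# only the parent process.
#
#   popen_nosigint = partial(subprocess.Popen, preexec_fn=setpgrp)
#   child = popen_nosigint(["sleep", "60"])  # survives CTRL+C in the terminal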
| 33.861111 | 126 | 0.654361 | 441 | 3,657 | 5.29932 | 0.387755 | 0.056483 | 0.034232 | 0.04279 | 0.159178 | 0.141207 | 0.10184 | 0.065896 | 0.039367 | 0 | 0 | 0.021544 | 0.238447 | 3,657 | 107 | 127 | 34.17757 | 0.817594 | 0.275089 | 0 | 0.192982 | 0 | 0 | 0.144768 | 0.098348 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175439 | false | 0 | 0.105263 | 0 | 0.350877 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97793cb16f646ed93a1d2a99e624885f86ec02c8 | 2,006 | py | Python | ADEC7430 Big Data Econometrics/Midterm1/PyCode/_Functions/34_RidgeLassoEN.py | sherrytp/bc_f19_econ | 0d393e54441fd38faba275bb3e718704fbd18d0d | [
"Apache-2.0"
] | null | null | null | ADEC7430 Big Data Econometrics/Midterm1/PyCode/_Functions/34_RidgeLassoEN.py | sherrytp/bc_f19_econ | 0d393e54441fd38faba275bb3e718704fbd18d0d | [
"Apache-2.0"
] | null | null | null | ADEC7430 Big Data Econometrics/Midterm1/PyCode/_Functions/34_RidgeLassoEN.py | sherrytp/bc_f19_econ | 0d393e54441fd38faba275bb3e718704fbd18d0d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 23 00:14:38 2019
@author: RV
Purpose: use Ridge, Lasso and Elastic Net models on Titanic data
"""
#%% Lasso
from sklearn.linear_model import Lasso
import numpy as np
import matplotlib.pyplot as plt
# X, y and confusionMatrixInfo are assumed to be defined earlier in the
# session (Titanic features/labels and a confusion-matrix helper).
useAlpha = 0.00001
lasso_1 = Lasso(alpha = useAlpha).fit(X, y)
print(lasso_1.coef_)
# OK, got the coefficients, but what do they represent???
X.columns
# OK - can we align the two though?
#??????? yes we can... what is zip function?
[i for i in zip(X.columns, lasso_1.coef_)]
# predictions
lasso_1_is_pred = lasso_1.predict(X)
# attempt to identify a good cutoff
cutoffgrid = np.linspace(min(lasso_1_is_pred), max(lasso_1_is_pred), 100)
tcm1 = [confusionMatrixInfo(lasso_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid]
plt.figure()
plt.plot(tcm1)
plt.show()
#%%
# Ridge classifier
from sklearn.linear_model import RidgeClassifierCV as RCCV
RCCV_1 = RCCV(alphas=[np.exp(i) for i in np.linspace(-10,0,50)]).fit(X,y)
RCCV_1.score(X,y) # not that great ?
RCCV_1_is_pred = RCCV_1.predict(X)
confusionMatrixInfo(RCCV_1_is_pred,y)
# attempt to identify a good cutoff
cutoffgrid = np.linspace(min(RCCV_1_is_pred), max(RCCV_1_is_pred), 100)
tcm1 = [confusionMatrixInfo(RCCV_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid]
plt.figure()
plt.plot(tcm1)
plt.show()
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import RidgeClassifierCV
X, y = load_breast_cancer(return_X_y=True)
clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
clf.score(X, y)
#%%
from sklearn.linear_model import ElasticNet as ENet
a = 0.0001
b = 0.0001
alpha = a+b
l1_ratio = a/(a+b)
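# sklearn's ElasticNet objective is
#   (1 / (2 * n_samples)) * ||y - Xw||^2
#       + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||_2^2
# so with a as the L1 weight and b as the (halved) L2 weight:
#   alpha = a + b and l1_ratio = a / (a + b), as computed above.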
ENet_1 = ENet(alpha = alpha, l1_ratio= l1_ratio).fit(X,y)
ENet_1_is_pred = ENet_1.predict(X)
cutoffgrid = np.linspace(min(ENet_1_is_pred), max(ENet_1_is_pred), 100)
tcm1 = [confusionMatrixInfo(ENet_1_is_pred < i, y, labels=[1,0])['accuracy'] for i in cutoffgrid]
plt.figure()
plt.plot(tcm1)
plt.show()
| 27.108108 | 99 | 0.70339 | 345 | 2,006 | 3.907246 | 0.324638 | 0.028932 | 0.067507 | 0.040801 | 0.433976 | 0.367211 | 0.227003 | 0.227003 | 0.227003 | 0.227003 | 0 | 0.051693 | 0.161017 | 2,006 | 73 | 100 | 27.479452 | 0.749257 | 0.197408 | 0 | 0.230769 | 0 | 0 | 0.01581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
977b336843e723764b4e53b212ee10afac5dc75c | 5,340 | py | Python | pyperformance/_pythoninfo.py | oraluben/pyperformance | 74acfa5fbf801ce79fdadc9e92355c99460e91e4 | [
"MIT"
] | 255 | 2016-08-27T05:38:56.000Z | 2019-05-27T16:15:38.000Z | pyperformance/_pythoninfo.py | oraluben/pyperformance | 74acfa5fbf801ce79fdadc9e92355c99460e91e4 | [
"MIT"
] | 43 | 2016-08-30T15:22:01.000Z | 2019-05-24T10:59:54.000Z | pyperformance/_pythoninfo.py | oraluben/pyperformance | 74acfa5fbf801ce79fdadc9e92355c99460e91e4 | [
"MIT"
] | 55 | 2016-08-30T15:19:45.000Z | 2019-05-20T12:41:02.000Z | # A utility library for getting information about a Python executable.
#
# This may be used as a script.
import importlib.util
import json
import os
import os.path
import sys
import sysconfig
INFO = {
# sys
'executable (sys)': 'sys.executable',
'executable (sys;realpath)': 'executable_realpath',
'prefix (sys)': 'sys.prefix',
'exec_prefix (sys)': 'sys.exec_prefix',
'stdlib_dir (sys)': 'sys._stdlib_dir',
'base_executable (sys)': 'sys._base_executable',
'base_prefix (sys)': 'sys.base_prefix',
'base_exec_prefix (sys)': 'sys.base_exec_prefix',
'version_str (sys)': 'sys.version',
'version_info (sys)': 'sys.version_info',
'hexversion (sys)': 'sys.hexversion',
'api_version (sys)': 'sys.api_version',
'implementation_name (sys)': 'sys.implementation.name',
'implementation_version (sys)': 'sys.implementation.version',
'platform (sys)': 'sys.platform',
# sysconfig
'stdlib_dir (sysconfig)': 'sysconfig.paths.stdlib',
'is_dev (sysconfig)': 'sysconfig.is_python_build',
# other
'base_executable': 'base_executable',
'stdlib_dir': 'stdlib_dir',
'pyc_magic_number': 'pyc_magic_number',
'is_venv': 'is_venv',
}
def get_info(python=sys.executable):
"""Return an object with details about the given Python executable.
Most of the details are grouped by their source.
By default the current Python is used.
"""
if python and python != sys.executable:
# Run _pythoninfo.py to get the raw info.
import subprocess
argv = [python, __file__]
try:
text = subprocess.check_output(argv, encoding='utf-8')
except subprocess.CalledProcessError:
raise Exception(f'could not get info for {python or sys.executable}')
data = _unjsonify_info(text)
else:
data = _get_current_info()
return _build_info(data)
def _build_info(data):
# Map the data into a new types.SimpleNamespace object.
info = type(sys.implementation)()
for key, value in data.items():
try:
field = INFO[key]
except KeyError:
raise NotImplementedError(repr(key))
parent = info
while '.' in field:
pname, _, field = field.partition('.')
try:
parent = getattr(parent, pname)
except AttributeError:
setattr(parent, pname, type(sys.implementation)())
parent = getattr(parent, pname)
setattr(parent, field, value)
return info
def _get_current_info():
is_venv = (sys.prefix != sys.base_prefix)
base_executable = getattr(sys, '_base_executable', None)
if is_venv:
# XXX There is probably a bug related to venv, since
# sys._base_executable should be different.
if base_executable == sys.executable:
# Indicate that we don't know.
base_executable = None
elif not base_executable:
base_executable = sys.executable
info = {
# locations
'executable (sys)': sys.executable,
'executable (sys;realpath)': os.path.realpath(sys.executable),
'prefix (sys)': sys.prefix,
'exec_prefix (sys)': sys.exec_prefix,
'stdlib_dir': os.path.dirname(os.__file__),
'stdlib_dir (sys)': getattr(sys, '_stdlib_dir', None),
'stdlib_dir (sysconfig)': (sysconfig.get_path('stdlib')
if 'stdlib' in sysconfig.get_path_names()
else None),
# base locations
'base_executable': base_executable,
'base_executable (sys)': getattr(sys, '_base_executable', None),
'base_prefix (sys)': sys.base_prefix,
'base_exec_prefix (sys)': sys.base_exec_prefix,
# version
'version_str (sys)': sys.version,
'version_info (sys)': sys.version_info,
'hexversion (sys)': sys.hexversion,
'api_version (sys)': sys.api_version,
# implementation
'implementation_name (sys)': sys.implementation.name,
'implementation_version (sys)': sys.implementation.version,
# build
'is_dev (sysconfig)': sysconfig.is_python_build(),
# host
'platform (sys)': sys.platform,
# virtual envs
'is_venv': is_venv,
# import system
# importlib.util.MAGIC_NUMBER has been around since 3.5.
'pyc_magic_number': importlib.util.MAGIC_NUMBER,
}
return info
def _jsonify_info(info):
data = dict(info)
if isinstance(data['pyc_magic_number'], bytes):
data['pyc_magic_number'] = data['pyc_magic_number'].hex()
return data
def _unjsonify_info(data):
if isinstance(data, str):
data = json.loads(data)
info = dict(data)
for key in ('version_info (sys)', 'implementation_version (sys)'):
if isinstance(info[key], list):
# We would use type(sys.version_info) if it allowed it.
info[key] = tuple(info[key])
for key in ('pyc_magic_number',):
if isinstance(info[key], str):
info[key] = bytes.fromhex(data[key])
return info
#######################################
# use as a script
if __name__ == '__main__':
info = _get_current_info()
data = _jsonify_info(info)
json.dump(data, sys.stdout, indent=4)
print()
| 33.584906 | 81 | 0.618352 | 630 | 5,340 | 5.02381 | 0.253968 | 0.049289 | 0.030332 | 0.020221 | 0.289731 | 0.252449 | 0.252449 | 0.2 | 0.2 | 0.2 | 0 | 0.001004 | 0.25412 | 5,340 | 158 | 82 | 33.797468 | 0.793623 | 0.131273 | 0 | 0.070796 | 0 | 0 | 0.286623 | 0.035526 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044248 | false | 0 | 0.070796 | 0 | 0.159292 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
977f46612fc386542c357d0fedcedfdd08752149 | 592 | py | Python | alembic/versions/6d2dd11ac2fb_20190307204236_tokens_table.py | pchudzik/tweet | 1938dae6be1359d73a8140b994c3db39d2b336da | [
"MIT"
] | null | null | null | alembic/versions/6d2dd11ac2fb_20190307204236_tokens_table.py | pchudzik/tweet | 1938dae6be1359d73a8140b994c3db39d2b336da | [
"MIT"
] | null | null | null | alembic/versions/6d2dd11ac2fb_20190307204236_tokens_table.py | pchudzik/tweet | 1938dae6be1359d73a8140b994c3db39d2b336da | [
"MIT"
] | null | null | null | """tokens table
Revision ID: 6d2dd11ac2fb
Revises: 908de7ed5813
Create Date: 2019-03-07 20:42:36.328479
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6d2dd11ac2fb'
down_revision = '908de7ed5813'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'jwt_revoked_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('jti', sa.String(120), nullable=False, index=True),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('jwt_revoked_tokens')
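# Typical invocation (from the directory containing alembic.ini):
#   alembic upgrade 6d2dd11ac2fb     # apply this revision
#   alembic downgrade 908de7ed5813   # roll it back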
| 19.733333 | 69 | 0.697635 | 76 | 592 | 5.315789 | 0.631579 | 0.039604 | 0.074257 | 0.10396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.100823 | 0.179054 | 592 | 29 | 70 | 20.413793 | 0.730453 | 0.238176 | 0 | 0 | 0 | 0 | 0.151242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97817fb679923c526b028329f844109b22f3d6ae | 1,655 | py | Python | pineboolib/fllegacy/FLSettings.py | Miguel-J/pineboo-buscar | 41a2f3ee0425d163619b78f32544c4b4661d5fa7 | [
"MIT"
] | null | null | null | pineboolib/fllegacy/FLSettings.py | Miguel-J/pineboo-buscar | 41a2f3ee0425d163619b78f32544c4b4661d5fa7 | [
"MIT"
] | null | null | null | pineboolib/fllegacy/FLSettings.py | Miguel-J/pineboo-buscar | 41a2f3ee0425d163619b78f32544c4b4661d5fa7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from PyQt5 import QtCore
from pineboolib.flcontrols import ProjectClass
from pineboolib import decorators
class FLSettings(ProjectClass):
s = QtCore.QSettings(QtCore.QSettings.NativeFormat,
QtCore.QSettings.UserScope, "Eneboo", "Pineboo")
@decorators.BetaImplementation
def readListEntry(self, key, retOk=False):
ret = []
if self.s.contains(key):  # QSettings has no __contains__
ret = self.s.value(key)
return ret
def readEntry(self, _key, _def=None, retOk=False):
ret = self.s.value(_key, None)  # returns a QVariant!
if "geo" in _key:
# print("Geo vale", str(ret))
# ret = ret.toSize()
# print("Geo vale", str(ret))
if not ret:
ret = _def
else:
if str(ret) == "":
ret = _def
# print("Retornando %s ---> %s" % (_key, ret))
return ret
@decorators.BetaImplementation
def readNumEntry(self, key, _def=0, retOk=False):
ret = self.s.value(key, _def)  # fall back to _def when the key is absent
return int(ret)
@decorators.BetaImplementation
def readDoubleEntry(self, key, _def=0, retOk=False):
ret = self.s.value(key, _def)  # fall back to _def when the key is absent
return float(ret)
def readBoolEntry(self, key, _def=False, retOk=False):
ret = self.s.value(key)
if isinstance(ret, str):
ret = ret == "true"
if ret is None:
ret = _def
return ret
def writeEntry(self, key, value):
self.s.setValue(key, value)
@decorators.BetaImplementation
def writeEntryList(self, key, value):
self.s.setValue(key, value)
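# --- usage sketch (illustrative; ProjectClass setup omitted) ---
# settings = FLSettings()
# settings.writeEntry("geometry/width", 800)
# width = settings.readNumEntry("geometry/width", _def=640)
# verbose = settings.readBoolEntry("app/verbose", _def=False)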
| 26.269841 | 73 | 0.569184 | 190 | 1,655 | 4.9 | 0.294737 | 0.042965 | 0.069817 | 0.069817 | 0.281418 | 0.24275 | 0.219119 | 0.163265 | 0.092374 | 0.092374 | 0 | 0.003518 | 0.312991 | 1,655 | 62 | 74 | 26.693548 | 0.815303 | 0.100906 | 0 | 0.390244 | 0 | 0 | 0.013504 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.073171 | 0 | 0.414634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97818d041346c51355d6a36de188f9622c855abf | 35,874 | py | Python | kernel/tracker/tracking.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 39 | 2021-10-12T01:43:27.000Z | 2022-03-28T04:46:35.000Z | kernel/tracker/tracking.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 6 | 2021-10-14T02:11:47.000Z | 2022-03-23T02:41:50.000Z | kernel/tracker/tracking.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 10 | 2021-10-14T09:36:03.000Z | 2022-02-10T11:05:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import numpy as np
from google.protobuf.json_format import MessageToJson
from common.python import RuntimeInstance
from common.python import session
from common.python.calculation.fc.fc_source import FCSource
from common.python.calculation.fc.fc_storage import FCStorage
from common.python.common.consts import NAMESPACE, TaskResultDataType, \
ProjectStatus, ModelType, TaskStatus
from common.python.common.enums import FlowQueueActionType
from common.python.db.data_set_dao import DataSetDao
from common.python.db.data_set_column_dao import DataSetColumnDao
from common.python.db.db_models import *
from common.python.db.job_member_dao import JobMemberDao
from common.python.db.project_dao import ProjectDao
from common.python.db.project_data_set_dao import ProjectDataSetDao
from common.python.db.task_dao import TaskDao
from common.python.db.task_progress_dao import TaskProgressDao
from common.python.db.flow_action_queue_dao import FlowActionQueueDao
from common.python.db.task_result_dao import TaskResultDao
from common.python.db.current_best_model_dao import CurrentBestModelDao
from common.python.db.provider_model_param_dao import ProviderModelParamsDao
from common.python.db.job_dao import JobDao
from common.python.protobuf.pyproto import default_empty_fill_pb2
from common.python.utils import file_utils
from common.python.utils.core_utils import current_datetime, timestamp_to_date, get_commit_id, md5, get_delta_seconds
from kernel.tracker import model_manager
from kernel.tracker import model_utils
from kernel.utils.decorator_utils import update_task_status_env
LOGGER = log_utils.get_logger()
def generate_unit_id(task_id):
str_list = task_id.split("_")
for item in str_list:
if item in ["arbiter", "promoter", "provider"]:
str_list.remove(item)
return "_".join(str_list)
class Tracking(object):
METRIC_DATA_PARTITION = 48
METRIC_LIST_PARTITION = 48
JOB_VIEW_PARTITION = 8
def __init__(self, project_id: str, job_id: str, role: str, member_id: int,
model_id: str = None,
model_version: str = None,
component_name: str = None,
module_name: str = None,
task_id: str = None,
oot: bool = False):
self.is_serving_model = False
self.show_name = ""
self.source_type = ""
self.project_id = project_id
self.job_id = job_id
self.role = role
self.member_id = member_id
self.component_name = component_name if component_name else 'pipeline'
self.module_name = module_name if module_name else 'Pipeline'
self.task_id = task_id if task_id else Tracking.generate_task_id(job_id=self.job_id, role=self.role,
component_name=self.component_name)
self.table_namespace = '_'.join(
['wefe', 'tracking', 'data', self.job_id, self.role, str(self.member_id), self.component_name])
self.job_table_namespace = '_'.join(
['wefe', 'tracking', 'data', self.job_id, self.role, str(self.member_id)])
self.model_id = model_id
self.member_model_id = model_utils.gen_member_model_id(model_id=model_id, role=role, member_id=member_id)
self.model_version = model_version
self.oot = oot
def set_is_serving_model(self, flag):
self.is_serving_model = flag
def set_show_name(self, name):
self.show_name = name
def set_source_type(self, source_type):
self.source_type = source_type
def _get_task_result_type(self, data_type, data_name=None):
"""
Get type for task result
Parameters
----------
data_type:TaskResultDataType
data_name:str
"train" or "eval"
Returns
-------
"""
if data_name:
# In oot mode, in order to avoid primary key conflicts,
# only the type field can be used to de-duplicate
if self.oot:
return '_'.join([data_type, data_name, self.component_name, 'oot'])
return '_'.join([data_type, data_name])
return data_type + '_' + self.component_name + "_oot" if self.oot else data_type
def save_output_data_table(self, data_table, data_name: str = 'component', save_dataset=False):
if data_table:
save_name = '{}_persistent'.format(data_table._name)
save_namespace = NAMESPACE.DATA
save_partitions = data_table.get_partitions()
async_save = False
fcs_info = None
if RuntimeInstance.BACKEND.is_fc() and isinstance(data_table, FCSource) and data_table.get_exist_fcs():
async_save = True
fcs_info = data_table.get_exist_fcs().to_dict()
params = {
"fcs_info": fcs_info,
"name": save_name,
"namespace": save_namespace,
"partitions": save_partitions
}
# save data asynchronously
flow_action_queue = FlowActionQueue()
flow_action_queue.id = get_commit_id()
flow_action_queue.producer = 'kernel'
flow_action_queue.action = FlowQueueActionType.SAVE_OUTPUT_DATA
flow_action_queue.params = json.dumps(params)
flow_action_queue.channel = ''
FlowActionQueueDao.save(flow_action_queue, force_insert=True)
if not async_save:
# save data synchronously
data_table.save_as(namespace=save_namespace, name=save_name)
# save meta
header_list = data_table.schema.get('header', [])
session.save_data_table_meta(
{'schema': data_table.schema, 'header': header_list,
'sid': data_table.schema.get('sid_name', '')},
data_table_namespace=save_namespace, data_table_name=save_name)
data_input = {'table_name': save_name, 'table_namespace': save_namespace, 'partition': save_partitions,
'table_create_count': data_table.count() if data_table else 0, 'fcs_info': fcs_info}
# self.save_data_info(data_input=data_input, mark=True, data_name=data_name)
self.save_task_result(data_input, self._get_task_result_type(TaskResultDataType.DATA, data_name))
if save_dataset:
self.save_dataset(data_input, data_table.schema, data_table)
def get_output_data_table(self, data_name: str = 'component'):
"""
Get output data
Parameters
----------
data_name
Returns
-------
table of dsource
"""
task_result = self.get_task_result(self._get_task_result_type(TaskResultDataType.DATA, data_name))
if task_result and task_result.result:
data_table_info = json.loads(task_result.result)
if data_table_info and data_table_info.get("table_name") and data_table_info.get("table_namespace"):
data_table = session.table(name=data_table_info.get('table_name', ''),
namespace=data_table_info.get('table_namespace', ''))
data_table_meta = data_table.get_metas()
if data_table_meta.get('schema', None):
data_table.schema = data_table_meta['schema']
# If fcs exists, load fcs directly
if 'fcs_info' in data_table_info and isinstance(data_table, FCSource):
fcs_info = data_table_info.get('fcs_info')
fcs = FCStorage.from_fcs_info(fcs_info)
if fcs:
fc_source = FCSource.from_fcs(fcs, session.get_session_id(), data_table.get_namespace(),
data_table.get_name())
fc_source.schema = data_table.schema
data_table = fc_source
return data_table
else:
return None
def save_output_model(self, model_buffers: dict, model_name: str, data_name, save_to_storage=False):
if model_buffers:
if save_to_storage:
name_space = 'wefe_data'
name = self.task_id + '_' + self.job_id
model_manager.save_component_model(component_model_key='{}.{}'.format(self.component_name, model_name),
model_buffers=model_buffers,
member_model_id=name_space,
model_version=name)
# save to task result
model_json_obj = self._model_buffers_to_json_obj(model_buffers, self.member_model_id, self.model_version,
component_model_key='{}.{}'.format(self.component_name,
model_name))
self.save_task_result(model_json_obj, self._get_task_result_type(TaskResultDataType.MODEL, model_name))
def _model_buffers_to_json_obj(self, model_buffers: dict, member_model_id, model_version, component_model_key):
"""
Model buffers to json obj
Parameters
----------
model_buffers
member_model_id
model_version
component_model_key
Returns
-------
"""
model = {'member_model_id': member_model_id, 'model_version': model_version,
'component_model_key': component_model_key}
for buffer_name, buffer_object in model_buffers.items():
json_obj = MessageToJson(buffer_object, including_default_value_fields=True)
if not json_obj:
fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage()
fill_message.flag = 'set'
json_obj = MessageToJson(fill_message, including_default_value_fields=True)
if 'meta' in buffer_name.lower():
model['model_meta'] = json.loads(json_obj)
if 'param' in buffer_name.lower():
model['model_param'] = json.loads(json_obj)
return model
def save_task_result(self, task_result: dict, result_type, component_name=None):
"""
Save task result
Parameters
----------
task_result
result_type
component_name:str
Component name, special case can be specified separately
Returns
-------
"""
model = TaskResultDao.get(
TaskResult.job_id == self.job_id,
TaskResult.task_id == self.task_id,
TaskResult.role == self.role,
TaskResult.type == result_type
)
task = TaskDao.get(
Task.job_id == self.job_id,
Task.task_id == self.task_id
)
# Stay compatible with local tests that carry no task information
if not task:
task = Task()
task.flow_id = "local_test_flow_id"
task.flow_node_id = "local_test_flow_node_id"
is_insert = True
if model:
is_insert = False
else:
model = TaskResult()
model.id = get_commit_id()
model.created_time = datetime.datetime.now()
model.job_id = self.job_id
model.name = component_name or self.component_name
model.task_id = self.task_id
model.role = self.role
model.type = result_type
model.updated_time = datetime.datetime.now()
model.result = json.dumps(task_result)
model.component_type = self.component_name.rsplit('_')[0]
model.flow_id = task.flow_id
model.flow_node_id = task.flow_node_id
model.project_id = task.project_id
if self.is_serving_model and model.type.split("_")[0] == "model":
model.serving_model = 1
TaskResultDao.save(model, force_insert=is_insert)
return model
def get_task_result(self, result_type, task_id=None):
"""
Get task result
Parameters
----------
result_type
task_id
Returns
-------
"""
where_condition = [TaskResult.job_id == self.job_id,
TaskResult.name == self.component_name,
TaskResult.role == self.role,
TaskResult.type == result_type]
if task_id:
where_condition.append(TaskResult.task_id == task_id)
return TaskResultDao.get(*tuple(where_condition))
def save_training_best_model(self, model_buffers):
# save to task_result
model_json_obj = self._model_buffers_to_json_obj(model_buffers, self.member_model_id, self.model_version,
component_model_key='{}.{}'.format(self.component_name,
"default"))
self.save_task_result(model_json_obj, self._get_task_result_type(TaskResultDataType.TRAINING_MODEL, "default"))
def save_cur_best_model(self, model_buffers, iteration):
model = CurrentBestModelDao.get(
CurrentBestModel.job_id == self.job_id,
CurrentBestModel.component_name == self.component_name,
CurrentBestModel.role == self.role,
CurrentBestModel.member_id == self.member_id
)
is_insert = True
if model:
is_insert = False
else:
model = CurrentBestModel()
model.id = get_commit_id()
model.created_time = current_datetime()
model.job_id = self.job_id
model.component_name = self.component_name
model.task_id = self.task_id
model.role = self.role
model.member_id = self.member_id
model.updated_time = current_datetime()
model.iteration = iteration
for buffer_name, buffer_object in model_buffers.items():
json_obj = MessageToJson(buffer_object, including_default_value_fields=True)
if not json_obj:
fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage()
fill_message.flag = 'set'
json_obj = MessageToJson(fill_message, including_default_value_fields=True)
if 'meta' in buffer_name.lower():
model.model_meta = json_obj
if 'param' in buffer_name.lower():
model.model_param = json_obj
CurrentBestModelDao.save(model, force_insert=is_insert)
return model
def save_provider_model_params(self, model_buffers, provider_member_id):
model = ProviderModelParamsDao.get(
ProviderModelParams.job_id == self.job_id,
ProviderModelParams.component_name == self.component_name,
ProviderModelParams.role == self.role,
ProviderModelParams.member_id == self.member_id
)
is_insert = True
if model:
is_insert = False
else:
model = ProviderModelParams()
model.id = get_commit_id()
model.created_time = datetime.datetime.now()
model.job_id = self.job_id
model.component_name = self.component_name
model.task_id = self.task_id
model.role = self.role
model.member_id = self.member_id
model.updated_time = datetime.datetime.now()
model.provider_member_id = provider_member_id
# model.updated_by = ""
# model.created_by = ""
json_obj = MessageToJson(model_buffers, including_default_value_fields=True)
if not json_obj:
fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage()
fill_message.flag = 'set'
json_obj = MessageToJson(fill_message, including_default_value_fields=True)
model.provider_model_param = json_obj
ProviderModelParamsDao.save(model, force_insert=is_insert)
return model
def get_output_model(self, model_name=ModelType.BINNING_MODEL):
model = TaskResultDao.get(
TaskResult.task_id == self.task_id,
TaskResult.role == self.role,
TaskResult.type == self._get_task_result_type(TaskResultDataType.MODEL, model_name)
)
if model:
model = json.loads(model.result)
return {"Model_Meta": model["model_meta"], "Model_Param": model["model_param"]}
else:
return None
def get_training_best_model(self):
model = TaskResultDao.get(
TaskResult.task_id == self.task_id,
TaskResult.role == self.role,
TaskResult.type == self._get_task_result_type(TaskResultDataType.TRAINING_MODEL, "default")
)
if model:
model = json.loads(model.result)
return {"Model_Meta": model["model_meta"], "Model_Param": model["model_param"]}
else:
return None
def get_statics_result(self, type='data_feature_statistic'):
model = TaskResultDao.get_last_statics_result(self.job_id, self.role, type)
if model:
max = {}
min = {}
mean = {}
median = {}
missing_count = {}
std_variance = {}
count = 0
mode = {}
result = json.loads(model.result)
LOGGER.info("mysql result:{}".format(result))
members = result['members']
feature_statistic = None
for member in members:
if member['role'] == self.role:
feature_statistic = member['feature_statistic']
if feature_statistic:
for feature, value in feature_statistic.items():
max[feature] = value['max']
min[feature] = value['min']
mean[feature] = value['mean']
if '50' in value['percentile']:
median[feature] = value['percentile']['50']
missing_count[feature] = value['missing_count']
std_variance[feature] = value['std_variance']
count = value['count']
mode[feature] = value.get('mode')
statics = {"max": max, "min": min, "mean": mean, "median": median, "missing_count": missing_count,
"std_variance": std_variance, "std": std_variance, 'count': count, "mode": mode}
return statics
return None
def get_binning_result(self):
model = TaskResultDao.get_last_task_result(self.job_id, self.role, 'model_train')
if model:
result = json.loads(model.result)
LOGGER.debug("mysql result:{}".format(result))
binning_result = result.get('model_param').get('binningResult').get('binningResult')
binning_results = {}
for feature, value in binning_result.items():
binning_results[feature] = {'woe': value.get('woeArray'), 'split_points': value.get('splitPoints')}
model_meta = result.get('model_meta')
model_param = {'header': model_meta.get('cols')}
transform_cols = model_meta.get('transformParam').get('transformCols')
model_param['transform_bin_indexes'] = [int(x) for x in transform_cols]
return model_param, binning_results
return None, None
def saveSingleMetricData(self, metric_name: str, metric_namespace: str, metric_meta, kv, job_level=False):
self.save_metric_data_to_task_result(metric_name, metric_namespace, metric_meta, kv, job_level)
def saveMetricData(self, metric_name: str, metric_namespace: str, metric_meta, kv, job_level=False):
self.save_metric_data_to_task_result(metric_name, metric_namespace, metric_meta, kv, job_level)
def _get_item_metric(self, metric_name: str, metric_namespace: str, metric_meta: {}, data: {}):
"""
Get metric item
Parameters
----------
metric_name
metric_namespace
metric_meta
data
Returns
-------
"""
return {"metric_name": metric_name, "metric_namespace": metric_namespace,
"metric_meta": metric_meta, "data": data}
def _get_metric_data_value(self, v):
# return {'value': v, 'create_time': timestamp_to_date()}
if isinstance(v, dict):
return {'value': v}
if np.isinf(v):
return {'value': 'Infinity'}
if type(v) == float:
return {'value': str(v)}
return {'value': v}
def save_metric_data_to_task_result(self, metric_name: str, metric_namespace: str, metric_meta, kv,
job_level=False, need_value=True):
"""
Save metric data to task result
Parameters
----------
metric_name
metric_namespace
metric_meta
kv
job_level
need_value
Returns
-------
"""
result_type = self._get_task_result_type(TaskResultDataType.METRIC, metric_namespace)
metric_task_result = self.get_task_result(result_type, self.task_id)
result = {}
if metric_task_result and metric_task_result.result:
result = json.loads(metric_task_result.result)
metric_key = '_'.join([metric_namespace, metric_name])
component_name = self.component_name if not job_level else 'dag'
if metric_key in result.keys():
item_metric = result.get(metric_key)
else:
item_metric = self._get_item_metric(metric_name, metric_namespace, metric_meta, {})
if not need_value:
item_metric['data'] = kv
elif isinstance(kv, list):
for k, v in kv:
item_metric['data'].update({k: self._get_metric_data_value(v)})
else:
item_metric['data'].update({kv[0]: self._get_metric_data_value(kv[1])})
result[metric_key] = item_metric
self.save_task_result(result, result_type, component_name)
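# illustrative shape of the JSON this method stores in TaskResult.result:
# {
#   "train_loss": {
#     "metric_name": "loss", "metric_namespace": "train",
#     "metric_meta": {...},
#     "data": {"0": {"value": "0.69"}, "1": {"value": "0.52"}}
#   }
# }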
def save_dataset(self, data_input, schema, data_table):
header_list = schema.get("header")
# Determine whether the task exists
task = TaskDao.find_one_by_task_id(self.task_id)
if not task:
return
# Determine whether the job exists
job = JobDao.find_one_by_job_id(self.job_id, self.role)
if not job:
return
# Determine whether the project exists
project = ProjectDao.get(self.project_id == Project.project_id, Project.my_role == self.role)
if not project:
return
job_member = JobMemberDao.get(
JobMember.job_id == self.job_id,
JobMember.member_id == self.member_id,
JobMember.job_role == self.role
)
if not job_member:
return
data_set_old = DataSetDao.get(
DataSet.id == job_member.data_set_id
)
if not data_set_old:
return
data_set = DataSet()
# data_set_id = get_commit_id()
unit_id = generate_unit_id(self.task_id)
data_set.id = md5(unit_id)
data_set.created_time = current_datetime()
data_set.updated_time = current_datetime()
data_set.name = job.name + self.show_name
data_set.source_type = self.module_name
data_set.source_job_id = job.job_id
data_set.name = data_set.name + '_' + timestamp_to_date(format_string='%Y%m%d%H%M%S')
data_set.storage_type = data_set_old.storage_type
data_set.public_member_list = data_set_old.public_member_list
data_set.tags = data_set_old.tags
data_set.description = data_set_old.description
data_set.source_flow_id = data_set_old.source_flow_id
data_set.source_task_id = self.task_id
data_set.usage_count_in_job = 0
data_set.usage_count_in_flow = 0
data_set.usage_count_in_project = 0
data_set.namespace = data_input['table_namespace']
data_set.table_name = data_input['table_name']
data_set.row_count = data_input['table_create_count']
data_set.feature_name_list = ",".join(header_list)
data_set.y_name_list = data_set_old.y_name_list
data_set.primary_key_column = data_set_old.primary_key_column
# column = primary_key + y + feature
if data_set.y_name_list is None:
data_set.column_name_list = data_set.primary_key_column + "," + ",".join(header_list)
else:
data_set.column_name_list = f"{data_set.primary_key_column},{data_set.y_name_list},{','.join(header_list)}"
# y positive count
y_positive_count = data_table.filter(lambda k, v: int(v.label) > 0).count()
y_positive_ratio = round(y_positive_count / data_input['table_create_count'], 4)
data_set.y_positive_example_count = y_positive_count
data_set.y_positive_example_ratio = y_positive_ratio
        if len(header_list) == 0:
            # strip the trailing comma left by the empty feature list
            data_set.column_name_list = data_set.column_name_list[:-1]
data_set.contains_y = data_set_old.contains_y
data_set.column_count = len(data_set.column_name_list.split(","))
data_set.feature_count = len(data_set.feature_name_list.split(","))
DataSetDao.save(data_set, force_insert=True)
self.save_project_data_set(data_set.id, self.job_id, self.task_id, self.component_name)
self.save_data_set_column(data_set, schema, data_set_old.id)
return data_set
@staticmethod
def generate_task_id(job_id, role, component_name):
return '{}_{}_{}'.format(job_id, role, component_name)
    @staticmethod
    def get_job_log_directory(job_id):
        return os.path.join(log_utils.get_log_root_path(), job_id)
    @staticmethod
    def get_job_directory(job_id):
        return os.path.join(file_utils.get_project_base_directory(), 'jobs', job_id)
def save_project_data_set(self, data_set_id, job_id, task_id, component_name):
project_data_set = ProjectDataSet()
project_data_set.id = get_commit_id()
project_data_set.member_role = self.role
project_data_set.created_by = self.member_id
project_data_set.created_time = current_datetime()
project_data_set.updated_by = self.member_id
project_data_set.updated_time = current_datetime()
project_data_set.project_id = self.project_id
project_data_set.member_id = self.member_id
project_data_set.data_set_id = data_set_id
project_data_set.audit_status = ProjectStatus.AGREE
project_data_set.status_updated_time = current_datetime()
project_data_set.source_task_id = task_id
project_data_set.source_type = component_name.split("_")[0]
project_data_set.source_job_id = job_id
ProjectDataSetDao.save(project_data_set, force_insert=True)
return project_data_set
@staticmethod
def get_data_set_column_type(data_set_id):
data_set_columns = DataSetColumnDao.list_by_data_set_id(data_set_id)
column_types = []
for item_column in data_set_columns:
column_types.append(item_column.data_type)
return column_types
@staticmethod
def save_data_set_column(data_set, schema, old_data_set_id):
column_types = schema.get("column_types")
header = schema.get("header")
if column_types:
def get_new_column_json(data_set_id, index, name, data_type):
return {
"data_set_id": data_set_id,
"id": get_commit_id(),
"created_time": current_datetime(),
"index": index,
"name": name,
"data_type": data_type
}
index = 0
data_set_id = data_set.id
            # look up the id column type from the old data set
id_column = DataSetColumnDao.get(DataSetColumn.data_set_id == old_data_set_id,
DataSetColumn.name == data_set.primary_key_column)
# id column
id_column_type = id_column.data_type if id_column else "String"
column_list = [get_new_column_json(data_set_id, index, data_set.primary_key_column, id_column_type)]
index += 1
# label column
if data_set.contains_y == 1:
for item_y in data_set.y_name_list.split(','):
column_list.append(get_new_column_json(data_set_id, index, item_y, "Integer"))
index += 1
# feature column
for i in range(len(header)):
column_list.append(get_new_column_json(data_set_id, index, header[i], column_types[i]))
index += 1
DataSetColumnDao.batch_insert(column_list)
def _calc_progress(self, model):
"""
        Calculate progress
        Computes the progress percentage from the total work amount and the
        work amount completed so far.
        If the actual work amount is known, the percentage is computed from it
        (i.e. the task has finished); otherwise the estimated work amount is
        used.
        Parameters
        ----------
        model
            TaskProgress record to update in place
Returns
-------
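        Examples
        --------
        Worked numbers (illustrative, not from the source): with progress=150
        and expect_work_amount=300, progress_rate = round(150 / 300 * 100, 2)
        = 50.0. If spend is 60000 ms at that rate, the remaining-time estimate
        is 60000 * 100 / 50.0 - 60000 = 60000 ms, so expect_end_time is
        updated_time + 60 s.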
"""
if model.progress is None:
model.progress = 0
if model.progress > model.expect_work_amount:
model.progress = model.expect_work_amount
work_amount = model.really_work_amount or model.expect_work_amount
model.progress_rate = round(model.progress / work_amount * 100, 2)
if model.progress_rate > 100:
model.progress_rate = 100
if model.updated_time is not None and model.progress_rate > 0:
model.spend = int((model.updated_time - model.created_time).total_seconds() * 1000)
need_time = int(model.spend * 100 / model.progress_rate - model.spend)
model.expect_end_time = model.updated_time + datetime.timedelta(milliseconds=need_time)
return model
def init_task_progress(self, work_amount: int):
"""
        Initialize the total work amount for the task progress
        e.g. if a logistic regression task needs to run 300 iterations,
        work_amount can be set to 300; after each iteration completes,
        the current work amount should be incremented by 1.
        Parameters
        ----------
        work_amount:int
            Total work amount
Returns
-------
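        Examples
        --------
        A hypothetical sketch of the intended call pattern (the `tracker`
        instance and the training loop are illustrative, not from the source):

        >>> tracker.init_task_progress(300)
        >>> for _ in range(300):
        ...     ...  # run one training iteration
        ...     tracker.add_task_progress(1)
        >>> tracker.finish_task_progress()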
"""
if self.oot:
return
is_insert = True
model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
if model:
is_insert = False
# reset
model.progress = 0
model.really_work_amount = None
model.created_time = datetime.datetime.now()
model.updated_time = None
model.expect_end_time = None
model.spend = None
else:
model = TaskProgress()
model.id = get_commit_id()
model.progress = 0
model.created_time = datetime.datetime.now()
# get task info
task_info = TaskDao.get(
Task.task_id == self.task_id,
Task.role == self.role
)
if task_info:
model.flow_id = task_info.flow_id
model.flow_node_id = task_info.flow_node_id
else:
model.flow_id = 0
model.flow_node_id = 0
model.project_id = self.project_id
model.job_id = self.job_id
model.role = self.role
model.task_id = self.task_id
model.task_type = self.component_name.split('_')[0]
model.expect_work_amount = work_amount
self._calc_progress(model)
TaskProgressDao.save(model, force_insert=is_insert)
def set_task_progress(self, work_amount: int):
"""
Update the progress according to the specified work amount
Parameters
----------
work_amount:int
The amount of work currently completed
Returns
-------
"""
if self.oot:
return
if work_amount >= 0:
model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
if model:
model.progress = work_amount
model.updated_time = datetime.datetime.now()
self._calc_progress(model)
TaskProgressDao.save(model)
def add_task_progress(self, step: int = 1):
"""
Increase progress according to step
Parameters
----------
step:int
Returns
-------
"""
if self.oot:
return
        model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
        # guard against a missing progress record, as set_task_progress does
        if model is None:
            return
        if model.progress is not None:
work_amount = model.progress + step
else:
work_amount = step
        # Reserve one unit of work for the finish_task_progress call
if work_amount > model.expect_work_amount - 1:
work_amount = model.expect_work_amount - 1
self.set_task_progress(work_amount)
def finish_task_progress(self):
"""
Finish task progress
Returns
-------
"""
model = TaskProgressDao.get_by_unique_id(self.task_id, self.role)
if model:
model.progress = model.progress + 1
model.really_work_amount = model.progress
if model.really_work_amount > model.expect_work_amount:
model.really_work_amount = model.expect_work_amount
model.updated_time = datetime.datetime.now()
self._calc_progress(model)
model.pid_success = 1
TaskProgressDao.save(model)
@update_task_status_env()
def set_task_success(self):
"""
Set task success
Returns
-------
"""
running_task = TaskDao.find_one_by_task_id(self.task_id)
if running_task:
running_task.status = TaskStatus.SUCCESS
            running_task.message = "Task completed"
running_task.updated_time = datetime.datetime.now()
running_task.finish_time = datetime.datetime.now()
running_task.spend = get_delta_seconds(
running_task.finish_time, running_task.start_time)
TaskDao.save(running_task)
if __name__ == '__main__':
    task = TaskDao.find_one_by_task_id('69ccd7ca9ff444f3a93a7e950fbf432d_promoter_Intersection_16238974992057754')
    start_time = task.start_time
    finish_time = task.finish_time
    print(type(start_time))
    spend = finish_time - start_time
    print(type(spend))
    print(spend.seconds)