hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d467fadd8af4902f63395f8f7006d9ea2380851a | 10,037 | py | Python | dfdone/plot.py | elespike/dfdone | c514e290a0eb0f74fd3c8f74ddbfddb917b2a629 | [
"MIT"
] | 7 | 2020-06-05T15:33:40.000Z | 2021-03-07T16:57:55.000Z | dfdone/plot.py | elespike/dfdone | c514e290a0eb0f74fd3c8f74ddbfddb917b2a629 | [
"MIT"
] | null | null | null | dfdone/plot.py | elespike/dfdone | c514e290a0eb0f74fd3c8f74ddbfddb917b2a629 | [
"MIT"
] | 1 | 2020-06-05T20:01:46.000Z | 2020-06-05T20:01:46.000Z | from collections import defaultdict as ddict
from operator import attrgetter, methodcaller
from string import punctuation
from graphviz import Digraph
from dfdone.enums import (
Profile,
Role,
)
ASSUMPTION = 'assumption'
DATA = 'data'
MEASURE = 'measure'
THREAT = 'threat'
def table_from_list(class_name, table_headers, table_rows):
final_list = ['<thead>']
for header in table_headers:
final_list.append(F"<th>{header}</th>")
final_list.append('</thead>')
final_list.append('<tbody>')
final_list.extend(table_rows)
final_list.append('</tbody>')
table_body = '\n'.join(final_list)
return F'\n\n<table class="{class_name}">\n{table_body}\n</table>'
slugify = str.maketrans(' ', '-', punctuation)
def id_format(label):
return label.lower().replace('-', ' ').translate(slugify)
def build_table_rows(class_prefix, component_list):
table_rows = list()
for i, c in enumerate(component_list):
table_rows.append('<tr>')
table_rows.append('<td>')
table_rows.append(
F'<div class="row-number {class_prefix}-number">{i + 1}</div>'
)
table_rows.append('</td>')
style_class = ''
if class_prefix == DATA:
style_class = F" classification-{c.classification.name.lower()}"
elif class_prefix == ASSUMPTION or class_prefix == THREAT:
style_class = F" risk-{c.calculate_risk().name.lower()}"
elif class_prefix == MEASURE:
style_class = F" capability-{c.capability.name.lower()}"
table_rows.append('<td>')
table_rows.append((
F'<div id="{id_format(c.id)}" '
F'class="label {class_prefix}-label{style_class}">'
F"{c.label}</div>"
))
table_rows.append('</td>')
if class_prefix == THREAT:
table_rows.append('<td>')
for m in c.measures:
table_rows.append((
'<div class="label measure-label '
F'capability-{m.capability.name.lower()}">'
F'<a href="#{id_format(m.id)}">{m.label}</a></div>'
))
table_rows.append('</td>')
if class_prefix == MEASURE:
table_rows.append('<td>')
for t in c.threats:
table_rows.append((
'<div class="label threat-label '
F'risk-{t.calculate_risk().name.lower()}">'
F'<a href="#{id_format(t.id)}">{t.label}</a></div>'
))
table_rows.append('</td>')
table_rows.append('<td>')
table_rows.append('<div class="{}">{}</div>'.format(
F"description {class_prefix}-description" if c.description
else 'dash',
c.description or '-'
))
table_rows.append('</td>')
table_rows.append('</tr>')
return table_rows
def build_assumption_table(assumptions):
headers = ['#', 'Disprove', 'Description']
return table_from_list(
'assumption-table',
headers,
build_table_rows(ASSUMPTION, assumptions)
)
def build_data_table(data):
headers = ['#', 'Data', 'Description']
data = sorted(data, key=attrgetter('label'))
data.sort(key=attrgetter('classification'), reverse=True)
return table_from_list(
'data-table',
headers,
build_table_rows(DATA, data)
)
def build_threat_table(threats):
headers = ['#', 'Active Threat', 'Applicable Measures', 'Description']
threats = sorted(threats, key=attrgetter('label'))
threats.sort(key=methodcaller('calculate_risk'), reverse=True)
return table_from_list(
'threat-table',
headers,
build_table_rows(THREAT, threats)
)
def build_measure_table(measures):
headers = ['#', 'Security Measure', 'Mitigable Threats', 'Description']
measures = sorted(measures, key=attrgetter('label'))
measures.sort(key=attrgetter('capability'), reverse=True)
return table_from_list(
'measure-table',
headers,
build_table_rows(MEASURE, measures)
)
def organize_elements(graph, elements):
central_elements = max([
[e for e in elements if e.profile == Profile.BLACK],
[e for e in elements if e.profile == Profile.GREY],
[e for e in elements if e.profile == Profile.WHITE],
], key=lambda l: len(l))
if not central_elements:
return
row_count = max(2, len(central_elements) // 2)
row_subgraph = Digraph(name='rows')
for i in range(1, row_count):
row_subgraph.edge(F"{i}", F"{i+1}", style='invis')
row_subgraph.node_attr.update(style='invis', shape='plain')
graph.subgraph(row_subgraph)
for i in range(row_count):
rank_subgraph = Digraph()
rank_subgraph.attr(rank='same')
for e in central_elements[i::row_count]:
rank_subgraph.node(F"{i+1}")
rank_subgraph.node(e.id)
graph.subgraph(rank_subgraph)
def build_diagram(elements, interactions):
elements = list(elements) # to be able to iterate more than once.
dot = Digraph(format='svg')
dot.attr(rankdir='TB', newrank='false')
organize_elements(dot, elements)
groups = ddict(list)
for e in elements:
if e.group:
groups[e.group].append(e)
else:
add_node(dot, e)
for group, group_elements in groups.items():
# Graphviz requirement: name must start with 'cluster'.
sub = Digraph(name=F"cluster_{group}")
sub.attr(label=group, style='filled', color='lightgrey')
for e in group_elements:
add_node(sub, e)
dot.subgraph(sub)
_interactions = sorted(interactions, key=attrgetter('created'))
for i_index, interaction in enumerate(_interactions):
dot.edge(
interaction.source.id,
interaction.target.id,
label=F" {i_index + 1} ",
decorate='true',
constraint=interaction.laterally
)
# Return the SVG source:
return (
'\n\n<div class="diagram">\n'
F"{dot.pipe().decode('utf-8').strip()}\n"
'</div>'
)
def add_node(graph, element):
# Role defines node shape
shape = {
Role.SERVICE: 'oval',
Role.STORAGE: 'box3d'
}.get(element.role, 'box')
# Set proper background + text contrast
fillcolor, fontcolor = {
Profile.BLACK: ('black', 'white'),
Profile.GREY: ('dimgrey', 'white')
}.get(element.profile, ('white', 'black'))
graph.node(
element.id,
label=element.label,
shape=shape,
style='filled',
color='black',
fontcolor=fontcolor,
fillcolor=fillcolor
)
def build_threats_cell(threats, classification, interaction_table, rowspan=1):
interaction_table.append(F"<td rowspan={rowspan}>")
for t in threats:
risk_level = t.calculate_risk(classification).name.lower()
interaction_table.append((
F'<div class="label threat-label risk-{risk_level}">'
F'<a href="#{id_format(t.id)}">{t.label}</a></div>'
))
for m in t.measures:
if not m.active:
continue
interaction_table.append((
'<div class="label mitigation-label '
F"imperative-{m.imperative.name.lower()} "
F"capability-{m.capability.name.lower()} "
F'status-{m.status.name.lower()}">'
F'<a href="#{id_format(m.id)}">{m.label}</a></div>'
))
interaction_table.append('</td>')
def build_interaction_table(interactions):
interaction_table = list()
headers = ['#', 'Data', 'Data Threats', 'Interaction Threats', 'Notes']
_interactions = sorted(interactions, key=attrgetter('created'))
for i_index, interaction in enumerate(_interactions):
interaction_rowspan = len(interaction.data_threats.values())
interaction_table.append('<tr>')
interaction_table.append((
F'<td rowspan="{interaction_rowspan}">'
'<div class="row-number interaction-number">'
F"{i_index + 1}</div></td>"
))
di = 0
for datum, threats in interaction.data_threats.items():
if di > 0:
interaction_table.append('<tr>')
interaction_table.append((
F'<td><div class="label data-label '
F'classification-{datum.classification.name.lower()}">'
F'<a href="#{id_format(datum.id)}">{datum.label}</a>'
'</div></td>'
))
if not threats:
interaction_table.append('<td><div class="dash">-</div></td>')
else:
build_threats_cell(
threats,
datum.classification,
interaction_table
)
if di == 0:
if not interaction.broad_threats:
interaction_table.append((
F'<td rowspan="{interaction_rowspan}">'
'<div class="dash">-</div></td>'
))
else:
build_threats_cell(
interaction.broad_threats,
interaction.highest_classification,
interaction_table,
rowspan=interaction_rowspan
)
interaction_table.append(
F'<td rowspan="{interaction_rowspan}">'
)
interaction_table.append('<div class="{}">{}</div>'.format(
'interaction-notes' if interaction.notes
else 'dash',
interaction.notes or '-'
))
interaction_table.append('</td>')
interaction_table.append('</tr>')
di += 1
return table_from_list('interaction-table', headers, interaction_table)
| 32.800654 | 78 | 0.565209 | 1,105 | 10,037 | 4.98371 | 0.171041 | 0.042491 | 0.046305 | 0.03087 | 0.284729 | 0.228255 | 0.184311 | 0.153986 | 0.133467 | 0.077719 | 0 | 0.002119 | 0.29461 | 10,037 | 305 | 79 | 32.908197 | 0.775706 | 0.017535 | 0 | 0.229249 | 0 | 0 | 0.209132 | 0.093962 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047431 | false | 0 | 0.019763 | 0.003953 | 0.106719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
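A brief usage sketch for the table helpers above (dfdone is the source package; the sample row HTML is a placeholder):
from dfdone.plot import id_format, table_from_list  # assumes dfdone is installed

print(id_format('My Data-Flow!'))  # -> 'my-data-flow' (punctuation stripped, spaces hyphenated)
rows = ['<tr><td>example</td></tr>']  # placeholder; build_table_rows produces rows like this
print(table_from_list('data-table', ['#', 'Data'], rows))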
d468f2c87e5ec8bf7bc10de9d752f8c5b503e861 | 1,159 | py | Python | asilmedia.py | kamronbek29/asilmedia_scrapper | b94b6a0fc05f22adab8ba18ea466cd8511dfd319 | [
"MIT"
] | null | null | null | asilmedia.py | kamronbek29/asilmedia_scrapper | b94b6a0fc05f22adab8ba18ea466cd8511dfd319 | [
"MIT"
] | null | null | null | asilmedia.py | kamronbek29/asilmedia_scrapper | b94b6a0fc05f22adab8ba18ea466cd8511dfd319 | [
"MIT"
] | null | null | null | import os
import requests
from pyquery import PyQuery as pq
def get_download_url(movie_url):
get_request = requests.get(movie_url)
get_request_str = str(get_request.content, 'utf-8')
pq_obj_items = pq(get_request_str)('div.download-list.d-hidden').eq(0)('div')('a').items()
download_urls = []
for pq_item in pq_obj_items:
if '.mp4' in str(pq_item):
download_url = pq_item('a').attr('href')
download_urls.append(download_url)
best_quality_download_url = download_urls[-1]
download_movie(best_quality_download_url)
def download_movie(download_url):
    # split(maxsplit=1)[1] would raise IndexError on a URL with no whitespace;
    # take the last path segment of the URL as the file name instead
    file_name = str(download_url).rsplit('/', maxsplit=1)[-1]
    file_dir = 'videos/{}'.format(file_name)
if not os.path.exists('videos'):
os.mkdir('videos')
get_video = requests.get(download_url, allow_redirects=True)
with open(file_dir, "wb") as file_stream:
video_content = get_video.content
file_stream.write(video_content)
return file_dir
url = 'http://asilmedia.net/11773-tepalikda-ajratish-olim-yaqin-emas-uzbek-tilida-2018-ozbekcha-tarjima-kino-hd.html'
get_download_url(url)
| 28.975 | 117 | 0.701467 | 171 | 1,159 | 4.479532 | 0.450292 | 0.129243 | 0.05483 | 0.046997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01658 | 0.167386 | 1,159 | 39 | 118 | 29.717949 | 0.777202 | 0 | 0 | 0 | 0 | 0.038462 | 0.156169 | 0.022433 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.115385 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d46f429828765f453153f4024780bc7dd3ec8f3f | 1,011 | py | Python | scripts/venv/lib/python2.7/site-packages/cogent/struct/annotation.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 3 | 2015-11-20T08:44:42.000Z | 2016-12-14T01:40:03.000Z | scripts/venv/lib/python2.7/site-packages/cogent/struct/annotation.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 1 | 2017-09-04T14:04:32.000Z | 2020-05-26T19:04:00.000Z | scripts/venv/lib/python2.7/site-packages/cogent/struct/annotation.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | null | null | null | """Contains functions to annotate macromolecular entities."""
from cogent.core.entity import HIERARCHY
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
def xtradata(data, entity):
"""Annotates an entity with data from a ``{full_id:data}`` dictionary. The
``data`` should also be a dictionary.
Arguments:
- data: a dictionary, which is a mapping of full_id's (keys) and data
dictionaries.
- entity: top-level entity, which contains the entities which will hold
the data."""
    for full_id, datum in data.iteritems():
        sub_entity = entity
        strip_full_id = [i for i in full_id if i is not None]
        for short_id in strip_full_id:
            sub_entity = sub_entity[(short_id,)]
        sub_entity.xtra.update(datum)
| 33.7 | 80 | 0.670623 | 133 | 1,011 | 4.75188 | 0.541353 | 0.056962 | 0.031646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015484 | 0.233432 | 1,011 | 29 | 81 | 34.862069 | 0.8 | 0.373887 | 0 | 0 | 0 | 0 | 0.198988 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
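A hedged usage sketch (Python 2, matching the module): xtradata walks entity by each full_id's non-None components, so the key layout below is an assumption about cogent's entity hierarchy:
annotations = {
    ('1ABC', 0, 'A'): {'note': 'chain-level data'},  # hypothetical full_id tuple
}
xtradata(annotations, structure)  # structure: a cogent entity with child access via (short_id,)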
d470ff56ce7d42894f9f9e01bcb618251f87a397 | 3,618 | py | Python | cerebralcortex/core/data_manager/sql/kafka_offsets_handler.py | brinnaebent/CerebralCortex-Kernel | b0daad06df118d27e62e178e123170e8f189065e | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/core/data_manager/sql/kafka_offsets_handler.py | brinnaebent/CerebralCortex-Kernel | b0daad06df118d27e62e178e123170e8f189065e | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/core/data_manager/sql/kafka_offsets_handler.py | brinnaebent/CerebralCortex-Kernel | b0daad06df118d27e62e178e123170e8f189065e | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from typing import List
class KafkaOffsetsHandler:
def store_or_update_Kafka_offset(self, topic_partition: str, offset_start: str, offset_until: str)->bool:
"""
Store or Update kafka topic offsets. Offsets are used to track what messages have been processed.
Args:
            topic_partition (str): the kafka topic partition
offset_start (str): starting of offset
offset_until (str): last processed offset
Raises:
ValueError: All params are required.
Exception: Cannot add/update kafka offsets because ERROR-MESSAGE
Returns:
bool: returns True if offsets are add/updated or throws an exception.
"""
if not topic_partition and not offset_start and not offset_until:
raise ValueError("All params are required.")
try:
qry = "REPLACE INTO " + self.kafkaOffsetsTable + " (topic, topic_partition, offset_start, offset_until) VALUES(%s, %s, %s, %s)"
vals = str(self.study_name), str(topic_partition), str(offset_start), json.dumps(offset_until)
self.execute(qry, vals, commit=True)
return True
except Exception as e:
raise Exception("Cannot add/update kafka offsets because "+str(e))
def get_kafka_offsets(self) -> List[dict]:
"""
Get last stored kafka offsets
Returns:
list[dict]: list of kafka offsets. This method will return empty list if topic does not exist and/or no offset is stored for the topic.
        Examples:
            >>> CC = CerebralCortex("/directory/path/of/configs/")
            >>> CC.get_kafka_offsets()
>>> [{"id","topic", "topic_partition", "offset_start", "offset_until", "offset_update_time"}]
"""
results = []
qry = "SELECT * from " + self.kafkaOffsetsTable + " where topic = %(topic)s order by id DESC"
vals = {'topic': str(self.study_name)}
rows = self.execute(qry, vals)
if rows:
for row in rows:
results.append(row)
return results
else:
return []
| 45.225 | 147 | 0.67717 | 471 | 3,618 | 5.140127 | 0.426752 | 0.027261 | 0.014044 | 0.019 | 0.193309 | 0.125568 | 0.125568 | 0.056175 | 0.056175 | 0.056175 | 0 | 0.002568 | 0.246545 | 3,618 | 79 | 148 | 45.797468 | 0.885547 | 0.612217 | 0 | 0 | 0 | 0.041667 | 0.17893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
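A hedged usage sketch; this class is a mixin, so the host object (the SQL data layer) must already provide execute, kafkaOffsetsTable, and study_name:
sql_data.store_or_update_Kafka_offset(
    topic_partition="0", offset_start="100", offset_until="250")
for offset in sql_data.get_kafka_offsets():
    print(offset["topic_partition"], offset["offset_until"])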
d4736994de9c464bb6f5278b4dbda2f2dd43fd96 | 3,790 | py | Python | Rejection-System/rejection_system.py | willtop/imitation-learning | 2c00f77e4e575e38ef233cc5eac6862e598ec4ac | [
"MIT"
] | null | null | null | Rejection-System/rejection_system.py | willtop/imitation-learning | 2c00f77e4e575e38ef233cc5eac6862e598ec4ac | [
"MIT"
] | null | null | null | Rejection-System/rejection_system.py | willtop/imitation-learning | 2c00f77e4e575e38ef233cc5eac6862e598ec4ac | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
import numpy as np
import rejection_network
class RejectionSystem():
def __init__(self):
self.dir_path = os.path.dirname(os.path.abspath(__file__))
self._train_dir = os.path.join(self.dir_path, "Data/Train/")
self._valid_dir = os.path.join(self.dir_path, "Data/Valid/")
# training setting
self._training_epoches = 100
self._number_of_minibatches = 20
self._rejection_net = rejection_network.Network()
self._initialize_training = True
self._debug = False
def load_data(self):
train_images = np.load(self._train_dir + "train_images.npy")
train_targets = np.load(self._train_dir + "train_targets.npy")
valid_images = np.load(self._valid_dir + "valid_images.npy")
valid_targets = np.load(self._valid_dir + "valid_targets.npy")
return train_images, train_targets, valid_images, valid_targets
def prepare_training_batches(self, inputs, targets):
data_amount = np.shape(targets)[0]
perm = np.arange(data_amount)
np.random.shuffle(perm)
inputs = inputs[perm]
targets = targets[perm]
inputs_batches = np.split(inputs, self._number_of_minibatches)
targets_batches = np.split(targets, self._number_of_minibatches)
return inputs_batches, targets_batches
def train_model(self, train_images, train_targets, valid_images, valid_targets):
TFgraph, images_placeholder, targets_placeholder, whether_training_placeholder, safety_scores, loss, train_step = self._rejection_net.build_rejection_network()
model_loc = self._rejection_net.model_loc
with TFgraph.as_default():
with tf.Session() as sess:
saver = tf.train.Saver()
if(self._initialize_training):
print("Initialize parameters and train from scratch...")
sess.run(tf.global_variables_initializer())
else:
print("Resume training on model loaded from {}...".format(model_loc))
saver.restore(sess, model_loc)
for i in range(1, self._training_epoches+1):
train_images_batches, train_targets_batches = self.prepare_training_batches(train_images, train_targets)
train_loss_avg = 0
for j in range(self._number_of_minibatches):
_, train_loss, train_scores = sess.run([train_step, loss, safety_scores], feed_dict={
images_placeholder: train_images_batches[j],
targets_placeholder: train_targets_batches[j],
whether_training_placeholder: True
})
train_loss_avg += train_loss/self._number_of_minibatches
valid_loss, valid_scores = sess.run([loss, safety_scores], feed_dict={
images_placeholder: valid_images,
targets_placeholder: valid_targets,
whether_training_placeholder: False
})
if(self._debug):
print(valid_scores)
print("{}/{} Epoch. Avg CE: Train {} | Valid {}".format(i, self._training_epoches, train_loss_avg, valid_loss))
saver.save(sess, model_loc)
print("Trained model saved at {}!".format(model_loc))
return
if(__name__=="__main__"):
rejection_system = RejectionSystem()
train_images, train_targets, valid_images, valid_targets = rejection_system.load_data()
print("Data Loading Completed!")
rejection_system.train_model(train_images, train_targets, valid_images, valid_targets)
| 48.589744 | 167 | 0.629815 | 428 | 3,790 | 5.196262 | 0.240654 | 0.044514 | 0.026978 | 0.051709 | 0.186151 | 0.186151 | 0.144784 | 0.107914 | 0 | 0 | 0 | 0.003308 | 0.282058 | 3,790 | 77 | 168 | 49.220779 | 0.814039 | 0.004222 | 0 | 0.029851 | 0 | 0 | 0.07266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.059701 | 0 | 0.179104 | 0.089552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
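One detail worth noting: prepare_training_batches relies on np.split, which raises ValueError unless the sample count divides evenly by _number_of_minibatches (20). A quick standalone check:
import numpy as np

x = np.arange(200).reshape(200, 1)
batches = np.split(x, 20)              # fine: 200 % 20 == 0
print(len(batches), batches[0].shape)  # 20 (10, 1)
# np.split(x[:199], 20) would raise ValueError: array split does not result in an equal division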
d473bd2a6111d692be84e1c6bd981d1b8ff3ee2c | 2,370 | py | Python | examples/display_youtube_subs_single_tilechain.py | netmanchris/pylifxtiles | f9a77fe0beaabff4c792032d7778a8ad2815e2bd | [
"Apache-2.0"
] | 6 | 2020-04-27T00:55:47.000Z | 2020-10-11T19:16:38.000Z | examples/display_youtube_subs_single_tilechain.py | netmanchris/pylifxtiles | f9a77fe0beaabff4c792032d7778a8ad2815e2bd | [
"Apache-2.0"
] | null | null | null | examples/display_youtube_subs_single_tilechain.py | netmanchris/pylifxtiles | f9a77fe0beaabff4c792032d7778a8ad2815e2bd | [
"Apache-2.0"
] | null | null | null | import requests
import inflect
#create a file called secrets.py and place your googleAPI key in a var called youtube_api_key DO NOT POSTS THIS TO GITHUB
from lifxlan import *
# from random import randint, betavariate
from time import sleep
from examples.secrets import youtube_api_key
from pylifxtiles import actions
from pylifxtiles import objects
from pylifxtiles.alphanum import nums
from pylifxtiles import colors
channel_name = 'UCQHfJyIROQhDFUOJKVBiLog'
my_tile = 'T1'
def main():
target_tilechain = my_tile
lan = LifxLAN()
tilechain_lights = lan.get_tilechain_lights()
print(len(tilechain_lights))
if len(tilechain_lights) != 0:
for tile in tilechain_lights:
if tile.get_label() == target_tilechain:
print(tile.label)
# if tile.get_label() == 'TEST':
target_tilechain = tile
duration_ms = 1000
try:
# original_colors = reset_tiles(T1)
run = 0
target_color_map = actions.reset_tiles(target_tilechain)
original_colors = [actions.blank_tile()] * 5
objects.draw_youtube(target_tilechain, 0)
        while True:
# T1.set_tile_colors(0,youtube,rapid=True)
subs = get_subs(channel_name, youtube_api_key)
tile = 1
for number in subs:
blank_tile = actions.blank_tile()
print(number)
for led in nums[number]:
target_color_map[tile][led] = (colors.dblue, 65535, colors.fourty, 4900)
target_tilechain.set_tile_colors(tile, target_color_map[tile])
print(tile)
tile += 1
run += 1
print('This is run ' + str(run) + ' with ' + str(subs) + ' subscribers')
            # sleep for 20 minutes before refreshing the subscriber count
sleep(1200)
except KeyboardInterrupt:
print("Done.")
else:
print("No TileChain lights found.")
def get_subs(channel_name, api_key):
num_of_subs = []
data = requests.get(
"https://www.googleapis.com/youtube/v3/channels?part=statistics&id=" + channel_name + "&key=" + api_key)
subs = data.json()['items'][0]['statistics']['subscriberCount']
for i in subs:
p = inflect.engine()
num_of_subs.append(p.number_to_words(int(i)))
return num_of_subs
if __name__ == "__main__":
main() | 32.465753 | 121 | 0.627426 | 295 | 2,370 | 4.820339 | 0.4 | 0.063291 | 0.027426 | 0.019691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018681 | 0.277215 | 2,370 | 73 | 122 | 32.465753 | 0.811442 | 0.118987 | 0 | 0 | 0 | 0 | 0.094095 | 0.011522 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.160714 | 0 | 0.214286 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d47455c84e22f79970850d0a4f527fa6cc12c816 | 6,031 | py | Python | lib/cfnvpn/templates/lambdas/auto_route_populator/app.py | base2Services/cfn-vpn | d26c01eb675cd47b2162aefeb26540a2a5891062 | [
"MIT"
] | 1 | 2019-10-17T02:36:16.000Z | 2019-10-17T02:36:16.000Z | lib/cfnvpn/templates/lambdas/auto_route_populator/app.py | base2Services/cfn-vpn | d26c01eb675cd47b2162aefeb26540a2a5891062 | [
"MIT"
] | 7 | 2019-12-12T00:34:31.000Z | 2022-03-30T03:47:51.000Z | lib/cfnvpn/templates/lambdas/auto_route_populator/app.py | base2Services/cfn-vpn | d26c01eb675cd47b2162aefeb26540a2a5891062 | [
"MIT"
] | 7 | 2019-12-11T22:23:15.000Z | 2021-11-23T03:51:54.000Z | import socket
import boto3
from botocore.exceptions import ClientError
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def delete_route(client, vpn_endpoint, subnet, cidr):
client.delete_client_vpn_route(
ClientVpnEndpointId=vpn_endpoint,
TargetVpcSubnetId=subnet,
DestinationCidrBlock=cidr,
)
def create_route(client, event, cidr):
client.create_client_vpn_route(
ClientVpnEndpointId=event['ClientVpnEndpointId'],
DestinationCidrBlock=cidr,
TargetVpcSubnetId=event['TargetSubnet'],
Description=f"cfnvpn auto generated route for endpoint {event['Record']}. {event['Description']}"
)
def revoke_route_auth(client, event, cidr, group = None):
args = {
'ClientVpnEndpointId': event['ClientVpnEndpointId'],
'TargetNetworkCidr': cidr
}
if group is None:
args['RevokeAllGroups'] = True
else:
args['AccessGroupId'] = group
client.revoke_client_vpn_ingress(**args)
def authorize_route(client, event, cidr, group = None):
args = {
'ClientVpnEndpointId': event['ClientVpnEndpointId'],
'TargetNetworkCidr': cidr,
'Description': f"cfnvpn auto generated authorization for endpoint {event['Record']}. {event['Description']}"
}
if group is None:
args['AuthorizeAllGroups'] = True
else:
args['AccessGroupId'] = group
client.authorize_client_vpn_ingress(**args)
def get_routes(client, event):
response = client.describe_client_vpn_routes(
ClientVpnEndpointId=event['ClientVpnEndpointId'],
Filters=[
{
'Name': 'origin',
'Values': ['add-route']
}
]
)
routes = [route for route in response['Routes'] if event['Record'] in route['Description']]
logger.info(f"found {len(routes)} exisiting routes for {event['Record']}")
return routes
def get_rules(client, vpn_endpoint, cidr):
response = client.describe_client_vpn_authorization_rules(
ClientVpnEndpointId=vpn_endpoint,
Filters=[
{
'Name': 'destination-cidr',
'Values': [cidr]
}
]
)
return response['AuthorizationRules']
def handler(event,context):
    # Resolve the DNS record and return all IPs for the endpoint
try:
cidrs = [ ip + "/32" for ip in socket.gethostbyname_ex(event['Record'])[-1]]
logger.info(f"resolved endpoint {event['Record']} to {cidrs}")
except socket.gaierror as e:
logger.exception(f"failed to resolve record {event['Record']}")
return 'KO'
client = boto3.client('ec2')
routes = get_routes(client, event)
for cidr in cidrs:
route = next((route for route in routes if route['DestinationCidr'] == cidr), None)
# if there are no existing routes for the endpoint cidr create a new route
if route is None:
try:
create_route(client, event, cidr)
if 'Groups' in event:
for group in event['Groups']:
authorize_route(client, event, cidr, group)
else:
authorize_route(client, event, cidr)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnDuplicateRoute':
logger.error(f"route for CIDR {cidr} already exists with a different endpoint")
continue
raise e
# if the route already exists
else:
logger.info(f"route for cidr {cidr} is already in place")
# if the target subnet has changed in the payload, recreate the routes to use the new subnet
if route['TargetSubnet'] != event['TargetSubnet']:
logger.info(f"target subnet for route for {cidr} has changed, recreating the route")
delete_route(client, event['ClientVpnEndpointId'], route['TargetSubnet'], cidr)
create_route(client, event, cidr)
logger.info(f"checking authorization rules for the route")
# check the rules match the payload
rules = get_rules(client, event['ClientVpnEndpointId'], cidr)
existing_groups = [rule['GroupId'] for rule in rules]
if 'Groups' in event:
# remove expired rules not defined in the payload anymore
expired_rules = [rule for rule in rules if rule['GroupId'] not in event['Groups']]
for rule in expired_rules:
logger.info(f"removing expired authorization rule for group {rule['GroupId']} for route {cidr}")
revoke_route_auth(client, event, cidr, rule['GroupId'])
# add new rules defined in the payload
new_rules = [group for group in event['Groups'] if group not in existing_groups]
for group in new_rules:
logger.info(f"creating new authorization rule for group {rule['GroupId']} for route {cidr}")
authorize_route(client, event, cidr, group)
else:
# if amount of rules for the cidr is greater than 1 when no groups are specified in the payload
# we'll assume that all groups have been removed from the payload so we'll remove all existing rules and add a rule for allow all
if len(rules) > 1:
logger.info(f"creating an allow all rule for route {cidr}")
revoke_route_auth(client, event, cidr)
authorize_route(client, event, cidr)
# clean up any expired routes when the ips for an endpoint change
expired_routes = [route for route in routes if route['DestinationCidr'] not in cidrs]
for route in expired_routes:
logger.info(f"removing expired route {route['DestinationCidr']} for endpoint {event['Record']}")
try:
revoke_route_auth(client, event, route['DestinationCidr'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnEndpointAuthorizationRuleNotFound':
pass
else:
raise e
try:
delete_route(client, event['ClientVpnEndpointId'], route['TargetSubnet'], route['DestinationCidr'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnRouteNotFound':
pass
else:
raise e
return 'OK' | 34.462857 | 138 | 0.664898 | 725 | 6,031 | 5.455172 | 0.213793 | 0.047282 | 0.041719 | 0.040455 | 0.384324 | 0.251833 | 0.199747 | 0.151201 | 0.129456 | 0.076865 | 0 | 0.001729 | 0.232631 | 6,031 | 175 | 139 | 34.462857 | 0.852852 | 0.111259 | 0 | 0.333333 | 0 | 0 | 0.280187 | 0.033271 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054264 | false | 0.015504 | 0.031008 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d47d005c02515ac759e7040407be536af13a0a86 | 1,392 | py | Python | full-problems/topKNumbers.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
] | null | null | null | full-problems/topKNumbers.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
] | null | null | null | full-problems/topKNumbers.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/top-k-numbers/0
def sol(arr, n, k):
    f = {0: 0}  # seed frequency 0 for the placeholder value that fills rl below
    rl = [0]*(k+1)
    # Initialise a list of k+1 elements; the extra slot at the end acts as
    # scratch space so we don't overwrite an existing result or subresult
for x in arr:
f[x] = f[x] + 1 if x in f else 1
rl[k] = x
        # Store the newest element in the last slot, i.e. the position with
        # the lowest frequency
        i = rl.index(x)
        i -= 1
        # Find the position where the element first occurs, so as to adjust
        # only the elements preceding it; the elements after it stay unchanged
while i >= 0:
if f[rl[i]] < f[rl[i+1]]:
rl[i], rl[i+1] = rl[i+1], rl[i]
# If the element to the left has smaller frequency, swap it
elif f[rl[i]] == f[rl[i+1]] and rl[i] > rl[i+1]:
rl[i], rl[i+1] = rl[i+1], rl[i]
                # If the number to the left has the same frequency but a
                # greater value, swap them
else:
break
# No point going further to the left
i-=1
for r in rl[:k]:
if not r:
continue
print(r, end=" ")
# Print the results as asked in the question
print() | 33.95122 | 78 | 0.514368 | 217 | 1,392 | 3.299539 | 0.442396 | 0.058659 | 0.039106 | 0.050279 | 0.087989 | 0.087989 | 0.087989 | 0.064246 | 0.064246 | 0.064246 | 0 | 0.022248 | 0.386494 | 1,392 | 41 | 79 | 33.95122 | 0.816159 | 0.479167 | 0 | 0.190476 | 0 | 0 | 0.001406 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.047619 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d47da1a3567c0646e5b6d191bc5adb903cf32946 | 2,125 | py | Python | pymeta.py | ustayready/python-pentesting | 9a2e22eebbd7b7869bef43ae5dddd45a20558095 | [
"MIT"
] | 135 | 2020-02-28T23:22:00.000Z | 2022-03-29T03:48:31.000Z | pymeta.py | treebuilder/python-pentesting | 9a2e22eebbd7b7869bef43ae5dddd45a20558095 | [
"MIT"
] | null | null | null | pymeta.py | treebuilder/python-pentesting | 9a2e22eebbd7b7869bef43ae5dddd45a20558095 | [
"MIT"
] | 45 | 2020-03-01T04:12:08.000Z | 2022-02-02T22:43:15.000Z | import os
import re
import argparse
import zipfile
import PyPDF2
from lxml import etree as ET
class PyMetaExtractor():
ext = ['docx', 'xlsx', 'pptx', 'pdf']
rexp = re.compile(r'.+\.({})$'.format('|'.join(ext)))
def __init__(self, directory):
self.directory = os.path.abspath(directory)
print("[*] Starting to search from: [{}]".format(self.directory))
return
def run(self):
for cwd, lod, lof in os.walk(self.directory):
for f in lof:
m = self.rexp.match(f)
if m:
fullpath = os.path.join(cwd, f)
try:
print('[*] {}'.format(fullpath))
if m.group(1) == 'pdf':
self.pdf(fullpath)
else:
self.openxml(fullpath)
print('')
                    except Exception:
continue
def openxml(self, pathname):
zf = zipfile.ZipFile(pathname, 'r')
docprops = ET.fromstring(zf.read('docProps/core.xml'))
for meta in docprops.findall('*'):
if meta.tag[0] == '{':
tag = meta.tag.split('}')[1].title()
else:
tag = meta.tag.title()
value = meta.text
print(' [+] {:15s} => {}'.format(tag, value))
def pdf(self, pathname):
reader = PyPDF2.PdfFileReader(pathname)
meta = reader.getDocumentInfo()
for key in meta:
tag = key.lstrip('/')
value = meta[key]
print(' [+] {:15s} => {}'.format(tag, value))
if __name__ == '__main__':
print('''
_______________________________________
PyMeta version 1.0
Author: Joff Thyer (c) 2020
Black Hills Information Security
_______________________________________
''')
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='starting directory')
args = parser.parse_args()
pm = PyMetaExtractor(args.directory)
pm.run() | 32.19697 | 74 | 0.499765 | 205 | 2,125 | 4.731707 | 0.463415 | 0.053608 | 0.020619 | 0.035052 | 0.045361 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011161 | 0.367529 | 2,125 | 66 | 75 | 32.19697 | 0.710565 | 0 | 0 | 0.103448 | 0 | 0 | 0.163998 | 0.037846 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.241379 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d482268b3a67acf7c98e550f439531564a00f5c4 | 6,497 | py | Python | tests/test_operation_filter.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | tests/test_operation_filter.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | tests/test_operation_filter.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | import pytest
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
from jessiql import QueryObjectDict
from jessiql.sainfo.version import SA_14
from jessiql.testing.insert import insert
from jessiql.testing.recreate_tables import created_tables
from jessiql.util import sacompat
from .util.models import IdManyFieldsMixin, id_manyfields
from .util.test_queries import typical_test_sql_query_text, typical_test_query_results, typical_test_query_text_and_results
@pytest.mark.parametrize(('query_object', 'expected_query_lines',), [
# Empty
(dict(filter=None), []),
(dict(filter={}), []),
# Shortcut equality
(dict(filter={'a': 1}), ["WHERE a.a = 1"]),
# Scalar Operators
(dict(filter={'a': {'$eq': 1}}), ["WHERE a.a = 1"]),
(dict(filter={'a': {'$ne': 1}}), ["WHERE a.a IS DISTINCT FROM 1"]),
(dict(filter={'a': {'$lt': 1}}), ["WHERE a.a < 1"]),
(dict(filter={'a': {'$lte': 1}}), ["WHERE a.a <= 1"]),
(dict(filter={'a': {'$gte': 1}}), ["WHERE a.a >= 1"]),
(dict(filter={'a': {'$gt': 1}}), ["WHERE a.a > 1"]),
(dict(filter={'a': {'$prefix': 'ex-'}}), ["WHERE (a.a LIKE ex- || '%')"]),
(dict(filter={'a': {'$in': (1, 2, 3)}}), ["WHERE a.a IN ([POSTCOMPILE_a_1])" if SA_14 else
"WHERE a.a IN (1, 2, 3)"]),
(dict(filter={'a': {'$nin': (1, 2, 3)}}), ["WHERE (a.a NOT IN ([POSTCOMPILE_a_1]))" if SA_14 else
"WHERE a.a NOT IN (1, 2, 3)"]),
(dict(filter={'a': {'$exists': 0}}), ["WHERE a.a IS NULL"]),
(dict(filter={'a': {'$exists': 1}}), ["WHERE a.a IS NOT NULL"]),
# Multiple scalar comparisons
(dict(filter={'a': 1, 'b': 2}), ["WHERE a.a = 1 AND a.b = 2"]),
(dict(filter={'a': {'$gt': 1, '$ne': 10}}), ["WHERE a.a > 1 AND a.a IS DISTINCT FROM 10"]),
# Array operators, scalar operand
(dict(filter={'tags': {'$eq': 'a'}}), ["WHERE a = ANY (a.tags)"]),
(dict(filter={'tags': {'$ne': 'a'}}), ["WHERE a != ALL (a.tags)"]),
(dict(filter={'tags': {'$exists': 1}}), ["WHERE a.tags IS NOT NULL"]),
(dict(filter={'tags': {'$size': 0}}), ["WHERE array_length(a.tags, 1) IS NULL"]),
(dict(filter={'tags': {'$size': 1}}), ["WHERE array_length(a.tags, 1) = 1"]),
# Array operators, scalar operand
(dict(filter={'tags': {'$eq': ['a', 'b', 'c']}}), ["WHERE a.tags = CAST(ARRAY[a, b, c] AS VARCHAR[])"]),
(dict(filter={'tags': {'$ne': ['a', 'b', 'c']}}), ["WHERE a.tags != CAST(ARRAY[a, b, c] AS VARCHAR[])"]),
(dict(filter={'tags': {'$in': ['a', 'b', 'c']}}), ["WHERE a.tags && CAST(ARRAY[a, b, c] AS VARCHAR[])"]),
(dict(filter={'tags': {'$nin': ['a', 'b', 'c']}}), ["WHERE NOT a.tags && CAST(ARRAY[a, b, c] AS VARCHAR[])"]),
(dict(filter={'tags': {'$all': ['a', 'b', 'c']}}), ["WHERE a.tags @> CAST(ARRAY[a, b, c] AS VARCHAR[])"]),
# Comparison with a JSON value
# It is important to cast it to a correct value
(dict(filter={'j.user.name': 'kolypto'}), ["WHERE CAST((a.j #>> ('user', 'name')) AS TEXT) = kolypto"]),
(dict(filter={'j.user.name': 10}), ["WHERE CAST((a.j #>> ('user', 'name')) AS INTEGER) = 10"]),
(dict(filter={'j.user.name': True}), ["WHERE CAST((a.j #>> ('user', 'name')) AS BOOLEAN) = true"]),
(dict(filter={'j.user.name': None}), ["WHERE CAST((a.j #>> ('user', 'name')) AS TEXT) IS NULL"]),
])
def test_filter_sql(connection: sa.engine.Connection, query_object: QueryObjectDict, expected_query_lines: list[str]):
""" Typical test: what SQL is generated """
# Models
Base = sacompat.declarative_base()
class Model(IdManyFieldsMixin, Base):
__tablename__ = 'a'
# This Postgres-specific implementation has .contains() and .overlaps() implementations
tags = sa.Column(pg.ARRAY(sa.String))
# Test
typical_test_sql_query_text(query_object, Model, expected_query_lines)
@pytest.mark.parametrize(('query_object', 'expected_results'), [
# Empty input
(dict(), [{'id': n} for n in (1, 2, 3)]),
# Filter by column
(dict(filter={'a': 'not-found'}), []),
(dict(filter={'a': 'm-1-a'}), [{'id': 1}]),
# Filter by JSON value
(dict(filter={'j.m': '1-j'}), [{'id': 1}]),
])
def test_filter_results(connection: sa.engine.Connection, query_object: QueryObjectDict, expected_results: list[dict]):
""" Typical test: real data, real query, real results """
# Models
Base = sacompat.declarative_base()
class Model(IdManyFieldsMixin, Base):
__tablename__ = 'a'
# Data
with created_tables(connection, Base):
# Insert some rows
insert(connection, Model, [
id_manyfields('m', 1),
id_manyfields('m', 2),
id_manyfields('m', 3),
])
# Test
typical_test_query_results(connection, query_object, Model, expected_results)
@pytest.mark.parametrize(('query_object', 'expected_query_lines', 'expected_results'), [
# Simple filter: column equality
(dict(select=[{'articles': dict(filter={'id': 3})}]), [
'FROM u',
'FROM a',
# joined query includes: filter condition AND join condition
'WHERE a.user_id IN ([POSTCOMPILE_primary_keys]) AND a.id = 3' if SA_14 else
'WHERE a.user_id IN ([EXPANDING_primary_keys]) AND a.id = 3'
], [
{'id': 1, 'articles': [
{'id': 3, 'user_id': 1},
# no more rows
]}
]),
])
def test_joined_filter(connection: sa.engine.Connection, query_object: QueryObjectDict, expected_query_lines: list[str], expected_results: list[dict]):
""" Typical test: JOINs, SQL and results """
# Models
Base = sacompat.declarative_base()
class User(IdManyFieldsMixin, Base):
__tablename__ = 'u'
articles = sa.orm.relationship('Article', back_populates='author')
class Article(IdManyFieldsMixin, Base):
__tablename__ = 'a'
user_id = sa.Column(sa.ForeignKey(User.id))
author = sa.orm.relationship(User, back_populates='articles')
# Data
with created_tables(connection, Base):
# Insert some rows
insert(connection, User, [
id_manyfields('u', 1),
])
insert(connection, Article, [
id_manyfields('a', 1, user_id=1),
id_manyfields('a', 2, user_id=1),
id_manyfields('a', 3, user_id=1),
])
# Test
typical_test_query_text_and_results(connection, query_object, User, expected_query_lines, expected_results)
| 44.197279 | 151 | 0.579806 | 868 | 6,497 | 4.210829 | 0.176267 | 0.093023 | 0.048153 | 0.01751 | 0.533516 | 0.435021 | 0.343912 | 0.309439 | 0.251436 | 0.196443 | 0 | 0.015799 | 0.210867 | 6,497 | 146 | 152 | 44.5 | 0.697094 | 0.100508 | 0 | 0.163265 | 0 | 0 | 0.260472 | 0.009309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030612 | false | 0 | 0.102041 | 0 | 0.255102 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
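Operators on the same column combine with AND, as the multi-comparison cases show; a sketch of one more combination built on the same helper (the expected text extrapolates from the rows above, so treat it as an assumption):
def test_filter_sql_range_sketch(connection: sa.engine.Connection):
    Base = sacompat.declarative_base()

    class Model(IdManyFieldsMixin, Base):
        __tablename__ = 'a'

    typical_test_sql_query_text(
        dict(filter={'a': {'$gte': 1, '$lt': 10}}),
        Model,
        ["WHERE a.a >= 1 AND a.a < 10"],
    )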
d484862ba3e2f33977b9dfb27a2a6296e1c0eb7b | 1,051 | py | Python | setup.py | dykesk/gaussian-wake | d06509af9740a25e9e5be459fdc3a3644fdf609d | [
"Apache-2.0"
] | 3 | 2017-10-21T15:32:17.000Z | 2021-11-23T04:44:11.000Z | setup.py | dykesk/gaussian-wake | d06509af9740a25e9e5be459fdc3a3644fdf609d | [
"Apache-2.0"
] | 3 | 2017-08-01T20:04:06.000Z | 2019-06-24T18:21:38.000Z | setup.py | dykesk/gaussian-wake | d06509af9740a25e9e5be459fdc3a3644fdf609d | [
"Apache-2.0"
] | 3 | 2019-07-01T19:03:06.000Z | 2020-02-23T10:40:17.000Z | #!/usr/bin/env python
# encoding: utf-8
from numpy.distutils.core import setup, Extension
module1 = Extension('_porteagel_fortran', sources=['src/gaussianwake/gaussianwake.f90',
'src/gaussianwake/gaussianwake_bv.f90',
'src/gaussianwake/gaussianwake_dv.f90',
'src/gaussianwake/adStack.c',
'src/gaussianwake/adBuffer.f'],
# 'src/gaussianwake/lib_array.f90'],
extra_compile_args=['-O2', '-c'])
setup(
name='GaussianWake',
version='0.0.1',
description='Gaussian wake model published by Bastankhah and Porte Agel 2016',
install_requires=['openmdao>=1.7.3'],
package_dir={'': 'src'},
ext_modules=[module1],
dependency_links=['http://github.com/OpenMDAO/OpenMDAO.git@master'],
packages=['gaussianwake'],
license='Apache License, Version 2.0',
) | 43.791667 | 90 | 0.531874 | 98 | 1,051 | 5.591837 | 0.693878 | 0.164234 | 0.14781 | 0.109489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034682 | 0.341579 | 1,051 | 24 | 91 | 43.791667 | 0.757225 | 0.067555 | 0 | 0 | 0 | 0 | 0.372188 | 0.161554 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d48923ca32ecdfba04756e192b86f66124d82a4a | 1,473 | py | Python | src/datalaunch_server/backend/run.py | mfaafm/datalaunch-server | 0518b786378e8a2bc8808adbd91ae41f3f72d70d | [
"MIT"
] | null | null | null | src/datalaunch_server/backend/run.py | mfaafm/datalaunch-server | 0518b786378e8a2bc8808adbd91ae41f3f72d70d | [
"MIT"
] | null | null | null | src/datalaunch_server/backend/run.py | mfaafm/datalaunch-server | 0518b786378e8a2bc8808adbd91ae41f3f72d70d | [
"MIT"
] | null | null | null | import uuid
import threading
from datetime import datetime
from .execution import RunExecution
class RunBackend(object):
def __init__(self, workspace):
self.workspace = workspace
self.db = workspace.db
self.storage = workspace.storage
def create_run(self, specification):
run_id = str(uuid.uuid4())
run = {
"run_id": run_id,
"status": "created",
"created": datetime.utcnow(),
"specification": specification,
}
self.db.create_run(run)
run_execution = RunExecution(self.workspace, run_id)
run_execution_thread = threading.Thread(
target=run_execution.run, name=f"RunExecution {run_id}"
)
run_execution_thread.start()
return run
def terminate_run(self, run_id):
run = self.db.get_run(run_id)
if run["status"] == "terminated" or run["status"] == "run finished":
return
run["status"] = "terminated"
run["terminated"] = datetime.now()
self.db.update_run(run)
def delete_run(self, run_id):
self.terminate_run(run_id)
self.db.delete_run(run_id)
self.storage.delete_logs(run_id)
self.storage.delete_code(run_id)
def get_run(self, run_id):
return self.db.get_run(run_id)
def get_run_ids(self):
return self.db.get_run_ids()
def get_all_runs(self):
return self.db.get_all_runs()
| 25.842105 | 76 | 0.614392 | 183 | 1,473 | 4.704918 | 0.245902 | 0.081301 | 0.046458 | 0.041812 | 0.225319 | 0.039489 | 0 | 0 | 0 | 0 | 0 | 0.000939 | 0.276986 | 1,473 | 56 | 77 | 26.303571 | 0.807512 | 0 | 0 | 0 | 0 | 0 | 0.081466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.095238 | 0.071429 | 0.404762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
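A hedged usage sketch; workspace is an injected dependency exposing db and storage, and the specification payload shape is an assumption:
backend = RunBackend(workspace)
run = backend.create_run({"command": "python train.py"})  # spawns a RunExecution thread
print(run["run_id"], run["status"])                       # "<uuid> created"
backend.terminate_run(run["run_id"])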
00f66e72c3e4bcc933d8c4833a291f52c75faf20 | 440 | py | Python | mindhome_alpha/erpnext/www/lms/index.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/www/lms/index.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/www/lms/index.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import erpnext.education.utils as utils
import frappe
no_cache = 1
def get_context(context):
context.education_settings = frappe.get_single("Education Settings")
if not context.education_settings.enable_lms:
frappe.local.flags.redirect_location = '/'
raise frappe.Redirect
context.featured_programs = get_featured_programs()
def get_featured_programs():
return utils.get_portal_programs() | 27.5 | 69 | 0.820455 | 58 | 440 | 5.896552 | 0.534483 | 0.149123 | 0.140351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002532 | 0.102273 | 440 | 16 | 70 | 27.5 | 0.863291 | 0 | 0 | 0 | 0 | 0 | 0.043084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0.083333 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
00f6d7849b57443a7fcfc0dd2b15cbcd9d92e769 | 1,748 | py | Python | src/800_predict_with_lightgbm.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | src/800_predict_with_lightgbm.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | src/800_predict_with_lightgbm.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | """Predict labels with lightgbm models"""
import os
import sys
import json
import pandas as pd
import lightgbm as lgb
from pathlib import Path
import competition as cc
from common import stop_watch, predict_chunk
# For macOS: avoid the abort when multiple libraries bundle the OpenMP runtime
os.environ['KMP_DUPLICATE_LIB_OK'] = "True"
@stop_watch
def predict_with_lightgbm():
model_directory_path = cc.MODEL_PATH / sys.argv[1]
model_path_list = sorted(list(model_directory_path.glob("*.model")))
config_file = list(cc.CONFIG_PATH.glob(sys.argv[1] + "*.json"))[0]
with config_file.open() as f:
params = json.load(f)
params = params["Predict"]
if params["Version"] != cc.PREF:
assert False
preds = None
predict_df = None
test_csv_path = Path(cc.VALIDATION_PATH / sys.argv[1] / "test.csv")
test_X = pd.read_csv(test_csv_path)
test_X.reset_index(inplace=True)
for fold, model_path in enumerate(model_path_list):
print("=== [Predict] fold{} starts!! ===".format(fold))
model = lgb.Booster(model_file=str(model_path))
if predict_df is None:
predict_df = test_X["index"]
test_X = test_X.set_index("index")
if preds is None:
preds = predict_chunk(model, test_X) / len(model_path_list)
else:
preds += predict_chunk(model, test_X) / len(model_path_list)
sample_df = pd.read_csv(cc.SAMPLE_SUBMISSION_CSV_PATH)
predict_df = pd.DataFrame(predict_df)
predict_df["seg_id"] = sample_df["seg_id"]
predict_df["time_to_failure"] = preds
del predict_df["index"]
Path.mkdir(cc.SUBMIT_PATH, exist_ok=True, parents=True)
predict_df.to_csv(cc.SUBMIT_PATH / "{}.csv".format(sys.argv[1]), index=False)
if __name__ == "__main__":
predict_with_lightgbm()
| 34.27451 | 81 | 0.680206 | 260 | 1,748 | 4.276923 | 0.338462 | 0.072842 | 0.028777 | 0.021583 | 0.077338 | 0.077338 | 0.077338 | 0.077338 | 0.077338 | 0.077338 | 0 | 0.003556 | 0.195652 | 1,748 | 50 | 82 | 34.96 | 0.78734 | 0.025172 | 0 | 0 | 0 | 0 | 0.087161 | 0 | 0 | 0 | 0 | 0 | 0.023256 | 1 | 0.023256 | false | 0 | 0.186047 | 0 | 0.209302 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
00f7909565a967fdf18011834d450c60108175a8 | 1,782 | py | Python | archive/urls.py | radon-provenance/radon-web | 83f5b46f57f157d4ac4c7f2d8ec4c955cc512b5a | [
"Apache-2.0"
] | null | null | null | archive/urls.py | radon-provenance/radon-web | 83f5b46f57f157d4ac4c7f2d8ec4c955cc512b5a | [
"Apache-2.0"
] | 5 | 2020-06-09T09:28:07.000Z | 2020-06-12T13:36:52.000Z | archive/urls.py | radon-provenance/radon-web | 83f5b46f57f157d4ac4c7f2d8ec4c955cc512b5a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import path
#
from archive.views import (
delete_collection,
delete_resource,
download,
edit_collection,
edit_resource,
home,
new_collection,
new_resource,
preview,
search,
view_collection,
view_resource,
)
app_name = "archive"
urlpatterns = [
path("", home, name="home"),
path("search", search, name="search"),
path("resource<path:path>", view_resource, name="resource_view"),
path("resource", view_resource, name="resource_view"),
path("new/collection<path:parent>", new_collection, name="new_collection"),
path("edit/collection<path:path>", edit_collection, name="edit_collection"),
path("delete/collection<path:path>", delete_collection, name="delete_collection"),
path("new/resource<path:parent>", new_resource, name="new_resource"),
path("edit/resource<path:path>", edit_resource, name="edit_resource"),
path("delete/resource<path:path>", delete_resource, name="delete_resource"),
path("view<path:path>", view_collection, name="view"),
path("view", view_collection, name="view"),
path("download<path:path>", download, name="download"),
path("preview<path:path>", preview, name="preview"),
]
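# A note on the routing style above: each pattern glues the <path:path>
# converter straight onto a literal prefix (no separating slash), so the
# captured value keeps its leading "/". A hypothetical reverse() sketch,
# assuming this URLconf is mounted at the site root:
#
#     from django.urls import reverse
#
#     url = reverse("archive:resource_view", kwargs={"path": "/coll1/file.txt"})
#     # -> "/resource/coll1/file.txt"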
| 34.941176 | 86 | 0.714366 | 234 | 1,782 | 5.324786 | 0.350427 | 0.051364 | 0.038523 | 0.025682 | 0.093098 | 0.051364 | 0 | 0 | 0 | 0 | 0 | 0.005302 | 0.153199 | 1,782 | 50 | 87 | 35.64 | 0.820411 | 0.298541 | 0 | 0 | 0 | 0 | 0.321718 | 0.126418 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
00f9ebf2f0b587c3e9e4c70a58e0e5c0b2107dc9 | 2,528 | py | Python | scripts.python3/recover_howde_tree.py | rsharris/HowDeSBT-multi_make_bf | 4f45e27a9b70a8c470f80ede086c58c2683774f9 | [
"MIT"
] | null | null | null | scripts.python3/recover_howde_tree.py | rsharris/HowDeSBT-multi_make_bf | 4f45e27a9b70a8c470f80ede086c58c2683774f9 | [
"MIT"
] | null | null | null | scripts.python3/recover_howde_tree.py | rsharris/HowDeSBT-multi_make_bf | 4f45e27a9b70a8c470f80ede086c58c2683774f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Recover the tree relationship from a howde tree hierarchy file.
"""
from sys import argv,stdin,stdout,stderr,exit
from howde_tree_parse import read_howde_tree_file
def usage(s=None):
message = """
usage: cat howde_tree_file | recover_howde_tree [options]
--show=preorder list the tree in pre-order
(this is the default)
--show=postorder list the tree in post-order
--show=leafgroups list all leaf groups
--show=height for each node, list max distance to a leaf, and number
of descendants
--show:subtree=<node> create a listing file for a node and its descendants
--filespec=<spec> spec describing how node names are to be output; for
example /usr/nwu253/howdesbt/compressed/{name}.rrr.bf"""
if (s == None): exit (message)
else: exit ("%s\n%s" % (s,message))
def main():
# parse the command line
showWhat = "pre order"
fileSpec = None
for arg in argv[1:]:
if ("=" in arg):
argVal = arg.split("=",1)[1]
if (arg in ["--show=preorder","--show=pre"]):
showWhat = "pre order"
elif (arg in ["--show=postorder","--show=post"]):
showWhat = "post order"
elif (arg == "--show=leafgroups"):
showWhat = "leaf groups"
elif (arg == "--show=height"):
showWhat = "height etc"
elif (arg.startswith("--show:subtree=")) or (arg.startswith("--subtree=")):
showWhat = "subtree"
nodeName = argVal
elif (arg.startswith("--filespec=")):
if ("{name}" not in argVal):
usage("filespec MUST contain {name}\n(in \"%s\"" % arg)
fileSpec = argVal
elif (arg.startswith("--")):
usage("unrecognized option: %s" % arg)
else:
usage("unrecognized option: %s" % arg)
# process the tree
forest = read_howde_tree_file(stdin)
assert (len(forest) != 0), "input has no tree"
for tree in forest:
if (showWhat == "pre order"):
tree.list_pre_order()
elif (showWhat == "post order"):
tree.list_post_order()
elif (showWhat == "leaf groups"):
tree.list_leaf_groups()
elif (showWhat == "height etc"):
tree.compute_height_etc()
tree.list_height_etc()
elif (showWhat == "subtree"):
nameToNode = tree.build_dict()
assert (nodeName in nameToNode), \
"unknown node: \"%s\"" % nodeName
subtree = nameToNode[nodeName]
subtree.list_pre_order(fileSpec=fileSpec)
else:
assert (False), \
"internal error: unknown operation \"%s\"" % showWhat
if __name__ == "__main__": main()
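# For context on the listings above: pre-order emits a node before its
# subtrees, post-order emits it after them. A hypothetical sketch of both on
# a generic node type (the `name`/`children` attributes are assumptions, not
# the actual howde_tree_parse API):
def list_pre_order_sketch(node,depth=0):
	print("*"*depth + node.name)   # node first ...
	for child in node.children:
		list_pre_order_sketch(child,depth+1)   # ... then its subtrees

def list_post_order_sketch(node,depth=0):
	for child in node.children:
		list_post_order_sketch(child,depth+1)  # subtrees first ...
	print("*"*depth + node.name)   # ... then the node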
| 29.395349 | 81 | 0.627373 | 330 | 2,528 | 4.706061 | 0.339394 | 0.034771 | 0.025113 | 0.021893 | 0.034771 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004082 | 0.224684 | 2,528 | 85 | 82 | 29.741176 | 0.788265 | 0.049446 | 0 | 0.095238 | 0 | 0 | 0.419557 | 0.02758 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.031746 | false | 0 | 0.031746 | 0 | 0.063492 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
00fdc14d8d6651a586f8e493f4b200f6fad1f8e4 | 2,641 | py | Python | feed.py | UphillD/Twitter-Feed | 9d48534f70a7522c0e06c2e0c51dd3b476eacbed | [
"MIT"
] | null | null | null | feed.py | UphillD/Twitter-Feed | 9d48534f70a7522c0e06c2e0c51dd3b476eacbed | [
"MIT"
] | null | null | null | feed.py | UphillD/Twitter-Feed | 9d48534f70a7522c0e06c2e0c51dd3b476eacbed | [
"MIT"
] | null | null | null | import json
import sys
import tkinter
from config import *
from imagefy import *
from twitter import *
# Get old rules
old_rules = get_rules()
print('Old rules received.')
# Delete old rules
delete_response = delete_rules(old_rules)
print('Old rules deleted.')
# Generate new rules
query_rules = generate_rules()
print(str(len(query_rules)) + ' new rules generated.')
# Set new rules
set_response = set_rules(query_rules)
created_rules = str(set_response['meta']['summary']['created'])
print(created_rules + ' new rules set.')
# Initialize the GUI
master, frame, canvas = init_gui()
print('GUI Initialized')
# Start the stream
print()
print('Stream starting...')
print('Pause-Resume with CTRL+C, Exit with ESC')
print()
# Initialize tweet counter
cnt = 0
# Lists to hold image and canvas objects
images = []
canvas_images = []
while(True):
response = requests.get(url_stream, auth=bearer_oauth, params=query_params, stream=True)
if response.status_code != 200:
raise Exception("Cannot get stream (HTTP {}): {}".format(response.status_code, response.text))
try:
for response_line in response.iter_lines():
if response_line:
# Grab tweet
tweet = json.loads(response_line)
# Grab resulting image & priority flag
image = draw_tweet(tweet)
if image == -1: exit()
# Increment & print counters
cnt += 1
print('Tweet received, {} total tweets'.format(cnt))
# Add resulting image object to image list
images.append(image)
# If canvas fits more tweets, resize it
if (int(canvas.cget('height')) < min_tweet_height * max_onscreen_tweets):
frame.config(width=int(canvas.cget('width')), height=int(canvas.cget('height')) + image.height() + omargins[2])
canvas.config(width=int(canvas.cget('width')), height=int(canvas.cget('height')) + image.height() + omargins[2])
# Iterate through all canvas images
for canvas_image in canvas_images:
# Move the previous tweet lower
canvas.move(canvas_image, 0, image.height() + omargins[2])
# If onscreen tweet limit exceeded, delete oldest tweet
if len(canvas_images) > max_tweets:
canvas.delete(canvas_images[0])
canvas_images.pop(0)
images.pop(0)
# Paste new tweet
canvas_images.append(canvas.create_image(omargins[3], omargins[1], anchor=tkinter.NW, image=image))
canvas.update_idletasks()
canvas.update()
# Catch CTRL-C interrupt
except KeyboardInterrupt:
print('CTRL+C detected, stream stopped.')
print('Press CTRL+C again to resume.')
while(True):
try:
canvas.update()
except KeyboardInterrupt:
print('CTRL+C detected, resuming stream..')
print()
break  # leave the pause loop and re-enter the outer streaming loop
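# For reference, the rule helpers imported from the local `twitter` module
# presumably wrap the Twitter API v2 filtered-stream rules endpoint, e.g.
# (a hypothetical sketch, not the actual module, which is not shown here;
# `bearer_oauth` is assumed to attach the bearer token):
#
#     RULES_URL = 'https://api.twitter.com/2/tweets/search/stream/rules'
#
#     def get_rules():
#         response = requests.get(RULES_URL, auth=bearer_oauth)
#         response.raise_for_status()
#         return response.json()
#
#     def delete_rules(rules):
#         ids = [rule['id'] for rule in rules.get('data', [])]
#         payload = {'delete': {'ids': ids}}
#         response = requests.post(RULES_URL, auth=bearer_oauth, json=payload)
#         response.raise_for_status()
#         return response.json()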
| 29.674157 | 117 | 0.706929 | 362 | 2,641 | 5.046961 | 0.370166 | 0.045977 | 0.035577 | 0.031199 | 0.124795 | 0.124795 | 0.081007 | 0.081007 | 0.081007 | 0.081007 | 0 | 0.006834 | 0.168875 | 2,641 | 88 | 118 | 30.011364 | 0.825513 | 0.179477 | 0 | 0.189655 | 0 | 0 | 0.16108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.103448 | 0.241379 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
00ff0ed92617aeee4a59104327ee5016ba50a470 | 3,632 | py | Python | tests/call_error_test.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 1,526 | 2017-09-15T18:49:40.000Z | 2021-01-17T16:04:12.000Z | tests/call_error_test.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 682 | 2017-09-11T17:43:12.000Z | 2021-01-17T05:26:26.000Z | tests/call_error_test.py | msaladna/mitogen | c6824b68181729cb16c090e72f4d35d6c4d95523 | [
"BSD-3-Clause"
] | 111 | 2017-09-15T23:21:37.000Z | 2021-01-01T14:45:35.000Z | import pickle
import sys
import unittest2
import mitogen.core
import testlib
import plain_old_module
class ConstructorTest(testlib.TestCase):
klass = mitogen.core.CallError
def test_string_noargs(self):
e = self.klass('%s%s')
self.assertEquals(e.args[0], '%s%s')
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_string_args(self):
e = self.klass('%s%s', 1, 1)
self.assertEquals(e.args[0], '11')
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_from_exc(self):
ve = plain_old_module.MyError('eek')
e = self.klass(ve)
self.assertEquals(e.args[0], 'plain_old_module.MyError: eek')
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_form_base_exc(self):
ve = SystemExit('eek')
e = self.klass(ve)
cls = ve.__class__
self.assertEquals(e.args[0],
# varies across 2/3.
'%s.%s: eek' % (cls.__module__, cls.__name__))
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_from_exc_tb(self):
try:
raise plain_old_module.MyError('eek')
except plain_old_module.MyError:
ve = sys.exc_info()[1]
e = self.klass(ve)
self.assertTrue(e.args[0].startswith('plain_old_module.MyError: eek'))
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
self.assertTrue('test_from_exc_tb' in e.args[0])
def test_bytestring_conversion(self):
e = self.klass(mitogen.core.b('bytes'))
self.assertEquals(u'bytes', e.args[0])
self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_reduce(self):
e = self.klass('eek')
func, (arg,) = e.__reduce__()
self.assertTrue(func is mitogen.core._unpickle_call_error)
self.assertEquals(arg, e.args[0])
class UnpickleCallErrorTest(testlib.TestCase):
func = staticmethod(mitogen.core._unpickle_call_error)
def test_not_unicode(self):
self.assertRaises(TypeError,
lambda: self.func(mitogen.core.b('bad')))
def test_oversized(self):
self.assertRaises(TypeError,
lambda: self.func(mitogen.core.b('b'*10001)))
def test_reify(self):
e = self.func(u'some error')
self.assertEquals(mitogen.core.CallError, e.__class__)
self.assertEquals(1, len(e.args))
self.assertEquals(mitogen.core.UnicodeType, type(e.args[0]))
self.assertEquals(u'some error', e.args[0])
class PickleTest(testlib.TestCase):
klass = mitogen.core.CallError
def test_string_noargs(self):
e = self.klass('%s%s')
e2 = pickle.loads(pickle.dumps(e))
self.assertEquals(e2.args[0], '%s%s')
def test_string_args(self):
e = self.klass('%s%s', 1, 1)
e2 = pickle.loads(pickle.dumps(e))
self.assertEquals(e2.args[0], '11')
def test_from_exc(self):
ve = plain_old_module.MyError('eek')
e = self.klass(ve)
e2 = pickle.loads(pickle.dumps(e))
self.assertEquals(e2.args[0], 'plain_old_module.MyError: eek')
def test_from_exc_tb(self):
try:
raise plain_old_module.MyError('eek')
except plain_old_module.MyError:
ve = sys.exc_info()[1]
e = self.klass(ve)
e2 = pickle.loads(pickle.dumps(e))
self.assertTrue(e2.args[0].startswith('plain_old_module.MyError: eek'))
self.assertTrue('test_from_exc_tb' in e2.args[0])
if __name__ == '__main__':
unittest2.main()
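# For context: mitogen.core._unpickle_call_error (exercised above) accepts
# only a text payload of bounded size, so hostile pickles cannot smuggle in
# arbitrary objects or huge blobs. A hypothetical helper with the same
# contract (a sketch, not mitogen's actual implementation):
def _unpickle_call_error_sketch(s):
    if not isinstance(s, mitogen.core.UnicodeType) or len(s) > 10000:
        raise TypeError('invalid CallError payload')
    # Bypass __init__, mirroring how unpickling reconstructs the exception.
    e = mitogen.core.CallError.__new__(mitogen.core.CallError)
    e.args = (s,)
    return e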
| 31.582609 | 79 | 0.632434 | 489 | 3,632 | 4.509202 | 0.169734 | 0.047619 | 0.043537 | 0.095238 | 0.678458 | 0.606803 | 0.606803 | 0.573696 | 0.573696 | 0.573696 | 0 | 0.017806 | 0.226872 | 3,632 | 114 | 80 | 31.859649 | 0.76745 | 0.004956 | 0 | 0.476744 | 0 | 0 | 0.068106 | 0.027685 | 0 | 0 | 0 | 0 | 0.302326 | 1 | 0.162791 | false | 0 | 0.069767 | 0 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e076b3f8694910f0ea4b0f6d1af437799ab9b18 | 288 | py | Python | mpsp_main.py | GeoCIA/MPSP | 2ccc8b82d619d52e7248e06999cfd95368608788 | [
"Apache-2.0"
] | null | null | null | mpsp_main.py | GeoCIA/MPSP | 2ccc8b82d619d52e7248e06999cfd95368608788 | [
"Apache-2.0"
] | null | null | null | mpsp_main.py | GeoCIA/MPSP | 2ccc8b82d619d52e7248e06999cfd95368608788 | [
"Apache-2.0"
] | null | null | null | from mpsp import FLIGHT, GROUNDTEST
from mpsp.mpsp import MPSP
import pyb
switch = pyb.Switch()
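# Hold LEDs 1 and 3 on for 4 s as a visible boot indicator.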
pyb.LED(3).on()
pyb.LED(1).on()
pyb.delay(4000)
pyb.LED(3).off()
pyb.LED(1).off()
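# The user switch held at boot selects flight mode; otherwise run the ground test.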
if switch():
mode = FLIGHT
else:
mode = GROUNDTEST
pyb.delay(1000)
m = MPSP(mode)
m.init()
m.run()
| 13.714286 | 35 | 0.666667 | 51 | 288 | 3.764706 | 0.411765 | 0.125 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049383 | 0.15625 | 288 | 20 | 36 | 14.4 | 0.740741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e0b3ccedd271154d067c56ea6b56d4912fc2b1a | 331 | py | Python | hsf_website_helpers/util/repo.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | hsf_website_helpers/util/repo.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | hsf_website_helpers/util/repo.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | from pathlib import Path
def is_website_folder(path: Path):
"""Checks if path likely points at the hsf.github.io repository"""
existing_subfolders = [".git", "_profiles", "_data"]
for es in existing_subfolders:
if not (path / es).is_dir():
print(path, es)
return False
return True
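# Minimal usage sketch (assumption: the module is invoked from a checkout of
# the hsf.github.io repository; the `sys.exit` below is purely illustrative):
if __name__ == "__main__":
    import sys

    if not is_website_folder(Path(".")):
        sys.exit("Not run from the root of an hsf.github.io checkout")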
| 27.583333 | 70 | 0.634441 | 44 | 331 | 4.613636 | 0.727273 | 0.17734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.259819 | 331 | 11 | 71 | 30.090909 | 0.828571 | 0.181269 | 0 | 0 | 0 | 0 | 0.067925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e0f83a5679b19aa17f9519619b150c41b6a8ad9 | 8,529 | py | Python | ml-service/ml-model-dynamic-hosting/main.py | mathieu/decisions-on-ml | b0283851ae0db538c1f424bcba8bcd15d4a603da | [
"Apache-2.0"
] | null | null | null | ml-service/ml-model-dynamic-hosting/main.py | mathieu/decisions-on-ml | b0283851ae0db538c1f424bcba8bcd15d4a603da | [
"Apache-2.0"
] | 1 | 2020-06-04T15:59:04.000Z | 2020-06-04T15:59:04.000Z | ml-service/ml-model-dynamic-hosting/main.py | mathieu/decisions-on-ml | b0283851ae0db538c1f424bcba8bcd15d4a603da | [
"Apache-2.0"
] | 3 | 2020-06-04T16:28:31.000Z | 2021-11-05T17:11:55.000Z | #!flask/bin/python
import os
import uuid
from flask import Flask, jsonify
from flask import request, jsonify
from flask_restplus import Api, Resource, fields
from flask_restplus import reqparse
import pandas as pd
import numpy as np
from joblib import load
import pickle
import json
import requests
#
# Model registering
#
modelDictionary = dict({
'models': [
{
'path': "models/miniloandefault-rfc.joblib",
},
{
'path': "models/miniloandefault-svm.joblib",
},
{
'path': "models/miniloandefault-xgb-c.joblib",
},
{
'path': "models/iris-svc.joblib",
}
]
})
# todo
# Propagate the joblib metadata into the model management dictionary
#
# Flask
#
app = Flask(__name__)
api = Api(app)
ns = api.namespace('automation/api/v1.0/prediction/admin', description='administration')
@ns.route('/is-alive') # Create a URL route to this resource
class HeartBeat(Resource): # Create a RESTful resource
def get(self): # Create GET endpoint
"""Returns an heart beat."""
return {'answer': 'ok'}
@ns.route("/models")
class Model(Resource):
def get(self):
"""Returns the list of ML models."""
return modelDictionary
model_key_descriptor = api.model('ModelKeyDescriptor', {
'name': fields.String(required=True, description="Name of the model", help="Name cannot be blank.",
default='iris-svc'),
'version': fields.String(required=True, description="Version of the model", help="Name cannot be blank.",
default='1.0'),
'format': fields.String(required=True, description="Format of the model", help="Name cannot be blank.",
default='joblib'),
})
model_metadata = api.model('ModelMetadata', {
'name': fields.String(required=True, description="Name of the model", help="Name cannot be blank."),
'version': fields.String(required=True, description="Version of the model", help="Name cannot be blank."),
'format': fields.String(required=True, description="Format of the model", help="Name cannot be blank."),
'author': fields.String(required=True, description="Author of the model", help="Name cannot be blank."),
'metrics': fields.Wildcard(fields.String),
'customProperties': fields.Wildcard(fields.String)
})
model_signature_parameter = api.model('ModelSignatureParameter', {
'name': fields.String(required=True, description="Name of the model", help="Name cannot be blank."),
'order': fields.String(required=True, description="Version of the model", help="Name cannot be blank."),
'type': fields.String(required=True, description="Version of the model", help="Name cannot be blank.")
})
model_signature = api.model('ModelSignature', {
'input': fields.List(fields.Raw(required=True, description="Inputs", help="Name cannot be blank.")),
'output': fields.List(fields.Raw(required=True, description="Outputs", help="Name cannot be blank."))
})
model_schema = api.model('ModelSchema', {
'metadata': fields.Nested(model_metadata),
'signature': fields.Nested(model_signature),
'customProperties': fields.Nested(model_metadata),
})
@ns.route('/model-schema')
class ModelSchema(Resource):
@api.expect(model_key_descriptor)
@api.response(202, 'ML Schema retrieved.', model_schema)
def post(self):
"""Returns the schema of a model."""
json_dictionary = request.json
print(json_dictionary)
# Model
model_name = json_dictionary["name"]
mode_version = json_dictionary["version"]
model_format = json_dictionary["format"]
# Compose the model path
model_path = 'models/' + model_name + '.' + model_format
# Local read
model_dictionary = load(model_path)
# Make a copy and remove the model from it as non serializable into JSON
model_dictionnary_copy = model_dictionary.copy()
del model_dictionnary_copy["model"]
del model_dictionnary_copy["metadata"]["creationDate"]
return model_dictionnary_copy
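# For context: each joblib file under models/ is expected to hold a dict that
# bundles the estimator with its metadata and signature. A hypothetical sketch
# of producing such a bundle (field layout inferred from the reads in this file):
#
#     from joblib import dump
#
#     bundle = {
#         "model": trained_estimator,  # any fitted scikit-learn estimator
#         "metadata": {"name": "iris-svc", "version": "1.0", "format": "joblib",
#                      "author": "someone", "invocation": "predict",
#                      "creationDate": "2019-09-05"},
#         "signature": {"input": [{"name": "sepal_length", "order": "0",
#                                  "type": "float"}],
#                       "output": []},
#     }
#     dump(bundle, "models/iris-svc.joblib")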
ns = api.namespace('automation/api/v1.0/prediction/invocation', description='run ML models')
request_model_descriptor = api.model('ModelDescriptor', {
'name': fields.String(required=True, description="Local path of the model", help="Name cannot be blank."),
'version': fields.String(required=True, description="Version of the model", help="Name cannot be blank."),
'format': fields.String(required=True, description="Format of the model", help="Name cannot be blank.")
})
prediction_request = api.model('PredictionRequest', {
'model': fields.Nested(request_model_descriptor),
'features': fields.Wildcard(fields.String)
})
prediction_response = api.model('PredictionResponse', {
'path': fields.String(required=True, description="Local path of the invoked predictive model",
help="Name cannot be blank."),
'id': fields.String(required=True, description="Uuid of the prediction", help="Name cannot be blank."),
'prediction': fields.String(required=False, description="The prediction", help="Name cannot be blank."),
'probabilities': fields.Wildcard(fields.String)
})
@ns.route('/')
class PredictionService(Resource):
@api.expect(prediction_request)
@api.response(201, 'Category successfully created.', prediction_response)
def post(self):
"""Computes a new prediction."""
try:
json_dictionary = request.json
print(json_dictionary)
# Model
json_model_dictionary = json_dictionary["model"]
model_name = json_model_dictionary["name"]
model_version = json_model_dictionary["version"]
model_format = json_model_dictionary["format"]
# Features
json_payload_dictionary = json_dictionary["features"]
# Compose the model path
model_path = 'models/' + model_name + '.' + 'joblib' # Picking joblib file by default
# Remote read
# response = requests.get('https://github.com/ODMDev/decisions-on-ml/blob/master/docker-python-flask-sklearn-joblist-json/models/miniloandefault-rfc.joblib?raw=true')
# Local read
dictionary = load(model_path)
# Access to the model metadata
metadata_dictionary = dictionary["metadata"]
# Introspect the signature
signature_dictionnary = dictionary["signature"]
signature_parameters = signature_dictionnary["input"]
parameter_values = []
for parameter in signature_parameters:
print(parameter)
name = parameter["name"]
type = parameter["type"]
value = float(json_payload_dictionary[name])
parameter_values.append(value)
# Local read
loaded_model = dictionary['model']
# Invocation
invocation_method = metadata_dictionary["invocation"]
response_dictionary = {
"path": model_path,
"id": str(uuid.uuid4())
}
if invocation_method == 'predict':
predicted_class = loaded_model.predict(
[parameter_values])
# Assume an array of a single element to be cast in int
found_class = predicted_class[0]
response_dictionary['prediction'] = found_class.item() # cast into int
if invocation_method == 'predict_proba':
prediction_wrapper = loaded_model.predict_proba(
[parameter_values])
probabilities = prediction_wrapper[0]
# Needs to be generalized
probability_dictionnary = {
"0": probabilities[0],
"1": probabilities[1]
}
response_dictionary["probabilities"] = probability_dictionnary
## Ok for RFC
predicted_class = np.where(probabilities == np.amax(probabilities))
response_dictionary['prediction'] = str(predicted_class[0][0])
# json_string = json.dumps(responseDictionary, indent=4)
print(response_dictionary)
return response_dictionary
except Exception:
    # Any failure (bad payload, missing model file, ...) yields a generic error.
    return "KO"
if __name__ == '__main__':
# Start a development server
app.run(port=5000, host='0.0.0.0')
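# Illustrative client call for the invocation endpoint above (hypothetical
# feature names; the real ones come from the stored joblib signature):
#
#     import requests
#
#     payload = {
#         "model": {"name": "iris-svc", "version": "1.0", "format": "joblib"},
#         "features": {"sepal_length": "5.1", "sepal_width": "3.5",
#                      "petal_length": "1.4", "petal_width": "0.2"},
#     }
#     r = requests.post(
#         "http://localhost:5000/automation/api/v1.0/prediction/invocation/",
#         json=payload)
#     print(r.json())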
| 35.390041 | 178 | 0.637941 | 929 | 8,529 | 5.733046 | 0.228202 | 0.045062 | 0.047315 | 0.054074 | 0.322005 | 0.292903 | 0.269245 | 0.240706 | 0.201465 | 0.163913 | 0 | 0.004666 | 0.246102 | 8,529 | 240 | 179 | 35.5375 | 0.823639 | 0.109743 | 0 | 0.148387 | 0 | 0 | 0.211964 | 0.02958 | 0.006452 | 0 | 0 | 0.004167 | 0 | 1 | 0.025806 | false | 0 | 0.077419 | 0 | 0.16129 | 0.025806 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e14507604ab4d37b2d654a12f456017f680cf04 | 12,466 | py | Python | src/train_val.py | pzzhang/sasa | e663d7666e85de8e5a7a664a6b37d988008ab007 | [
"MIT"
] | 1 | 2020-01-28T15:22:16.000Z | 2020-01-28T15:22:16.000Z | src/train_val.py | pzzhang/sasa | e663d7666e85de8e5a7a664a6b37d988008ab007 | [
"MIT"
] | null | null | null | src/train_val.py | pzzhang/sasa | e663d7666e85de8e5a7a664a6b37d988008ab007 | [
"MIT"
] | 1 | 2021-06-10T05:04:24.000Z | 2021-06-10T05:04:24.000Z | # Copyright (c) Microsoft. All rights reserved.
import time
import logging
import torch
from rnndata import repackage_hidden, clone_hidden, get_batch
from utils import get_lr_mom, AverageMeter
def compute_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
if type(output) is not torch.Tensor:
# inception v3 model
output = output[0]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
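def _compute_accuracy_self_check():
    # Worked example for compute_accuracy (illustrative; not called anywhere):
    # sample 0 is correct at top-1 while sample 1 is only correct within top-2,
    # so the helper should report precision@1 = 50% and precision@2 = 100%.
    logits = torch.tensor([[2.0, 1.0, 0.1],
                           [0.5, 0.2, 1.5]])
    targets = torch.tensor([0, 0])
    top1, top2 = compute_accuracy(logits, targets, topk=(1, 2))
    assert abs(top1.item() - 50.0) < 1e-6
    assert abs(top2.item() - 100.0) < 1e-6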
def mean_accuracy_multi_binary_label_with_logits(output, target, topk=(40, 13)):
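    """Per-label binary accuracy from logits, averaged over the first k labels for each k in topk."""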
with torch.no_grad():
if type(output) is not torch.Tensor:
# inception v3 model
output = output[0]
target = target.type(torch.int)
acc_all = torch.mean(((output > 0.0) == (target > 0.5)).type(torch.float), dim=0)
res = []
for k in topk:
acc_k = torch.mean(acc_all[:k], dim=0, keepdim=True)
res.append(acc_k.mul_(100.0))
return res
def seq_train(train_data, model, criterion, optimizer, epoch, ntokens,
batch_size, cfg, checkpointer, extend_stats, train_writer):
total_loss = 0.
start_time = time.time()
hidden = model.module.init_hidden(batch_size)
data_batches = range(0, train_data.size(0) - 1, cfg.MODEL.RNN.BPTT)
if cfg.MODEL.RNN.SHUFFLE:
if cfg.DATALOADER.RE == 'yes':
data_sampler = torch.randint(high=len(data_batches),
size=(len(data_batches),),
dtype=torch.int64).tolist()
elif cfg.DATALOADER.RE == 'no':
data_sampler = torch.randperm(len(data_batches)).tolist()
else:
raise ValueError(
"Invalid cfg.DATALOADER.RE input {}".format(cfg.DATALOADER.RE))
else:
data_sampler = range(0, len(data_batches))
for batch, data_i in enumerate(data_sampler):
i = data_batches[data_i]
# Turn on training mode which enables dropout.
model.train()
# get data
data, targets = get_batch(train_data, i, cfg.MODEL.RNN.BPTT)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
# When cfg.MODEL.RNN.SHUFFLE is true, not initializing with 0 does not
# make sense. However, we just keep it here.
hidden = repackage_hidden(hidden, cfg.MODEL.RNN.INIT0)
if cfg.OPTIM.OPT in ['sgd_sls', 'salsa', 'ssls', 'salsa_new']:
hidden_clone = clone_hidden(hidden)
model.zero_grad()
output, hidden = model(data, hidden)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.MODEL.RNN.CLIP)
# closure function defined for line search used in SGD_SLS
def eval_loss():
#if cfg.ls_eval:
if cfg.OPTIM.LS.EVAL:
model.eval()
with torch.no_grad():
output, _ = model(data, hidden_clone)
loss = criterion(output.view(-1, ntokens), targets)
return loss
if cfg.OPTIM.OPT in ['yaida_diag', 'yaida_seq', 'pflug_bat', 'pflug_seq',
'sasa_xd_seq', 'sasa_xd']:
optimizer.step(closure=extend_stats)
elif cfg.OPTIM.OPT in ['sgd_sls', 'salsa', 'ssls', 'salsa_new']:
optimizer.step(loss, closure=eval_loss)
else:
optimizer.step(closure=None)
total_loss += loss.item()
if batch % cfg.LOG_FREQ == 0 and batch > 0:
cur_loss = total_loss / cfg.LOG_FREQ
elapsed = time.time() - start_time
lr, mom = get_lr_mom(optimizer, cfg)
print(
'| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // cfg.MODEL.RNN.BPTT, lr,
elapsed * 1000 / cfg.LOG_FREQ, cur_loss,
cur_loss))
total_loss = 0
start_time = time.time()
train_writer.add_scalar("metrics/top1", cur_loss)
train_writer.add_scalar("metrics/loss", cur_loss)
lr, mom = get_lr_mom(optimizer, cfg)
train_writer.add_scalar("params/lr", lr)
train_writer.add_scalar("params/mom", mom)
checkpointer.trainacc.append(cur_loss)
checkpointer.trainloss.append(cur_loss)
checkpointer.lrs.append(lr)
checkpointer.moms.append(mom)
# Training
def train(train_loader, model, criterion, optimizer, epoch,
cfg, extend_stats, train_writer, checkpointer, device):
print('\nEpoch: %d' % epoch)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# switch to train mode
model.train()
# measure data loading time
data_time.update(time.time() - end)
# compute output and record loss
input, target = input.to(device), target.to(device)
output = model(input)
if cfg.LOSS == "bce":
target = target.type(torch.float32)
if cfg.MODEL.ARCH == 'inception_v3':
loss = 0.5 * (criterion(output[0], target) + criterion(output[1], target))
else:
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# measure and record accuracy
if cfg.LOSS == "xentropy":
prec1, prec5 = compute_accuracy(output, target, topk=(1, 5))
top1.update(prec1[0].item(), input.size(0))
top5.update(prec5[0].item(), input.size(0))
elif cfg.LOSS == "bce":
prec1, prec5 = mean_accuracy_multi_binary_label_with_logits(output, target, topk=(40, 13))
top1.update(prec1[0].item(), input.size(0))
top5.update(prec5[0].item(), input.size(0))
else:
top1.update(0.0, input.size(0))
top5.update(0.0, input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
# closure function defined for line search used in SGD_SLS
def eval_loss():
#if cfg.ls_eval:
if cfg.OPTIM.LS.EVAL:
model.eval()
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
return loss
if cfg.OPTIM.OPT in ['yaida_diag', 'yaida_seq', 'pflug_bat', 'pflug_seq',
'sasa_xd_seq', 'sasa_xd']:
optimizer.step(closure=extend_stats)
elif cfg.OPTIM.OPT in ['sgd_sls', 'salsa', 'ssls', 'salsa_new']:
optimizer.step(loss, closure=eval_loss)
else:
optimizer.step(closure=None)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# Log only once per cfg.LOG_FREQ parameter updates; adjust the factor for
# pflug-style optimizers, which consume 3 batches per parameter update.
if i % cfg.LOG_FREQ == 0:
logging.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
train_writer.add_scalar("metrics/top1", top1.val)
train_writer.add_scalar("metrics/top5", top5.val)
train_writer.add_scalar("metrics/loss", losses.val)
lr, mom = get_lr_mom(optimizer, cfg)
train_writer.add_scalar("params/lr", lr)
train_writer.add_scalar("params/mom", mom)
checkpointer.trainacc.append(top1.val)
checkpointer.trainloss.append(losses.val)
checkpointer.lrs.append(lr)
checkpointer.moms.append(mom)
def seq_evaluate(data_source, model, criterion, ntokens, eval_batch_size,
epoch, cfg, test_writer, checkpointer):
# Turn on evaluation mode which disables dropout.
eval_start_time = time.time()
model.eval()
total_loss = 0.
hidden = model.module.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, cfg.MODEL.RNN.BPTT):
data, targets = get_batch(data_source, i, cfg.MODEL.RNN.BPTT)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
hidden = repackage_hidden(hidden, 0)
val_loss = total_loss / (len(data_source) - 1)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - eval_start_time),
val_loss, val_loss))
test_writer.add_scalar("metrics/top1", val_loss)
test_writer.add_scalar("metrics/loss", val_loss)
checkpointer.testloss.append(val_loss)
checkpointer.testacc.append(val_loss)
return val_loss
def validate(val_loader, model, criterion,
cfg, test_writer, checkpointer, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.to(device), target.to(device)
# compute output and record loss
output = model(input)
if cfg.LOSS == "bce":
target = target.type(torch.float32)
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# measure and record accuracy
if cfg.LOSS == "xentropy":
prec1, prec5 = compute_accuracy(output, target, topk=(1, 5))
top1.update(prec1[0].item(), input.size(0))
top5.update(prec5[0].item(), input.size(0))
elif cfg.LOSS == "bce":
prec1, prec5 = mean_accuracy_multi_binary_label_with_logits(output, target, topk=(40, 13))
top1.update(prec1[0].item(), input.size(0))
top5.update(prec5[0].item(), input.size(0))
else:
top1.update(0.0, input.size(0))
top5.update(0.0, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % cfg.LOG_FREQ == 0:
logging.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
test_writer.add_scalar("metrics/top1", top1.avg)
test_writer.add_scalar("metrics/top5", top5.avg)
test_writer.add_scalar("metrics/loss", losses.avg)
checkpointer.testloss.append(losses.avg)
checkpointer.testacc.append(top1.avg)
return top1.avg | 40.872131 | 106 | 0.572437 | 1,578 | 12,466 | 4.382129 | 0.1673 | 0.012292 | 0.030369 | 0.031815 | 0.549964 | 0.497759 | 0.447289 | 0.400578 | 0.349385 | 0.338684 | 0 | 0.026165 | 0.300979 | 12,466 | 305 | 107 | 40.872131 | 0.767386 | 0.090967 | 0 | 0.540773 | 0 | 0.012876 | 0.095133 | 0.006283 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034335 | false | 0 | 0.021459 | 0 | 0.081545 | 0.021459 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
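# Both training loops above hand the optimizer a zero-argument loss closure so
# line-search methods ('sgd_sls', 'salsa', ...) can re-evaluate the loss at
# trial points. A hypothetical minimal optimizer showing that contract
# (a sketch under simplified assumptions, not the project's actual line search):
class _BacktrackingSGDSketch(object):
    def __init__(self, params, lr=0.1, shrink=0.5, max_tries=5):
        self.params = list(params)
        self.lr, self.shrink, self.max_tries = lr, shrink, max_tries

    def step(self, loss, closure):
        with torch.no_grad():
            grads = [p.grad.clone() for p in self.params]
            lr = self.lr
            for _ in range(self.max_tries):
                for p, g in zip(self.params, grads):
                    p.sub_(lr * g)          # try a step of size lr
                if closure() <= loss:       # loss decreased: accept the step
                    return
                for p, g in zip(self.params, grads):
                    p.add_(lr * g)          # undo the trial step ...
                lr *= self.shrink           # ... and shrink the step size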
2e15b17caf196545a558db1448e485770a951b9c | 10,449 | py | Python | venv/Lib/site-packages/PySide2/examples/xmlpatterns/schema/schema_rc.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PySide2/examples/xmlpatterns/schema/schema_rc.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PySide2/examples/xmlpatterns/schema/schema_rc.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Thu Sep 5 07:07:53 2019
# by: The Resource Compiler for PySide2 (Qt v5.13.1)
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore
qt_resource_data = b"\
\x00\x00\x015\
<\
contact>\x0d\x0a <g\
ivenName>John</g\
ivenName>\x0d\x0a <\
familyName>Doe</\
familyName>\x0d\x0a \
<birthdate>1977\
-12-25</birthdat\
e>\x0d\x0a <homeAdd\
ress>\x0d\x0a <\
street>Sandakerv\
eien 116</street\
>\x0d\x0a <zipC\
ode>N-0550</zipC\
ode>\x0d\x0a <c\
ity>Oslo</city>\x0d\
\x0a <countr\
y>Norway</countr\
y>\x0d\x0a </homeAd\
dress>\x0d\x0a</contac\
t>\x0d\x0a\
\x00\x00\x01\xc8\
<\
order>\x0d\x0a <cus\
tomerId>194223</\
customerId>\x0d\x0a \
<article>\x0d\x0a \
<articleId>2\
2242</articleId>\
\x0d\x0a <count\
>5</count>\x0d\x0a \
</article>\x0d\x0a \
<article>\x0d\x0a \
<articleId>32\
372</articleId>\x0d\
\x0a <count>\
12</count>\x0d\x0a \
<comment>wit\
hout stripes</co\
mment>\x0d\x0a </ar\
ticle>\x0d\x0a <art\
icle>\x0d\x0a <\
articleId>23649<\
/articleId>\x0d\x0a \
<count>2</c\
ount>\x0d\x0a </art\
icle>\x0d\x0a <deli\
veryDate>2009-01\
-23</deliveryDat\
e>\x0d\x0a <payed>t\
rue</payed>\x0d\x0a</o\
rder>\x0d\x0a\
\x00\x00\x06-\
<\
?xml version=\x221.\
0\x22?>\x0d\x0a<xsd:schem\
a xmlns:xsd=\x22htt\
p://www.w3.org/2\
001/XMLSchema\x22>\x0d\
\x0a\x0d\x0a <xsd:elem\
ent name=\x22recipe\
\x22>\x0d\x0a <xsd\
:complexType>\x0d\x0a \
<xsd:\
sequence>\x0d\x0a \
<xsd:\
element name=\x22ti\
tle\x22 type=\x22xsd:s\
tring\x22/>\x0d\x0a \
<xsd:e\
lement name=\x22ing\
redient\x22 type=\x22i\
ngredientType\x22 m\
axOccurs=\x22unboun\
ded\x22/>\x0d\x0a \
<xsd:ele\
ment name=\x22time\x22\
type=\x22timeType\x22\
/>\x0d\x0a \
<xsd:element\
name=\x22method\x22>\x0d\
\x0a \
<xsd:comple\
xType>\x0d\x0a \
\
<xsd:sequence>\x0d\x0a\
\
<xsd\
:element name=\x22s\
tep\x22 type=\x22xsd:s\
tring\x22 maxOccurs\
=\x22unbounded\x22/>\x0d\x0a\
\
</xsd:se\
quence>\x0d\x0a \
</x\
sd:complexType>\x0d\
\x0a \
</xsd:element>\x0d\
\x0a </x\
sd:sequence>\x0d\x0a \
</xsd:comp\
lexType>\x0d\x0a </\
xsd:element>\x0d\x0a\x0d\x0a\
<xsd:complex\
Type name=\x22ingre\
dientType\x22>\x0d\x0a \
<xsd:attrib\
ute name=\x22name\x22 \
type=\x22xsd:string\
\x22/>\x0d\x0a <xs\
d:attribute name\
=\x22quantity\x22 type\
=\x22xsd:positiveIn\
teger\x22/>\x0d\x0a \
<xsd:attribute\
name=\x22unit\x22 typ\
e=\x22xsd:string\x22/>\
\x0d\x0a </xsd:comp\
lexType>\x0d\x0a\x0d\x0a \
<xsd:complexType\
name=\x22timeType\x22\
>\x0d\x0a <xsd:\
attribute name=\x22\
quantity\x22 type=\x22\
xsd:positiveInte\
ger\x22/>\x0d\x0a \
<xsd:attribute n\
ame=\x22unit\x22>\x0d\x0a \
<xsd:si\
mpleType>\x0d\x0a \
<xsd:\
restriction base\
=\x22xsd:string\x22>\x0d\x0a\
\
<xsd:enumera\
tion value=\x22seco\
nds\x22/>\x0d\x0a \
<xsd\
:enumeration val\
ue=\x22minutes\x22/>\x0d\x0a\
\
<xsd:enumera\
tion value=\x22hour\
s\x22/>\x0d\x0a \
</xsd:rest\
riction>\x0d\x0a \
</xsd:simp\
leType>\x0d\x0a \
</xsd:attribute\
>\x0d\x0a </xsd:com\
plexType>\x0d\x0a\x0d\x0a</x\
sd:schema>\x0d\x0a\
\x00\x00\x02c\
<\
recipe>\x0d\x0a <ti\
tle>Cheese on To\
ast</title>\x0d\x0a \
<ingredient nam\
e=\x22Bread\x22 quanti\
ty=\x222\x22 unit=\x22sli\
ces\x22/>\x0d\x0a <ing\
redient name=\x22Ch\
eese\x22 quantity=\x22\
2\x22 unit=\x22slices\x22\
/>\x0d\x0a <time qu\
antity=\x223\x22 unit=\
\x22days\x22/>\x0d\x0a <m\
ethod>\x0d\x0a \
<step>1. Slice t\
he bread and che\
ese.</step>\x0d\x0a \
<step>2. Gr\
ill one side of \
each slice of br\
ead.</step>\x0d\x0a \
<step>3. Tu\
rn over the brea\
d and place a sl\
ice of cheese on\
each piece.</st\
ep>\x0d\x0a <st\
ep>4. Grill unti\
l the cheese has\
started to melt\
.</step>\x0d\x0a \
<step>5. Serve\
and enjoy!</ste\
p>\x0d\x0a </method\
>\x0d\x0a <comment>\
Tell your friend\
s about it!</com\
ment>\x0d\x0a</recipe>\
\x0d\x0a\
\x00\x00\x03\xd4\
<\
?xml version=\x221.\
0\x22?>\x0d\x0a<xsd:schem\
a xmlns:xsd=\x22htt\
p://www.w3.org/2\
001/XMLSchema\x22>\x0d\
\x0a\x0d\x0a <xsd:elem\
ent name=\x22contac\
t\x22>\x0d\x0a <xs\
d:complexType>\x0d\x0a\
<xsd\
:sequence>\x0d\x0a \
<xsd\
:element name=\x22g\
ivenName\x22 type=\x22\
xsd:string\x22/>\x0d\x0a \
<\
xsd:element name\
=\x22familyName\x22 ty\
pe=\x22xsd:string\x22/\
>\x0d\x0a \
<xsd:element \
name=\x22birthdate\x22\
type=\x22xsd:date\x22\
minOccurs=\x220\x22/>\
\x0d\x0a \
<xsd:element n\
ame=\x22homeAddress\
\x22 type=\x22address\x22\
/>\x0d\x0a \
<xsd:element\
name=\x22workAddre\
ss\x22 type=\x22addres\
s\x22 minOccurs=\x220\x22\
/>\x0d\x0a \
</xsd:sequence>\x0d\
\x0a </xsd:c\
omplexType>\x0d\x0a \
</xsd:element>\x0d\
\x0a\x0d\x0a <xsd:comp\
lexType name=\x22ad\
dress\x22>\x0d\x0a \
<xsd:sequence>\x0d\
\x0a <xs\
d:element name=\x22\
street\x22 type=\x22xs\
d:string\x22/>\x0d\x0a \
<xsd:el\
ement name=\x22zipC\
ode\x22 type=\x22xsd:s\
tring\x22/>\x0d\x0a \
<xsd:eleme\
nt name=\x22city\x22 t\
ype=\x22xsd:string\x22\
/>\x0d\x0a \
<xsd:element nam\
e=\x22country\x22 type\
=\x22xsd:string\x22/>\x0d\
\x0a </xsd:s\
equence>\x0d\x0a </\
xsd:complexType>\
\x0d\x0a\x0d\x0a</xsd:schema\
>\x0d\x0a\
\x00\x00\x022\
<\
recipe>\x0d\x0a <ti\
tle>Cheese on To\
ast</title>\x0d\x0a \
<ingredient nam\
e=\x22Bread\x22 quanti\
ty=\x222\x22 unit=\x22sli\
ces\x22/>\x0d\x0a <ing\
redient name=\x22Ch\
eese\x22 quantity=\x22\
2\x22 unit=\x22slices\x22\
/>\x0d\x0a <time qu\
antity=\x223\x22 unit=\
\x22minutes\x22/>\x0d\x0a \
<method>\x0d\x0a \
<step>1. Slic\
e the bread and \
cheese.</step>\x0d\x0a\
<step>2.\
Grill one side \
of each slice of\
bread.</step>\x0d\x0a\
<step>3.\
Turn over the b\
read and place a\
slice of cheese\
on each piece.<\
/step>\x0d\x0a \
<step>4. Grill u\
ntil the cheese \
has started to m\
elt.</step>\x0d\x0a \
<step>5. Se\
rve and enjoy!</\
step>\x0d\x0a </met\
hod>\x0d\x0a</recipe>\x0d\
\x0a\
\x00\x00\x01(\
<\
contact>\x0d\x0a <g\
ivenName>John</g\
ivenName>\x0d\x0a <\
familyName>Doe</\
familyName>\x0d\x0a \
<title>Prof.</t\
itle>\x0d\x0a <work\
Address>\x0d\x0a \
<street>Sandak\
erveien 116</str\
eet>\x0d\x0a <z\
ipCode>N-0550</z\
ipCode>\x0d\x0a \
<city>Oslo</cit\
y>\x0d\x0a <cou\
ntry>Norway</cou\
ntry>\x0d\x0a </wor\
kAddress>\x0d\x0a</con\
tact>\x0d\x0a\
\x00\x00\x01;\
<\
order>\x0d\x0a <cus\
tomerId>234219</\
customerId>\x0d\x0a \
<article>\x0d\x0a \
<articleId>2\
1692</articleId>\
\x0d\x0a <count\
>3</count>\x0d\x0a \
</article>\x0d\x0a \
<article>\x0d\x0a \
<articleId>24\
749</articleId>\x0d\
\x0a <count>\
9</count>\x0d\x0a <\
/article>\x0d\x0a <\
deliveryDate>200\
9-01-23</deliver\
yDate>\x0d\x0a <pay\
ed>yes</payed>\x0d\x0a\
</order>\x0d\x0a\
\x00\x00\x03~\
<\
?xml version=\x221.\
0\x22?>\x0d\x0a<xsd:schem\
a xmlns:xsd=\x22htt\
p://www.w3.org/2\
001/XMLSchema\x22>\x0d\
\x0a\x0d\x0a <xsd:elem\
ent name=\x22order\x22\
>\x0d\x0a <xsd:\
complexType>\x0d\x0a \
<xsd:s\
equence>\x0d\x0a \
<xsd:e\
lement name=\x22cus\
tomerId\x22 type=\x22x\
sd:positiveInteg\
er\x22/>\x0d\x0a \
<xsd:elem\
ent name=\x22articl\
e\x22 type=\x22article\
Type\x22 maxOccurs=\
\x22unbounded\x22/>\x0d\x0a \
<\
xsd:element name\
=\x22deliveryDate\x22 \
type=\x22xsd:date\x22/\
>\x0d\x0a \
<xsd:element \
name=\x22payed\x22 typ\
e=\x22xsd:boolean\x22/\
>\x0d\x0a <\
/xsd:sequence>\x0d\x0a\
</xsd:co\
mplexType>\x0d\x0a \
</xsd:element>\x0d\x0a\
\x0d\x0a <xsd:compl\
exType name=\x22art\
icleType\x22>\x0d\x0a \
<xsd:sequenc\
e>\x0d\x0a \
<xsd:element nam\
e=\x22articleId\x22 ty\
pe=\x22xsd:positive\
Integer\x22/>\x0d\x0a \
<xsd:ele\
ment name=\x22count\
\x22 type=\x22xsd:posi\
tiveInteger\x22/>\x0d\x0a\
<xsd\
:element name=\x22c\
omment\x22 type=\x22xs\
d:string\x22 minOcc\
urs=\x220\x22/>\x0d\x0a \
</xsd:sequenc\
e>\x0d\x0a </xsd:co\
mplexType>\x0d\x0a\x0d\x0a</\
xsd:schema>\x0d\x0a\
"
qt_resource_name = b"\
\x00\x0e\
\x00vJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x000\x00.\x00x\x00m\x00l\
\x00\x0e\
\x00rJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x004\x00.\x00x\x00m\x00l\
\x00\x0c\
\x08\x13\x87\xf4\
\x00s\
\x00c\x00h\x00e\x00m\x00a\x00_\x001\x00.\x00x\x00s\x00d\
\x00\x0e\
\x00sJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x003\x00.\x00x\x00m\x00l\
\x00\x0c\
\x08\x10\x87\xf4\
\x00s\
\x00c\x00h\x00e\x00m\x00a\x00_\x000\x00.\x00x\x00s\x00d\
\x00\x0e\
\x00pJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x002\x00.\x00x\x00m\x00l\
\x00\x0e\
\x00yJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x001\x00.\x00x\x00m\x00l\
\x00\x0e\
\x00uJ\x1c\
\x00i\
\x00n\x00s\x00t\x00a\x00n\x00c\x00e\x00_\x005\x00.\x00x\x00m\x00l\
\x00\x0c\
\x08\x16\x87\xf4\
\x00s\
\x00c\x00h\x00e\x00m\x00a\x00_\x002\x00.\x00x\x00s\x00d\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x09\x00\x00\x00\x01\
\x00\x00\x00\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x0fu\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x019\
\x00\x00\x00b\x00\x00\x00\x00\x00\x01\x00\x00\x096\
\x00\x00\x00\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x12\xd7\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xc4\x00\x00\x00\x00\x00\x01\x00\x00\x11\xab\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x9d\
\x00\x00\x00D\x00\x00\x00\x00\x00\x01\x00\x00\x03\x05\
\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\x14\x16\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
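# The embedded files above become addressable through Qt's virtual ":/"
# filesystem once this module is imported (qInitResources() runs on import).
# A hypothetical read-back sketch, assuming the default "/" resource prefix:
#
#     from PySide2.QtCore import QFile, QIODevice
#
#     f = QFile(":/instance_0.xml")
#     if f.open(QIODevice.ReadOnly):
#         print(bytes(f.readAll()).decode("utf-8"))
#         f.close()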
| 22.137712 | 96 | 0.616136 | 1,637 | 10,449 | 3.91631 | 0.259621 | 0.15723 | 0.096865 | 0.071128 | 0.592887 | 0.519576 | 0.435346 | 0.339729 | 0.260958 | 0.20652 | 0 | 0.1721 | 0.180879 | 10,449 | 471 | 97 | 22.184713 | 0.576937 | 0.017609 | 0 | 0.300439 | 0 | 0.041667 | 0 | 0 | 0 | 0 | 0.00078 | 0 | 0 | 1 | 0.004386 | false | 0 | 0.002193 | 0 | 0.006579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e1a6d385104b92cdf4c3b65e8d847fac4046e9c | 2,441 | py | Python | play.py | apgeorg/rl-cartpole-balancer | 673c934326c90982460eb63543333334af1390a9 | [
"MIT"
] | 1 | 2018-12-24T13:49:32.000Z | 2018-12-24T13:49:32.000Z | play.py | apgeorg/rl-cartpole-balancer | 673c934326c90982460eb63543333334af1390a9 | [
"MIT"
] | null | null | null | play.py | apgeorg/rl-cartpole-balancer | 673c934326c90982460eb63543333334af1390a9 | [
"MIT"
] | null | null | null | import gym
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from agents.dqn import DQN
def create_model(states, actions):
model = Sequential()
model.add(Dense(24, input_dim=states, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(actions, activation='linear'))
model.compile(loss='mse', optimizer=Adam(lr=1e-4))
return model
def play(gym_id, episodes=1, agent=None):
env = gym.make(gym_id)
for e in range(episodes):
state = env.reset()
total_reward = 0.
for t in range(500):
if agent is None:
action = env.action_space.sample() # take a random action
else:
action = agent.act(np.reshape(state, [1, agent.state_size]))
state, reward, done, _ = env.step(action)
total_reward += reward
if done:
print('Episode {}/{} done in {} steps, total reward {}: '.format(e+1, episodes, t+1, total_reward))
break
env.close()
def learn(gym_id, episodes=1000, batch_size=32, model_path="models/model.h5"):
env = gym.make(gym_id)
num_states = env.observation_space.shape[0]
num_actions = env.action_space.n
agent = DQN(create_model(num_states, num_actions))
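# The DQN agent presumably couples the Q-network above with an experience
# replay buffer and an epsilon-greedy policy (see agents/dqn.py).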
for e in range(episodes):
state = env.reset()
state = np.reshape(state, [1, num_states])
total_reward = 0.
for steps in range(500):
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state, [1, agent.state_size])
agent.remember(state, action, reward, next_state, done)
total_reward += reward
state = next_state
if done:
print('Episode {}/{} done in {} steps, total reward {}: '.format(e+1, episodes, steps+1, total_reward))
if total_reward >= 200:
agent.save(model_path)
return agent
break
if agent.memory_size > batch_size:
agent.train(batch_size) # replay a minibatch of stored experience
env.close()
return None
if __name__ == '__main__':
agent = learn('CartPole-v0', episodes=1000, batch_size=24, model_path="./models/cartpole-full.h5")
play('CartPole-v0', episodes=5, agent=agent) | 39.370968 | 119 | 0.606719 | 321 | 2,441 | 4.46729 | 0.311526 | 0.069038 | 0.027197 | 0.020921 | 0.248257 | 0.161785 | 0.122734 | 0.122734 | 0.078103 | 0.078103 | 0 | 0.024349 | 0.276526 | 2,441 | 62 | 120 | 39.370968 | 0.787656 | 0.029086 | 0 | 0.275862 | 0 | 0 | 0.078125 | 0.010557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.103448 | 0 | 0.206897 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e1ba8922cf58bb90287d078e5a54ad5ae1af3bf | 46,505 | py | Python | Database/DataBaseGenerator.py | matteoNunz/ImmunoPoli | 4a6688cc70715feefd9ed71e13aafa90b45a9a62 | [
"MIT"
] | null | null | null | Database/DataBaseGenerator.py | matteoNunz/ImmunoPoli | 4a6688cc70715feefd9ed71e13aafa90b45a9a62 | [
"MIT"
] | null | null | null | Database/DataBaseGenerator.py | matteoNunz/ImmunoPoli | 4a6688cc70715feefd9ed71e13aafa90b45a9a62 | [
"MIT"
] | 2 | 2021-12-22T09:07:09.000Z | 2021-12-24T19:27:26.000Z | """
Date: 28/10/2021
Neo4J generator for ImmunoPoli project
"""
import neo4j as nj
import App.PlotDBStructure as ps
from random import randint, random
from enum import IntEnum
import datetime
MAX_CIVIC_NUMBER = 100
PHONE_NUMBER_LENGTH = 10
MAX_NUMBER_OF_FAMILY_MEMBER = 5
NUMBER_OF_FAMILY = 150
MAX_NUMBER_OF_CONTACT = 2000 # For new contact relationships
MAX_NUMBER_OF_VISIT = 5000 # For new visit relationships
MAX_NUMBER_OF_VACCINE = 750 # For new get vaccinated relationships
MAX_NUMBER_OF_TEST = 4000 # For new make test relationships
PROBABILITY_TO_HAVE_APP = 0.5
PROBABILITY_TO_BE_POSITIVE = 0.5
PROBABILITY_TO_BE_TESTED_AFTER_INFECTED = 0.8
MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE = 15
CONTACT_DAYS_BACKS = 10
VISITS_DAYS_BACKS = 150
VACCINES_DAYS_BACKS = 150
TESTS_DAYS_BACKS = 150
# BOLT = "bolt://localhost:7687"
# PASSWORD = "991437"
USER = "neo4j"
PASSWORD = "cJhfqi7RhIHR4I8ocQtc5pFPSEhIHDVJBCps3ULNzbA"
URI = "neo4j+s://057f4a80.databases.neo4j.io"
class PersonAttribute(IntEnum):
"""
Class enum for the attribute of a Person Node
"""
NAME = 0
SURNAME = 1
AGE = 2
MAIL = 3
NUMBER = 4
APP = 5
# And so on...
@classmethod
def numberOfAttribute(cls):
numAttribute = 0
for _ in PersonAttribute:
numAttribute += 1
return numAttribute
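# Note: IntEnum supports len(), so each numberOfAttribute() helper could
# simply `return len(cls)`; the explicit loop is kept for symmetry below.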
class LocationAttribute(IntEnum):
"""
Class enum for the attribute of a Location
"""
TYPE = 0
NAME = 1
ADDRESS = 2
CIVIC_NUMBER = 3
CAP = 4
CITY = 5
PROVINCE = 6
# and so on ...
@classmethod
def numberOfAttribute(cls):
numAttribute = 0
for _ in LocationAttribute:
numAttribute += 1
return numAttribute
class HouseAttribute(IntEnum):
"""
Class enum for the creation of the House
"""
ADDRESS = 0
CAP = 1
CITY = 2
PROVINCE = 3
@classmethod
def numberOfAttribute(cls):
numAttribute = 0
for _ in HouseAttribute:
numAttribute += 1
return numAttribute
class VaccineAttribute(IntEnum):
"""
Class enum for the attribute of a Location
"""
NAME = 0
PRODUCER = 1
# and so on ...
@classmethod
def numberOfAttribute(cls):
numAttribute = 0
for _ in VaccineAttribute:
numAttribute += 1
return numAttribute
def openConnection():
"""
Method that starts a connection with the database
:return: the driver for the connection
"""
connection = nj.GraphDatabase.driver(
uri=URI, auth=nj.basic_auth(USER, PASSWORD))
return connection
def closeConnection(connection):
"""
Method that close a connection
:param connection: is the connection to terminate
"""
connection.close()
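# Illustrative pairing of the two helpers above with a driver session
# (a sketch only; findAllPerson is defined further below in this file):
#
#     driver = openConnection()
#     with driver.session() as session:
#         people = session.read_transaction(findAllPerson)
#         print(len(people), "Person nodes found")
#     closeConnection(driver)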
def readNames():
"""
Method that reads the possible names from a file
:return: a list containing the names
"""
namesRead = []
with open("Files/Names.txt", 'r', encoding='utf8') as f:
for line in f:
if line == "\n":
continue
namesRead.append(line.rstrip('\n').rstrip().lstrip())
f.close()
return namesRead
def readSurnames():
"""
Method that reads the possible surnames from a file
:return: a list containing the surnames
"""
surnamesRead = []
with open("Files/Surnames.txt", 'r', encoding='utf8') as f:
for line in f:
if line == "\n":
continue
surnamesRead.append(line.rstrip('\n').rstrip().lstrip())
f.close()
return surnamesRead
def readLocations():
"""
Method that reads the possible locations from a file
:return: a list containing the locations
"""
locationsRead = []
# Parallel reading from address_file and locations_file
with open("Files/PublicPlaces.txt", 'r', encoding='utf8') as f:
for line in f:
if line == "\n":
continue
details = line.split(",")
address = []
for detail in details:
address.append(detail.rstrip('\n').rstrip().lstrip())
locationsRead.append(address)
f.close()
return locationsRead
def readHouseAddresses():
"""
Method that reads different addresses from a file
:return: a list of addresses
"""
addressesRead = []
with open("Files/HouseAddresses.txt", 'r', encoding='utf8') as f:
for line in f:
if line == "\n":
continue
details = line.split(",")
address = []
for detail in details:
address.append(detail.rstrip('\n').rstrip().lstrip())
addressesRead.append(address)
f.close()
return addressesRead
def readVaccines():
"""
Method that reads the possible vaccines from a file
:return: a list containing the vaccines
"""
vaccinesRead = []
with open("Files/Vaccines.txt", 'r', encoding='utf8') as vaccine_file:
for vaccine_lines in vaccine_file:
vaccineDetails = vaccine_lines.split(",")
details = []
for vaccineDetail in vaccineDetails:
details.append(vaccineDetail.lstrip().rstrip().rstrip('\n'))
vaccinesRead.append(details)
vaccine_file.close()
return vaccinesRead
def readTests():
"""
Method that reads the possible locations from a file
:return: a list containing the locations
"""
testsList = []
with open("Files/Tests.txt", 'r', encoding='utf8') as f:
for line in f:
if line == "\n":
continue
testsList.append(line.rstrip('\n').rstrip().lstrip())
f.close()
return testsList
def deleteAll(tx):
"""
Method that deletes APP_CONTACT relationships older than 10 days
(note: despite the name, the query below does not delete every node and link)
:param tx: is the transaction
:return: nothing
"""
query = (
"MATCH(p1:Person)-[a:APP_CONTACT]->(p2:Person)"
"WHERE a.date < date() - duration({Days: 10}) OR (a.date = date() - duration({Days: 10}) AND a.hour < time())"
"DELETE a"
)
tx.run(query)
def countAll(tx):
"""
Method that count the number of Nodes
:param tx: is the transaction
:return: the number of Nodes
"""
query = (
"MATCH (n) "
"RETURN COUNT(n) AS count "
"LIMIT $limit"
)
result = tx.run(query, limit=10)
return [record["count"] for record in result]
def findAll(tx):
"""
Method that finds the whole structure of the database
:param tx: is the transaction
:return: the whole structure
"""
query = (
"MATCH (n1)-[r]->(n2) "
"RETURN n1 AS node1 , r AS relationship , n2 AS node2 "
)
result = tx.run(query)
return [(record["node1"], record["relationship"], record["node2"]) for record in result]
def findAllPerson(tx):
"""
Method that finds all the nodes Person in the data base
:param tx: is the transaction
:return: a list of nodes
"""
query = (
"MATCH (p:Person) "
"RETURN p , ID(p);"
)
results = tx.run(query).data()
return results
def findAllHome(tx):
"""
Method that finds all the nodes House in the data base
:param tx: is the transaction
:return: a list of nodes
"""
query = (
"MATCH (h:House) "
"RETURN h , ID(h);"
)
results = tx.run(query).data()
return results
def findAllLocation(tx):
"""
Method that finds all the nodes Location in the data base
:param tx: is the transaction
:return: a list of nodes
"""
query = (
"MATCH (l:Location) "
"RETURN l , ID(l);"
)
results = tx.run(query).data()
return results
def findAllVaccine(tx):
"""
Method that finds all the nodes Vaccine in the data base
:param tx: is the transaction
:return: a list of nodes
"""
query = (
"MATCH (v:Vaccine) "
"RETURN v , ID(v);"
)
results = tx.run(query).data()
return results
def findAllTest(tx):
"""
Method that finds all the nodes Test in the data base
:param tx: is the transaction
:return: a list of nodes
"""
query = (
"MATCH (t:Test) "
"RETURN t , ID(t);"
)
results = tx.run(query).data()
return results
def findAllLiveRelationships(tx):
"""
Method that finds all Live relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:LIVE]->(n2:House) "
"RETURN ID(n1) , r , ID(n2);"
)
results = tx.run(query).data()
return results
def findAllAppContactRelationships(tx):
"""
Method that finds all App_Contact relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) "
"RETURN ID(n1) , r , r.date , r.hour, ID(n2);"
)
results = tx.run(query).data()
return results
def findAllVisitRelationships(tx):
"""
Method that finds all VISIT relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:VISIT]->(n2:Location) "
"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);"
)
results = tx.run(query).data()
return results
def findAllGetVaccineRelationships(tx):
"""
Method that finds all GET (a vaccine) relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) "
"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);"
)
results = tx.run(query).data()
return results
def findAllMakeTestRelationships(tx):
"""
Method that finds all MAKE (a test) relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) "
"RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);"
)
results = tx.run(query).data()
return results
def findAllInfectedRelationships(tx):
"""
Method that finds all COVID_EXPOSURE (infection) relationships in the data base
:param tx: is the transaction
:return: a list of relationships
"""
query = (
"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) "
"RETURN ID(n1) , r , r.date , r.name , ID(n2);"
)
results = tx.run(query).data()
return results
def createFamilies(namesList, surnamesList):
"""
Method that initialize a list of all the family relationships
:return: a list of list (a list of family)
"""
familiesList = []
surnameIndex = 0
for _ in range(0, NUMBER_OF_FAMILY):
# Choose a size for the family
numberOfMembers = randint(1, MAX_NUMBER_OF_FAMILY_MEMBER)
# Family will contain the name in pos 0 and the surname in pos 1
familyEl = [None] * numberOfMembers
casualFamily = False
for j in range(0, len(familyEl)):
familyEl[j] = [None] * PersonAttribute.numberOfAttribute()
# Append a random name
name = str(namesList[randint(0, len(names) - 1)])
familyEl[j][int(PersonAttribute.NAME)] = name
# Append the next surname
surname = str(surnamesList[surnameIndex])
familyEl[j][int(PersonAttribute.SURNAME)] = surname
# Append a random age
if j == 0:
age = randint(18, 99)
else:
age = randint(1, 99)
familyEl[j][int(PersonAttribute.AGE)] = age
# Append the mail
mail = name.lower() + "." + surname.lower() + str(age) + "@immunoPoli.it"
familyEl[j][int(PersonAttribute.MAIL)] = mail
# Append the phone number
number = 0
for i in range(0, PHONE_NUMBER_LENGTH):
number += randint(0, 9) * 10 ** i
familyEl[j][int(PersonAttribute.NUMBER)] = number
# Append the app attribute
if random() < PROBABILITY_TO_HAVE_APP:
app = "True"
else:
app = "False"
familyEl[j][int(PersonAttribute.APP)] = app
# In every family there will be at least 2 surnames
# In case of friends living together there is a probability of 30% to have more than 2 surnames in a family
if j == 0 and randint(0, 100) < 30: # Family of not familiar
casualFamily = True
if j == 0 or (numberOfMembers > 2 and casualFamily):
surnameIndex += 1
                if surnameIndex >= len(surnamesList):
surnameIndex = 0
familiesList.append(familyEl)
surnameIndex += 1
        if surnameIndex >= len(surnamesList):
surnameIndex = 0
return familiesList
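# Illustrative sketch of the returned structure (hypothetical data, not from
# an actual run): each family is a list of members, and each member is a list
# of attributes ordered as in PersonAttribute, e.g.
# createFamilies(["Ada"], ["Rossi"]) could yield
# [[["Ada", "Rossi", 42, "ada.rossi42@immunoPoli.it", 3331234567, "True"]]]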
def createNodesFamily(familiesList, houseAddressesList):
"""
Method that append some command to the general query
:param houseAddressesList: is the list containing addresses ofr houses
:param familiesList: is the list of families
:return: nothing
"""
    creationQuery = []  # List that will contain all the queries for the node creation
    relationshipsQuery = []  # List that will contain all the queries for the relationship creation
for familyEl in familiesList:
for memberEl in familyEl:
currentQuery = (
"CREATE (p:Person {name: \"" + str(memberEl[int(PersonAttribute.NAME)]) + "\" , surname: \"" +
str(memberEl[int(PersonAttribute.SURNAME)]) + "\" , age: \"" + str(
memberEl[int(PersonAttribute.AGE)]) +
"\" , mail: \"" + str(memberEl[int(PersonAttribute.MAIL)]) + "\" , number: \"" +
str(memberEl[int(PersonAttribute.NUMBER)]) + "\" , app: \"" +
str(memberEl[int(PersonAttribute.APP)]) + "\"}); "
)
creationQuery.append(currentQuery)
# Create the name of the house
memberFamily = familyEl[0]
familyName = memberFamily[PersonAttribute.NAME] + " " + memberFamily[PersonAttribute.SURNAME] + " house"
addressIndex = randint(0, len(houseAddressesList) - 1)
address = houseAddressesList[addressIndex]
civicNumber = randint(0, MAX_CIVIC_NUMBER)
currentQuery = (
"CREATE (h:House {name: \"" + str(familyName) + "\" , address: \"" + str(
address[HouseAttribute.ADDRESS]) +
"\", civic_number: \"" + str(civicNumber) + "\" , CAP: \"" + str(address[HouseAttribute.CAP]) +
"\", city: \"" + str(address[HouseAttribute.CITY]) + "\" , province: \""
+ str(address[HouseAttribute.PROVINCE]) + "\"}); "
)
creationQuery.append(currentQuery)
# Create the LIVE relationships
for memberEl in familyEl:
currentQuery = (
"MATCH (p:Person) , (h:House) "
"WHERE p.name = \"" + str(memberEl[int(PersonAttribute.NAME)]) +
"\" AND p.surname = \"" + str(memberEl[int(PersonAttribute.SURNAME)]) + "\" AND p.age= \"" +
str(memberEl[int(PersonAttribute.AGE)]) + "\" AND h.name = \"" + str(familyName) +
"\" AND h.address = \"" + str(address[HouseAttribute.ADDRESS]) + "\" AND h.civic_number = \"" +
str(civicNumber) + "\" AND h.CAP = \"" + str(address[HouseAttribute.CAP]) +
"\" AND h.city = \"" + str(address[HouseAttribute.CITY]) + "\" AND h.province = \"" +
str(address[HouseAttribute.PROVINCE]) + "\" "
"CREATE (p)-[:LIVE]->(h);"
)
relationshipsQuery.append(currentQuery)
return creationQuery, relationshipsQuery
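# Example of a node-creation statement this function emits (hypothetical
# member values):
# CREATE (p:Person {name: "Ada" , surname: "Rossi" , age: "42" ,
#                   mail: "ada.rossi42@immunoPoli.it" ,
#                   number: "3331234567" , app: "True"});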
def createNodeLocations(locationsList):
"""
Method that creates the query for the creation of the public places
:param locationsList: is a list containing all the locations
:return: a query
"""
locationsQuery = []
for locationEl in locationsList:
currentQuery = (
"CREATE (l:Location {name: \"" + str(locationEl[int(LocationAttribute.NAME)]) + "\" , type: \"" +
str(locationEl[int(LocationAttribute.TYPE)]) + "\" , address: \"" +
str(locationEl[int(LocationAttribute.ADDRESS)]) + "\" , civic_number: \"" +
str(locationEl[int(LocationAttribute.CIVIC_NUMBER)]) + "\", CAP: \"" +
str(locationEl[int(LocationAttribute.CAP)]) + "\" , city: \"" +
str(locationEl[int(LocationAttribute.CITY)]) + "\" , province: \"" +
str(locationEl[int(LocationAttribute.PROVINCE)]) + "\"}); "
)
locationsQuery.append(currentQuery)
return locationsQuery
def createNodeVaccines(vaccinesList):
"""
Method that creates the query for the creation of the vaccines node
:param vaccinesList: is a list containing all the vaccines
:return: a query
"""
vaccinesQuery = []
for vaccineEl in vaccinesList:
currentQuery = (
"CREATE (v:Vaccine {name: \"" + str(vaccineEl[int(VaccineAttribute.NAME)]) + "\" , producer: \"" +
str(vaccineEl[int(VaccineAttribute.PRODUCER)]) + "\"}); "
)
vaccinesQuery.append(currentQuery)
return vaccinesQuery
def createNodeTests(testsList):
"""
Method that creates the query for the creation of the tests
:param testsList: is a list containing all the possible type of tests
:return: a query
"""
testsQuery = []
for testEl in testsList:
currentQuery = (
"CREATE (t:Test {name: \"" + str(testEl) + "\"}); "
)
testsQuery.append(currentQuery)
return testsQuery
def createRelationshipsAppContact(d, pIds):
"""
Method that creates random relationship
:param d: is the connection (driver)
:param pIds: list of Person ids
:return: nothing
"""
# Create the number of app contact for the day
numOfContact = MAX_NUMBER_OF_CONTACT
for _ in range(0, numOfContact):
# Choose two random people
randomIndex = randint(0, len(pIds) - 1)
pId1 = pIds[randomIndex]
randomIndex = randint(0, len(pIds) - 1)
pId2 = pIds[randomIndex]
# Choose the hour/date
# Verify if it's the same node
if pId1 == pId2:
continue
date = datetime.date.today() - datetime.timedelta(days=randint(0, CONTACT_DAYS_BACKS))
date = date.strftime("%Y-%m-%d")
h = randint(0, 23)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
hour = str(h) + ":" + str(minutes) + ":00"
n = 0
while not (validateDate(d, date, pId1, hour) or not validateDate(d, date, pId2, hour)) \
and n < MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
date = datetime.date.today() - datetime.timedelta(days=randint(0, 20))
date = date.strftime("%Y-%m-%d")
h = randint(0, 23)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
hour = str(h) + ":" + str(minutes) + ":00"
n = n + 1
if n == MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
continue
query = (
"MATCH (p1:Person) , (p2:Person) "
"WHERE ID(p1) = $pId1 AND ID(p2) = $pId2 "
"MERGE (p1)-[:APP_CONTACT { hour: time($hour) , date: date($date)}]->(p2) "
"MERGE (p1)<-[:APP_CONTACT { hour: time($hour) , date: date($date)}]-(p2)"
)
# Execute the query
with d.session() as s:
s.write_transaction(createContact, query, pId1, pId2, hour, date)
def createRelationshipsVisit(d, pIds, lIds):
"""
Method that creates VISIT relationships
:param d: is the connection (driver)
:param pIds: is a list of Person ids
:param lIds: is a list of Location ids
:return: nothing
"""
# Choose how many new visit relationships
numberOfVisits = MAX_NUMBER_OF_VISIT
for _ in range(0, numberOfVisits):
lIndex = randint(0, len(lIds) - 1)
locationId = lIds[lIndex]
pIndex = randint(0, len(pIds) - 1)
personId = pIds[pIndex]
# Choose the hour/date
date = datetime.date.today() - datetime.timedelta(days=randint(0, VISITS_DAYS_BACKS))
date = date.strftime("%Y-%m-%d")
h = randint(0, 22)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
startHour = str(h) + ":" + str(minutes)
h = randint(h, 23)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
endHour = str(h) + ":" + str(minutes)
n = 0
while not validateDate(d, date, personId, endHour) and n < MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
date = datetime.date.today() - datetime.timedelta(days=randint(0, 150))
date = date.strftime("%Y-%m-%d")
h = randint(0, 22)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
startHour = str(h) + ":" + str(minutes)
h = randint(h, 23)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
endHour = str(h) + ":" + str(minutes)
n = n + 1
if n == MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
continue
query = (
"MATCH (p:Person) , (l:Location) "
"WHERE ID(p) = $personId AND ID(l) = $locationId "
"MERGE (p)-[:VISIT {date: date($date) , start_hour: time($startHour) , end_hour: time($endHour)}]->(l); "
)
# Execute the query
with d.session() as s:
s.write_transaction(createVisit, query, personId, locationId, date, startHour, endHour)
def validateDate(d, date, personId, hour):
"""
    Method that validates the date: if the last test before the date is positive, it returns False
:param d: driver
:param date: date to check
:param personId: person to check
:param hour: hour to check
:return: true if it's valid
"""
query = (
"MATCH (p:Person)-[r:MAKE_TEST]->(:Test) "
"WHERE ID(p) = $personId AND (date($date)>r.date OR(date($date)=r.date AND time($hour)>r.hour)) "
"RETURN r.date as date,r.result as result,r.hour as hour "
"ORDER BY date DESC "
"LIMIT 1 ")
# Execute the query
with d.session() as s:
precDates = s.read_transaction(checkDate, query, personId, date, hour)
if precDates is None or len(precDates) == 0 or precDates[0]["result"] == "Negative":
return True
else:
return False
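# Usage sketch (hypothetical values): if the last MAKE_TEST before
# ("2021-06-10", "10:00") for person 42 was Positive, then
# validateDate(driver, "2021-06-10", 42, "10:00") returns False and the
# caller re-rolls the date; with no earlier test, or a Negative one, it
# returns True.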
def createRelationshipsGetVaccine(d, pIds, vIds):
"""
    Method that creates GET_VACCINE relationships
:param d: is the connection (driver)
:param pIds: is a list of Person ids
:param vIds: is a list of Vaccine ids
:return: nothing
"""
    # Choose how many new vaccine relationships
numberOfVaccines = MAX_NUMBER_OF_VACCINE
for _ in range(0, numberOfVaccines):
vIndex = randint(0, len(vIds) - 1)
vaccineId = vIds[vIndex]
pIndex = randint(0, len(pIds) - 1)
personId = pIds[pIndex]
date = datetime.date.today() - datetime.timedelta(days=randint(0, VACCINES_DAYS_BACKS))
country = "Italy"
# For the future: maybe do a random country
# Ask to neo4j server how many vaccines the user did
query = (
"MATCH (p:Person)-[r]->(v:Vaccine) "
"WHERE ID(p) = $personId AND type(r)='GET_VACCINE'"
"RETURN count(p) as count,ID(v) as vaccineID,r.expirationDate as date"
)
with d.session() as s:
datas = s.read_transaction(gettingNumberVaccines, query, personId)
# if no vaccines do one, else make the second vaccine
if len(datas) == 0:
string2 = str(date + datetime.timedelta(days=28)).split("-")
expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))
else:
if len(datas) == 1:
string1 = str(datas[0]["date"]).split("-")
date = datetime.date(int(string1[0]), int(string1[1]), int(string1[2]))
string2 = str(date + datetime.timedelta(days=365)).split("-")
expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))
vaccineId = datas[0]["vaccineID"]
else:
continue
date = date.strftime("%Y-%m-%d")
expDate = expDate.strftime("%Y-%m-%d")
query = (
"MATCH (p:Person) , (v:Vaccine) "
"WHERE ID(p) = $personId AND ID(v) = $vaccineId "
"MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); "
)
# Execute the query
with d.session() as s:
s.write_transaction(createGettingVaccine, query, personId, vaccineId, date, country, expDate)
def createRelationshipsMakeTest(d, pIds, tIds):
"""
    Method that creates MAKE_TEST relationships
:param d: is the connection (driver)
:param pIds: is a list of Person ids
:param tIds: is a list of Test ids
:return: nothing
"""
    # Choose how many new test relationships
numberOfTest = MAX_NUMBER_OF_TEST
for _ in range(0, numberOfTest):
probability = random()
tIndex = randint(0, len(tIds) - 1)
testId = tIds[tIndex]
pIndex = randint(0, len(pIds) - 1)
personId = pIds[pIndex]
date = datetime.date.today() - datetime.timedelta(days=randint(0, TESTS_DAYS_BACKS))
h = randint(0, 23)
minutes = randint(0, 59)
if minutes < 10:
minutes = "0" + str(minutes)
string_date = date.strftime("%Y-%m-%d")
hour = str(h) + ":" + str(minutes)
if probability < PROBABILITY_TO_BE_POSITIVE:
result = "Positive"
else:
result = "Negative"
query = (
"MATCH (p:Person) , (t:Test) "
"WHERE ID(p) = $personId AND ID(t) = $testId "
"MERGE (p)-[:MAKE_TEST{date:date($date) , hour: time($hour) ,result:$result}]->(t); "
)
# If negative, all infections have to be neglected
if probability >= PROBABILITY_TO_BE_POSITIVE:
# Check whether or not I have been infected by someone
delete_possible_infection_command = (
"MATCH ()-[i:COVID_EXPOSURE]->(p:Person)"
"WHERE ID(p) = $personId AND (date($date) >= i.date + duration({days: 7})) "
"DELETE i"
)
with d.session() as s:
s.write_transaction(delete_possible_infection, delete_possible_infection_command,
personId, string_date, hour)
# Execute the query
with d.session() as s:
s.write_transaction(createMakingTest, query, personId, testId, string_date, hour, result)
def delete_possible_infection(tx, command, personId, date, hour):
"""
Method
:param command: delete infection command to be performed
:param personId: person whose infection is deleted
:param date: date of the test
:param hour: hour of the test
"""
tx.run(command, personId=personId, date=date, hour=hour)
def createVisit(tx, query, personId, locationId, date, startHour, endHour):
"""
Method that executes the query to create a VISIT relationship
:param endHour: ending time of the visit
:param startHour: starting time of the visit
:param date: date of the visit
:param tx: is the transaction
:param query: is the query to create a visit relationship
:param personId: is the id of the Person
:param locationId: is the id of the Location
:return: nothing
"""
tx.run(query, personId=personId, locationId=locationId, date=date, startHour=startHour,
endHour=endHour)
def createGettingVaccine(tx, query, personId, vaccineId, date, country, expDate):
"""
    Method that executes the query to create a GET_VACCINE relationship
    :param tx: is the transaction
    :param query: is the query to create a vaccine relationship
:param personId: is the id of the Person
:param vaccineId: is the id of the Vaccine
:param date: date of the vaccine
:param country: country
:param expDate: expiration date of the vaccine
:return: nothing
"""
tx.run(query, personId=personId, vaccineId=vaccineId, date=date, country=country, expDate=expDate)
def gettingNumberVaccines(tx, query, personId):
"""
    Method that executes the query counting the vaccines already administered to a Person
    :param tx: is the transaction
    :param query: is the query to count the vaccines
:param personId: is the id of the Person
:return: a list of the vaccines already administered to the Person
"""
return tx.run(query, personId=personId).data()
def createMakingTest(tx, query, personId, testId, date, hour, result):
"""
    Method that executes the query to create a MAKE_TEST relationship
    :param tx: is the transaction
    :param query: is the query to create a test relationship
    :param personId: is the id of the Person
    :param testId: is the id of the Test
    :param date: date of the test
:param hour: hour of the test
:param result: result of the test
:return: nothing
"""
tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)
def findAllPositivePerson():
"""
    Method that finds all currently positive people (last test Positive with no later Negative test)
    :return: a list of positive ids with the infection date and hour
"""
query = (
"""
MATCH (p:Person)-[t:MAKE_TEST{result: \"Positive\"}]->()
WHERE NOT EXISTS {
MATCH (p)-[t2:MAKE_TEST{result: \"Negative\"}]->()
WHERE t2.date > t.date
}
RETURN distinct ID(p) , t.date as infectionDate , t.hour as infectionHour
"""
)
positiveIdsFound = runQueryRead(driver, query)
return positiveIdsFound
def checkDate(tx, query, personId, date, hour):
"""
    Method that executes the query returning the last test before the given date
    :param tx: is the transaction
    :param query: is the query to get the test
    :param personId: is the id of the Person to check
    :param date: hypothetical date of the visit
    :param hour: hypothetical hour of the visit
    :return: the most recent test before the date, if any
"""
return tx.run(query, personId=personId, date=date, hour=hour).data()
def createRelationshipsInfect(id, test_date, test_hour, daysBack):
"""
    Method that finds all the contacts of a positive person and creates COVID_EXPOSURE relationships
    :param id: is the id of the positive person
    :param test_date: is the date of the positive test
    :param test_hour: is the hour of the positive test
    :param daysBack: is the number of days to look in the past
    :return: nothing (the relationships are written to the database)
"""
familyQuery = (
"MATCH (pp:Person)-[:LIVE]->(h:House)<-[:LIVE]-(ip:Person) "
"WHERE ID(pp) = $id AND ip <> pp AND NOT (ip)<-[:COVID_EXPOSURE]-(pp)"
"RETURN DISTINCT ID(ip);"
)
"""
    IMPORTANT: ($date) represents the date from which we check the contacts: the date of the positive test - 7 days.
    We check all contacts up to the date of the positive test.
"""
appContactQuery = (
"MATCH (pp:Person)-[r1:APP_CONTACT]->(ip:Person) "
"WHERE ID(pp) = $id AND (r1.date > date($date) OR (r1.date = date($date) AND r1.hour >= time($hour))) "
"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND "
"r1.hour <= time($hour))) "
"AND NOT "
"(pp)-[:COVID_EXPOSURE{date: r1.date}]->(ip)"
"RETURN DISTINCT ID(ip) , r1.date;"
)
locationContactQuery = (
"MATCH (pp:Person)-[r1:VISIT]->(l:Location)<-[r2:VISIT]-(ip:Person) "
"WHERE ID(pp) = $id AND ip <> pp AND (r1.date > date($date) OR (r1.date = date($date) AND r1.start_hour >= time($hour))) "
"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND "
"r1.end_hour <= time($hour))) AND r2.date = r1.date AND "
"((r1.start_hour < r2.start_hour AND r1.end_hour > r2.start_hour) OR "
"(r2.start_hour < r1.start_hour AND r2.end_hour > r1.start_hour)) AND NOT "
"(pp)-[:COVID_EXPOSURE{name: l.name , date: r1.date}]->(ip)"
"RETURN DISTINCT ID(ip) , r1.date , l.name;"
)
# date = datetime.date.today() - datetime.timedelta(daysBack)
"""
date is referred to date test - daysback
"""
date = test_date - datetime.timedelta(daysBack)
infectedIds = []
with driver.session() as s:
familyInfected = s.read_transaction(findInfectInFamily, familyQuery, id)
appInfected = s.read_transaction(findInfect, appContactQuery, id, date, test_hour)
locationInfected = s.read_transaction(findInfect, locationContactQuery, id, date, test_hour)
        # Collect the ids of the exposed family members
        infectedIds = []
for el in familyInfected:
infectedIds.append(el['ID(ip)'])
for infectedId in infectedIds:
query = (
"MATCH (pp:Person) , (ip:Person) "
"WHERE ID(pp) = $id AND ID(ip) = $ipid "
"CREATE (pp)-[:COVID_EXPOSURE{date:date($date)}]->(ip);"
)
s.write_transaction(createInfectFamily, query, id, infectedId, date.strftime("%Y-%m-%d"))
infectedIds = []
for el in appInfected:
details = []
details.append(el['ID(ip)'])
details.append(el['r1.date'])
infectedIds.append(details)
for infectedId, infectedDate in infectedIds:
query = (
"MATCH (pp:Person) , (ip:Person) "
"WHERE ID(pp) = $id AND ID(ip) = $ipid "
"CREATE (pp)-[:COVID_EXPOSURE{date: date($date)}]->(ip);"
)
s.write_transaction(createInfectApp, query, id, infectedId, infectedDate)
infectedIds = []
for el in locationInfected:
details = []
details.append(el['ID(ip)'])
details.append(el['r1.date'])
details.append(el['l.name'])
infectedIds.append(details)
for infectedId, infectedDate, infectedPlace in infectedIds:
query = (
"MATCH (pp:Person) , (ip:Person) "
"WHERE ID(pp) = $id AND ID(ip) = $ipid "
"CREATE (pp)-[:COVID_EXPOSURE{date: date($date) , name: $name}]->(ip);"
)
s.write_transaction(createInfectLocation, query, id, infectedId, infectedDate, infectedPlace)
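# Worked example of the tracing window (hypothetical dates): for a positive
# test taken on 2021-06-10 with daysBack=7, date becomes 2021-06-03 and the
# queries above match contacts between 2021-06-03 and 2021-06-10, i.e. the
# seven days preceding the test.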
def delete_negative_after_exposure():
"""
Method that deletes exposure for people who made a negative test after a covid exposure
"""
query = ("match ()-[c:COVID_EXPOSURE]->(p)-[m:MAKE_TEST{result:\"Negative\"}]->(t) "
"where m.date >= c.date + duration({days: 7}) "
"delete c")
with driver.session() as session:
session.run(query)
def createInfectFamily(tx, query, id, ipid, date):
"""
    Method that creates the COVID_EXPOSURE relationship inside a family
"""
tx.run(query, id=id, ipid=ipid, date=date)
def createInfectApp(tx, query, id, ipid, date):
"""
    Method that creates the COVID_EXPOSURE relationship for an app contact
"""
tx.run(query, id=id, ipid=ipid, date=date)
def createInfectLocation(tx, query, id, ipid, date, name):
"""
    Method that creates the COVID_EXPOSURE relationship for a shared location
"""
tx.run(query, id=id, ipid=ipid, date=date, name=name)
def findInfectInFamily(tx, query, id):
"""
    Method that executes the query to find the infected members of a family
    :param tx: is the transaction
    :param query: is the query to execute
    :param id: is the id of the positive Person
    :return: a list of exposed family members
"""
result = tx.run(query, id=id).data()
return result
def findInfect(tx, query, id, date, hour):
"""
    Method that executes the query to find the Persons exposed by the positive Person
:param tx: is the transaction
:param query: is the query to execute
:param id: is the id of the positive Person
    :param date: is the date from which the tracking starts
    :param hour: is the hour from which the tracking starts
    :return: a list of exposed people
"""
result = tx.run(query, id=id, date=date, hour=hour).data()
return result
def createContact(tx, query, pId1, pId2, hour, date):
"""
    Method that executes the query to create an APP_CONTACT relationship
:param date: the date of the contact
:param hour: the hour of the contact
:param tx: is the transaction
:param query: is the query to perform
:param pId1: is the id of the first Person
:param pId2: is the id of the second Person
:return: nothing
"""
tx.run(query, pId1=pId1, pId2=pId2, hour=hour, date=date)
def getPersonIds(withApp=False):
"""
Method that retrieves all the ids of Person Node
    :param withApp: if True, retrieve only the ids of people with app = True
:return: a list of integer corresponding to the person ids
"""
with driver.session() as s:
ids = s.write_transaction(getPersonId, withApp)
pIds = []
for idEl in ids:
pIds.append(idEl["ID(p)"])
return pIds
def getPersonId(tx, withApp):
"""
    Method that retrieves the ids of Person nodes in the database
:param tx: is the transaction
    :param withApp: if True, retrieve only the ids of people with app = True
:return: a list of ids
"""
if not withApp:
query = (
"MATCH (p:Person) "
"RETURN ID(p);"
)
else:
query = (
"MATCH (p:Person) "
"WHERE p.app = \"True\" "
"RETURN ID(p);"
)
idsList = tx.run(query).data()
return idsList
def getLocationsIds():
"""
Method that retrieves all the ids of Location Node
:return: a list of integer corresponding to the location ids
"""
with driver.session() as s:
ids = s.write_transaction(getLocationsId)
lIds = []
for idEl in ids:
lIds.append(idEl["ID(l)"])
return lIds
def getLocationsId(tx):
"""
    Method that retrieves a list of Location ids
:param tx: is the transaction
:return: a list of ids
"""
query = (
"MATCH (l:Location)"
"RETURN ID(l)"
)
idsList = tx.run(query).data()
return idsList
def getVaccinesId(tx):
"""
    Method that retrieves a list of Vaccine ids
:param tx: is the transaction
:return: a list of ids
"""
query = (
"MATCH (v:Vaccine)"
"RETURN ID(v)"
)
idsList = tx.run(query).data()
return idsList
def getVaccinesIds():
"""
Method that retrieves all the ids of Vaccine Node
:return: a list of integer corresponding to the vaccine ids
"""
with driver.session() as s:
ids = s.write_transaction(getVaccinesId)
vIds = []
for idEl in ids:
vIds.append(idEl["ID(v)"])
return vIds
def getTestsIds():
"""
    Method that retrieves all the ids of Test nodes
:return: a list of integer corresponding to the test ids
"""
with driver.session() as s:
ids = s.write_transaction(getTestsId)
tIds = []
for idEl in ids:
tIds.append(idEl["ID(t)"])
return tIds
def getTestsId(tx):
"""
    Method that retrieves a list of Test ids
:param tx: is the transaction
:return: a list of ids
"""
query = (
"MATCH (t:Test)"
"RETURN ID(t)"
)
idsList = tx.run(query).data()
return idsList
def runQuery(tx, query, isReturn=False):
"""
Method that runs a generic query
:param tx: is the transaction
:param query: is the query to perform
    :param isReturn: if True, return the results; otherwise return nothing
"""
result = tx.run(query)
if isReturn:
return result.data()
def runQueryWrite(d, queryList):
"""
    Method that runs a list of write queries
    :param d: is the connection to the database (driver)
    :param queryList: is the list of complete queries to run
    :return: nothing
"""
for query in queryList:
with d.session() as s:
s.write_transaction(runQuery, query)
def runQueryRead(d, query):
"""
    Method that runs a generic read query
    :param d: is the connection to the database
    :param query: is the complete query to run
    :return: the query results
"""
with d.session() as s:
results = s.read_transaction(runQuery, query, True)
return results
def printDatabase():
"""
    Method used to print the database structure using the PlotDBStructure module
:return: nothing
"""
with driver.session() as s:
personNodes = s.read_transaction(findAllPerson)
houseNodes = s.read_transaction(findAllHome)
locationNodes = s.read_transaction(findAllLocation)
vaccineNodes = s.read_transaction(findAllVaccine)
testNodes = s.read_transaction(findAllTest)
liveRelationships = s.read_transaction(findAllLiveRelationships)
visitRelationships = s.read_transaction(findAllVisitRelationships)
appContactRelationships = s.read_transaction(findAllAppContactRelationships)
getRelationships = s.read_transaction(findAllGetVaccineRelationships)
makeRelationships = s.read_transaction(findAllMakeTestRelationships)
infectRelationships = s.read_transaction(findAllInfectedRelationships)
# Initialize the network attribute
ps.PlotDBStructure.__init__()
# Add nodes
ps.PlotDBStructure.addStructure(personNodes)
ps.PlotDBStructure.addStructure(houseNodes)
ps.PlotDBStructure.addStructure(locationNodes)
ps.PlotDBStructure.addStructure(vaccineNodes)
ps.PlotDBStructure.addStructure(testNodes)
# Add relationships
ps.PlotDBStructure.addStructure(liveRelationships)
ps.PlotDBStructure.addStructure(visitRelationships)
ps.PlotDBStructure.addStructure(appContactRelationships)
ps.PlotDBStructure.addStructure(makeRelationships)
ps.PlotDBStructure.addStructure(getRelationships)
ps.PlotDBStructure.addStructure(infectRelationships)
# Show the graph structure
ps.PlotDBStructure.showGraph()
return
if __name__ == '__main__':
# Open the connection
driver = openConnection()
# Only read from the graph
# printDatabase()
# Close the connection
# closeConnection(driver)
# exit()
# Read names from the file
names = readNames()
# Read surnames from the file
surnames = readSurnames()
# Read locations
locations = readLocations()
# Read house addresses
houseAddresses = readHouseAddresses()
vaccines = readVaccines()
tests = readTests()
# Create the family list
print("Creating families...")
families = createFamilies(names, surnames)
    # generalQuery will contain the whole list of queries needed to instantiate the database
generalQuery = []
# Generate all the Person Nodes and the family relationships
cQuery, rQuery = createNodesFamily(families, houseAddresses)
# Generate the locations node
lQuery = createNodeLocations(locations)
# Generate the vaccines nodes
vQuery = createNodeVaccines(vaccines)
# Generate the tests nodes
tQuery = createNodeTests(tests)
    # Add the node-creation queries to the generalQuery
for subQuery in cQuery:
generalQuery.append(subQuery)
for subQuery in lQuery:
generalQuery.append(subQuery)
for subQuery in vQuery:
generalQuery.append(subQuery)
for subQuery in tQuery:
generalQuery.append(subQuery)
    # Add the relationship queries to the generalQuery
for subQuery in rQuery:
generalQuery.append(subQuery)
# Delete the nodes already present
with driver.session() as session:
numberOfNodes = session.write_transaction(deleteAll)
# Generate the structure performing the node and relationship creation
runQueryWrite(driver, generalQuery)
# Generate random tests
# Take tests ids
print("Creating random tests...")
testsIds = getTestsIds()
personIds = getPersonIds()
    # Generate the relationship
createRelationshipsMakeTest(driver, personIds, testsIds)
# Generate random contacts with app tracing
    # Take Person ids of people with app attribute equal to True
print("Creating random app contact relationships...")
personIds = getPersonIds(True)
# Generate the relationships
createRelationshipsAppContact(driver, personIds)
# Generate random visits
# Take Location ids
locationIds = getLocationsIds()
personIds = getPersonIds()
# Generate the relationship
print("Creating random visit relationships...")
createRelationshipsVisit(driver, personIds, locationIds)
# Generate random vaccines
# Take vaccines ids
vaccineIds = getVaccinesIds()
print("Creating random vaccines...")
# Generate the relationship
createRelationshipsGetVaccine(driver, personIds, vaccineIds)
    # Verify the nodes have been created
# with driver.session() as session:
# numberOfNodes = session.read_transaction(countAll)
# print("Number of nodes: " + str(numberOfNodes))
# Find all the positive Person
data_for_positive = findAllPositivePerson()
print("Creating covid exposure relationships...")
for positive in data_for_positive:
positive_id = positive['ID(p)']
contagion_date = str(positive['infectionDate'])
        # Convert the string to a datetime to comply with Python date handling
contagion_datetime = datetime.datetime.strptime(contagion_date, "%Y-%m-%d")
contagion_hour = str(positive['infectionHour'])
createRelationshipsInfect(positive_id, contagion_datetime, contagion_hour, 7)
    # Remove exposures invalidated by a later negative test
delete_negative_after_exposure()
# Print the whole structure
printDatabase()
# Close the connection
closeConnection(driver)
| 32.227997 | 130 | 0.606666 | 5,475 | 46,505 | 5.107215 | 0.100639 | 0.011086 | 0.008762 | 0.011587 | 0.446034 | 0.381804 | 0.327802 | 0.293684 | 0.258637 | 0.232315 | 0 | 0.011855 | 0.281712 | 46,505 | 1,442 | 131 | 32.250347 | 0.825231 | 0.250317 | 0 | 0.334187 | 0 | 0.021767 | 0.159012 | 0.039623 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080666 | false | 0.002561 | 0.006402 | 0 | 0.176697 | 0.010243 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e1c8db5594b84531a71f4ac6141cb6cebad50e7 | 1,073 | py | Python | code/DNN/MiniFramework/HyperParameters_4_1.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | 3 | 2021-05-25T10:18:23.000Z | 2022-02-09T08:55:14.000Z | code/DNN/MiniFramework/HyperParameters_4_1.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | code/DNN/MiniFramework/HyperParameters_4_1.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-#
'''
# Name:         HyperParameters_4_1
# Description:
# Author: super
# Date: 2020/6/2
'''
from MiniFramework.EnumDef_4_0 import *
# this class is for two-layer NN only
class HyperParameters_4_1(object):
def __init__(self, eta=0.1, max_epoch=10000, batch_size=5,
net_type=NetType.Fitting,
init_method=InitialMethod.Xavier,
optimizer_name=OptimizerName.SGD,
stopper = None):
self.eta = eta
self.max_epoch = max_epoch
        # if batch_size == -1, it is FullBatch
        if batch_size == -1:
            # NOTE: num_example is not defined in this class; it must be set
            # on the instance (e.g. by the data reader) before FullBatch is used
            self.batch_size = self.num_example
else:
self.batch_size = batch_size
# end if
self.net_type = net_type
self.init_method = init_method
self.optimizer_name = optimizer_name
self.stopper = stopper
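    # Usage sketch (assumes the enums come from EnumDef_4_0):
    # hp = HyperParameters_4_1(eta=0.05, max_epoch=500, batch_size=32)
    # print(hp.toString())  # -> "bz:32,eta:0.05,init:Xavier,op:SGD"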
def toString(self):
title = str.format("bz:{0},eta:{1},init:{2},op:{3}", self.batch_size, self.eta, self.init_method.name, self.optimizer_name.name)
return title | 32.515152 | 136 | 0.59739 | 139 | 1,073 | 4.381295 | 0.467626 | 0.103448 | 0.064039 | 0.039409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035667 | 0.294501 | 1,073 | 33 | 137 | 32.515152 | 0.768824 | 0.188257 | 0 | 0 | 0 | 0 | 0.034884 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e1d901364494765f2ea9f357679476f51d416cd | 584 | py | Python | recurce_13/i_conference_lovers_v2.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 1 | 2022-03-31T07:30:53.000Z | 2022-03-31T07:30:53.000Z | recurce_13/i_conference_lovers_v2.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | null | null | null | recurce_13/i_conference_lovers_v2.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 2 | 2022-03-04T09:42:03.000Z | 2022-03-30T14:51:32.000Z | # I. Любители конференций
# ID успешной посылки 66248195
from collections import Counter
def conference_lovers(id_university, k):
number_participant = Counter(id_university)
k_max = number_participant.most_common()[0:k:]
result = [univer[0] for univer in k_max]
print(' '.join(map(str, result)))
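# Example (hypothetical input): conference_lovers([1, 2, 2, 3, 3, 3], 2)
# prints "3 2" - the k=2 university ids with the most participants.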
def read_input():
_ = int(input())
id_university = [int(element) for element in input().strip().split()]
k = int(input())
    return id_university, k
if __name__ == '__main__':
id_university, k = read_input()
conference_lovers(id_university, k)
| 25.391304 | 73 | 0.693493 | 78 | 584 | 4.884615 | 0.512821 | 0.188976 | 0.170604 | 0.146982 | 0.152231 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020877 | 0.179795 | 584 | 22 | 74 | 26.545455 | 0.77453 | 0.089041 | 0 | 0 | 0 | 0 | 0.017013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e1feda997e60649764860b6a4e1f4de31bd698d | 2,327 | py | Python | app/routes/user.py | Axtell-io/Axtell | 2f660450ca2eb70cc0520ad970c9aabdc65a3bb7 | [
"MIT"
] | 15 | 2018-04-20T14:56:59.000Z | 2021-03-31T20:16:29.000Z | app/routes/user.py | Axtell/Axtell | 2f660450ca2eb70cc0520ad970c9aabdc65a3bb7 | [
"MIT"
] | 148 | 2018-04-17T01:47:44.000Z | 2020-05-14T13:24:03.000Z | app/routes/user.py | Axtell-io/Axtell | 2f660450ca2eb70cc0520ad970c9aabdc65a3bb7 | [
"MIT"
] | 7 | 2018-06-01T11:15:18.000Z | 2020-08-14T04:24:50.000Z | from app.helpers.render import render_template, render_error
from app.controllers import user
from app.models.User import User, UserAuthToken
from app.server import server
from flask import g, request, redirect, url_for, abort
from app.session.csrf import csrf_protected
# noinspection PyUnusedLocal
@server.route("/user/data/me", methods=['GET'])
def get_my_profile():
return user.get_my_profile()
@server.route("/users/data/<int:user_id>", methods=['GET'])
def get_profile(user_id):
return user.get_profile(user_id)
@server.route("/user/followers/<int:user_id>/page/<int:page>", methods=['GET'])
@csrf_protected
def get_followers(user_id, page):
return user.get_followers(user_id, page=page)
@server.route("/user/following/<int:user_id>/page/<int:page>", methods=['GET'])
@csrf_protected
def get_following(user_id, page):
return user.get_following(user_id, page=page)
@server.route("/user/follow/<int:target_user_id>", methods=['POST'])
def follow_user(target_user_id):
if not isinstance(g.user, User):
return render_error('Unauthorized'), 401
return user.follow(g.user.id, target_user_id)
@server.route("/user/unfollow/<int:target_user_id>", methods=['POST'])
def unfollow_user(target_user_id):
if not isinstance(g.user, User):
return render_error('Unauthorized'), 401
return user.unfollow(g.user.id, target_user_id)
@server.route("/user/<int:user_id>", defaults={"name": None})
@server.route("/user/<int:user_id>/<name>")
def get_user(user_id, name):
matched_user = User.query.filter_by(id=user_id, deleted=False).first()
if matched_user is None:
return abort(404)
    # Redirect if the name is incorrect. Add a 'noredirect=1' flag to avoid infinite redirection in
    # exceptional circumstances
if name != matched_user.name and request.args.get('noredirect', '0') != '1':
return redirect(url_for('get_user', user_id=user_id, name=matched_user.name, **request.args, noredirect='1'), code=301)
stackexchange_login = UserAuthToken.\
query.\
filter_by(user_id=user_id, issuer='stackexchange.com').\
order_by(UserAuthToken.id.desc()).\
first() if matched_user.linked_stackexchange_public else None
return render_template('user.html', user=matched_user, stackexchange_login=stackexchange_login)
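# Illustrative request flow (hypothetical ids): GET /user/42/wrong-name
# issues a 301 to /user/42/<canonical-name>?noredirect=1, and that second
# request renders user.html without redirecting again.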
| 33.242857 | 127 | 0.728406 | 337 | 2,327 | 4.830861 | 0.25816 | 0.092138 | 0.064496 | 0.031327 | 0.383292 | 0.322482 | 0.273956 | 0.202703 | 0.202703 | 0.160934 | 0 | 0.007905 | 0.130211 | 2,327 | 69 | 128 | 33.724638 | 0.796443 | 0.060163 | 0 | 0.136364 | 0 | 0 | 0.153917 | 0.09574 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0.136364 | 0.090909 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e20d0f7055a8efeb21c1bd0269a9e0a1afa7cf4 | 8,616 | py | Python | captioning/utils/rewards.py | YapingZ/News-image-caption | fcccf51bbe5607adbf71c1da8ecdc6693555993f | [
"Apache-2.0"
] | null | null | null | captioning/utils/rewards.py | YapingZ/News-image-caption | fcccf51bbe5607adbf71c1da8ecdc6693555993f | [
"Apache-2.0"
] | null | null | null | captioning/utils/rewards.py | YapingZ/News-image-caption | fcccf51bbe5607adbf71c1da8ecdc6693555993f | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
from collections import OrderedDict
import torch
import sys
try:
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
from pyciderevalcap.cider.cider import Cider
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
from pyciderevalcap.NKRE_D.nkpe_D import Nkpe_D
except:
print('cider or coco-caption missing')
CiderD_scorer = None
Cider_scorer = None
Bleu_scorer = None
Nkpe_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
global CiderD_scorer
CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
global Cider_scorer
Cider_scorer = Cider_scorer or Cider(df=cached_tokens)
global Bleu_scorer
Bleu_scorer = Bleu_scorer or Bleu(4)
global Nkpe_scorer
Nkpe_scorer = Nkpe_scorer or Nkpe_D()
def array_to_str(arr):
out = ''
for i in range(len(arr)):
out += str(arr[i]) + ' '
if arr[i] == 0 :
break
return out.strip()
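# Example: array_to_str([3, 7, 0, 5]) returns "3 7 0" - generation stops at
# the first 0 (the end-of-sequence id), which is kept, and later tokens are
# dropped.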
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
batch_size = len(data_gts)
gen_result_size = gen_result.shape[0]
seq_per_img = gen_result_size // len(data_gts) # gen_result_size = batch_size * seq_per_img
assert greedy_res.shape[0] == batch_size
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(gen_result_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[gen_result_size + i] = [array_to_str(greedy_res[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(len(res))]
res__ = {i: res[i] for i in range(len(res_))}
gts_ = {i: gts[i // seq_per_img] for i in range(gen_result_size)}
gts_.update({i+gen_result_size: gts[i] for i in range(batch_size)})
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts_, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.nkpe_reward_weight > 0:
_, nkpe_scores = Nkpe_scorer.compute_score(gts_, res_)
print('Nkpe scores:', _)
else:
nkpe_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts_, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores + opt.nkpe_reward_weight * nkpe_scores
# scores = cider_scores * nkpe_scores * 3
scores = scores[:gen_result_size].reshape(batch_size, seq_per_img) - scores[-batch_size:][:, np.newaxis]
scores = scores.reshape(gen_result_size)
rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
return rewards
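# Shape sketch (hypothetical sizes, batch_size=2 and seq_per_img=3): scores
# holds 6 sampled-caption rewards followed by 2 greedy rewards; each sampled
# reward is baselined by its image's greedy reward and then repeated along
# the time axis so every token shares the sequence-level advantage.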
def get_self_critical_reward_2(data_gts, gen_result, monte_carlo_count):
global Nkpe_scorer
Nkpe_scorer = Nkpe_scorer or Nkpe_D()
# reward = np.zeros((gen_result.shape[0], 1))
gen_result_size = gen_result.shape[0]
seq_per_img = gen_result_size // len(data_gts) // monte_carlo_count # gen_result_size = batch_size * seq_per_img
batch_size = gen_result_size // monte_carlo_count
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
for i in range(gen_result_size):
# print(gen_result[i])
res[i] = [array_to_str(gen_result[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id': i, 'caption': res[i]} for i in range(len(res))]
gts_ = {int(gen_result_size//monte_carlo_count) * i + j: gts[j // seq_per_img] for i in range(monte_carlo_count) for j in range(int(gen_result_size//monte_carlo_count))}
_, nkpe_scores = Nkpe_scorer.compute_score(gts_, res_)
# print('Nkpe scores:', _)
reward = torch.from_numpy(nkpe_scores).cuda()
reward = reward.view(batch_size, monte_carlo_count, -1).sum(1)
return reward
def get_self_critical_reward_3(greedy_res, data_gts, gen_result, current_generated, opt, monte_carlo_count=2):
batch_size = len(data_gts)
gen_result_size = gen_result.shape[0]
seq_length = gen_result.shape[1]
seq_per_img = gen_result_size // len(data_gts) # gen_result_size = batch_size * seq_per_img
assert greedy_res.shape[0] == batch_size
current_generated_size = current_generated.size(0)
t = current_generated_size // gen_result_size
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(gen_result_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[gen_result_size + i] = [array_to_str(greedy_res[i])]
cur_res = OrderedDict()
current_generated = current_generated.data.cpu().numpy()
for i in range(current_generated_size):
cur_res[i] = [array_to_str(current_generated[i])]
# gen_result = gen_result.data.cpu().numpy()
# greedy_res = greedy_res.data.cpu().numpy()
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
cur_res_ = [{'image_id':i, 'caption': cur_res[i]} for i in range(len(cur_res))]
gts_ = {i: gts[i // seq_per_img] for i in range(gen_result_size)}
gts_cur_ = {j*gen_result_size + i: gts_[i] for j in range(t) for i in range(len(gts_)) }
# start = time.time()
_, nkpe_scores = Nkpe_scorer.compute_score(gts_cur_, cur_res_)
# print('scores time {}'.format(time.time() - start))
print('Nkpe scores:', _)
    nkpe_scores_list = np.split(nkpe_scores, t // monte_carlo_count, axis=0)
result = np.zeros((gen_result_size, seq_length), dtype=nkpe_scores.dtype)
for t, item in enumerate(nkpe_scores_list):
item_list = np.split(item, monte_carlo_count, axis=0)
res_scores = np.zeros((gen_result_size,), dtype=nkpe_scores.dtype)
for item_i in item_list:
res_scores += item_i
result[:,t*6: t*6+6] = np.repeat((res_scores/monte_carlo_count).reshape(-1,1),6, axis=1)
# scores = scores[:gen_result_size].reshape(batch_size, seq_per_img) - scores[-batch_size:][:, np.newaxis]
# scores = scores.reshape(gen_result_size)
#
# rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
rewards = result
return rewards
def get_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res[i] = [array_to_str(gen_result[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(batch_size)]
res__ = {i: res[i] for i in range(batch_size)}
gts = {i: gts[i // seq_per_img] for i in range(batch_size)}
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
return scores
def get_self_cider_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = []
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res.append(array_to_str(gen_result[i]))
scores = []
for i in range(len(data_gts)):
tmp = Cider_scorer.my_self_cider([res[i*seq_per_img:(i+1)*seq_per_img]])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
scores.append(get_div(np.linalg.eigvalsh(tmp[0]/10)))
scores = np.array(scores)
return scores | 37.298701 | 173 | 0.67572 | 1,342 | 8,616 | 4.008197 | 0.092399 | 0.10039 | 0.072504 | 0.05317 | 0.65105 | 0.601413 | 0.586168 | 0.549173 | 0.535787 | 0.531883 | 0 | 0.007533 | 0.198816 | 8,616 | 231 | 174 | 37.298701 | 0.771693 | 0.086583 | 0 | 0.485714 | 0 | 0 | 0.023049 | 0 | 0 | 0 | 0 | 0 | 0.011429 | 1 | 0.045714 | false | 0 | 0.068571 | 0 | 0.154286 | 0.045714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e2279b1f99ff28aa158f6954e55b80b12e12f93 | 1,984 | py | Python | util/gps_handler/gps_fence_handler.py | linusluotsinen/RPiAntiTheft | d76782b5064f7e540a4013fbf0e0ea26d989e2ce | [
"MIT"
] | null | null | null | util/gps_handler/gps_fence_handler.py | linusluotsinen/RPiAntiTheft | d76782b5064f7e540a4013fbf0e0ea26d989e2ce | [
"MIT"
] | null | null | null | util/gps_handler/gps_fence_handler.py | linusluotsinen/RPiAntiTheft | d76782b5064f7e540a4013fbf0e0ea26d989e2ce | [
"MIT"
] | null | null | null | #from gps_handler import GpsHandler
import math
class GpsFenceHandler:
def __init__(self, settings):
self.settings = settings
self.settings.load()
if self.settings.get_data() is None:
#gpsh = GpsHandler()
#gps_data = gpsh.get_gps_data()
default_settings = {"enabled": False, "thresholds":{"dist":100,"speed":10}, "gps":None }
self.settings.set_data(default_settings)
self.settings.save()
def enable(self):
data = self.settings.get_data()
data["enabled"] = True
self.settings.save()
def disable(self):
data = self.settings.get_data()
data["enabled"] = False
data["gps"] = None
self.settings.save()
#def refresh(self, client):
# data = self.settings.get_data()
# gpsh = GpsHandler()
# gps_data = gpsh.get_gps_data()
# data["gps"] = gps_data
# self.settings.save()
def get_settings(self):
return self.settings
def distance(self,lat1, lon1, lat2, lon2):
radius = 6371*1000 # m
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
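    # Sanity-check sketch (hypothetical coordinates): Milan Duomo to the
    # Politecnico campus is roughly 3 km, so the call below should return
    # a value on the order of 3000 m.
    # handler.distance(45.4642, 9.1900, 45.4786, 9.2272)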
def check_triggers(self, gps_data):
ret = {"dist": False, "speed": False }
thresholds = self.settings.get_data()["thresholds"]
state = self.settings.get_data()["gps"]
if state is not None:
dist = self.distance(state['latitude'],state['longitude'],gps_data['latitude'],gps_data['longitude'])
if dist > thresholds['dist']:
ret["dist"] = True
if state["speed"] > thresholds["speed"]:
ret["speed"] = True
return ret
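    # Example (hypothetical state): with a stored fix more than
    # thresholds['dist'] metres away and a stored speed below
    # thresholds['speed'], check_triggers returns {"dist": True, "speed": False}.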
| 31.492063 | 113 | 0.5625 | 238 | 1,984 | 4.579832 | 0.264706 | 0.165138 | 0.082569 | 0.104587 | 0.180734 | 0.133945 | 0.133945 | 0.133945 | 0 | 0 | 0 | 0.021444 | 0.294859 | 1,984 | 62 | 114 | 32 | 0.757684 | 0.12752 | 0 | 0.125 | 0 | 0 | 0.072632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.025 | 0.025 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e2601a57578f079de2951387b1289c07c01b38b | 10,894 | py | Python | build/lib.linux-x86_64-2.7/ryu/tests/unit/packet/test_packet.py | sharat910/my-ryu | d2994571e3e5fad58433044a3ca8a5b40a413c87 | [
"Apache-2.0"
] | 2 | 2019-05-06T01:11:37.000Z | 2020-10-09T08:24:15.000Z | ryu/tests/unit/packet/test_packet.py | rpt/ryu | ebf7638aac4481762e10ec90958f1480761a3893 | [
"Apache-2.0"
] | null | null | null | ryu/tests/unit/packet/test_packet.py | rpt/ryu | ebf7638aac4481762e10ec90958f1480761a3893 | [
"Apache-2.0"
] | 1 | 2018-07-12T20:08:53.000Z | 2018-07-12T20:08:53.000Z | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import netaddr
import array
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib import mac
from ryu.lib.packet import *
LOG = logging.getLogger('test_packet')
class TestPacket(unittest.TestCase):
""" Test case for packet
"""
dst_mac = mac.haddr_to_bin('AA:AA:AA:AA:AA:AA')
src_mac = mac.haddr_to_bin('BB:BB:BB:BB:BB:BB')
dst_ip = int(netaddr.IPAddress('192.168.128.10'))
dst_ip_bin = struct.pack('!I', dst_ip)
src_ip = int(netaddr.IPAddress('192.168.122.20'))
src_ip_bin = struct.pack('!I', src_ip)
payload = '\x06\x06\x47\x50\x00\x00\x00\x00' \
+ '\xcd\xc5\x00\x00\x00\x00\x00\x00' \
+ '\x10\x11\x12\x13\x14\x15\x16\x17' \
+ '\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
def get_protocols(self, pkt):
protocols = {}
for p in pkt:
if hasattr(p, 'protocol_name'):
protocols[p.protocol_name] = p
else:
protocols['payload'] = p
return protocols
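    # Example: for the ARP packet built below, get_protocols() yields
    # {'ethernet': <ethernet>, 'arp': <arp>}; any trailing raw bytes would
    # appear under the 'payload' key.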
def setUp(self):
pass
def tearDown(self):
pass
def test_arp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_ARP)
a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
self.src_mac, self.src_ip, self.dst_mac,
self.dst_ip)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(a)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac \
+ self.src_mac \
+ '\x08\x06'
# arp !HHBBH6sI6sI
a_buf = '\x00\x01' \
+ '\x08\x00' \
+ '\x06' \
+ '\x04' \
+ '\x00\x02' \
+ self.src_mac \
+ self.src_ip_bin \
+ self.dst_mac \
+ self.dst_ip_bin
buf = e_buf + a_buf
eq_(buf, p.data)
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_arp = protocols['arp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_ARP, p_eth.ethertype)
# arp
ok_(p_arp)
eq_(1, p_arp.hwtype)
eq_(ether.ETH_TYPE_IP, p_arp.proto)
eq_(6, p_arp.hlen)
eq_(4, p_arp.plen)
eq_(2, p_arp.opcode)
eq_(self.src_mac, p_arp.src_mac)
eq_(self.src_ip, p_arp.src_ip)
eq_(self.dst_mac, p_arp.dst_mac)
eq_(self.dst_ip, p_arp.dst_ip)
def test_vlan_arp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_8021Q)
v = vlan.vlan(0b111, 0b1, 3, ether.ETH_TYPE_ARP)
a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
self.src_mac, self.src_ip, self.dst_mac,
self.dst_ip)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(v)
p.add_protocol(a)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac \
+ self.src_mac \
+ '\x81\x00'
# vlan !HH
v_buf = '\xF0\x03' \
+ '\x08\x06'
# arp !HHBBH6sI6sI
a_buf = '\x00\x01' \
+ '\x08\x00' \
+ '\x06' \
+ '\x04' \
+ '\x00\x02' \
+ self.src_mac \
+ self.src_ip_bin \
+ self.dst_mac \
+ self.dst_ip_bin
buf = e_buf + v_buf + a_buf
eq_(buf, p.data)
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_vlan = protocols['vlan']
p_arp = protocols['arp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_8021Q, p_eth.ethertype)
# vlan
ok_(p_vlan)
eq_(0b111, p_vlan.pcp)
eq_(0b1, p_vlan.cfi)
eq_(3, p_vlan.vid)
eq_(ether.ETH_TYPE_ARP, p_vlan.ethertype)
# arp
ok_(p_arp)
eq_(1, p_arp.hwtype)
eq_(ether.ETH_TYPE_IP, p_arp.proto)
eq_(6, p_arp.hlen)
eq_(4, p_arp.plen)
eq_(2, p_arp.opcode)
eq_(self.src_mac, p_arp.src_mac)
eq_(self.src_ip, p_arp.src_ip)
eq_(self.dst_mac, p_arp.dst_mac)
eq_(self.dst_ip, p_arp.dst_ip)
def test_ipv4_udp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_IP)
ip = ipv4.ipv4(4, 5, 1, 0, 3, 1, 4, 64, inet.IPPROTO_UDP, 0,
self.src_ip, self.dst_ip)
u = udp.udp(0x190F, 0x1F90, 0, 0)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(ip)
p.add_protocol(u)
p.add_protocol(self.payload)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac \
+ self.src_mac \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x01' \
+ '\x00\x3C' \
+ '\x00\x03' \
+ '\x20\x04' \
+ '\x40' \
+ '\x11' \
+ '\x00\x00' \
+ self.src_ip_bin \
+ self.dst_ip_bin
# udp !HHHH
u_buf = '\x19\x0F' \
+ '\x1F\x90' \
+ '\x00\x28' \
+ '\x00\x00'
buf = e_buf + ip_buf + u_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_udp = protocols['udp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(1, p_ipv4.tos)
l = len(ip_buf) + len(u_buf) + len(self.payload)
eq_(l, p_ipv4.total_length)
eq_(3, p_ipv4.identification)
eq_(1, p_ipv4.flags)
eq_(64, p_ipv4.ttl)
eq_(inet.IPPROTO_UDP, p_ipv4.proto)
eq_(self.src_ip, p_ipv4.src)
eq_(self.dst_ip, p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0)
# udp
ok_(p_udp)
eq_(0x190f, p_udp.src_port)
eq_(0x1F90, p_udp.dst_port)
eq_(len(u_buf) + len(self.payload), p_udp.total_length)
eq_(0x77b2, p_udp.csum)
t = bytearray(u_buf)
struct.pack_into('!H', t, 6, p_udp.csum)
ph = struct.pack('!IIBBH', self.src_ip, self.dst_ip, 0,
17, len(u_buf) + len(self.payload))
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
def test_ipv4_tcp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_IP)
ip = ipv4.ipv4(4, 5, 0, 0, 0, 0, 0, 64, inet.IPPROTO_TCP, 0,
self.src_ip, self.dst_ip)
t = tcp.tcp(0x190F, 0x1F90, 0x123, 1, 6, 0b101010, 2048, 0, 0x6f,
'\x01\x02')
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(ip)
p.add_protocol(t)
p.add_protocol(self.payload)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac \
+ self.src_mac \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x00' \
+ '\x00\x4C' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x40' \
+ '\x06' \
+ '\x00\x00' \
+ self.src_ip_bin \
+ self.dst_ip_bin
# tcp !HHIIBBHHH + option
t_buf = '\x19\x0F' \
+ '\x1F\x90' \
+ '\x00\x00\x01\x23' \
+ '\x00\x00\x00\x01' \
+ '\x60' \
+ '\x2A' \
+ '\x08\x00' \
+ '\x00\x00' \
+ '\x00\x6F' \
+ '\x01\x02\x00\x00'
buf = e_buf + ip_buf + t_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_tcp = protocols['tcp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(0, p_ipv4.tos)
l = len(ip_buf) + len(t_buf) + len(self.payload)
eq_(l, p_ipv4.total_length)
eq_(0, p_ipv4.identification)
eq_(0, p_ipv4.flags)
eq_(64, p_ipv4.ttl)
eq_(inet.IPPROTO_TCP, p_ipv4.proto)
eq_(self.src_ip, p_ipv4.src)
eq_(self.dst_ip, p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0)
# tcp
ok_(p_tcp)
eq_(0x190f, p_tcp.src_port)
eq_(0x1F90, p_tcp.dst_port)
eq_(0x123, p_tcp.seq)
eq_(1, p_tcp.ack)
eq_(6, p_tcp.offset)
eq_(0b101010, p_tcp.bits)
eq_(2048, p_tcp.window_size)
eq_(0x6f, p_tcp.urgent)
eq_(len(t_buf), p_tcp.length)
t = bytearray(t_buf)
struct.pack_into('!H', t, 16, p_tcp.csum)
ph = struct.pack('!IIBBH', self.src_ip, self.dst_ip, 0,
6, len(t_buf) + len(self.payload))
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
| 29.284946 | 73 | 0.523224 | 1,516 | 10,894 | 3.504617 | 0.166227 | 0.042161 | 0.033879 | 0.031621 | 0.663655 | 0.636364 | 0.602296 | 0.588368 | 0.580839 | 0.580839 | 0 | 0.065367 | 0.345603 | 10,894 | 371 | 74 | 29.363881 | 0.679899 | 0.090141 | 0 | 0.594891 | 0 | 0 | 0.068668 | 0.012983 | 0 | 0 | 0.007303 | 0 | 0 | 1 | 0.025547 | false | 0.007299 | 0.036496 | 0 | 0.094891 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e266d63f6512f29cea6db3e7a1e786d0b045c0b | 8,739 | py | Python | models/torch_conv_train.py | Devjiu/Quntization | 60853485525a5382cde7824b0b09e55e2e264e2f | [
"MIT"
] | null | null | null | models/torch_conv_train.py | Devjiu/Quntization | 60853485525a5382cde7824b0b09e55e2e264e2f | [
"MIT"
] | null | null | null | models/torch_conv_train.py | Devjiu/Quntization | 60853485525a5382cde7824b0b09e55e2e264e2f | [
"MIT"
] | null | null | null | import itertools
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
device = torch.device("cpu")
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# functions to show an image
# import matplotlib.pyplot as plt
# import numpy as np
#
#
# def imshow(img):
# img = img / 2 + 0.5 # unnormalize
# npimg = img.numpy()
# plt.imshow(np.transpose(npimg, (1, 2, 0)))
# plt.show()
#
#
# # get some random training images
# dataiter = iter(trainloader)
# images, labels = dataiter.next()
#
# # show images
# imshow(torchvision.utils.make_grid(images))
# # print labels
# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Quant(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, input, quant_param):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
        # save_for_backward must be called exactly once; a second call
        # would overwrite whatever the first call stashed.
        ctx.save_for_backward(input)
        return input
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        # grad_input[input < 0] = 0
        # backward() must return one gradient per forward() input;
        # quant_param is a plain Python value, so its gradient is None.
        return grad_input, None
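# A minimal usage sketch (hypothetical) for the custom op above: it must be
# invoked through Quant.apply so the backward() defined here is registered
# with autograd.
#   x = torch.randn(4, requires_grad=True)
#   y = Quant.apply(x, 1.0)  # quant_param is unused in this sketch
#   y.sum().backward()       # gradients pass straight through to x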
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
self.fc1 = nn.Linear(in_features=16 * 5 * 5, out_features=120)
self.fc2 = nn.Linear(in_features=120, out_features=84)
self.fc3 = nn.Linear(in_features=84, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
import torch.optim as optim
# def convQuant(convLayer: nn.Module):
# convLayer()
# y_pred = relu(x.mm(w1)).mm(w2)
#
# # Compute and print loss
# loss = (y_pred - y).pow(2).sum()
# if t % 100 == 99:
# print(t, loss.item())
#
# # Use autograd to compute the backward pass.
# loss.backward()
#
# # Update weights using gradient descent
# with torch.no_grad():
# w1 -= learning_rate * w1.grad
# w2 -= learning_rate * w2.grad
#
# # Manually zero the gradients after updating weights
# w1.grad.zero_()
# w2.grad.zero_()
QuantizationCrunch = {}
def forward_hook(module: nn.Module, input: tuple, output: torch.Tensor) -> None:
# print("Forward Module : {}. hash {}".format(module, module.__hash__()))
# for layer in module.modules():
if isinstance(module, nn.Conv2d):
# print("Layer dict {}".format(module.state_dict().keys()))
# str = "quant_{}_input".format(list(module.named_modules())
# module.register_parameter("orig_input", input[0])
# module.register_buffer("orig_output", output[0])
QuantizationCrunch[str(module.__hash__())] = {"input": input[0], "output": output[0]} # module.
def backward_hook(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor) -> None:
# print("Backward Module : {}, grad_inp: {}, grad_out: {}".format(module, len(grad_input), len(grad_output)))
# Forward pass: compute predicted y using operations; we compute
# ReLU using our custom autograd operation.
if isinstance(module, nn.Conv2d):
inp = QuantizationCrunch[str(module.__hash__())]["input"]
# module.register_buffer("orig_output", output)
# torch.utils.hooks.RemovableHandle(clone).remove()
# print("w: {}, b: {}".format(module.weight, module.bias))
quant_out, quant_weights = quantize(module, module.weight, module.bias, inp)
# module.weight = torch.nn.Parameter(quant_weights)
def quantize(mod: torch.nn.Module, weights: torch.Tensor, bias: torch.Tensor,
             inp: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
orig_out = mod.forward(inp)
weights_shape = weights.shape
l_w = weights.flatten().tolist()
for i in range(len(l_w)):
if l_w[i] * 10_000 % 1 > 0:
# print("vl {} : {}".format(l_w[i], int(l_w[i] * 1_000) / 1_000.))
l_w[i] = int(l_w[i] * 10_000) / 10_000.
# if l_w[i] != 0:
# l_w[i] = 0.
q_w = torch.as_tensor(l_w).requires_grad_(False).reshape(weights_shape)
# print("q_w : {}".format(q_w))
mod.weight = torch.nn.Parameter(q_w, True)
quant_out = mod.forward(inp)
plt.plot(range(len(orig_out.flatten().tolist())), (quant_out - orig_out).flatten().tolist(), ",")
# print("Diff {}".format(quant_out - orig_out))
QuantizationCrunch.pop(str(mod.__hash__()))
    return quant_out, q_w
criterion = nn.CrossEntropyLoss()
# print(net.parameters())
optimizer = optim.Adam(net.parameters(), lr=0.001)  # , momentum=0.9)
# Register the quantization hooks once, up front: registering them inside the
# training loop stacks a fresh copy of each hook every iteration, and the
# forward hook must be in place before the first forward pass so that
# QuantizationCrunch is populated by the time backward_hook fires.
net.conv1.register_forward_hook(hook=forward_hook)
net.conv1.register_backward_hook(hook=backward_hook)
# net.conv2.register_forward_hook(hook=forward_hook)
# net.conv2.register_backward_hook(hook=backward_hook)
for epoch in range(50): # loop over the dataset multiple times
running_loss = 0.0
for i, data in itertools.islice(enumerate(trainloader, 0), 25):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
# state = net.state_dict()
# print("state dict: {}".format(state))
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# for layer in net.modules():
# # print("\tModules {} ".format(layer))
# if isinstance(layer, nn.Conv2d):
# print("Layer dict {}".format(layer.state_dict().keys()))
# w1 = layer.state_dict().get("weights")
# learning_rate = 0.01
# with torch.no_grad():
# w1 -= learning_rate * w1.grad
#
# # Manually zero the gradients after updating weights
# w1.grad.zero_()
# print statistics
running_loss += loss.item()
        if i % 5 == 4:    # print every 5 mini-batches
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 5))
running_loss = 0.0
plt.show()
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
l_w = net.conv1.weight.flatten().tolist()
for i in range(len(l_w)):
if l_w[i] * 10_000 % 1 > 0:
# print("vl {} : {}".format(l_w[i], int(l_w[i] * 1_000) / 1_000.))
print("not worked")
print("w1 : {}".format(
net.conv1.weight.tolist()
))
print("w2 : {}".format(
net.conv2.weight.tolist()
))
print("Accuracy of network on the 10_000 test images: %d %%" % (100 * correct / total))
| 35.962963 | 115 | 0.620323 | 1,173 | 8,739 | 4.491901 | 0.248082 | 0.005694 | 0.005694 | 0.003796 | 0.211425 | 0.16246 | 0.105143 | 0.077434 | 0.077434 | 0.051243 | 0 | 0.028911 | 0.240073 | 8,739 | 242 | 116 | 36.11157 | 0.764493 | 0.392951 | 0 | 0.12963 | 0 | 0 | 0.036275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064815 | false | 0 | 0.083333 | 0 | 0.203704 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e2680be29a37e0471952ff1c48d95daa56f74a6 | 4,738 | py | Python | src/dispatch/job/views.py | alibaba/easydispatch | 2cf32a374d12c804ff396f90b789c2a838003c5d | [
"Apache-2.0"
] | 11 | 2021-05-04T03:15:20.000Z | 2022-02-16T07:44:16.000Z | src/dispatch/job/views.py | alibaba/easydispatch | 2cf32a374d12c804ff396f90b789c2a838003c5d | [
"Apache-2.0"
] | 4 | 2021-06-21T11:12:37.000Z | 2021-06-29T11:54:18.000Z | src/dispatch/job/views.py | alibaba/easydispatch | 2cf32a374d12c804ff396f90b789c2a838003c5d | [
"Apache-2.0"
] | 2 | 2021-05-05T00:42:44.000Z | 2021-05-10T12:51:58.000Z | from typing import List
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query
from sqlalchemy.orm import Session
from dispatch.enums import Visibility
from dispatch.auth.models import DispatchUser
from dispatch.auth.service import get_current_user
from dispatch.database import get_db, search_filter_sort_paginate
from dispatch.auth.models import UserRoles
from .models import JobCreate, JobPagination, JobRead, JobUpdate
from .service import create, delete, get, update, get_by_code
# NOTE: the two names below are used by the /join and /metric endpoints but
# were never imported; the module paths here are assumptions -- adjust them
# to wherever the project actually defines these helpers.
from .flows import job_add_or_reactivate_participant_flow
from .metrics import make_forecast
router = APIRouter()
@router.get("/", response_model=JobPagination, summary="Retrieve a list of all jobs.")
def get_jobs(
db_session: Session = Depends(get_db),
page: int = 1,
items_per_page: int = Query(5, alias="itemsPerPage"),
query_str: str = Query(None, alias="q"),
sort_by: List[str] = Query(None, alias="sortBy[]"),
descending: List[bool] = Query(None, alias="descending[]"),
fields: List[str] = Query([], alias="fields[]"),
ops: List[str] = Query([], alias="ops[]"),
values: List[str] = Query([], alias="values[]"),
current_user: DispatchUser = Depends(get_current_user),
):
"""
Retrieve a list of all jobs.
"""
    # We want to provide additional protections around restricted jobs.
    # Because we want to filter proactively (instead of filtering each item
    # as it is returned), we don't use fastapi_permissions ACLs here.
return search_filter_sort_paginate(
db_session=db_session,
model="Job",
query_str=query_str,
page=page,
items_per_page=items_per_page,
sort_by=sort_by,
descending=descending,
fields=fields,
values=values,
ops=ops,
        join_attrs=[("tag", "requested_primary_worker")],
)
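# Example query (hypothetical values), combining pagination with a field filter:
#   GET /?page=1&itemsPerPage=10&q=repair&fields[]=job_type&ops[]=eq&values[]=visit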
@router.get("/{job_id}", response_model=JobRead, summary="Retrieve a single job.")
def get_job(
*,
db_session: Session = Depends(get_db),
job_id: str,
current_user: DispatchUser = Depends(get_current_user),
):
"""
Retrieve details about a specific job.
"""
job = get(db_session=db_session, job_id=job_id)
if not job:
raise HTTPException(status_code=404, detail="The requested job does not exist.")
return job
@router.post("/", response_model=JobRead, summary="Create a new job.")
def create_job(
*,
db_session: Session = Depends(get_db),
job_in: JobCreate,
current_user: DispatchUser = Depends(get_current_user),
background_tasks: BackgroundTasks,
):
"""
Create a new job.
"""
job = get_by_code(db_session=db_session, code=job_in.code)
if job:
raise HTTPException(status_code=400, detail="The job with this code already exists.")
job = create(db_session=db_session, **job_in.dict())
# background_tasks.add_task(job_create_flow, job_id=job.id)
return job
@router.put("/{job_id}", response_model=JobRead, summary="Update an existing job.")
def update_job(
*,
db_session: Session = Depends(get_db),
job_id: str,
job_in: JobUpdate,
current_user: DispatchUser = Depends(get_current_user),
background_tasks: BackgroundTasks,
):
"""
    Update a worker job.
"""
job = get(db_session=db_session, job_id=job_id)
if not job:
raise HTTPException(status_code=404, detail="The requested job does not exist.")
previous_job = JobRead.from_orm(job)
# NOTE: Order matters we have to get the previous state for change detection
job = update(db_session=db_session, job=job, job_in=job_in)
return job
@router.post("/{job_id}/join", summary="Join an job.")
def join_job(
*,
db_session: Session = Depends(get_db),
job_id: str,
current_user: DispatchUser = Depends(get_current_user),
background_tasks: BackgroundTasks,
):
"""
    Join a worker job.
"""
job = get(db_session=db_session, job_id=job_id)
if not job:
raise HTTPException(status_code=404, detail="The requested job does not exist.")
background_tasks.add_task(
job_add_or_reactivate_participant_flow, current_user.code, job_id=job.id
)
@router.delete("/{job_id}", response_model=JobRead, summary="Delete an job.")
def delete_job(*, db_session: Session = Depends(get_db), job_id: str):
"""
    Delete a worker job.
"""
job = get(db_session=db_session, job_id=job_id)
if not job:
raise HTTPException(status_code=404, detail="The requested job does not exist.")
delete(db_session=db_session, job_id=job.id)
@router.get("/metric/forecast/{job_type}", summary="Get job forecast data.")
def get_job_forecast(*, db_session: Session = Depends(get_db), job_type: str):
"""
Get job forecast data.
"""
return make_forecast(db_session=db_session, job_type=job_type)
| 30.567742 | 93 | 0.691009 | 651 | 4,738 | 4.809524 | 0.218126 | 0.077611 | 0.035133 | 0.05749 | 0.452571 | 0.381667 | 0.330565 | 0.320664 | 0.281699 | 0.281699 | 0 | 0.004443 | 0.192486 | 4,738 | 154 | 94 | 30.766234 | 0.813905 | 0.102575 | 0 | 0.40404 | 0 | 0 | 0.11154 | 0.012313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070707 | false | 0 | 0.10101 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e286f4bba6bc269eb129fff803ce6e3c742ed75 | 11,451 | py | Python | dashboard/dashboard.py | leap-solutions-asia/auto-scaling | be6a3e743be5ee57d5c6a5a35578a44f07751fe5 | [
"MIT"
] | null | null | null | dashboard/dashboard.py | leap-solutions-asia/auto-scaling | be6a3e743be5ee57d5c6a5a35578a44f07751fe5 | [
"MIT"
] | 23 | 2019-03-07T08:05:36.000Z | 2019-10-11T10:37:35.000Z | dashboard/dashboard.py | leap-solutions-asia/auto-scaling | be6a3e743be5ee57d5c6a5a35578a44f07751fe5 | [
"MIT"
] | 3 | 2019-08-09T05:46:35.000Z | 2020-01-23T10:12:29.000Z | import os
import re
import pickle
from flask import Flask, render_template, url_for, flash, redirect, session
from forms import SettingsForm, CredentialForm, EditCredentialForm, EditSettingsForm
from CloudStackApiClient import CloudStackApiClient
from CloudStackConfig import CloudStackConfig, cloudstack_file
from timezone import TIMEZONE, DEFAULT_TIMEZONE
from datetime import datetime, timedelta
app = Flask(__name__)
app.config['SECRET_KEY'] = '04f38b5709e0425f716a3e630b01085b'
autoscaling_file = "/auto-scaling/autoscaling.status"
@app.route('/')
@app.route("/dashboard")
def dashboard():
conf = CloudStackConfig()
if not conf.has_cloudstack_section():
session.pop('_flashes', None)
flash(f'Please input Cloudstack credential first', 'success')
return redirect(url_for('editcredential'))
if not conf.has_tenant_section():
flash(f'Please complete the settings!', 'success')
return redirect(url_for('editsettings'))
if not conf.has_autoscaling_section():
flash(f'Please complete the settings', 'success')
return redirect(url_for('editsettings'))
params = {}
params["title"] = 'Autoscale Dashboard'
params["labels"] = None
params["datasets"] = None
params["autoscaling_data"] = None
if not os.path.exists(autoscaling_file):
params["message"] = 'Autoscaling file does not exist, Please try to reload in minutes'
else:
with open(autoscaling_file, 'rb') as fd:
autoscaling_data = pickle.load(fd)
labels = []
for uuid, value in autoscaling_data['status'].items():
labels = [ x[0] for x in value ]
break
if conf.get_timezone() is not None:
offset = timedelta(seconds=int(conf.get_timezone()))
            for i, utc_str in enumerate(labels):
utc_datetime = datetime.strptime(utc_str, '%H:%M:%S') + offset
labels[i] = utc_datetime.strftime('%H:%M:%S')
datasets = []
for uuid, value in autoscaling_data['status'].items():
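            # Derive a stable "#RRGGBB" colour from the VM uuid: keep the last
            # character of each of the first four uuid groups plus the last
            # two characters of the fifth group.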
color = re.sub('^[^-]*([^-])-[^-]*([^-])-[^-]*([^-])-[^-]*([^-])-[^-]*([^-]{2})$', '#\\1\\2\\3\\4\\5', uuid)
datasets.append({
"label": autoscaling_data['vm'][uuid]['name'],
"borderColor": color,
"fill": False,
"data": [ x[1] for x in value ]
})
params["labels"] = labels
params["datasets"] = datasets
params["autoscaling_data"] = autoscaling_data
return render_template('dashboard.html', **params)
@app.route("/credential", methods=['GET', 'POST'])
def credential():
conf = CloudStackConfig()
if not conf.has_cloudstack_section():
flash(f'Please input Cloudstack credential first', 'success')
return redirect(url_for('editcredential'))
form = CredentialForm()
if form.validate_on_submit():
return redirect(url_for('editcredential'))
cs_secret = conf.get_secret()
cs_key = conf.get_key()
cs_endpoint = conf.get_endpoint()
params = {}
params["title"] = 'Credential'
params["form"] = form
params["cs_secret"] = cs_secret
params["cs_key"] = cs_key
params["cs_endpoint"] = cs_endpoint
return render_template('credential.html', **params)
@app.route("/editcredential", methods=['GET', 'POST'])
def editcredential():
form = EditCredentialForm()
conf = CloudStackConfig()
if form.validate_on_submit():
if not conf.has_cloudstack_section():
conf.add_cloudstack_section()
if conf.has_tenant_section():
conf.remove_tenant_section()
if conf.has_autoscaling_section():
conf.remove_autoscaling_section()
if conf.has_vm_section():
conf.remove_vm_section()
conf.set_secret(form.secret.data)
conf.set_key(form.key.data)
conf.set_endpoint(form.endpoint.data)
conf.update_configfile()
flash(f'Credential updated for {form.key.data}!, Please update autoscale settings', 'success')
return redirect(url_for('editsettings'))
params = {}
if conf.get_secret():
params["cs_secret"] = conf.get_secret()
if conf.get_key():
params["cs_key"] = conf.get_key()
if conf.get_endpoint():
params["cs_endpoint"]= conf.get_endpoint()
params["title"] = 'Edit Credential'
params["form"] = form
return render_template('editcredential.html', **params)
@app.route("/settings", methods=['GET', 'POST'])
def settings():
conf = CloudStackConfig()
if not conf.has_cloudstack_section():
flash(f'Please input Cloudstack credential first', 'success')
return redirect(url_for('editcredential'))
if not conf.has_tenant_section():
flash(f'Please complete the settings', 'success')
return redirect(url_for('editsettings'))
if not conf.has_autoscaling_section():
flash(f'Please complete the settings', 'success')
return redirect(url_for('editsettings'))
form = SettingsForm()
cs = CloudStackApiClient.get_instance()
if form.validate_on_submit():
return redirect(url_for('editsettings'))
tenant_lb_rule_uuid = conf.get_lb_rule_uuid()
tenant_zone_uuid = conf.get_zone_uuid()
tenant_template_uuid = conf.get_template_uuid()
tenant_serviceoffering_uuid = conf.get_serviceoffering_uuid()
autoscaling_autoscaling_vm = conf.get_autoscaling_vm()
autoscaling_upper_limit = conf.get_upper_limit()
autoscaling_lower_limit = conf.get_lower_limit()
tenant_zone_name = cs.get_zone_name(tenant_zone_uuid)
tenant_lb_rule_name = cs.get_lb_name(tenant_lb_rule_uuid)
tenant_template_name = cs.get_tp_name(tenant_template_uuid)
tenant_serviceoffering_name = cs.get_sv_name(tenant_serviceoffering_uuid)
networks_name_list = []
if conf.has_tenant_section():
for nw_uuid in conf.get_networks():
nw_name = cs.get_nw_name(nw_uuid)
networks_name_list.append(nw_name)
vms_name_list = []
if conf.has_vm_section():
for vm in conf.get_vm_list():
vm_uuid = conf.get_vm_uuid(vm)
vm_name = cs.get_vm_name(vm_uuid)
vms_name_list.append(vm_name)
timezone = dict(TIMEZONE).get(DEFAULT_TIMEZONE)
if conf.get_timezone() is not None:
timezone = dict(TIMEZONE).get(conf.get_timezone())
params = {}
params["title"] = 'Settings'
params["form"] = form
params["tenant_zone_name"] = tenant_zone_name
params["tenant_lb_rule_name"] = tenant_lb_rule_name
params["tenant_template_name"] = tenant_template_name
params["networks_name_list"] = networks_name_list
params["tenant_serviceoffering_name"] = tenant_serviceoffering_name
params["autoscaling_autoscaling_vm"] = autoscaling_autoscaling_vm
params["autoscaling_upper_limit"] = autoscaling_upper_limit
params["autoscaling_lower_limit"] = autoscaling_lower_limit
params["vms_name_list"] = vms_name_list
params["timezone"] = timezone
return render_template('settings.html', **params)
@app.route("/editsettings", methods=['GET', 'POST'])
def editsettings():
form = EditSettingsForm()
cs = CloudStackApiClient.get_instance()
messages = []
form.template_uuid.choices = cs.listTemplates(force=True)
if not form.template_uuid.choices:
form.template_uuid.errors = ['Please create templates first!']
messages.append({'category':'danger','content':'Please create templates first!'})
form.nws.choices = cs.listNetworks(force=True)
form.lb_rule_uuid.choices = cs.listLoadBalancerRules(force=True)
if not form.lb_rule_uuid.choices:
form.lb_rule_uuid.errors = ['Please create LB rules first!']
messages.append({'category':'danger','content':'Please create LB Rules first!'})
form.serviceoffering_uuid.choices = cs.listServiceOfferings(force=True)
form.zone_uuid.choices = cs.listZones(force=True)
form.vms.choices = cs.listVirtualMachines(force=True)
conf = CloudStackConfig()
if form.validate_on_submit():
if conf.has_tenant_section():
conf.remove_tenant_section()
conf.add_tenant_section()
conf.set_zone_uuid(form.zone_uuid.data)
conf.set_lb_rule_uuid(form.lb_rule_uuid.data)
conf.set_template_uuid(form.template_uuid.data)
conf.set_serviceoffering_uuid(form.serviceoffering_uuid.data)
for num, uuid in enumerate(form.nws.data, start=1):
conf.set_nw("network{}_uuid".format(num), uuid)
if conf.has_autoscaling_section():
conf.remove_autoscaling_section()
conf.add_autoscaling_section()
conf.set_autoscaling_vm(form.autoscaling_vm.data)
conf.set_upper_limit(form.upper_limit.data)
conf.set_lower_limit(form.lower_limit.data)
if conf.has_vm_section():
conf.remove_vm_section()
conf.add_vm_section()
for num, uuid in enumerate(form.vms.data, start=1):
conf.set_vm("vm{}_uuid".format(num), uuid)
if conf.has_dashboard_section():
conf.remove_dashboard_section()
conf.add_dashboard_section()
conf.set_timezone(form.timezone.data)
conf.update_configfile()
flash(f'Settings has been updated!', 'success')
return redirect(url_for('settings'))
params = {}
if conf.has_tenant_section() and conf.has_autoscaling_section():
tenant_zone_uuid = conf.get_zone_uuid()
tenant_lb_rule_uuid = conf.get_lb_rule_uuid()
tenant_template_uuid = conf.get_template_uuid()
tenant_serviceoffering_uuid = conf.get_serviceoffering_uuid()
nws = conf.get_networks()
autoscaling_autoscaling_vm = conf.get_autoscaling_vm()
autoscaling_upper_limit = conf.get_upper_limit()
autoscaling_lower_limit = conf.get_lower_limit()
vms = []
if conf.has_vm_section():
for vm in conf.get_vm_list():
vms.append(conf.get_vm_uuid(vm))
form.zone_uuid.default = tenant_zone_uuid
form.template_uuid.default = tenant_template_uuid
form.nws.default = nws
form.serviceoffering_uuid.default = tenant_serviceoffering_uuid
form.lb_rule_uuid.default = tenant_lb_rule_uuid
form.vms.default = vms
if conf.get_timezone() is not None:
form.timezone.default = conf.get_timezone()
form.process()
params = {
"tenant_zone_uuid": tenant_zone_uuid,
"tenant_lb_rule_uuid": tenant_lb_rule_uuid,
"tenant_template_uuid": tenant_template_uuid,
"nws": nws,
"tenant_serviceoffering_uuid": tenant_serviceoffering_uuid,
"autoscaling_autoscaling_vm": autoscaling_autoscaling_vm,
"autoscaling_upper_limit": autoscaling_upper_limit,
"autoscaling_lower_limit": autoscaling_lower_limit,
"vms": vms,
}
params["title"] = 'Edit Settings'
params["form"] = form
params["messages"] = messages
return render_template('editsettings.html', **params)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8080, debug=True)
| 37.917219 | 120 | 0.656537 | 1,351 | 11,451 | 5.274611 | 0.131754 | 0.034381 | 0.019646 | 0.030873 | 0.458322 | 0.393068 | 0.311255 | 0.294274 | 0.234493 | 0.194639 | 0 | 0.004746 | 0.227229 | 11,451 | 301 | 121 | 38.043189 | 0.800542 | 0 | 0 | 0.331984 | 0 | 0 | 0.154222 | 0.028469 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020243 | false | 0 | 0.036437 | 0 | 0.121457 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e2918459530001385052c20f48317cb1c6bed76 | 6,679 | py | Python | modules/gitbox/files/asfgit/hooks/ghactions.py | isabella232/infrastructure-puppet | 5fcb3429d47688b605c6b9f33e453c639af0d20c | [
"Apache-2.0"
] | 121 | 2015-01-23T20:42:14.000Z | 2022-02-28T23:36:46.000Z | modules/gitbox/files/asfgit/hooks/ghactions.py | isabella232/infrastructure-puppet | 5fcb3429d47688b605c6b9f33e453c639af0d20c | [
"Apache-2.0"
] | 206 | 2015-01-01T00:34:12.000Z | 2022-01-20T20:15:59.000Z | modules/gitbox/files/asfgit/hooks/ghactions.py | isabella232/infrastructure-puppet | 5fcb3429d47688b605c6b9f33e453c639af0d20c | [
"Apache-2.0"
] | 167 | 2015-04-15T21:13:19.000Z | 2021-11-07T21:16:59.000Z | #!/usr/local/bin/python
import fnmatch
import io
import os
import re
import subprocess
import sys
import asfpy.messaging
import yaml
import yaml.constructor
# LDAP to CNAME mappings for some projects
WSMAP = {
"whimsy": "whimsical",
"empire": "empire-db",
"webservices": "ws",
"infrastructure": "infra",
"comdev": "community",
}
# Hack to get around 'on: foo' being translated to 'True: foo' in pyYaml:
yaml.constructor.SafeConstructor.bool_values["on"] = "on"
# YAML String locator debug dict
ALL_STRINGS = {}
# Allowed GH Actions, in glob format
ALLOWED_ACTIONS = [
"actions/*", # GitHub Common Actions
"github/*", # GitHub's own Action collection
"apache/*", # Apache's action collection
"*/*@" + "[a-f0-9]"*40, # Any SHA1-pinned action (assuming it's been reviewed)
]
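# e.g. a (hypothetical) "someorg/some-action@" followed by a full 40-hex-char
# commit SHA matches the last, SHA1-pinned pattern above.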
def capture_string_location(self, node):
""" Constructor that captures where in the yaml all strings are located, for debug/response purposes """
if self.name not in ALL_STRINGS:
ALL_STRINGS[self.name] = []
ALL_STRINGS[self.name].append((node.value, str(node.start_mark)))
return self.construct_scalar(node)
# Re-route all strings through our capture function
yaml.constructor.SafeConstructor.add_constructor(u"tag:yaml.org,2002:str", capture_string_location)
def contains(filename, value=None, fnvalue=None):
""" If a string is contained within a yaml (and is not a comment or key), return where we found it """
if filename in ALL_STRINGS:
for el in ALL_STRINGS[filename]:
if (value and value in el[0]) or (fnvalue and fnmatch.fnmatch(el[0], fnvalue)):
return el[1].strip()
def get_yaml(filename, refname):
""" Fetch a yaml file from a specific branch, return its contents to caller as parsed object"""
try:
devnull = open(os.devnull, "w")
fdata = subprocess.check_output(("/usr/bin/git", "show", "%s:%s" % (refname, filename)), stderr=devnull)
except subprocess.CalledProcessError as e: # Git show failure, no such file/branch
fdata = None
if fdata:
try:
stream = io.BytesIO(fdata)
stream.name = filename
return yaml.safe_load(stream)
except yaml.YAMLError as e:
pass # If yaml doesn't work, we do not need to scan it :)
return None
def get_values(yml, tagname):
""" Returns all matching tag values from the yaml """
    for key, value in yml.items():  # .items() works on both Python 2 and 3
if key == tagname:
yield value
elif isinstance(value, dict):
for subvalue in get_values(value, tagname):
yield subvalue
elif isinstance(value, list):
for subitem in value:
if isinstance(subitem, dict):
for subvalue in get_values(subitem, tagname):
yield subvalue
def notify_private(cfg, subject, text):
""" Notify a project's private list about issues... """
# infer project name
m = re.match(r"(?:incubator-)?([^-.]+)", cfg.repo_name)
pname = m.group(1)
pname = WSMAP.get(pname, pname)
# recps = ["private@%s.apache.org" % pname, "private@infra.apache.org"]
recps = ["notifications@infra.apache.org"] # For now, send to projects later.
# Tell project what happened, on private@
asfpy.messaging.mail(
sender="GitBox Security Scan <gitbox@apache.org>",
recipients=recps,
subject=subject,
message=text,
)
def scan_for_problems(yml, filename):
""" Scan for all potential security policy issues in the yaml """
problems = ""
# Rule 1: No pull_request_target triggers if secrets are used in the workflow
if "on" in yml:
triggers = yml.get("on", [])
if (isinstance(triggers, list) or isinstance(triggers, dict)) and "pull_request_target" in triggers:
# No ${{ secrets.GITHUB_TOKEN }} etc in pull_request_target workflows.
secrets_where = contains(filename, fnvalue="${{ secrets.* }}")
if secrets_where:
problems += (
"- Workflow can be triggered by forks (pull_request_target) but contains references to secrets %s!\n"
% secrets_where
)
# No imports via from_secret!
from_secret = get_values(yml, "from_secret")
if from_secret:
secrets_where = contains(filename, value="from_secret")
problems += (
"- Workflow can be triggered by forks (pull_request_target) but contains references to secrets %s!\n"
% secrets_where
)
# Rule 2: All external refs must be pinned or within whitelisted groups
for use_ref in get_values(yml, "uses"):
good = False
for am in ALLOWED_ACTIONS:
if fnmatch.fnmatch(use_ref, am):
good = True
if not good:
problems += '- "%s" (%s) is not an allowed GitHub Actions reference.\n' % (
use_ref,
contains(filename, use_ref),
)
return problems
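# Example of a (hypothetical) workflow fragment that would trip Rule 1 above:
#   on: [pull_request_target]
#   jobs:
#     build:
#       env:
#         TOKEN: ${{ secrets.GITHUB_TOKEN }}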
def main():
import asfgit.cfg as cfg
import asfgit.git as git
# For each push
for ref in git.stream_refs(sys.stdin):
# For each commit in push
for commit in ref.commits():
cfiles = commit.files()
# For each file in commit
for filename in cfiles:
# Is this a GHA file?
if filename.startswith(".github/workflows/") and (
filename.endswith(".yml") or filename.endswith(".yaml")
):
yml = get_yaml(filename, ref.name)
problems = scan_for_problems(yml, filename)
if problems:
notify_private(
cfg,
"Security policy warning for GitHub Actions defined in %s.git: %s"
% (cfg.repo_name, filename),
"The following issues were detected while scanning %s in the %s repository:\n\n"
"%s\n\n"
"Please see https://s.apache.org/ghactions for our general policies on GitHub Actions.\n"
"With regards,\nASF Infrastructure <users@infra.apache.org>."
% (filename, cfg.repo_name, problems),
)
# Test when being called directly
if __name__ == "__main__":
my_yaml = yaml.safe_load(open("test.yml"))
probs = scan_for_problems(my_yaml, "test.yml")
print(probs)
| 36.697802 | 121 | 0.590957 | 813 | 6,679 | 4.765068 | 0.348093 | 0.02065 | 0.021941 | 0.009293 | 0.077439 | 0.064017 | 0.050594 | 0.050594 | 0.050594 | 0.050594 | 0 | 0.003243 | 0.307531 | 6,679 | 181 | 122 | 36.900552 | 0.834378 | 0.214403 | 0 | 0.078125 | 0 | 0 | 0.177208 | 0.027189 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054688 | false | 0.007813 | 0.085938 | 0 | 0.179688 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e2f92d8667d4eff648ffa790d95599d6434e9f2 | 1,606 | py | Python | user_auth/views.py | miswo/tweet-only-client | 649c0c621e84f726fc8c1fc51725c74b6f7dc8ad | [
"MIT"
] | null | null | null | user_auth/views.py | miswo/tweet-only-client | 649c0c621e84f726fc8c1fc51725c74b6f7dc8ad | [
"MIT"
] | 3 | 2020-02-11T23:17:55.000Z | 2021-06-10T20:54:07.000Z | user_auth/views.py | miswo/tweet-only-client | 649c0c621e84f726fc8c1fc51725c74b6f7dc8ad | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from social_django.models import UserSocialAuth
import os
import tweepy
'''
from djangoworks.settings import isDebug
if isDebug() == True:
try:
from djangoworks.configs import twitter
SOCIAL_AUTH_TWITTER_KEY = twitter.SOCIAL_AUTH_TWITTER_KEY
SOCIAL_AUTH_TWITTER_SECRET = twitter.SOCIAL_AUTH_TWITTER_SECRET
except:
pass
else:
'''
SOCIAL_AUTH_TWITTER_KEY = os.environ['SOCIAL_AUTH_TWITTER_KEY']
SOCIAL_AUTH_TWITTER_SECRET = os.environ['SOCIAL_AUTH_TWITTER_SECRET']
@login_required
def top(request):
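    # Posting flow: a GET such as /?words=hello (the exact path depends on the
    # URLconf) tweets the given text via the logged-in user's OAuth tokens.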
user = UserSocialAuth.objects.get(user_id=request.user.id)
if 'words' in request.GET:
try:
auth = UserSocialAuth.objects.filter(user=request.user).get()
handler = tweepy.OAuthHandler(SOCIAL_AUTH_TWITTER_KEY, SOCIAL_AUTH_TWITTER_SECRET)
handler.set_access_token(auth.tokens["oauth_token"],auth.tokens["oauth_token_secret"])
api = tweepy.API(auth_handler=handler)
Message = {
'words': request.GET.get('words'),
}
msg = Message['words']
print(msg)
api.update_status(msg)
return render(request, 'top.html', Message)
        except Exception:
            # Broad catch, assumed to be for tweepy's duplicate-status error;
            # note that it also masks any other posting failure.
ErrorMessage = {
'words': "Couldn't tweet because you said the same thing again.",
}
return render(request, 'top.html', ErrorMessage)
else:
return render(request,'top.html',{'user': user})
| 29.740741 | 98 | 0.651308 | 184 | 1,606 | 5.461957 | 0.358696 | 0.099502 | 0.169154 | 0.099502 | 0.319403 | 0.128358 | 0.128358 | 0.128358 | 0 | 0 | 0 | 0 | 0.254047 | 1,606 | 53 | 99 | 30.301887 | 0.838898 | 0 | 0 | 0 | 0 | 0 | 0.140565 | 0.037433 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e307b91132870572d536e2f42552febf7371a45 | 646 | py | Python | configs/hyperparameters.py | hyungkwonko/PTI | 5c804a9fc75028cede80b187c0def28521f2a331 | [
"MIT"
] | 2 | 2021-08-01T08:05:15.000Z | 2021-08-01T08:11:07.000Z | configs/hyperparameters.py | hyungkwonko/PTI | 5c804a9fc75028cede80b187c0def28521f2a331 | [
"MIT"
] | null | null | null | configs/hyperparameters.py | hyungkwonko/PTI | 5c804a9fc75028cede80b187c0def28521f2a331 | [
"MIT"
] | 1 | 2021-08-19T10:42:56.000Z | 2021-08-19T10:42:56.000Z | ## Architechture
lpips_type = 'vgg'
first_inv_type = 'w'
optim_type = 'adam'
## Locality regularization
latent_ball_num_of_samples = 1
locality_regularization_interval = 1
use_locality_regularization = False
regulizer_l2_lambda = 0.1
regulizer_lpips_lambda = 0.1
regulizer_alpha = 30
## Loss
pt_l1_lambda = 1
pt_l2_lambda = 1
pt_lpips_lambda = 1
pt_lpips_layers = [0, 1, 2, 3, 4]
## Steps
LPIPS_value_threshold = 0.06
L2_value_threshold = 0.03
max_pti_steps = 350
first_inv_steps = 450
max_images_to_invert = 30
## Optimization
n_avg_samples = 10000
pti_learning_rate = 3e-4
first_inv_lr = 5e-3
train_batch_size = 1
use_last_w_pivots = False
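## Usage (a hypothetical sketch; values are read as plain module attributes)
# from configs import hyperparameters as hp
# optimizer = torch.optim.Adam(params, lr=hp.pti_learning_rate)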
| 19.575758 | 36 | 0.78483 | 109 | 646 | 4.220183 | 0.541284 | 0.052174 | 0.058696 | 0.073913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078853 | 0.136223 | 646 | 32 | 37 | 20.1875 | 0.74552 | 0.094427 | 0 | 0 | 0 | 0 | 0.013937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e346baa1e8905675293c0be9900be1c2daedaec | 458 | py | Python | codigo_Arduino/DesdeConsola.py | Mik3Mon/AnalisisAlgoritmos | 95b739b22ab2fa240df0373ef89423286399a65e | [
"MIT"
] | null | null | null | codigo_Arduino/DesdeConsola.py | Mik3Mon/AnalisisAlgoritmos | 95b739b22ab2fa240df0373ef89423286399a65e | [
"MIT"
] | null | null | null | codigo_Arduino/DesdeConsola.py | Mik3Mon/AnalisisAlgoritmos | 95b739b22ab2fa240df0373ef89423286399a65e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 9 22:11:56 2021
@author: Mike
"""
import serial
import time
arduino = serial.Serial("COM2", 9600)
time.sleep(2)
print("Presione 1 para mandar y 2 para apagar: ")
while 1:
datousuario = input()
if datousuario == "1":
arduino.write(b'34.23;34.23;45.22')
print("Mandar")
elif datousuario == "2":
arduino.close()
print("Apagar")
break
| 18.32 | 49 | 0.561135 | 61 | 458 | 4.213115 | 0.672131 | 0.031128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107362 | 0.28821 | 458 | 25 | 50 | 18.32 | 0.680982 | 0.159389 | 0 | 0 | 0 | 0 | 0.198413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e365b42aa11cbe15a9c00e9f63e614ec75483ec | 11,547 | py | Python | src/simulations/tests/simulator_test.py | PrivacyAmp/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | 20 | 2020-03-30T22:39:32.000Z | 2022-03-09T06:32:14.000Z | src/simulations/tests/simulator_test.py | OpenMeasurement/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | 41 | 2020-05-01T01:09:38.000Z | 2021-10-15T17:53:31.000Z | src/simulations/tests/simulator_test.py | OpenMeasurement/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | 8 | 2020-06-18T22:33:14.000Z | 2021-05-03T13:39:12.000Z | # Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wfa_cardinality_estimation_evaluation_framework.simulations.simulator."""
import io
from absl.testing import absltest
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.estimators.base import EstimateNoiserBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import EstimatorBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import SketchBase
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import AddRandomElementsNoiser
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import ExactMultiSet
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import LosslessEstimator
from wfa_cardinality_estimation_evaluation_framework.evaluations.configs import SketchEstimatorConfig
from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator
from wfa_cardinality_estimation_evaluation_framework.simulations import simulator
def get_simple_simulator(sketch_estimator_config=None):
if not sketch_estimator_config:
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless', sketch_factory=ExactMultiSet,
estimator=LosslessEstimator())
set_generator_factory = (
set_generator.IndependentSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=1, num_sets=1, set_size=1))
return simulator.Simulator(
num_runs=1,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config,
sketch_random_state=np.random.RandomState(1),
set_random_state=np.random.RandomState(2))
class RandomSketchForTestRandomSeed(SketchBase):
@classmethod
def get_sketch_factory(cls):
def f(random_seed):
return cls(random_seed=random_seed)
return f
def __init__(self, random_seed):
self.cardinality = random_seed
def add_ids(self, ids):
_ = ids
class EstimatorForTestRandomSeed(EstimatorBase):
def __call__(self, sketch_list):
return [sketch_list[-1].cardinality]
class FakeEstimateNoiser(EstimateNoiserBase):
def __init__(self):
self._calls = 0
def __call__(self, cardinality_estimate):
self._calls += 1
return 10
class FakeSetGenerator(set_generator.SetGeneratorBase):
"""Generator for a fixed collection of sets."""
@classmethod
def get_generator_factory(cls, set_list):
def f(random_state):
return cls(set_list)
return f
def __init__(self, set_list):
self.set_list = set_list
def __iter__(self):
for s in self.set_list:
yield s
class SimulatorTest(absltest.TestCase):
def test_simulator_run_one(self):
sim = get_simple_simulator()
data_frame = sim.run_one()
self.assertLen(data_frame, 1)
for pub in data_frame['num_sets']:
self.assertEqual(pub, 1)
def test_simulator_run_one_with_estimate_noiser(self):
fake_estimate_noiser = FakeEstimateNoiser()
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless',
sketch_factory=ExactMultiSet, estimator=LosslessEstimator(),
estimate_noiser=fake_estimate_noiser)
sim = get_simple_simulator(sketch_estimator_config)
data_frame = sim.run_one()
self.assertLen(data_frame, 1)
self.assertEqual(
data_frame[simulator.ESTIMATED_CARDINALITY_BASENAME + '1'].iloc[0], 10)
self.assertEqual(fake_estimate_noiser._calls, 1)
def test_simulator_run_all_and_aggregate(self):
sim = get_simple_simulator()
data_frames = sim.run_all_and_aggregate()
self.assertLen(data_frames, 2)
for pub in data_frames[0]['num_sets']:
self.assertEqual(pub, 1)
def test_simulator_run_all_and_aggregate_with_noise(self):
rs = np.random.RandomState(3)
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless',
sketch_factory=ExactMultiSet,
estimator=LosslessEstimator(),
sketch_noiser=AddRandomElementsNoiser(num_random_elements=3,
random_state=rs))
sim = get_simple_simulator(sketch_estimator_config)
data_frames = sim.run_all_and_aggregate()
self.assertLen(data_frames, 2)
for pub in data_frames[0]['num_sets']:
self.assertEqual(pub, 1)
self.assertEqual(
data_frames[0][simulator.ESTIMATED_CARDINALITY_BASENAME + '1'][0], 4)
self.assertEqual(
data_frames[0][simulator.TRUE_CARDINALITY_BASENAME + '1'][0], 1)
self.assertEqual(
data_frames[0][simulator.RELATIVE_ERROR_BASENAME + '1'][0], 3)
def test_simulator_run_all_and_aggregate_multiple_runs(self):
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless',
sketch_factory=ExactMultiSet, estimator=LosslessEstimator())
set_generator_factory = (
set_generator.IndependentSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=1, num_sets=1, set_size=1))
sim = simulator.Simulator(
num_runs=5,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config)
data_frames = sim.run_all_and_aggregate()
self.assertLen(data_frames, 2)
self.assertLen(data_frames[0], 5)
for pub in data_frames[0]['num_sets']:
self.assertEqual(pub, 1)
def test_simulator_run_all_and_aggregate_write_file(self):
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless',
sketch_factory=ExactMultiSet, estimator=LosslessEstimator())
set_generator_factory = (
set_generator.IndependentSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=1, num_sets=1, set_size=1))
file_df = io.StringIO()
file_df_agg = io.StringIO()
sim = simulator.Simulator(
num_runs=5,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config,
file_handle_raw=file_df,
file_handle_agg=file_df_agg)
df, df_agg = sim()
# Test if the saved data frame is the same as the one returned from the
# simulator.
file_df.seek(0)
df_from_csv = pd.read_csv(file_df)
pd.testing.assert_frame_equal(df, df_from_csv)
file_df_agg.seek(0)
df_agg_from_csv = pd.read_csv(file_df_agg,
header=[0, 1], index_col=0)
pd.testing.assert_frame_equal(df_agg, df_agg_from_csv)
def test_get_sketch_same_run_same_random_state(self):
sketch_estimator_config = SketchEstimatorConfig(
name='exact_set-lossless',
sketch_factory=RandomSketchForTestRandomSeed,
estimator=EstimatorForTestRandomSeed())
set_generator_factory = (
set_generator.IndependentSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=1, num_sets=2, set_size=1))
sim = simulator.Simulator(
num_runs=1,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config)
df, _ = sim()
self.assertEqual(
df.loc[df['num_sets'] == 1, simulator.ESTIMATED_CARDINALITY_BASENAME + '1'].values,
df.loc[df['num_sets'] == 2, simulator.ESTIMATED_CARDINALITY_BASENAME + '1'].values)
def test_get_sketch_different_runs_different_random_state(self):
sketch_estimator_config = SketchEstimatorConfig(
name='random_sketch-estimator_for_test_random_seed',
sketch_factory=RandomSketchForTestRandomSeed,
estimator=EstimatorForTestRandomSeed())
set_generator_factory = (
set_generator.IndependentSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=1, num_sets=1, set_size=1))
sim = simulator.Simulator(
num_runs=2,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config)
df, _ = sim()
self.assertNotEqual(
df.loc[df['run_index'] == 0, simulator.ESTIMATED_CARDINALITY_BASENAME + '1'].values,
df.loc[df['run_index'] == 1, simulator.ESTIMATED_CARDINALITY_BASENAME + '1'].values)
def test_extend_histogram(self):
self.assertEqual(simulator.Simulator._extend_histogram(None, [], 1), [0])
self.assertEqual(simulator.Simulator._extend_histogram(None, [3, 2, 1], 1), [3])
self.assertEqual(simulator.Simulator._extend_histogram(None, [3, 2, 1], 2), [3, 2])
self.assertEqual(simulator.Simulator._extend_histogram(None, [3, 2, 1], 3), [3, 2, 1])
self.assertEqual(simulator.Simulator._extend_histogram(None, [3, 2, 1], 5), [3, 2, 1, 0, 0])
def test_shuffle_distance(self):
with self.assertRaises(AssertionError):
simulator.Simulator(0,0,0)._shuffle_distance([], [])
with self.assertRaises(AssertionError):
simulator.Simulator(0,0,0)._shuffle_distance([1], [])
self.assertEqual(simulator.Simulator(0,0,0)._shuffle_distance(
[1], [1]), 0.0)
self.assertEqual(simulator.Simulator(0,0,0)._shuffle_distance(
[10], [10]), 0.0)
self.assertEqual(simulator.Simulator(0,0,0)._shuffle_distance(
[1, 1], [1]), 1.0)
self.assertEqual(simulator.Simulator(0,0,0)._shuffle_distance(
[1, 1], [1, 1]), 0.0)
self.assertEqual(simulator.Simulator(0,0,0)._shuffle_distance(
[2, 1, 0], [2, 2, 1]), 0.5)
def test_multiple_frequencies(self):
sketch_estimator_config = SketchEstimatorConfig(
name='exact-set-multiple-frequencies',
sketch_factory=ExactMultiSet,
estimator=LosslessEstimator(),
max_frequency=3)
set_generator_factory = (
FakeSetGenerator.get_generator_factory(
[[1, 1, 1, 2, 2, 3], [1, 1, 1, 3, 3, 4]]))
sim = simulator.Simulator(
num_runs=1,
set_generator_factory=set_generator_factory,
sketch_estimator_config=sketch_estimator_config)
df, _ = sim()
expected_columns = ['num_sets',
simulator.ESTIMATED_CARDINALITY_BASENAME + '1',
simulator.ESTIMATED_CARDINALITY_BASENAME + '2',
simulator.ESTIMATED_CARDINALITY_BASENAME + '3',
simulator.TRUE_CARDINALITY_BASENAME + '1',
simulator.TRUE_CARDINALITY_BASENAME + '2',
simulator.TRUE_CARDINALITY_BASENAME + '3',
simulator.SHUFFLE_DISTANCE,
'run_index',
simulator.RELATIVE_ERROR_BASENAME + '1',
simulator.RELATIVE_ERROR_BASENAME + '2',
simulator.RELATIVE_ERROR_BASENAME + '3']
expected_data = [
[1, 3, 2, 1, 3, 2, 1, 0., 0, 0., 0., 0.],
[2, 4, 3, 2, 4, 3, 2, 0., 0, 0., 0., 0.]
]
expected_df = pd.DataFrame(expected_data, columns=expected_columns)
pd.testing.assert_frame_equal(df, expected_df)
if __name__ == '__main__':
absltest.main()
| 38.49 | 104 | 0.71603 | 1,421 | 11,547 | 5.475018 | 0.143561 | 0.006684 | 0.064781 | 0.031105 | 0.672622 | 0.608612 | 0.562468 | 0.536375 | 0.47635 | 0.437404 | 0 | 0.022251 | 0.190439 | 11,547 | 299 | 105 | 38.618729 | 0.810013 | 0.068849 | 0 | 0.450644 | 0 | 0 | 0.027035 | 0.006898 | 0 | 0 | 0 | 0 | 0.137339 | 1 | 0.098712 | false | 0 | 0.055794 | 0.012876 | 0.2103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e3dddefc0fc41021833c4964e4d102684d48114 | 616 | py | Python | utils/bufsize.py | devmil/pty | d8365a8ca021e8da1611512662cee94cc41688a6 | [
"MIT"
] | 22 | 2020-08-04T13:52:16.000Z | 2022-03-16T09:48:26.000Z | utils/bufsize.py | devmil/pty | d8365a8ca021e8da1611512662cee94cc41688a6 | [
"MIT"
] | 5 | 2020-10-16T12:24:06.000Z | 2021-07-21T02:19:34.000Z | utils/bufsize.py | devmil/pty | d8365a8ca021e8da1611512662cee94cc41688a6 | [
"MIT"
] | 6 | 2020-10-03T03:00:47.000Z | 2021-12-30T10:33:51.000Z | #!/usr/bin/env python3
# Pty buffer size detect script
# From: https://superuser.com/a/1452858
# Results:
# MacOS 11.2.3: pts write blocked after 1023 bytes (0 KiB)
import os
from pty import openpty
from fcntl import fcntl, F_GETFL, F_SETFL
from itertools import count
def set_nonblock(fd):
flags = fcntl(fd, F_GETFL)
flags |= os.O_NONBLOCK
fcntl(fd, F_SETFL, flags)
master, slave = openpty()
set_nonblock(slave)
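# Write one byte at a time until the kernel's pty buffer fills and the
# non-blocking write raises BlockingIOError.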
for i in count():
try:
os.write(slave, b'a')
except BlockingIOError:
i -= 1
break
print("pts write blocked after {} bytes ({} KiB)".format(i, i//1024)) | 20.533333 | 69 | 0.670455 | 96 | 616 | 4.229167 | 0.59375 | 0.039409 | 0.073892 | 0.098522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045267 | 0.211039 | 616 | 30 | 69 | 20.533333 | 0.790123 | 0.251623 | 0 | 0 | 0 | 0 | 0.091904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e3f9e5cf08830d85f22ac94a405bd4dc059c2da | 1,007 | py | Python | LTC1380.py | karu2003/Bandpass_tester | 645fc2478ece07ba4303079da020a4f15f946897 | [
"MIT"
] | null | null | null | LTC1380.py | karu2003/Bandpass_tester | 645fc2478ece07ba4303079da020a4f15f946897 | [
"MIT"
] | null | null | null | LTC1380.py | karu2003/Bandpass_tester | 645fc2478ece07ba4303079da020a4f15f946897 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""python module for the LTC1380
created 17, 06, 2021
last modified 17, 06, 2021
Copyright 2021 Andrew Buckin
"""
import smbus
import time
Enable = 8
class LTC1380:
def __init__(self, i2cAddress=0x48):
self.i2cAddress = i2cAddress
self.bus = smbus.SMBus(1)
try:
self.Enable()
except IOError:
print("No i2c device at address:", self.i2cAddress,)
            self.Disable()
return
def Enable(self):
self.bus.write_byte(self.i2cAddress, Enable)
return
    def Disable(self):
self.bus.write_byte(self.i2cAddress, 0x00)
return
def SetChannel(self, Channel):
self.bus.write_byte(self.i2cAddress, Enable | Channel)
return
if __name__ == "__main__":
MUX = LTC1380(i2cAddress=0x48)
Channel = list(range(0, 8, 1)) # data loop DO>DI
for i in Channel:
print(i)
MUX.SetChannel(i)
time.sleep(0.5)
    MUX.Disable()
| 20.14 | 64 | 0.600794 | 126 | 1,007 | 4.68254 | 0.484127 | 0.142373 | 0.061017 | 0.081356 | 0.186441 | 0.186441 | 0.186441 | 0 | 0 | 0 | 0 | 0.080508 | 0.296922 | 1,007 | 49 | 65 | 20.55102 | 0.752825 | 0.142999 | 0 | 0.133333 | 0 | 0 | 0.038687 | 0 | 0 | 0 | 0.014068 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.366667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e43b13aff85b6f549a34372cb839f1045f1224d | 9,618 | py | Python | common/model.py | abhay97ps/visual-control-ppo-procgen | 765fe1ddb289d384abddc4df8eb865379c8da76a | [
"MIT"
] | null | null | null | common/model.py | abhay97ps/visual-control-ppo-procgen | 765fe1ddb289d384abddc4df8eb865379c8da76a | [
"MIT"
] | null | null | null | common/model.py | abhay97ps/visual-control-ppo-procgen | 765fe1ddb289d384abddc4df8eb865379c8da76a | [
"MIT"
] | null | null | null | from .misc_util import orthogonal_init, xavier_uniform_init
import torch.nn as nn
import torch
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.reshape(x.size(0), -1)
class MlpModel(nn.Module):
def __init__(self,
input_dims=4,
hidden_dims=[64, 64],
**kwargs):
"""
input_dim: (int) number of the input dimensions
hidden_dims: (list) list of the dimensions for the hidden layers
use_batchnorm: (bool) whether to use batchnorm
"""
super(MlpModel, self).__init__()
# Hidden layers
hidden_dims = [input_dims] + hidden_dims
layers = []
for i in range(len(hidden_dims) - 1):
in_features = hidden_dims[i]
out_features = hidden_dims[i + 1]
layers.append(nn.Linear(in_features, out_features))
layers.append(nn.ReLU())
self.layers = nn.Sequential(*layers)
self.output_dim = hidden_dims[-1]
self.apply(orthogonal_init)
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class NatureModel(nn.Module):
def __init__(self,
in_channels,
**kwargs):
"""
input_shape: (tuple) tuple of the input dimension shape (channel, height, width)
filters: (list) list of the tuples consists of (number of channels, kernel size, and strides)
use_batchnorm: (bool) whether to use batchnorm
"""
super(NatureModel, self).__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4), nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2), nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1), nn.ReLU(),
Flatten(),
nn.Linear(in_features=64*7*7, out_features=512), nn.ReLU()
)
self.output_dim = 512
self.apply(orthogonal_init)
def forward(self, x):
x = self.layers(x)
return x
class ResidualBlock(nn.Module):
def __init__(self,
in_channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
out = nn.ReLU()(x)
out = self.conv1(out)
out = nn.ReLU()(out)
out = self.conv2(out)
return out + x
class ImpalaBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ImpalaBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
self.res1 = ResidualBlock(out_channels)
self.res2 = ResidualBlock(out_channels)
def forward(self, x):
x = self.conv(x)
x = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(x)
x = self.res1(x)
x = self.res2(x)
return x
class ImpalaModel(nn.Module):
def __init__(self,
in_channels,
**kwargs):
super(ImpalaModel, self).__init__()
self.block1 = ImpalaBlock(in_channels=in_channels, out_channels=16)
self.block2 = ImpalaBlock(in_channels=16, out_channels=32)
self.block3 = ImpalaBlock(in_channels=32, out_channels=32)
self.fc = nn.Linear(in_features=32 * 8 * 8, out_features=256)
self.output_dim = 256
self.apply(xavier_uniform_init)
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = nn.ReLU()(x)
x = Flatten()(x)
x = self.fc(x)
x = nn.ReLU()(x)
return x
class GRU(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU, self).__init__()
self.gru = orthogonal_init(nn.GRU(input_size, hidden_size), gain=1.0)
def forward(self, x, hxs, masks):
# Prediction
if x.size(0) == hxs.size(0):
# input for GRU-CELL: (L=sequence_length, N, H)
# output for GRU-CELL: (output: (L, N, H), hidden: (L, N, H))
masks = masks.unsqueeze(-1)
x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
x = x.squeeze(0)
hxs = hxs.squeeze(0)
# Training
# We will recompute the hidden state to allow gradient to be back-propagated through time
else:
# x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
# (can be interpreted as a truncated back-propagation through time)
            has_zeros = ((masks[1:] == 0.0)
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.unsqueeze(0)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
rnn_scores, hxs = self.gru(
x[start_idx:end_idx],
hxs * masks[start_idx].view(1, -1, 1))
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
hxs = hxs.squeeze(0)
return x, hxs
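# Shape sketch for the GRU above (hypothetical rollout, assuming input and
# hidden size are both H): training calls forward() with x of shape (T*N, H)
# flattened from T steps of N envs, masks of shape (T*N,) with zeros at
# episode resets, and hxs of shape (N, H); single-step prediction passes x
# and hxs both of shape (N, H).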
class ConvBlock(nn.Module):
def __init__(self, in_features, out_features, num_conv, pool=False):
super(ConvBlock, self).__init__()
features = [in_features] + [out_features for i in range(num_conv)]
layers = []
for i in range(len(features)-1):
layers.append(nn.Conv2d(in_channels=features[i], out_channels=features[i+1], kernel_size=3, padding=1, bias=True))
layers.append(nn.BatchNorm2d(num_features=features[i+1], affine=True, track_running_stats=True))
layers.append(nn.ReLU())
if pool:
layers.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.op = nn.Sequential(*layers)
def forward(self, x):
return self.op(x)
class LinearAttentionBlock(nn.Module):
def __init__(self, in_features):
super(LinearAttentionBlock, self).__init__()
self.op = nn.Conv2d(in_channels=in_features, out_channels=1, kernel_size=1, padding=0, bias=False)
def forward(self, l, g):
N, C, W, H = l.size()
c = self.op(l+g)
# out N, 1, W, H
a = F.softmax(c.view(N,1,-1), dim=2).view(N,1,W,H)
g = torch.mul(a.expand_as(l), l)
g = g.view(N,C,-1).sum(dim=2) # batch_sizexC
return c.view(N,1,W,H), g
class AttentionModel(nn.Module):
def __init__(self, in_channels, **kwargs):
super(AttentionModel, self).__init__()
self.conv_block1 = ConvBlock(in_channels, 8, 2)
self.conv_block2 = ConvBlock(8, 16, 2)
self.conv_block3 = ConvBlock(16, 32, 2)
self.conv_block4 = ConvBlock(32, 64, 3)
self.conv_block5 = ConvBlock(64, 64, 3)
self.conv_block6 = ConvBlock(64, 64, 3, pool=True)
self.dense = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1, padding=0, bias=True)
        # for l1 attention
self.projector = nn.Conv2d(32, 64, kernel_size=1, padding=0, bias=False)
self.att1 = LinearAttentionBlock(64)
self.att2 = LinearAttentionBlock(64)
self.att3 = LinearAttentionBlock(64)
self.output_dim = 256
self.embed = nn.Linear(in_features=64*4, out_features=self.output_dim)
def forward(self, x):
# input N, 3, 64, 64
x = self.conv_block1(x)
# out N, 8, 64, 64
x = self.conv_block2(x)
# out N, 16, 64, 64
x = self.conv_block3(x)
# out N, 32, 64, 64
l1 = F.max_pool2d(x, kernel_size=2, stride=2, padding=0)
# out N, 32, 32, 32
l2 = F.max_pool2d(self.conv_block4(l1), kernel_size=2, stride=2, padding=0)
# out N, 64, 16, 16
l3 = F.max_pool2d(self.conv_block5(l2), kernel_size=2, stride=2, padding=0)
# out N, 64, 8, 8
x = self.conv_block6(l3)
# out N, 64, 1, 1
g = self.dense(x)
# out N, 64, 1, 1
c1, g1 = self.att1(self.projector(l1), g)
c2, g2 = self.att2(l2, g)
c3, g3 = self.att3(l3, g)
N, C, _, _ = g.size()
g = g.view(N,C,-1).sum(dim=2)
g = torch.cat((g,g1,g2,g3), dim=1)
g = self.embed(g)
        return [g, c1, c2, c3]

# --- src/sample_api.py (repo: evanmahony/kaggleTemplate, license: MIT) ---
import logging
import os
from flask import Flask, request
import pandas as pd
import torch
from torch_template import Model
PATH = "/home/jovyan"
LOAD_PATH = os.path.join(PATH, "runs/03-56 17_02_22/model.pth")
OUTPUT_PATH = os.path.join(PATH, "runs/api")
# Configuring logging
logging.basicConfig(
filename=os.path.join(OUTPUT_PATH, "run.log"),
format="%(asctime)s - %(levelname)s - %(message)s",
encoding="utf-8",
level=logging.INFO,
)
app = Flask(__name__)
model = Model(1, 1).to("cpu")
logging.info(f"Model:\n{model}")
model.load_state_dict(torch.load(LOAD_PATH))
logging.info(f"Loaded model from {LOAD_PATH}")
model.eval()
X = torch.tensor([1]).type(torch.LongTensor).to("cpu")
logging.info(f"X: {X.type}")
logging.info(f"{model.forward(X)}")
@app.route("/model")
def model():
logging.info(f"Model:\n{model}")
return f"Model:\n{model}"
@app.route("/predict", methods=["POST"])
def predict():
if request.method == "POST":
input_json = request.get_json()
input_df = pd.read_json(input_json)
logging.info(f"Input DataFrame: {input_df}\n")
X = torch.tensor(input_df.values)[0]
logging.info(f"X shape: {X.shape}\n")
logging.info(f"X: {X}\n")
        pred = model.forward(X)
        # A raw tensor is not a valid Flask response body; convert it first.
        return {"prediction": pred.tolist()}
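
# Example request (an illustrative sketch, not from the original repo): the
# handler feeds request.get_json() to pandas.read_json, so the POST body must
# be a JSON-encoded *string* containing the DataFrame JSON, e.g.
#   curl -X POST http://localhost:6006/predict \
#        -H "Content-Type: application/json" \
#        -d '"{\"x\":{\"0\":1}}"'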
if __name__ == "__main__":
app.run(host="localhost", port=6006)

# --- script/fsrcnn/train.py (repo: victorelcaminas/SuperResolutionKit, license: MIT) ---
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Input, BatchNormalization, PReLU
from keras.callbacks import ModelCheckpoint, Callback, TensorBoard
from keras.optimizers import SGD, Adam
import numpy as np
import math
import os
import random
from os import listdir, makedirs
from os.path import isfile, join, exists
from PIL import Image
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from s3sync import S3SyncCallback
def model(scale=2):
    # FSRCNN hyper-parameters: d feature maps, s shrunken maps, m mapping layers.
    d = 56
    s = 12
    m = 4
    c = 3  # colour channels of the reconstructed output
    SRCNN = Sequential()
    # The original mixed Keras 1 keyword names (nb_filter/init/border_mode) with
    # the Keras 2 Conv2DTranspose layer; the Keras 2 names are used consistently here.
    SRCNN.add(Conv2D(filters=d, kernel_size=5, kernel_initializer='glorot_uniform',
                     padding='same', use_bias=True, input_shape=(100, 100, 3)))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    SRCNN.add(Conv2D(filters=s, kernel_size=1, kernel_initializer='glorot_uniform',
                     padding='same', use_bias=True))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    for i in range(m):
        SRCNN.add(Conv2D(filters=s, kernel_size=3, kernel_initializer='glorot_uniform',
                         padding='same', use_bias=True))
        SRCNN.add(PReLU(shared_axes=[1, 2]))
    SRCNN.add(Conv2D(filters=d, kernel_size=1, kernel_initializer='glorot_uniform',
                     padding='same', use_bias=True))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    SRCNN.add(Conv2DTranspose(filters=c, kernel_size=(9, 9), strides=(scale, scale),
                              kernel_initializer='glorot_uniform', padding='same',
                              use_bias=True))
    adam = Adam(lr=0.0003)
    SRCNN.compile(optimizer=adam, loss='mean_squared_error', metrics=['mean_squared_error'])
    return SRCNN
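
# Shape sketch (illustrative, not part of the original script): with
# padding='same' throughout, the convolutions keep the 100x100 spatial size and
# the stride-`scale` deconvolution multiplies it, so model(scale=2) maps
# (100, 100, 3) inputs to (200, 200, 3) outputs.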
class MyDataGenerator(object):
def flow_from_directory(self, input_dir, label_dir, batch_size=32):
images = []
labels = []
while True:
files = listdir(input_dir)
random.shuffle(files)
for f in files:
images.append(self.load_image(input_dir, f))
labels.append(self.load_image(label_dir, f))
if len(images) == batch_size:
x_inputs = np.asarray(images)
x_labels = np.asarray(labels)
images = []
labels = []
yield x_inputs, x_labels
def load_image(self, src_dir, f):
X = np.asarray(Image.open(join(src_dir, f)).convert('RGB'), dtype='float32')
X /= 255.
return X
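
# Data layout assumed by the generator above (inferred from the code, not
# documented upstream): `input_dir` and `label_dir` contain low/high-resolution
# image pairs that share the same filename, so a single name `f` loads both
# halves of a training pair.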
def train(log_dir, model_dir, train_dir, test_dir, eval_img, scale, epochs, steps):
srcnn_model = model(scale)
print(srcnn_model.summary())
datagen = MyDataGenerator()
train_gen = datagen.flow_from_directory(os.path.join(
train_dir, 'input'),
os.path.join(train_dir, 'label'),
batch_size = 10)
val_gen = datagen.flow_from_directory(
os.path.join(test_dir, 'input'),
os.path.join(test_dir, 'label'),
batch_size = 10)
class PredCallback(Callback):
def on_epoch_end(self, epoch, logs=None):
pass
#pred.predict(self.model, eval_img, 'base-%d.png' % epoch, 'out-%d.png' % epoch, False)
class PSNRCallback(Callback):
        def on_epoch_end(self, epoch, logs=None):
            # Pixels are scaled to [0, 1], so PSNR = -10 * log10(MSE) directly.
            # (The old formula scaled the MSE by 255 instead of 255**2 before
            # 20 * log10(255 / sqrt(...)), over-reporting PSNR by about 24 dB.)
            psnr = -10. * math.log10(logs['loss'])
            val_psnr = -10. * math.log10(logs['val_loss'])
            print("\n")
            print("PSNR:%s" % psnr)
            print("PSNR(val):%s" % val_psnr)
pd_cb = PredCallback()
ps_cb = PSNRCallback()
md_cb = ModelCheckpoint(os.path.join(model_dir,'check.h5'), monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='min', period=1)
tb_cb = TensorBoard(log_dir=log_dir)
s3_cb = S3SyncCallback(s3_base_url='s3://tryswift/super-resolution-kit/log', log_dir=log_dir)
srcnn_model.fit_generator(
generator = train_gen,
steps_per_epoch = steps,
validation_data = val_gen,
validation_steps = steps,
epochs = epochs,
callbacks=[ps_cb, md_cb, tb_cb, s3_cb])
srcnn_model.save(os.path.join(model_dir,'model.h5'))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("log_dir")
parser.add_argument("model_dir")
parser.add_argument("train_dir")
parser.add_argument("test_dir")
parser.add_argument("--eval_img")
parser.add_argument("-scale", type=int, default=2)
parser.add_argument("-epochs", type=int, default=100)
parser.add_argument("-steps", type=int, default=100)
args = parser.parse_args()
print(args)
if not exists(args.model_dir):
makedirs(args.model_dir)
train(args.log_dir, args.model_dir, args.train_dir, args.test_dir, args.eval_img, args.scale, args.epochs, args.steps)

# --- module/error.py (repo: Saroniii/yonosumi_official_bot, license: MIT) ---
import re
import traceback
import discord
from discord.ext import commands
class ErrorHandler(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, (commands.CommandNotFound, commands.CommandOnCooldown)):
return
waiting = await ctx.send(f"{ctx.author.mention}->エラーが発生しました...原因を解析しています...")
if isinstance(error, commands.MissingRequiredArgument):
arg = str(error.param)
varname = {
'object_gos': 'サーバーオブジェクトもしくは文字列',
'database': '操作したいデータベース',
'object_mor': '検索したい役職もしくはメンバー',
'announcedata': 'アナウンスする文章',
'noteuser': 'Noteのユーザー名',
'channelname': 'チャンネル名',
'channel': 'チャンネル',
'sqlcmd': 'SQLステートメント',
'roll_data': '抽選するもの',
'_triger': '絵文字の追加名',
'code': 'コード',
'userid': 'ユーザーID',
'reason': '理由',
'target': '処置を行う相手',
'playername': '検索するプレイヤー',
'artist': '歌手名',
'song': '曲名',
'text': '打ち込みたい文章',
'math_value': '計算させたい式',
'ip': '検索したいサーバーのIPアドレス',
'settype': 'タイプ指定',
'triger': 'メモを呼び出すためのトリガー',
'role': '役職',
'onlinetype': 'オンライン表示',
'playing': 'アクティビティー',
'check': 'タイプ指定',
'tododata': 'ToDoの文章',
'user': 'ユーザー',
'invite_user': '招待したいユーザー',
'sentence': '文章',
'title': 'タイトル',
'bantype': 'BANのタイプ',
'badge_type': 'バッジのタイプ',
'get_type': '付与するタイプ',
'guild': 'サーバー名',
'data_id': 'ID',
}
arg = re.split('[.,:]', arg)
embed = discord.Embed(
title="引数不足です!", description=f"引数``{arg[0]}``が足りていません!", color=discord.Colour.from_rgb(255, 0, 0))
try:
desc = varname[arg[0]]
embed.add_field(name=f"💡もしかしたら...",
value=f"``{desc}``が不足していませんか?")
            except KeyError:  # no hint available for this argument name
pass
await waiting.edit(content=f"{ctx.author.mention}->", embed=embed)
return
elif isinstance(error, commands.BadArgument):
await ctx.send(dir(error))
try:
await ctx.send(dir(error.__context__))
except:
pass
target_dir = {
'int': '数値',
'Member': 'メンバー',
'user': 'ユーザー',
'Guild': 'サーバー',
'Emoji': '絵文字'
}
target = str(error.args).split()[2].replace('"', '')
embed = discord.Embed(
title=f'取得に失敗しました!', description=f"引数の``{target}``を取得できませんでした!", color=discord.Colour.from_rgb(255, 0, 0))
try:
desc = target_dir[target]
embed.add_field(
name="💡もしかして...", value=f"引数の``{desc}``は実際に存在していますか?\n実際に存在しているオブジェクトでも、凜花が認識していないオブジェクトは取得できない場合があります。")
            except KeyError:  # unknown target type, send the embed without a hint
pass
await waiting.edit(content=f"{ctx.author.mention}->", embed=embed)
return
elif isinstance(error, (commands.MissingPermissions, commands.BotMissingPermissions)):
perm = error.missing_perms[0]
try:
perm = self.bot.permissions_dir[perm]
            except KeyError:  # permission name not in the friendly-name map
pass
if isinstance(error, commands.MissingPermissions):
await waiting.edit(content=f"{ctx.author.mention}->", embed=discord.Embed(title=f"権限不足です!", description=f"このコマンドを実行するには、``{perm}``が必要です!", color=discord.Colour.from_rgb(255, 0, 0)))
else:
await waiting.edit(content=f"{ctx.author.mention}->", embed=discord.Embed(title=f"Botの権限不足です!", description=f"このコマンドを実行するには、Botに``{perm}``を付与する必要があります!", color=discord.Colour.from_rgb(255, 0, 0)))
return
try:
await waiting.edit(content=f"{ctx.author.mention}->{error}")
except:
await waiting.edit(content=f"{ctx.author.mention}->エラーが解析できませんでした!")
s_error = traceback.format_exception(
type(error), error, error.__traceback__)
print(s_error)
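        # Discord rejects messages over 2000 characters, so the loop below
        # greedily merges adjacent traceback lines while the merged chunk stays
        # under the limit (2000 - 15 leaves headroom for the code-block markup),
        # and the webhook then posts the chunks three embeds at a time.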
for i in range(len(s_error)):
while len("".join(s_error[i:i+2])) < 2000-15 and len("".join(s_error[i+1:])) != 0:
s_error[i:i+2] = ["".join(s_error[i:i+2])]
webhook = await self.bot.fetch_webhook(800731709104324658)
for i in range(0, len(s_error), 3):
await webhook.send(embeds=[discord.Embed(description=f"```py\n{y}```").set_footer(text=f"{i+x+1}/{len(s_error)}") for x, y in enumerate(s_error[i:i+3])])
def setup(bot):
bot.add_cog(ErrorHandler(bot))

# --- rabbitpy/channel0.py (repo: AndTornes/rabbitpy, license: BSD-3-Clause) ---
"""
Channel0 is used for connection level communication between RabbitMQ and the
client on channel 0.
"""
import locale
import logging
import sys
from pamqp import header
from pamqp import heartbeat
from pamqp import specification
from rabbitpy import __version__
from rabbitpy import base
from rabbitpy import events
from rabbitpy import exceptions
from rabbitpy.utils import queue
LOGGER = logging.getLogger(__name__)
DEFAULT_LOCALE = locale.getdefaultlocale()
del locale
class Channel0(base.AMQPChannel):
"""Channel0 is used to negotiate a connection with RabbitMQ and for
processing and dispatching events on channel 0 once connected.
:param dict connection_args: Data required to negotiate the connection
:param events_obj: The shared events coordination object
:type events_obj: rabbitpy.events.Events
:param exception_queue: The queue where any pending exceptions live
:type exception_queue: queue.Queue
:param write_queue: The queue to place data to write in
:type write_queue: queue.Queue
:param write_trigger: The socket to write to, to trigger IO writes
:type write_trigger: socket.socket
"""
CHANNEL = 0
CLOSE_REQUEST_FRAME = specification.Connection.Close
DEFAULT_LOCALE = 'en-US'
def __init__(self, connection_args, events_obj, exception_queue,
write_queue, write_trigger, connection):
super(Channel0, self).__init__(
exception_queue, write_trigger, connection)
self._channel_id = 0
self._args = connection_args
self._events = events_obj
self._exceptions = exception_queue
self._read_queue = queue.Queue()
self._write_queue = write_queue
self._write_trigger = write_trigger
self._state = self.CLOSED
self._max_channels = connection_args['channel_max']
self._max_frame_size = connection_args['frame_max']
self._heartbeat_interval = connection_args['heartbeat']
self.properties = None
def close(self):
"""Close the connection via Channel0 communication."""
if self.open:
self._set_state(self.CLOSING)
self.rpc(specification.Connection.Close())
@property
def heartbeat_interval(self):
"""Return the AMQP heartbeat interval for the connection
:rtype: int
"""
return self._heartbeat_interval
@property
def maximum_channels(self):
"""Return the AMQP maximum channel count for the connection
:rtype: int
"""
return self._max_channels
@property
def maximum_frame_size(self):
"""Return the AMQP maximum frame size for the connection
:rtype: int
"""
return self._max_frame_size
def on_frame(self, value):
"""Process a RPC frame received from the server
:param pamqp.message.Message value: The message value
"""
LOGGER.debug('Received frame: %r', value.name)
if value.name == 'Connection.Close':
LOGGER.warning('RabbitMQ closed the connection (%s): %s',
value.reply_code, value.reply_text)
self._set_state(self.CLOSED)
self._events.set(events.SOCKET_CLOSED)
self._events.set(events.CHANNEL0_CLOSED)
self._connection.close()
if value.reply_code in exceptions.AMQP:
err = exceptions.AMQP[value.reply_code](value.reply_text)
else:
err = exceptions.RemoteClosedException(value.reply_code,
value.reply_text)
self._exceptions.put(err)
self._trigger_write()
elif value.name == 'Connection.Blocked':
LOGGER.warning('RabbitMQ has blocked the connection: %s',
value.reason)
self._events.set(events.CONNECTION_BLOCKED)
elif value.name == 'Connection.CloseOk':
self._set_state(self.CLOSED)
self._events.set(events.CHANNEL0_CLOSED)
elif value.name == 'Connection.OpenOk':
self._on_connection_open_ok()
elif value.name == 'Connection.Start':
self._on_connection_start(value)
elif value.name == 'Connection.Tune':
self._on_connection_tune(value)
elif value.name == 'Connection.Unblocked':
LOGGER.info('Connection is no longer blocked')
self._events.clear(events.CONNECTION_BLOCKED)
elif value.name == 'Heartbeat':
pass
else:
LOGGER.warning('Unexpected Channel0 Frame: %r', value)
raise specification.AMQPUnexpectedFrame(value)
def send_heartbeat(self):
"""Send a heartbeat frame to the remote connection."""
self.write_frame(heartbeat.Heartbeat())
def start(self):
"""Start the AMQP protocol negotiation"""
self._set_state(self.OPENING)
self._write_protocol_header()
def _build_open_frame(self):
"""Build and return the Connection.Open frame.
:rtype: pamqp.specification.Connection.Open
"""
return specification.Connection.Open(self._args['virtual_host'])
def _build_start_ok_frame(self):
"""Build and return the Connection.StartOk frame.
:rtype: pamqp.specification.Connection.StartOk
"""
properties = {
'product': 'rabbitpy',
'platform': 'Python {0}.{1}.{2}'.format(*sys.version_info),
'capabilities': {'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True},
'information': 'See https://rabbitpy.readthedocs.io',
'version': __version__}
return specification.Connection.StartOk(client_properties=properties,
response=self._credentials,
locale=self._get_locale())
def _build_tune_ok_frame(self):
"""Build and return the Connection.TuneOk frame.
:rtype: pamqp.specification.Connection.TuneOk
"""
return specification.Connection.TuneOk(self._max_channels,
self._max_frame_size,
self._heartbeat_interval)
@property
def _credentials(self):
"""Return the marshaled credentials for the AMQP connection.
:rtype: str
"""
return '\0%s\0%s' % (self._args['username'], self._args['password'])
def _get_locale(self):
"""Return the current locale for the python interpreter or the default
locale.
:rtype: str
"""
if not self._args['locale']:
return DEFAULT_LOCALE[0] or self.DEFAULT_LOCALE
return self._args['locale']
@staticmethod
def _negotiate(client_value, server_value):
"""Return the negotiated value between what the client has requested
and the server has requested for how the two will communicate.
:param int client_value:
:param int server_value:
:return: int
"""
return min(client_value, server_value) or \
(client_value or server_value)
def _on_connection_open_ok(self):
LOGGER.debug('Connection opened')
self._set_state(self.OPEN)
self._events.set(events.CHANNEL0_OPENED)
def _on_connection_start(self, frame_value):
"""Negotiate the Connection.Start process, writing out a
Connection.StartOk frame when the Connection.Start frame is received.
:type frame_value: pamqp.specification.Connection.Start
:raises: rabbitpy.exceptions.ConnectionException
"""
if not self._validate_connection_start(frame_value):
LOGGER.error('Could not negotiate a connection, disconnecting')
raise exceptions.ConnectionResetException()
self.properties = frame_value.server_properties
for key in self.properties:
if key == 'capabilities':
for capability in self.properties[key]:
LOGGER.debug('Server supports %s: %r',
capability, self.properties[key][capability])
else:
LOGGER.debug('Server %s: %r', key, self.properties[key])
self.write_frame(self._build_start_ok_frame())
def _on_connection_tune(self, frame_value):
"""Negotiate the Connection.Tune frames, waiting for the
Connection.Tune frame from RabbitMQ and sending the Connection.TuneOk
frame.
:param specification.Connection.Tune frame_value: Tune frame
"""
self._max_frame_size = self._negotiate(self._max_frame_size,
frame_value.frame_max)
self._max_channels = self._negotiate(self._max_channels,
frame_value.channel_max)
LOGGER.debug('Heartbeat interval (server/client): %r/%r',
frame_value.heartbeat, self._heartbeat_interval)
# Properly negotiate the heartbeat interval
if self._heartbeat_interval is None:
self._heartbeat_interval = frame_value.heartbeat
elif self._heartbeat_interval == 0 or frame_value.heartbeat == 0:
self._heartbeat_interval = 0
self.write_frame(self._build_tune_ok_frame())
self.write_frame(self._build_open_frame())
@staticmethod
def _validate_connection_start(frame_value):
"""Validate the received Connection.Start frame
:param specification.Connection.Start frame_value: Frame to validate
:rtype: bool
"""
if (frame_value.version_major, frame_value.version_minor) != \
(specification.VERSION[0], specification.VERSION[1]):
LOGGER.warning('AMQP version error (received %i.%i, expected %r)',
frame_value.version_major,
frame_value.version_minor,
specification.VERSION)
return False
return True
def _write_protocol_header(self):
"""Send the protocol header to the connected server."""
self.write_frame(header.ProtocolHeader())

# --- format.py (repo: saultyevil/PyPython, license: MIT) ---
#!/usr/bin/env python3
from sys import argv
from subprocess import run
from pathlib import Path
def format_source(fp):
"""Run isort and then yapf to format the python files contained in
fp. Sends the output to /dev/null.
Parameters
----------
fp: str
The file path to search recursively for python files.
"""
style = "'{based_on_style: pep8, column_limit: 120}'"
for file in Path(fp).rglob("*.py"):
print(" -", str(file))
run(f"isort --dont-float-to-top {file} > /dev/null; yapf -i --style={style} {file} > /dev/null", shell=True)
def format_docstrings(fp):
"""Use docformatter to format docstrings using docformatter. This should be
done to PEP-8 covention.
Parameters
----------
fp: str
        The file path to search recursively for python files.
"""
for file in Path(fp).rglob("*.py"):
print(" -", str(file))
run(f"docformatter -i {file} > /dev/null", shell=True)
def strip_type_hints(fp):
"""Stip type hints from source files.
Parameters
----------
fp: str
The file path to search recursively for python files.
"""
for file in Path(fp).rglob("*.py"):
print(" -", str(file))
run(f"strip-hints {file} > tmp.txt; mv tmp.txt {file}", shell=True)
if "--strip-hints" in argv:
print("Stripping type hints:")
strip_type_hints("pypython")
strip_type_hints("scripts")
print("Reformating source files:")
format_source("pypython")
format_source("scripts")
print("Reformatting docstrings")
format_docstrings("pypython")
format_docstrings("scripts")

# --- utils.py (repo: Mahoo12138/auto-submit-dlu, license: Apache-2.0) ---
import json
from base64 import b64encode
from Crypto.Cipher import AES, DES
from Crypto.Util.Padding import pad
from Crypto.Hash import MD5
from urllib.parse import quote
### Encrypts the payload used to generate the Cpdaily-Extension header; takes the form data plus the personal configuration
def extensionEncrypt(data):
key = b"b3L26XNL"
iv = bytes([1, 2, 3, 4, 5, 6, 7, 8])
data = bytes(json.dumps(data), encoding='utf-8')
print(data)
cipher = DES.new(key, DES.MODE_CBC, iv)
secret_bytes = cipher.encrypt(pad(data, DES.block_size))
encrypted = b64encode(secret_bytes).decode('utf-8')
return encrypted
def formBodyEncrypt(data):
key = b'ytUQ7l2ZZu8mLvJZ'
iv = bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7])
data = bytes(json.dumps(data), encoding='utf-8')
cipher = AES.new(key, AES.MODE_CBC, iv)
secret_bytes = cipher.encrypt(pad(data, AES.block_size))
encrypted = b64encode(secret_bytes).decode('utf-8')
return encrypted
def getSignHash(data):
    # parameter renamed from ``str`` so the built-in type is not shadowed
    jstr = json.dumps(data)
temp = bytes(quote(jstr) + '=&ytUQ7l2ZZu8mLvJZ', encoding='utf-8')
h = MD5.new(data=temp)
return h.hexdigest()
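
# Minimal usage sketch (illustrative values only; a real payload must follow
# the Cpdaily form schema expected by the server):
if __name__ == "__main__":
    payload = {"example": "value"}
    print(extensionEncrypt(payload))
    print(formBodyEncrypt(payload))
    print(getSignHash(payload))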

# --- RVS/X86.py (repo: infosecsecurity/OSPTF, license: MIT) ---
# X86 disassembler for Python
# Copyright (c) 2011-2012 Rusty Wagner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
FLAG_LOCK = 1
FLAG_REP = 2
FLAG_REPNE = 4
FLAG_REPE = 8
FLAG_OPSIZE = 16
FLAG_ADDRSIZE = 32
FLAG_64BIT_ADDRESS = 64
FLAG_INSUFFICIENT_LENGTH = 0x80000000
FLAG_ANY_REP = (FLAG_REP | FLAG_REPE | FLAG_REPNE)
DEC_FLAG_LOCK = 0x0020
DEC_FLAG_REP = 0x0040
DEC_FLAG_REP_COND = 0x0080
DEC_FLAG_BYTE = 0x0100
DEC_FLAG_FLIP_OPERANDS = 0x0200
DEC_FLAG_IMM_SX = 0x0400
DEC_FLAG_INC_OPERATION_FOR_64 = 0x0800
DEC_FLAG_OPERATION_OP_SIZE = 0x1000
DEC_FLAG_FORCE_16BIT = 0x2000
DEC_FLAG_INVALID_IN_64BIT = 0x4000
DEC_FLAG_DEFAULT_TO_64BIT = 0x8000
DEC_FLAG_REG_RM_SIZE_MASK = 0x03
DEC_FLAG_REG_RM_2X_SIZE = 0x01
DEC_FLAG_REG_RM_FAR_SIZE = 0x02
DEC_FLAG_REG_RM_NO_SIZE = 0x03
ControlRegs = ["cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "cr8", "cr9", "cr10", "cr11", "cr12", "cr13", "cr14", "cr15"]
DebugRegs = ["dr0", "dr1", "dr2", "dr3", "dr4", "dr5", "dr6", "dr7", "dr8", "dr9", "dr10", "dr11", "dr12", "dr13", "dr14", "dr15"]
TestRegs = ["tr0", "tr1", "tr2", "tr3", "tr4", "tr5", "tr6", "tr7", "tr8", "tr9", "tr10", "tr11", "tr12", "tr13", "tr14", "tr15"]
MainOpcodeMap = [
["add", "rm_reg_8_lock"], ["add", "rm_reg_v_lock"], ["add", "reg_rm_8"], ["add", "reg_rm_v"], # 0x00
["add", "eax_imm_8"], ["add", "eax_imm_v"], ["push", "push_pop_seg"], ["pop", "push_pop_seg"], # 0x04
["or", "rm_reg_8_lock"], ["or", "rm_reg_v_lock"], ["or", "reg_rm_8"], ["or", "reg_rm_v"], # 0x08
["or", "eax_imm_8"], ["or", "eax_imm_v"], ["push", "push_pop_seg"], [None, "two_byte"], # 0x0c
["adc", "rm_reg_8_lock"], ["adc", "rm_reg_v_lock"], ["adc", "reg_rm_8"], ["adc", "reg_rm_v"], # 0x10
["adc", "eax_imm_8"], ["adc", "eax_imm_v"], ["push", "push_pop_seg"], ["pop", "push_pop_seg"], # 0x14
["sbb", "rm_reg_8_lock"], ["sbb", "rm_reg_v_lock"], ["sbb", "reg_rm_8"], ["sbb", "reg_rm_v"], # 0x18
["sbb", "eax_imm_8"], ["sbb", "eax_imm_v"], ["push", "push_pop_seg"], ["pop", "push_pop_seg"], # 0x1c
["and", "rm_reg_8_lock"], ["and", "rm_reg_v_lock"], ["and", "reg_rm_8"], ["and", "reg_rm_v"], # 0x20
["and", "eax_imm_8"], ["and", "eax_imm_v"], [None, None], ["daa", "no_operands"], # 0x24
["sub", "rm_reg_8_lock"], ["sub", "rm_reg_v_lock"], ["sub", "reg_rm_8"], ["sub", "reg_rm_v"], # 0x28
["sub", "eax_imm_8"], ["sub", "eax_imm_v"], [None, None], ["das", "no_operands"], # 0x2c
["xor", "rm_reg_8_lock"], ["xor", "rm_reg_v_lock"], ["xor", "reg_rm_8"], ["xor", "reg_rm_v"], # 0x30
["xor", "eax_imm_8"], ["xor", "eax_imm_v"], [None, None], ["aaa", "no_operands"], # 0x34
["cmp", "rm_reg_8"], ["cmp", "rm_reg_v"], ["cmp", "reg_rm_8"], ["cmp", "reg_rm_v"], # 0x38
["cmp", "eax_imm_8"], ["cmp", "eax_imm_v"], [None, None], ["aas", "no_operands"], # 0x3c
["inc", "op_reg_v"], ["inc", "op_reg_v"], ["inc", "op_reg_v"], ["inc", "op_reg_v"], # 0x40
["inc", "op_reg_v"], ["inc", "op_reg_v"], ["inc", "op_reg_v"], ["inc", "op_reg_v"], # 0x44
["dec", "op_reg_v"], ["dec", "op_reg_v"], ["dec", "op_reg_v"], ["dec", "op_reg_v"], # 0x48
["dec", "op_reg_v"], ["dec", "op_reg_v"], ["dec", "op_reg_v"], ["dec", "op_reg_v"], # 0x4c
["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], # 0x50
["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], ["push", "op_reg_v_def64"], # 0x54
["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], # 0x58
["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], ["pop", "op_reg_v_def64"], # 0x5c
[["pusha", "pushad"], "op_size_no64"], [["popa", "popad"], "op_size_no64"], ["bound", "reg_rm2x_v"], ["arpl", "arpl"], # 0x60
[None, None], [None, None], [None, None], [None, None], # 0x64
["push", "imm_v_def64"], ["imul", "reg_rm_imm_v"], ["push", "immsx_v_def64"], ["imul", "reg_rm_immsx_v"], # 0x68
["insb", "edi_dx_8_rep"], [["insw", "insd"], "edi_dx_op_size_rep"], ["outsb", "dx_esi_8_rep"], [["outsw", "outsd"], "dx_esi_op_size_rep"], # 0x6c
["jo", "relimm_8_def64"], ["jno", "relimm_8_def64"], ["jb", "relimm_8_def64"], ["jae", "relimm_8_def64"], # 0x70
["je", "relimm_8_def64"], ["jne", "relimm_8_def64"], ["jbe", "relimm_8_def64"], ["ja", "relimm_8_def64"], # 0x74
["js", "relimm_8_def64"], ["jns", "relimm_8_def64"], ["jpe", "relimm_8_def64"], ["jpo", "relimm_8_def64"], # 0x78
["jl", "relimm_8_def64"], ["jge", "relimm_8_def64"], ["jle", "relimm_8_def64"], ["jg", "relimm_8_def64"], # 0x7c
[0, "group_rm_imm_8_lock"], [0, "group_rm_imm_v_lock"], [0, "group_rm_imm_8_no64_lock"], [0, "group_rm_immsx_v_lock"], # 0x80
["test", "rm_reg_8"], ["test", "rm_reg_v"], ["xchg", "rm_reg_8_lock"], ["xchg", "rm_reg_v_lock"], # 0x84
["mov", "rm_reg_8"], ["mov", "rm_reg_v"], ["mov", "reg_rm_8"], ["mov", "reg_rm_v"], # 0x88
["mov", "rm_sreg_v"], ["lea", "reg_rm_0"], ["mov", "sreg_rm_v"], ["pop", "rm_v_def64"], # 0x8c
["nop", "nop"], ["xchg", "eax_op_reg_v"], ["xchg", "eax_op_reg_v"], ["xchg", "eax_op_reg_v"], # 0x90
["xchg", "eax_op_reg_v"], ["xchg", "eax_op_reg_v"], ["xchg", "eax_op_reg_v"], ["xchg", "eax_op_reg_v"], # 0x94
[["cbw", "cwde", "cdqe"], "op_size"], [["cwd", "cdq", "cqo"], "op_size"], ["callf", "far_imm_no64"], ["fwait", "no_operands"], # 0x98
[["pushf", "pushfd", "pushfq"], "op_size_def64"], [["popf", "popfd", "popfq"], "op_size_def64"], ["sahf", "no_operands"], ["lahf", "no_operands"], # 0x9c
["mov", "eax_addr_8"], ["mov", "eax_addr_v"], ["mov", "addr_eax_8"], ["mov", "addr_eax_v"], # 0xa0
["movsb", "edi_esi_8_rep"], [["movsw", "movsd", "movsq"], "edi_esi_op_size_rep"], ["cmpsb", "esi_edi_8_repc"], [["cmpsw", "cmpsd", "cmpsq"], "esi_edi_op_size_repc"], # 0xa4
["test", "eax_imm_8"], ["test", "eax_imm_v"], ["stosb", "edi_eax_8_rep"], [["stosw", "stosd", "stosq"], "edi_eax_op_size_rep"], # 0xa8
["lodsb", "eax_esi_8_rep"], [["lodsw", "lodsd", "lodsq"], "eax_esi_op_size_rep"], ["scasb", "eax_edi_8_repc"], [["scasw", "scasd", "scasq"], "eax_edi_op_size_repc"], # 0xac
["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], # 0xb0
["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], ["mov", "op_reg_imm_8"], # 0xb4
["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], # 0xb8
["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], ["mov", "op_reg_imm_v"], # 0xbc
[1, "group_rm_imm_8"], [1, "group_rm_imm8_v"], ["retn", "imm_16"], ["retn", "no_operands"], # 0xc0
["les", "reg_rm_f"], ["lds", "reg_rm_f"], [2, "group_rm_imm_8"], [2, "group_rm_imm_v"], # 0xc4
["enter", "imm16_imm8"], ["leave", "no_operands"], ["retf", "imm_16"], ["retf", "no_operands"], # 0xc8
["int3", "no_operands"], ["int", "imm_8"], ["into", "no_operands"], ["iret", "no_operands"], # 0xcc
[1, "group_rm_one_8"], [1, "group_rm_one_v"], [1, "group_rm_cl_8"], [1, "group_rm_cl_v"], # 0xd0
["aam", "imm_8"], ["aad", "imm_8"], ["salc", "no_operands"], ["xlat", "al_ebx_al"], # 0xd4
[0, "fpu"], [1, "fpu"], [2, "fpu"], [3, "fpu"], # 0xd8
[4, "fpu"], [5, "fpu"], [6, "fpu"], [7, "fpu"], # 0xdc
["loopne", "relimm_8_def64"], ["loope", "relimm_8_def64"], ["loop", "relimm_8_def64"], [["jcxz", "jecxz", "jrcxz"], "relimm_8_addr_size_def64"], # 0xe0
["in", "eax_imm8_8"], ["in", "eax_imm8_v"], ["out", "imm8_eax_8"], ["out", "imm8_eax_v"], # 0xe4
["calln", "relimm_v_def64"], ["jmpn", "relimm_v_def64"], ["jmpf", "far_imm_no64"], ["jmpn", "relimm_8_def64"], # 0xe8
["in", "eax_dx_8"], ["in", "eax_dx_v"], ["out", "dx_eax_8"], ["out", "dx_eax_v"], # 0xec
[None, None], ["int1", "no_operands"], [None, None], [None, None], # 0xf0
["hlt", "no_operands"], ["cmc", "no_operands"], [3, "group_f6"], [3, "group_f7"], # 0xf4
["clc", "no_operands"], ["stc", "no_operands"], ["cli", "no_operands"], ["sti", "no_operands"], # 0xf8
["cld", "no_operands"], ["std", "no_operands"], [4, "group_rm_8_lock"], [5, "group_ff"], # 0xfc
]
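
# How the table above is consumed (an explanatory note, not new data): the
# decoder indexes MainOpcodeMap with the first opcode byte, and each entry
# pairs an operation (or a group/operand-size selector) with the name of the
# decode handler for its operand form. For example, MainOpcodeMap[0x89] is
# ["mov", "rm_reg_v"], while MainOpcodeMap[0x0f] is [None, "two_byte"] and
# dispatches into TwoByteOpcodeMap below.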
TwoByteOpcodeMap = [
[6, "group_0f00"], [7, "group_0f01"], ["lar", "reg_rm_v"], ["lsl", "reg_rm_v"], # 0x00
[None, None], ["syscall", "no_operands"], ["clts", "no_operands"], ["sysret", "no_operands"], # 0x04
["invd", "no_operands"], ["wbinvd", "no_operands"], [None, None], ["ud2", "no_operands"], # 0x08
[None, None], [8, "group_rm_0"], ["femms", "no_operands"], [0, "_3dnow"], # 0x0c
[0, "sse_table"], [0, "sse_table_flip"], [1, "sse_table"], [2, "sse_table_flip"], # 0x10
[3, "sse_table"], [4, "sse_table"], [5, "sse_table"], [6, "sse_table_flip"], # 0x14
[9, "group_rm_0"], [10, "group_rm_0"], [10, "group_rm_0"], [10, "group_rm_0"], # 0x18
[10, "group_rm_0"], [10, "group_rm_0"], [10, "group_rm_0"], [10, "group_rm_0"], # 0x1c
[ControlRegs, "reg_cr"], [DebugRegs, "reg_cr"], [ControlRegs, "cr_reg"], [DebugRegs, "cr_reg"], # 0x20
[TestRegs, "reg_cr"], [None, None], [TestRegs, "cr_reg"], [None, None], # 0x24
[7, "sse_table"], [7, "sse_table_flip"], [8, "sse_table"], [9, "sse_table_flip"], # 0x28
[10, "sse_table"], [11, "sse_table"], [12, "sse_table"], [13, "sse_table"], # 0x2c
["wrmsr", "no_operands"], ["rdtsc", "no_operands"], ["rdmsr", "no_operands"], ["rdpmc", "no_operands"], # 0x30
["sysenter", "no_operands"], ["sysexit", "no_operands"], [None, None], ["getsec", "no_operands"], # 0x34
[None, None], [None, None], [None, None], [None, None], # 0x38
[None, None], [None, None], [None, None], [None, None], # 0x3c
["cmovo", "reg_rm_v"], ["cmovno", "reg_rm_v"], ["cmovb", "reg_rm_v"], ["cmovae", "reg_rm_v"], # 0x40
["cmove", "reg_rm_v"], ["cmovne", "reg_rm_v"], ["cmovbe", "reg_rm_v"], ["cmova", "reg_rm_v"], # 0x44
["cmovs", "reg_rm_v"], ["cmovns", "reg_rm_v"], ["cmovpe", "reg_rm_v"], ["cmovpo", "reg_rm_v"], # 0x48
["cmovl", "reg_rm_v"], ["cmovge", "reg_rm_v"], ["cmovle", "reg_rm_v"], ["cmovg", "reg_rm_v"], # 0x4c
[14, "sse_table"], [["sqrtps", "sqrtpd", "sqrtsd", "sqrtss"], "sse"], [["rsqrtps", "rsqrtss"], "sse_single"], [["rcpps", "rcpss"], "sse_single"], # 0x50
[["andps", "andpd"], "sse_packed"], [["andnps", "andnpd"], "sse_packed"], [["orps", "orpd"], "sse_packed"], [["xorps", "xorpd"], "sse_packed"], # 0x54
[["addps", "addpd", "addsd", "addss"], "sse"], [["mulps", "mulpd", "mulsd", "mulss"], "sse"], [15, "sse_table"], [16, "sse_table"], # 0x58
[["subps", "subpd", "subsd", "subss"], "sse"], [["minps", "minpd", "minsd", "minss"], "sse"], [["divps", "divpd", "divsd", "divss"], "sse"], [["maxps", "maxpd", "maxsd", "maxss"], "sse"], # 0x5c
[17, "sse_table"], [18, "sse_table"], [19, "sse_table"], ["packsswb", "mmx"], # 0x60
["pcmpgtb", "mmx"], ["pcmpgtw", "mmx"], ["pcmpgtd", "mmx"], ["packuswb", "mmx"], # 0x64
["punpckhbw", "mmx"], ["punpckhwd", "mmx"], ["punpckhdq", "mmx"], ["packssdw", "mmx"], # 0x68
["punpcklqdq", "mmx_sseonly"], ["punpckhqdq", "mmx_sseonly"], [20, "sse_table_incop64"], [21, "sse_table"], # 0x6c
[22, "sse_table_imm_8"], [0, "mmx_group"], [1, "mmx_group"], [2, "mmx_group"], # 0x70
["pcmpeqb", "mmx"], ["pcmpeqw", "mmx"], ["pcmpeqd", "mmx"], ["emms", "no_operands"], # 0x74
["vmread", "rm_reg_def64"], ["vmwrite", "rm_reg_def64"], [None, None], [None, None], # 0x78
[23, "sse_table"], [24, "sse_table"], [25, "sse_table_incop64_flip"], [21, "sse_table_flip"], # 0x7c
["jo", "relimm_v_def64"], ["jno", "relimm_v_def64"], ["jb", "relimm_v_def64"], ["jae", "relimm_v_def64"], # 0x80
["je", "relimm_v_def64"], ["jne", "relimm_v_def64"], ["jbe", "relimm_v_def64"], ["ja", "relimm_v_def64"], # 0x84
["js", "relimm_v_def64"], ["jns", "relimm_v_def64"], ["jpe", "relimm_v_def64"], ["jpo", "relimm_v_def64"], # 0x88
["jl", "relimm_v_def64"], ["jge", "relimm_v_def64"], ["jle", "relimm_v_def64"], ["jg", "relimm_v_def64"], # 0x8c
["seto", "rm_8"], ["setno", "rm_8"], ["setb", "rm_8"], ["setae", "rm_8"], # 0x90
["sete", "rm_8"], ["setne", "rm_8"], ["setbe", "rm_8"], ["seta", "rm_8"], # 0x94
["sets", "rm_8"], ["setns", "rm_8"], ["setpe", "rm_8"], ["setpo", "rm_8"], # 0x98
["setl", "rm_8"], ["setge", "rm_8"], ["setle", "rm_8"], ["setg", "rm_8"], # 0x9c
["push", "push_pop_seg"], ["pop", "push_pop_seg"], ["cpuid", "no_operands"], ["bt", "rm_reg_v"], # 0xa0
["shld", "rm_reg_imm8_v"], ["shld", "rm_reg_cl_v"], [None, None], [None, None], # 0xa4
["push", "push_pop_seg"], ["pop", "push_pop_seg"], ["rsm", "no_operands"], ["bts", "rm_reg_v_lock"], # 0xa8
["shrd", "rm_reg_imm8_v"], ["shrd", "rm_reg_cl_v"], [24, "group_0fae"], ["imul", "reg_rm_v"], # 0xac
["cmpxchg", "rm_reg_8_lock"], ["cmpxchg", "rm_reg_v_lock"], ["lss", "reg_rm_f"], ["btr", "rm_reg_v_lock"], # 0xb0
["lfs", "reg_rm_f"], ["lgs", "reg_rm_f"], ["movzx", "movsxzx_8"], ["movzx", "movsxzx_16"], # 0xb4
["popcnt", "_0fb8"], [None, None], [11, "group_rm_imm8_v"], ["btc", "rm_reg_v_lock"], # 0xb8
["bsf", "reg_rm_v"], ["bsr", "reg_rm_v"], ["movsx", "movsxzx_8"], ["movsx", "movsxzx_16"], # 0xbc
["xadd", "rm_reg_8_lock"], ["xadd", "rm_reg_v_lock"], [26, "sse_table_imm_8"], ["movnti", "movnti"], # 0xc0
[27, "pinsrw"], [28, "sse_table_imm_8"], [29, "sse_table_imm_8"], ["cmpxch8b", "cmpxch8b"], # 0xc4
["bswap", "op_reg_v"], ["bswap", "op_reg_v"], ["bswap", "op_reg_v"], ["bswap", "op_reg_v"], # 0xc8
["bswap", "op_reg_v"], ["bswap", "op_reg_v"], ["bswap", "op_reg_v"], ["bswap", "op_reg_v"], # 0xcc
[30, "sse_table"], ["psrlw", "mmx"], ["psrld", "mmx"], ["psrlq", "mmx"], # 0xd0
["paddq", "mmx"], ["pmullw", "mmx"], [31, "sse_table"], [32, "sse_table"], # 0xd4
["psubusb", "mmx"], ["psubusw", "mmx"], ["pminub", "mmx"], ["pand", "mmx"], # 0xd8
["paddusb", "mmx"], ["paddusw", "mmx"], ["pmaxub", "mmx"], ["pandn", "mmx"], # 0xdc
["pavgb", "mmx"], ["psraw", "mmx"], ["psrad", "mmx"], ["pavgw", "mmx"], # 0xe0
["pmulhuw", "mmx"], ["pmulhw", "mmx"], [33, "sse_table"], [34, "sse_table_flip"], # 0xe4
["psubsb", "mmx"], ["psubsw", "mmx"], ["pminsw", "mmx"], ["por", "mmx"], # 0xe8
["paddsb", "mmx"], ["paddsw", "mmx"], ["pmaxsw", "mmx"], ["pxor", "mmx"], # 0xec
[35, "sse_table"], ["psllw", "mmx"], ["pslld", "mmx"], ["psllq", "mmx"], # 0xf0
["pmuludq", "mmx"], ["pmaddwd", "mmx"], ["psadbw", "mmx"], [36, "sse_table"], # 0xf4
["psubb", "mmx"], ["psubw", "mmx"], ["psubd", "mmx"], ["psubq", "mmx"], # 0xf8
["paddb", "mmx"], ["paddw", "mmx"], ["paddd", "mmx"], ["ud", "no_operands"] # 0xfc
]
ThreeByte0F38Map = [
[0x00, "pshufb", "mmx"], [0x01, "phaddw", "mmx"], [0x02, "phaddd", "mmx"], [0x03, "phaddsw", "mmx"],
[0x04, "pmaddubsw", "mmx"], [0x05, "phsubw", "mmx"], [0x06, "phsubd", "mmx"], [0x07, "phsubsw", "mmx"],
[0x08, "psignb", "mmx"], [0x09, "psignw", "mmx"], [0x0a, "psignd", "mmx"], [0x0b, "pmulhrsw", "mmx"],
[0x10, "pblendvb", "mmx_sseonly"], [0x14, "blendvps", "mmx_sseonly"], [0x15, "blendvpd", "mmx_sseonly"],
[0x17, "ptest", "mmx_sseonly"], [0x1c, "pabsb", "mmx"], [0x1d, "pabsw", "mmx"], [0x1e, "pabsd", "mmx"],
[0x20, 37, "sse_table"], [0x21, 38, "sse_table"], [0x22, 39, "sse_table"], [0x23, 40, "sse_table"],
[0x24, 41, "sse_table"], [0x25, 42, "sse_table"], [0x28, "pmuldq", "mmx_sseonly"], [0x29, "pcmpeqq", "mmx_sseonly"],
[0x2a, 43, "sse_table"], [0x2b, "packusdw", "mmx_sseonly"], [0x30, 44, "sse_table"], [0x31, 45, "sse_table"],
[0x32, 46, "sse_table"], [0x33, 47, "sse_table"], [0x34, 48, "sse_table"], [0x35, 49, "sse_table"],
[0x37, "pcmpgtq", "mmx_sseonly"], [0x38, "pminsb", "mmx_sseonly"], [0x39, "pminsd", "mmx_sseonly"],
[0x3a, "pminuw", "mmx_sseonly"], [0x3b, "pminud", "mmx_sseonly"], [0x3c, "pmaxsb", "mmx_sseonly"],
[0x3d, "pmaxsd", "mmx_sseonly"], [0x3e, "pmaxuw", "mmx_sseonly"], [0x3f, "pmaxud", "mmx_sseonly"],
[0x40, "pmulld", "mmx_sseonly"], [0x41, "phminposuw", "mmx_sseonly"], [0xf0, "crc32", "crc32_8"], [0xf1, "crc32", "crc32_v"]
]
ThreeByte0F3AMap = [
[0x08, "roundps", "mmx_sseonly"], [0x09, "roundpd", "mmx_sseonly"], [0x0a, 50, "sse_table"], [0x0b, 51, "sse_table"],
[0x0c, "blendps", "mmx_sseonly"], [0x0d, "blendpd", "mmx_sseonly"], [0x0e, "pblendw", "mmx_sseonly"], [0x0f, "palignr", "mmx"],
[0x14, 52, "sse_table_mem8_flip"], [0x15, 53, "sse_table"], [0x16, 54, "sse_table_incop64_flip"],
[0x17, 55, "sse_table_flip"], [0x20, 56, "sse_table_mem8"], [0x21, 57, "sse_table"], [0x22, 58, "sse_table_incop64"],
[0x40, "dpps", "mmx_sseonly"], [0x41, "dppd", "mmx_sseonly"], [0x42, "mpsadbw", "mmx_sseonly"],
[0x60, "pcmpestrm", "mmx_sseonly"], [0x61, "pcmpestri", "mmx_sseonly"], [0x62, "pcmpistrm", "mmx_sseonly"],
[0x63, "pcmpistri", "mmx_sseonly"]
]
FPUMemOpcodeMap = [
[ # 0xd8
["fadd", "mem_32"], ["fmul", "mem_32"], ["fcom", "mem_32"], ["fcomp", "mem_32"], # 0
["fsub", "mem_32"], ["fsubr", "mem_32"], ["fdiv", "mem_32"], ["fdivr", "mem_32"] # 4
],
[ # 0xd9
["fld", "mem_32"], [None, None], ["fst", "mem_32"], ["fstp", "mem_32"], # 0
["fldenv", "mem_floatenv"], ["fldcw", "mem_16"], ["fstenv", "mem_floatenv"], ["fstcw", "mem_16"] # 4
],
[ # 0xda
["fiadd", "mem_32"], ["fimul", "mem_32"], ["ficom", "mem_32"], ["ficomp", "mem_32"], # 0
["fisub", "mem_32"], ["fisubr", "mem_32"], ["fidiv", "mem_32"], ["fidivr", "mem_32"] # 4
],
[ # 0xdb
["fild", "mem_32"], ["fisttp", "mem_32"], ["fist", "mem_32"], ["fistp", "mem_32"], # 0
[None, None], ["fld", "mem_80"], [None, None], ["fstp", "mem_80"] # 4
],
[ # 0xdc
["fadd", "mem_64"], ["fmul", "mem_64"], ["fcom", "mem_64"], ["fcomp", "mem_64"], # 0
["fsub", "mem_64"], ["fsubr", "mem_64"], ["fdiv", "mem_64"], ["fdivr", "mem_64"] # 4
],
[ # 0xdd
["fld", "mem_64"], ["fisttp", "mem_64"], ["fst", "mem_64"], ["fstp", "mem_64"], # 0
["frstor", "mem_floatsave"], [None, None], ["fsave", "mem_floatsave"], ["fstsw", "mem_16"] # 4
],
[ # 0xde
["fiadd", "mem_16"], ["fimul", "mem_16"], ["ficom", "mem_16"], ["ficomp", "mem_16"], # 0
["fisub", "mem_16"], ["fisubr", "mem_16"], ["fidiv", "mem_16"], ["fidivr", "mem_16"] # 4
],
[ # 0xdf
["fild", "mem_16"], ["fisttp", "mem_16"], ["fist", "mem_16"], ["fistp", "mem_16"], # 0
["fbld", "mem_80"], ["fild", "mem_64"], ["fbstp", "mem_80"], ["fistp", "mem_64"] # 4
]
]
FPURegOpcodeMap = [
[ # 0xd8
["fadd", "st0_fpureg"], ["fmul", "st0_fpureg"], ["fcom", "st0_fpureg"], ["fcomp", "st0_fpureg"], # 0
["fsub", "st0_fpureg"], ["fsubr", "st0_fpureg"], ["fdiv", "st0_fpureg"], ["fdivr", "st0_fpureg"] # 4
],
[ # 0xd9
["fld", "fpureg"], ["fxch", "st0_fpureg"], [12, "reggroup_no_operands"], [None, None], # 0
[13, "reggroup_no_operands"], [14, "reggroup_no_operands"], [15, "reggroup_no_operands"], [16, "reggroup_no_operands"] # 4
],
[ # 0xda
["fcmovb", "st0_fpureg"], ["fcmove", "st0_fpureg"], ["fcmovbe", "st0_fpureg"], ["fcmovu", "st0_fpureg"], # 0
[None, None], [17, "reggroup_no_operands"], [None, None], [None, None] # 4
],
[ # 0xdb
["fcmovnb", "st0_fpureg"], ["fcmovne", "st0_fpureg"], ["fcmovnbe", "st0_fpureg"], ["fcmovnu", "st0_fpureg"], # 0
[18, "reggroup_no_operands"], ["fucomi", "st0_fpureg"], ["fcomi", "st0_fpureg"], [21, "reggroup_no_operands"] # 4
],
[ # 0xdc
["fadd", "fpureg_st0"], ["fmul", "fpureg_st0"], [None, None], [None, None], # 0
["fsubr", "fpureg_st0"], ["fsub", "fpureg_st0"], ["fdivr", "fpureg_st0"], ["fdiv", "fpureg_st0"] # 4
],
[ # 0xdd
["ffree", "fpureg"], [None, None], ["fst", "fpureg"], ["fstp", "fpureg"], # 0
["fucom", "st0_fpureg"], ["fucomp", "st0_fpureg"], [None, None], [22, "reggroup_no_operands"] # 4
],
[ # 0xde
["faddp", "fpureg_st0"], ["fmulp", "fpureg_st0"], [None, None], [19, "reggroup_no_operands"], # 0
["fsubrp", "fpureg_st0"], ["fsubp", "fpureg_st0"], ["fdivrp", "fpureg_st0"], ["fdivp", "fpureg_st0"] # 4
],
[ # 0xdf
["ffreep", "fpureg"], [None, None], [None, None], [None, None], # 0
[20, "reggroup_ax"], ["fucomip", "st0_fpureg"], ["fcomip", "st0_fpureg"], [23, "reggroup_no_operands"] # 4
]
]
GroupOperations = [
["add", "or", "adc", "sbb", "and", "sub", "xor", "cmp"], # Group 0
["rol", "ror", "rcl", "rcr", "shl", "shr", "shl", "sar"], # Group 1
["mov", None, None, None, None, None, None, None], # Group 2
["test", "test", "not", "neg", "mul", "imul", "div", "idiv"], # Group 3
["inc", "dec", None, None, None, None, None, None], # Group 4
["inc", "dec", "calln", "callf", "jmpn", "jmpf", "push", None], # Group 5
["sldt", "str", "lldt", "ltr", "verr", "verw", None, None], # Group 6
["sgdt", "sidt", "lgdt", "lidt", "smsw", None, "lmsw", "invlpg"], # Group 7
["prefetch", "prefetchw", "prefetch", "prefetch", "prefetch", "prefetch", "prefetch", "prefetch"], # Group 8
["prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2", "mmxnop", "mmxnop", "mmxnop", "mmxnop"], # Group 9
["mmxnop", "mmxnop", "mmxnop", "mmxnop", "mmxnop", "mmxnop", "mmxnop", "mmxnop"], # Group 10
[None, None, None, None, "bt", "bts", "btr", "btc"], # Group 11
["fnop", None, None, None, None, None, None, None], # Group 12
["fchs", "fabs", None, None, "ftst", "fxam", None, None], # Group 13
["fld1", "fldl2t", "fldl2e", "fldpi", "fldlg2", "fldln2", "fldz", None], # Group 14
["f2xm1", "fyl2x", "fptan", "fpatan", "fxtract", "fprem1", "fdecstp", "fincstp"], # Group 15
["fprem", "fyl2xp1", "fsqrt", "fsincos", "frndint", "fscale", "fsin", "fcos"], # Group 16
[None, "fucompp", None, None, None, None, None, None], # Group 17
["feni", "fdisi", "fclex", "finit", "fsetpm", "frstpm", None, None], # Group 18
[None, "fcompp", None, None, None, None, None, None], # Group 19
["fstsw", "fstdw", "fstsg", None, None, None, None, None], # Group 20
[None, None, None, None, "frint2", None, None, None], # Group 21
[None, None, None, None, "frichop", None, None, None], # Group 22
[None, None, None, None, "frinear", None, None, None], # Group 23
["fxsave", "fxrstor", "ldmxcsr", "stmxcsr", "xsave", "xrstor", None, "clflush"], # Group 24
[None, None, None, None, None, "lfence", "mfence", "sfence"] # Group 25
]
Group0F01RegOperations = [
[None, "vmcall", "vmlaunch", "vmresume", "vmxoff", None, None, None],
["monitor", "mwait", None, None, None, None, None, None],
["xgetbv", "xsetbv", None, None, None, None, None, None],
[None, None, None, None, None, None, None, None],
[None, None, None, None, None, None, None, None],
[None, None, None, None, None, None, None, None],
[None, None, None, None, None, None, None, None],
["swapgs", "rdtscp", None, None, None, None, None, None]
]
MMXGroupOperations = [
[ # Group 0
[None, None], [None, None], ["psrlw", "psrlw"], [None, None],
["psraw", "psraw"], [None, None], ["psllw", "psllw"], [None, None]
],
[ # Group 1
[None, None], [None, None], ["psrld", "psrld"], [None, None],
["psrad", "psrad"], [None, None], ["pslld", "pslld"], [None, None]
],
[ # Group 2
[None, None], [None, None], ["psrlq", "psrlq"], [None, "psrldq"],
[None, None], [None, None], ["psllq", "psllq"], [None, "pslldq"]
]
]
SSETable = [
[ # Entry 0
[["movups", "sse_128", "sse_128"], ["movupd", "sse_128", "sse_128"], ["movsd", "sse_128", "sse_128"], ["movss", "sse_128", "sse_128"]],
[["movups", "sse_128", "sse_128"], ["movupd", "sse_128", "sse_128"], ["movsd", "sse_128", "sse_64"], ["movss", "sse_128", "sse_32"]]
],
[ # Entry 1
[["movhlps", "sse_128", "sse_128"], [None, 0, 0], ["movddup", "sse_128", "sse_128"], ["movsldup", "sse_128", "sse_128"]],
[["movlps", "sse_128", "sse_64"], ["movlpd", "sse_128", "sse_64"], ["movddup", "sse_128", "sse_64"], ["movsldup", "sse_128", "sse_128"]]
],
[ # Entry 2
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[["movlps", "sse_128", "sse_64"], ["movlpd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 3
[["unpcklps", "sse_128", "sse_128"], ["unpcklpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["unpcklps", "sse_128", "sse_128"], ["unpcklpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 4
[["unpckhps", "sse_128", "sse_128"], ["unpckhpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["unpckhps", "sse_128", "sse_128"], ["unpckhpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 5
[["movlhps", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0], ["movshdup", "sse_128", "sse_128"]],
[["movhps", "sse_128", "sse_64"], ["movhpd", "sse_128", "sse_64"], [None, 0, 0], ["movshdup", "sse_128", "sse_128"]]
],
[ # Entry 6
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[["movhps", "sse_128", "sse_64"], ["movhpd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 7
[["movaps", "sse_128", "sse_128"], ["movapd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["movaps", "sse_128", "sse_128"], ["movapd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 8
[["cvtpi2ps", "sse_128", "mmx_64"], ["cvtpi2pd", "sse_128", "mmx_64"], ["cvtsi2sd", "sse_128", "gpr_32_or_64"], ["cvtsi2ss", "sse_128", "gpr_32_or_64"]],
[["cvtpi2ps", "sse_128", "mmx_64"], ["cvtpi2pd", "sse_128", "mmx_64"], ["cvtsi2sd", "sse_128", "gpr_32_or_64"], ["cvtsi2ss", "sse_128", "gpr_32_or_64"]]
],
[ # Entry 9
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[["movntps", "sse_128", "sse_128"], ["movntpd", "sse_128", "sse_128"], ["movntsd", "sse_128", "sse_64"], ["movntss", "see_128", "sse_32"]]
],
[ # Entry 10
[["cvttps2pi", "mmx_64", "sse_128"], ["cvttpd2pi", "mmx_64", "sse_128"], ["cvttsd2si", "gpr_32_or_64", "sse_128"], ["cvttss2si", "gpr_32_or_64", "sse_128"]],
[["cvttps2pi", "mmx_64", "sse_64"], ["cvttpd2pi", "mmx_64", "sse_128"], ["cvttsd2si", "gpr_32_or_64", "sse_64"], ["cvttss2si", "gpr_32_or_64", "sse_32"]]
],
[ # Entry 11
[["cvtps2pi", "mmx_64", "sse_128"], ["cvtpd2pi", "mmx_64", "sse_128"], ["cvtsd2si", "gpr_32_or_64", "sse_128"], ["cvtss2si", "gpr_32_or_64", "sse_128"]],
[["cvtps2pi", "mmx_64", "sse_64"], ["cvtpd2pi", "mmx_64", "sse_128"], ["cvtsd2si", "gpr_32_or_64", "sse_64"], ["cvtss2si", "gpr_32_or_64", "sse_32"]]
],
[ # Entry 12
[["ucomiss", "sse_128", "sse_128"], ["ucomisd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["ucomiss", "sse_128", "sse_32"], ["ucomisd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 13
[["comiss", "sse_128", "sse_128"], ["comisd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["comiss", "sse_128", "sse_32"], ["comisd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 14
[["movmskps", "gpr_32_or_64", "sse_128"], ["movmskpd", "gpr_32_or_64", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 15
[["cvtps2pd", "sse_128", "sse_128"], ["cvtpd2ps", "sse_128", "sse_128"], ["cvtsd2ss", "sse_128", "sse_128"], ["cvtss2sd", "sse_128", "sse_128"]],
[["cvtps2pd", "sse_128", "sse_64"], ["cvtpd2ps", "sse_128", "sse_128"], ["cvtsd2ss", "sse_128", "sse_64"], ["cvtss2sd", "sse_128", "sse_32"]]
],
[ # Entry 16
[["cvtdq2ps", "sse_128", "sse_128"], ["cvtps2dq", "sse_128", "sse_128"], [None, 0, 0], ["cvttps2dq", "sse_128", "sse_128"]],
[["cvtdq2ps", "sse_128", "sse_128"], ["cvtps2dq", "sse_128", "sse_128"], [None, 0, 0], ["cvttps2dq", "sse_128", "sse_128"]]
],
[ # Entry 17
[["punpcklbw", "mmx_64", "mmx_64"], ["punpcklbw", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["punpcklbw", "mmx_64", "mmx_32"], ["punpcklbw", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 18
[["punpcklwd", "mmx_64", "mmx_64"], ["punpcklwd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["punpcklwd", "mmx_64", "mmx_32"], ["punpcklwd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 19
[["punpckldq", "mmx_64", "mmx_64"], ["punpckldq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["punpckldq", "mmx_64", "mmx_32"], ["punpckldq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 20
[[["movd", "movq"], "mmx_64", "gpr_32_or_64"], [["movd", "movq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[["movd", "movq"], "mmx_64", "gpr_32_or_64"], [["movd", "movq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 21
[["movq", "mmx_64", "mmx_64"], ["movdqa", "sse_128", "sse_128"], [None, 0, 0], ["movdqu", "sse_128", "sse_128"]],
[["movq", "mmx_64", "mmx_64"], ["movdqa", "sse_128", "sse_128"], [None, 0, 0], ["movdqu", "sse_128", "sse_128"]]
],
[ # Entry 22
[["pshufw", "mmx_64", "mmx_64"], ["pshufd", "sse_128", "sse_128"], ["pshuflw", "sse_128", "sse_128"], ["pshufhw", "sse_128", "sse_128"]],
[["pshufw", "mmx_64", "mmx_64"], ["pshufd", "sse_128", "sse_128"], ["pshuflw", "sse_128", "sse_128"], ["pshufhw", "sse_128", "sse_128"]]
],
[ # Entry 23
[[None, 0, 0], ["haddpd", "sse_128", "sse_128"], ["haddps", "sse_128", "sse_128"], [None, 0, 0]],
[[None, 0, 0], ["haddpd", "sse_128", "sse_128"], ["haddps", "sse_128", "sse_128"], [None, 0, 0]]
],
[ # Entry 24
[[None, 0, 0], ["hsubpd", "sse_128", "sse_128"], ["hsubps", "sse_128", "sse_128"], [None, 0, 0]],
[[None, 0, 0], ["hsubpd", "sse_128", "sse_128"], ["hsubps", "sse_128", "sse_128"], [None, 0, 0]]
],
[ # Entry 25
[[["movd", "movq"], "mmx_64", "gpr_32_or_64"], [["movd", "movq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], ["movq", "sse_128_flip", "sse_128_flip"]],
[[["movd", "movq"], "mmx_64", "gpr_32_or_64"], [["movd", "movq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], ["movq", "sse_128_flip", "sse_128_flip"]]
],
[ # Entry 26
[["cmpps", "sse_128", "sse_128"], ["cmppd", "sse_128", "sse_128"], ["cmpsd", "sse_128", "sse_128"], ["cmpss", "sse_128", "sse_128"]],
[["cmpps", "sse_128", "sse_128"], ["cmppd", "sse_128", "sse_128"], ["cmpsd", "sse_128", "sse_64"], ["cmpss", "sse_128", "sse_32"]]
],
[ # Entry 27
[["pinsrw", "mmx_64", "gpr_32_or_64"], ["pinsrw", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[["pinsrw", "mmx_64", "gpr_32_or_64"], ["pinsrw", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 28
[["pextrw", "gpr_32_or_64", "mmx_64"], ["pextrw", "gpr_32_or_64", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["pextrw", "gpr_32_or_64", "mmx_64"], ["pextrw", "gpr_32_or_64", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 29
[["shufps", "sse_128", "sse_128"], ["shufpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[["shufps", "sse_128", "sse_128"], ["shufpd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 30
[[None, 0, 0], ["addsubpd", "sse_128", "sse_128"], ["addsubps", "sse_128", "sse_128"], [None, 0, 0]],
[[None, 0, 0], ["addsubpd", "sse_128", "sse_128"], ["addsubps", "sse_128", "sse_128"], [None, 0, 0]]
],
[ # Entry 31
[[None, 0, 0], ["movq", "sse_128_flip", "sse_128_flip"], ["movdq2q", "mmx_64", "sse_128"], ["movq2dq", "sse_128", "mmx_64"]],
[[None, 0, 0], ["movq", "sse_128_flip", "sse_128_flip"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 32
[["pmovmskb", "gpr_32_or_64", "mmx_64"], ["pmovmskb", "gpr_32_or_64", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 33
[[None, 0, 0], ["cvttpd2dq", "sse_128", "sse_128"], ["cvtpd2dq", "sse_128", "sse_128"], ["cvtdq2pd", "sse_128", "sse_128"]],
[[None, 0, 0], ["cvttpd2dq", "sse_128", "sse_128"], ["cvtpd2dq", "sse_128", "sse_128"], ["cvtdq2pd", "sse_128", "sse_128"]]
],
[ # Entry 34
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[["movntq", "mmx_64", "mmx_64"], ["movntdq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 35
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [None, 0, 0], ["lddqu", "sse_128", "sse_128"], [None, 0, 0]]
],
[ # Entry 36
[["maskmovq", "mmx_64", "mmx_64"], ["maskmovdqu", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 37
[[None, 0, 0], ["pmovsxbw", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxbw", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 38
[[None, 0, 0], ["pmovsxbd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxbd", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 39
[[None, 0, 0], ["pmovsxbq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxbq", "sse_128", "sse_16"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 40
[[None, 0, 0], ["pmovsxwd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxwd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 41
[[None, 0, 0], ["pmovsxwq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxwq", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 42
[[None, 0, 0], ["pmovsxdq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovsxdq", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 43
[[None, 0, 0], [None, 0, 0], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["movntdqa", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 44
[[None, 0, 0], ["pmovzxbw", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxbw", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 45
[[None, 0, 0], ["pmovzxbd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxbd", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 46
[[None, 0, 0], ["pmovzxbq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxbq", "sse_128", "sse_16"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 47
[[None, 0, 0], ["pmovzxwd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxwd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 48
[[None, 0, 0], ["pmovzxwq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxwq", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 49
[[None, 0, 0], ["pmovzxdq", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pmovzxdq", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 50
[[None, 0, 0], ["roundss", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["roundss", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 51
[[None, 0, 0], ["roundsd", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["roundsd", "sse_128", "sse_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 52
[[None, 0, 0], ["pextrb", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pextrb", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 53
[[None, 0, 0], ["pextrw", "gpr_32_or_64", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pextrw", "sse_16", "sse_128"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 54
[[None, 0, 0], [["pextrd", "pextrq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [["pextrd", "pextrq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 55
[[None, 0, 0], ["extractps", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["extractps", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 56
[[None, 0, 0], ["pinsrb", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["pinsrb", "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 57
[[None, 0, 0], ["insertps", "sse_128", "sse_128"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], ["insertps", "sse_128", "sse_32"], [None, 0, 0], [None, 0, 0]]
],
[ # Entry 58
[[None, 0, 0], [["pinsrd", "pinsrq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]],
[[None, 0, 0], [["pinsrd", "pinsrq"], "sse_128", "gpr_32_or_64"], [None, 0, 0], [None, 0, 0]]
]
]
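# 3DNow! instructions are encoded as 0f 0f /r with a trailing opcode byte; this
# sorted list of [opcode, mnemonic] pairs is binary-searched in decode_3dnow below.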
Sparse3DNowOpcodes = [
[0x0c, "pi2fw"], [0x0d, "pi2fd"],
[0x1c, "pf2iw"], [0x1d, "pf2id"],
[0x86, "pfrcpv"], [0x87, "pfrsqrtv"], [0x8a, "pfnacc"], [0x8e, "pfpnacc"],
[0x90, "pfcmpge"], [0x94, "pfmin"], [0x96, "pfrcp"], [0x97, "pfrsqrt"], [0x9a, "pfsub"], [0x9e, "pfadd"],
[0xa0, "pfcmpgt"], [0xa4, "pfmax"], [0xa6, "pfrcpit1"], [0xa7, "pfrsqit1"], [0xaa, "pfsubr"], [0xae, "pfacc"],
[0xb0, "pfcmpeq"], [0xb4, "pfmul"], [0xb6, "pfrcpit2"], [0xb7, "pmulhrw"], [0xbb, "pswapd"], [0xbf, "pavgusb"]
]
Reg8List = ["al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"]
Reg8List64 = ["al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"]
Reg16List = ["ax", "cx", "dx", "bx", "sp", "bp", "si", "di", "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"]
Reg32List = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"]
Reg64List = ["rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
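# The MMX and FPU lists repeat their eight registers because a REX extension
# bit selects nothing new for them, so indexing with the +8 offset stays valid.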
MMXRegList = ["mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"]
XMMRegList = ["xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"]
FPURegList = ["st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7", "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7"]
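# 16-bit ModRM addressing forms as [base, index, default segment]; the ninth
# entry is the displacement-only form selected when mod == 0 and rm == 6.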
RM16Components = [["bx", "si", "ds"], ["bx", "di", "ds"], ["bp", "si", "ss"], ["bp", "di", "ss"], ["si", None, "ds"],
["di", None, "ds"], ["bp", None, "ss"], ["bx", None, "ds"], [None, None, "ds"]]
class InstructionOperand:
def __init__(self):
self.operand = None
self.components = [None, None]
self.scale = 1
self.size = 0
self.immediate = 0
self.segment = None
self.rip_relative = False
class Instruction:
def __init__(self):
self.operation = None
self.operands = [InstructionOperand(), InstructionOperand(), InstructionOperand()]
self.flags = 0
self.segment = None
self.length = 0
def finalize(self):
while (len(self.operands) > 0) and (self.operands[-1].operand == None):
self.operands.pop()
class DecodeState:
def __init__(self):
self.result = Instruction()
self.opcode_offset = 0
self.flags = 0
self.invalid = False
self.insufficient_length = False
self.op_prefix = False
self.rep = None # "repe"/"repne" once a rep prefix is seen; False would wrongly satisfy the "!= None" check in process_encoding
self.using64 = False
self.rex = False
self.rex_rm1 = False
self.rex_rm2 = False
self.rex_reg = False
def get_byte_reg_list(state):
if state.rex:
return Reg8List64
else:
return Reg8List
def get_reg_list_for_final_op_size(state):
if state.final_op_size == 1:
return get_byte_reg_list(state)
if state.final_op_size == 2:
return Reg16List
if state.final_op_size == 4:
return Reg32List
if state.final_op_size == 8:
return Reg64List
def get_reg_list_for_addr_size(state):
if state.addr_size == 2:
return Reg16List
if state.addr_size == 4:
return Reg32List
if state.addr_size == 8:
return Reg64List
def get_final_op_size(state):
if state.flags & DEC_FLAG_BYTE:
return 1
else:
return state.op_size
def read8(state):
if len(state.opcode) < 1:
# Read past end of buffer; returning 0xcc (int3) from here on guarantees decoding stops
state.invalid = True
state.insufficient_length = True
state.opcode = ""
return 0xcc
val = ord(state.opcode[0])
state.opcode = state.opcode[1:]
state.prev_opcode = val
state.opcode_offset += 1
return val
def peek8(state):
if len(state.opcode) < 1:
# Read past end of buffer; returning 0xcc (int3) from here on guarantees decoding stops
state.invalid = True
state.insufficient_length = True
state.opcode = ""
return 0xcc
val = ord(state.opcode[0])
return val
def read16(state):
val = read8(state)
val |= read8(state) << 8
return val
def read32(state):
val = read16(state)
val |= read16(state) << 16
return val
def read64(state):
val = read32(state)
val |= read32(state) << 32
return val
def read8_signed(state):
val = read8(state)
if val & 0x80:
val = -(0x100 - val)
return val
def read16_signed(state):
val = read16(state)
if val & 0x8000:
val = -(0x10000 - val)
return val
def read32_signed(state):
val = read32(state)
if val & 0x80000000:
val = -(0x100000000 - val)
return val
def read_final_op_size(state):
if state.flags & DEC_FLAG_IMM_SX:
return read8_signed(state)
if state.final_op_size == 1:
return read8(state)
if state.final_op_size == 2:
return read16(state)
if state.final_op_size == 4:
return read32(state)
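# 64-bit operations still carry a 32-bit immediate that is sign-extended
# (mov reg, imm64 is the exception, handled in decode_op_reg_imm)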
if state.final_op_size == 8:
return read32_signed(state)
def read_addr_size(state):
if state.addr_size == 2:
return read16(state)
if state.addr_size == 4:
return read32(state)
if state.addr_size == 8:
return read64(state)
def read_signed_final_op_size(state):
if state.final_op_size == 1:
return read8_signed(state)
if state.final_op_size == 2:
return read16_signed(state)
if state.final_op_size == 4:
return read32_signed(state)
if state.final_op_size == 8:
return read32_signed(state)
def update_operation_for_addr_size(state):
if state.addr_size == 4:
state.result.operation = state.result.operation[1]
elif state.addr_size == 8:
state.result.operation = state.result.operation[2]
else:
state.result.operation = state.result.operation[0]
def process_encoding(state, encoding):
state.result.operation = encoding[0]
encoder = encoding[1]
state.flags = Encoding[encoder][1]
if state.using64 and (state.flags & DEC_FLAG_INVALID_IN_64BIT):
state.invalid = True
return
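# In long mode these instructions (pushes, branches, ...) default to 64-bit
# operands; an operand-size prefix still selects 16-bit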
if state.using64 and (state.flags & DEC_FLAG_DEFAULT_TO_64BIT):
if state.op_prefix:
state.op_size = 2
else:
state.op_size = 8
state.final_op_size = get_final_op_size(state)
if state.flags & DEC_FLAG_FLIP_OPERANDS:
state.operand0 = state.result.operands[1]
state.operand1 = state.result.operands[0]
else:
state.operand0 = state.result.operands[0]
state.operand1 = state.result.operands[1]
if state.flags & DEC_FLAG_FORCE_16BIT:
state.final_op_size = 2
if state.flags & DEC_FLAG_OPERATION_OP_SIZE:
if state.final_op_size == 4:
state.result.operation = state.result.operation[1]
elif state.final_op_size == 8:
if len(state.result.operation) < 3:
state.final_op_size = 4
state.result.operation = state.result.operation[1]
else:
state.result.operation = state.result.operation[2]
else:
state.result.operation = state.result.operation[0]
if state.flags & DEC_FLAG_REP:
if state.rep != None:
state.result.flags |= FLAG_REP
elif state.flags & DEC_FLAG_REP_COND:
if state.rep == "repne":
state.result.flags |= FLAG_REPNE
elif state.rep == "repe":
state.result.flags |= FLAG_REPE
Encoding[encoder][0](state)
if state.result.operation == None:
state.invalid = True
if state.result.flags & FLAG_LOCK:
# Ensure instruction allows lock and it has proper semantics
if (state.flags & DEC_FLAG_LOCK) == 0:
state.invalid = True
elif state.result.operation == "cmp":
state.invalid = True
elif (state.result.operands[0].operand != "mem") and (state.result.operands[1].operand != "mem"):
state.invalid = True
def process_opcode(state, map, opcode):
process_encoding(state, map[opcode])
def process_sparse_opcode(state, map, opcode):
state.result.operation = None
min = 0
max = len(map) - 1
while min <= max:
i = (min + max) // 2
if opcode > map[i][0]:
min = i + 1
elif opcode < map[i][0]:
max = i - 1
else:
process_encoding(state, [map[i][1], map[i][2]])
break
def get_final_segment(state, seg):
if state.result.segment == None:
return seg
else:
return state.result.segment
def set_mem_operand(state, oper, rmdef, immed):
oper.operand = "mem"
oper.components = [rmdef[0], rmdef[1]]
oper.immediate = immed
oper.segment = get_final_segment(state, rmdef[2])
def decode_rm(state, rm_oper, reg_list, rm_size):
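# ModRM byte layout: mod in bits 6-7, reg in bits 3-5, rm in bits 0-2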
rm_byte = read8(state)
mod = rm_byte >> 6
rm = rm_byte & 7
reg_field = (rm_byte >> 3) & 7
rm_oper.size = rm_size
if state.addr_size == 2:
if mod == 0:
if rm == 6:
rm = 8
set_mem_operand(state, rm_oper, RM16Components[rm], read16(state))
else:
set_mem_operand(state, rm_oper, RM16Components[rm], 0)
elif mod == 1:
set_mem_operand(state, rm_oper, RM16Components[rm], read8_signed(state))
elif mod == 2:
set_mem_operand(state, rm_oper, RM16Components[rm], read16_signed(state))
elif mod == 3:
rm_oper.operand = reg_list[rm]
if rm_oper.components[0] == None:
rm_oper.immediate &= 0xffff
else:
addr_reg_list = get_reg_list_for_addr_size(state)
if state.rex_rm1:
rm_reg1_offset = 8
else:
rm_reg1_offset = 0
if state.rex_rm2:
rm_reg2_offset = 8
else:
rm_reg2_offset = 0
seg = None
rm_oper.operand = "mem"
if (mod != 3) and (rm == 4):
# SIB byte present
sib_byte = read8(state)
base = sib_byte & 7
index = (sib_byte >> 3) & 7
rm_oper.scale = 1 << (sib_byte >> 6)
if (mod != 0) or (base != 5):
rm_oper.components[0] = addr_reg_list[base + rm_reg1_offset]
if (index + rm_reg2_offset) != 4:
rm_oper.components[1] = addr_reg_list[index + rm_reg2_offset]
if mod == 0:
if base == 5:
rm_oper.immediate = read32_signed(state)
elif mod == 1:
rm_oper.immediate = read8_signed(state)
elif mod == 2:
rm_oper.immediate = read32_signed(state)
if ((base + rm_reg1_offset) == 4) or ((base + rm_reg1_offset) == 5):
seg = "ss"
else:
seg = "ds"
else:
if mod == 0:
if rm == 5:
rm_oper.immediate = read32_signed(state)
if state.addr_size == 8:
rm_oper.rip_relative = True
state.result.flags |= FLAG_64BIT_ADDRESS
else:
rm_oper.components[0] = addr_reg_list[rm + rm_reg1_offset]
seg = "ds"
elif mod == 1:
rm_oper.components[0] = addr_reg_list[rm + rm_reg1_offset]
rm_oper.immediate = read8_signed(state)
if rm == 5:
seg = "ss"
else:
seg = "ds"
elif mod == 2:
rm_oper.components[0] = addr_reg_list[rm + rm_reg1_offset]
rm_oper.immediate = read32_signed(state)
if rm == 5:
seg = "ss"
else:
seg = "ds"
elif mod == 3:
rm_oper.operand = reg_list[rm + rm_reg1_offset]
if seg != None:
rm_oper.segment = get_final_segment(state, seg)
return reg_field
def decode_rm_reg(state, rm_oper, rm_reg_list, rm_size, reg_oper, reg_list, reg_size):
reg = decode_rm(state, rm_oper, rm_reg_list, rm_size)
if reg_oper != None:
if state.rex_reg:
reg_offset = 8
else:
reg_offset = 0
reg_oper.size = reg_size
reg_oper.operand = reg_list[reg + reg_offset]
def set_operand_to_es_edi(state, oper, size):
addr_reg_list = get_reg_list_for_addr_size(state)
oper.operand = "mem"
oper.components[0] = addr_reg_list[7]
oper.size = size
oper.segment = "es"
def set_operand_to_ds_esi(state, oper, size):
addr_reg_list = get_reg_list_for_addr_size(state)
oper.operand = "mem"
oper.components[0] = addr_reg_list[6]
oper.size = size
oper.segment = get_final_segment(state, "ds")
def set_operand_to_imm_addr(state, oper):
oper.operand = "mem"
oper.immediate = read_addr_size(state)
oper.segment = get_final_segment(state, "ds")
oper.size = state.final_op_size
def set_operand_to_eax_final_op_size(state, oper):
reg_list = get_reg_list_for_final_op_size(state)
oper.operand = reg_list[0]
oper.size = state.final_op_size
def set_operand_to_op_reg(state, oper):
reg_list = get_reg_list_for_final_op_size(state)
if state.rex_rm1:
reg_offset = 8
else:
reg_offset = 0
oper.operand = reg_list[(state.prev_opcode & 7) + reg_offset]
oper.size = state.final_op_size
def set_operand_to_imm(state, oper):
oper.operand = "imm"
oper.size = state.final_op_size
oper.immediate = read_final_op_size(state)
def set_operand_to_imm8(state, oper):
oper.operand = "imm"
oper.size = 1
oper.immediate = read8(state)
def set_operand_to_imm16(state, oper):
oper.operand = "imm"
oper.size = 2
oper.immediate = read16(state)
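# SSE opcodes are distinguished by a mandatory prefix; the returned column
# index is 0 = none, 1 = 0x66, 2 = 0xf2 (repne), 3 = 0xf3 (repe)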
def decode_sse_prefix(state):
if state.op_prefix:
state.op_prefix = False
return 1
elif state.rep == "repne":
state.rep = None
return 2
elif state.rep == "repe":
state.rep = None
return 3
else:
return 0
def get_size_for_sse_type(type):
if type == 2:
return 8
elif type == 3:
return 4
else:
return 16
def get_operand_for_sse_entry_type(state, type, operand_index):
if type == "sse_128_flip":
operand_index = 1 - operand_index
if operand_index == 0:
return state.operand0
else:
return state.operand1
def get_reg_list_for_sse_entry_type(state, type):
if type == "mmx_32":
return MMXRegList
if type == "mmx_64":
return MMXRegList
if type == "gpr_32_or_64":
if state.final_op_size == 8:
return Reg64List
else:
return Reg32List
return XMMRegList
def get_size_for_sse_entry_type(state, type):
if type == "sse_16":
return 2
if type == "sse_32":
return 4
if type == "mmx_32":
return 4
if type == "sse_64":
return 8
if type == "mmx_64":
return 8
if type == "gpr_32_or_64":
if state.final_op_size == 8:
return 8
else:
return 4
return 16
def update_operation_for_sse_entry_type(state, type):
if (type == "gpr_32_or_64") and (state.final_op_size == 8):
state.result.operation = state.result.operation[1]
elif type == "gpr_32_or_64":
state.result.operation = state.result.operation[0]
def invalid_decode(state):
state.invalid = True
def decode_two_byte(state):
opcode = read8(state)
if opcode == 0x38:
process_sparse_opcode(state, ThreeByte0F38Map, read8(state))
elif opcode == 0x3a:
process_sparse_opcode(state, ThreeByte0F3AMap, read8(state))
set_operand_to_imm8(state, state.result.operands[2])
else:
process_opcode(state, TwoByteOpcodeMap, opcode)
def decode_fpu(state):
mod_rm = peek8(state)
reg = (mod_rm >> 3) & 7
op = state.result.operation
if (mod_rm & 0xc0) == 0xc0:
map = FPURegOpcodeMap[op]
else:
map = FPUMemOpcodeMap[op]
process_encoding(state, map[reg])
def decode_no_operands(state):
pass
def decode_reg_rm(state):
size = state.final_op_size
reg_list = get_reg_list_for_final_op_size(state)
if (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_2X_SIZE:
size *= 2
elif (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_FAR_SIZE:
size += 2
elif (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_NO_SIZE:
size = 0
decode_rm_reg(state, state.operand1, reg_list, size, state.operand0, reg_list, state.final_op_size)
if (size != state.final_op_size) and (state.operand1.operand != "mem"):
state.invalid = True
def decode_reg_rm_imm(state):
reg_list = get_reg_list_for_final_op_size(state)
decode_rm_reg(state, state.operand1, reg_list, state.final_op_size,
state.operand0, reg_list, state.final_op_size)
set_operand_to_imm(state, state.result.operands[2])
def decode_rm_reg_imm8(state):
reg_list = get_reg_list_for_final_op_size(state)
decode_rm_reg(state, state.operand0, reg_list, state.final_op_size,
state.operand1, reg_list, state.final_op_size)
set_operand_to_imm8(state, state.result.operands[2])
def decode_rm_reg_cl(state):
reg_list = get_reg_list_for_final_op_size(state)
decode_rm_reg(state, state.operand0, reg_list, state.final_op_size,
state.operand1, reg_list, state.final_op_size)
state.result.operands[2].operand = "cl"
state.result.operands[2].size = 1
def decode_eax_imm(state):
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_imm(state, state.operand1)
def decode_push_pop_seg(state):
offset = 0
if state.prev_opcode >= 0xa0: # FS/GS
offset = -16
state.operand0.operand = ["es", "cs", "ss", "ds", "fs", "gs"][(state.prev_opcode >> 3) + offset]
state.operand0.size = state.final_op_size
def decode_op_reg(state):
set_operand_to_op_reg(state, state.operand0)
def decode_eax_op_reg(state):
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_op_reg(state, state.operand1)
def decode_op_reg_imm(state):
set_operand_to_op_reg(state, state.operand0)
state.operand1.operand = "imm"
state.operand1.size = state.final_op_size
if state.final_op_size == 8:
state.operand1.immediate = read64(state)
else:
state.operand1.immediate = read_final_op_size(state)
def decode_nop(state):
if state.rex_rm1:
state.result.operation = "xchg"
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_op_reg(state, state.operand1)
def decode_imm(state):
set_operand_to_imm(state, state.operand0)
def decode_imm16_imm8(state):
set_operand_to_imm16(state, state.operand0)
set_operand_to_imm8(state, state.operand1)
def decode_edi_dx(state):
set_operand_to_es_edi(state, state.operand0, state.final_op_size)
state.operand1.operand = "dx"
state.operand1.size = 2
def decode_dx_esi(state):
state.operand0.operand = "dx"
state.operand0.size = 2
set_operand_to_ds_esi(state, state.operand1, state.final_op_size)
def decode_rel_imm(state):
state.operand0.operand = "imm"
state.operand0.size = state.op_size
state.operand0.immediate = read_signed_final_op_size(state)
state.operand0.immediate += state.addr + state.opcode_offset
def decode_rel_imm_addr_size(state):
decode_rel_imm(state)
update_operation_for_addr_size(state)
def decode_group_rm(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
def decode_group_rm_imm(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
set_operand_to_imm(state, state.operand1)
def decode_group_rm_imm8v(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
set_operand_to_imm8(state, state.operand1)
def decode_group_rm_one(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
state.operand1.operand = "imm"
state.operand1.size = 1
state.operand1.immediate = 1
def decode_group_rm_cl(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
state.operand1.operand = "cl"
state.operand1.size = 1
def decode_group_f6_f7(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
if state.result.operation == "test":
set_operand_to_imm(state, state.operand1)
# Check for valid locking semantics
if (state.result.flags & FLAG_LOCK) and (state.result.operation != "not") and (state.result.operation != "neg"):
state.invalid = True
def decode_group_ff(state):
if state.using64:
# Jumps, calls, and pushes default to 64-bit operands in long mode
rm = peek8(state)
reg_field = (rm >> 3) & 7
if (reg_field == 2) or (reg_field == 4):
if state.op_prefix:
state.final_op_size = 4
state.op_size = 4
else:
state.final_op_size = 8
state.op_size = 8
elif reg_field == 6:
if state.op_prefix:
state.final_op_size = 2
state.op_size = 2
else:
state.final_op_size = 8
state.op_size = 8
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
# Check for valid far jump/call semantics
if (state.result.operation == "callf") or (state.result.operation == "jmpf"):
if state.operand0.operand != "mem":
state.invalid = True
state.operand0.size += 2
# Check for valid locking semantics
if (state.result.flags & FLAG_LOCK) and (state.result.operation != "inc") and (state.result.operation != "dec"):
state.invalid = True
def decode_group_0f00(state):
rm = peek8(state)
mod_field = (rm >> 6) & 3
reg_field = (rm >> 3) & 7
if ((mod_field != 3) and (reg_field < 2)) or ((reg_field >= 2) and (reg_field <= 5)):
state.final_op_size = 2
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
def decode_group_0f01(state):
rm = peek8(state)
mod_field = (rm >> 6) & 3
reg_field = (rm >> 3) & 7
rm_field = rm & 7
if (mod_field == 3) and (reg_field != 4) and (reg_field != 6):
state.result.operation = Group0F01RegOperations[reg_field][rm_field]
read8(state)
else:
if reg_field < 4:
if state.using64:
state.final_op_size = 10
else:
state.final_op_size = 6
elif ((mod_field != 3) and (reg_field == 4)) or (reg_field == 6):
state.final_op_size = 2
elif reg_field == 7:
state.final_op_size = 1
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
def decode_group_0fae(state):
rm = peek8(state)
mod_field = (rm >> 6) & 3
reg_field = (rm >> 3) & 7
if mod_field == 3:
state.result.operation = GroupOperations[state.result.operation + 1][reg_field]
read8(state)
else:
if (reg_field & 2) == 0:
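# fxsave/fxrstor operate on a 512-byte memory area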
state.final_op_size = 512
elif (reg_field & 6) == 2:
state.final_op_size = 4
else:
state.final_op_size = 1
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
state.result.operation = GroupOperations[state.result.operation][reg_field]
def decode_0fb8(state):
if state.rep != "repe":
if state.using64:
if state.op_prefix:
state.op_size = 4
else:
state.op_size = 8
state.final_op_size = get_final_op_size(state)
state.operand0.operand = "imm"
state.operand0.size = state.final_op_size
state.operand0.immediate = read_signed_final_op_size(state)
state.operand0.immediate += state.addr + state.opcode_offset
else:
size = state.final_op_size
reg_list = get_reg_list_for_final_op_size(state)
if (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_2X_SIZE:
size *= 2
elif (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_FAR_SIZE:
size += 2
elif (state.flags & DEC_FLAG_REG_RM_SIZE_MASK) == DEC_FLAG_REG_RM_NO_SIZE:
size = 0
decode_rm_reg(state, state.operand1, reg_list, size, state.operand0, reg_list, state.final_op_size)
if (size != state.final_op_size) and (state.operand1.operand != "mem"):
state.invalid = True
def decode_rm_sreg_v(state):
reg_list = get_reg_list_for_final_op_size(state)
reg_field = decode_rm(state, state.operand0, reg_list, state.final_op_size)
if reg_field >= 6:
state.invalid = True
state.operand1.operand = ["es", "cs", "ss", "ds", "fs", "gs", None, None][reg_field]
state.operand1.size = 2
if state.result.operands[0].operand == "cs":
state.invalid = True
def decode_rm8(state):
reg_list = get_byte_reg_list(state)
decode_rm(state, state.operand0, reg_list, 1)
def decode_rm_v(state):
reg_list = get_reg_list_for_final_op_size(state)
decode_rm(state, state.operand0, reg_list, state.final_op_size)
def decode_far_imm(state):
set_operand_to_imm(state, state.operand1)
set_operand_to_imm16(state, state.operand0)
def decode_eax_addr(state):
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_imm_addr(state, state.operand1)
if state.addr_size == 8:
state.result.flags |= FLAG_64BIT_ADDRESS
def decode_edi_esi(state):
set_operand_to_es_edi(state, state.operand0, state.final_op_size)
set_operand_to_ds_esi(state, state.operand1, state.final_op_size)
def decode_edi_eax(state):
set_operand_to_es_edi(state, state.operand0, state.final_op_size)
set_operand_to_eax_final_op_size(state, state.operand1)
def decode_eax_esi(state):
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_ds_esi(state, state.operand1, state.final_op_size)
def decode_al_ebx_al(state):
reg_list = get_reg_list_for_addr_size(state)
state.operand0.operand = "al"
state.operand0.size = 1
state.operand1.operand = "mem"
state.operand1.components = [reg_list[3], "al"]
state.operand1.size = 1
state.operand1.segment = get_final_segment(state, "ds")
def decode_eax_imm8(state):
set_operand_to_eax_final_op_size(state, state.operand0)
set_operand_to_imm8(state, state.operand1)
def decode_eax_dx(state):
set_operand_to_eax_final_op_size(state, state.operand0)
state.operand1.operand = "dx"
state.operand1.size = 2
def decode_3dnow(state):
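# 3DNow!: the ModRM operands come first, then a trailing byte selects the
# operation out of Sparse3DNowOpcodes via binary search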
decode_rm_reg(state, state.operand1, MMXRegList, 8, state.operand0, MMXRegList, 8)
op = read8(state)
state.result.operation = None
min = 0
max = len(Sparse3DNowOpcodes) - 1
while min <= max:
i = (min + max) // 2
if op > Sparse3DNowOpcodes[i][0]:
min = i + 1
elif op < Sparse3DNowOpcodes[i][0]:
max = i - 1
else:
state.result.operation = Sparse3DNowOpcodes[i][1]
break
def decode_sse_table(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
entry = SSETable[state.result.operation]
if mod_field == 3:
op_entry = entry[0][type]
else:
op_entry = entry[1][type]
state.result.operation = op_entry[0]
decode_rm_reg(state, get_operand_for_sse_entry_type(state, op_entry[2], 1),
get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2]),
get_operand_for_sse_entry_type(state, op_entry[1], 0),
get_reg_list_for_sse_entry_type(state, op_entry[1]), get_size_for_sse_entry_type(state, op_entry[1]))
if state.flags & DEC_FLAG_INC_OPERATION_FOR_64:
update_operation_for_sse_entry_type(state, op_entry[1])
update_operation_for_sse_entry_type(state, op_entry[2])
def decode_sse_table_imm8(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
entry = SSETable[state.result.operation]
if mod_field == 3:
op_entry = entry[0][type]
else:
op_entry = entry[1][type]
state.result.operation = op_entry[0]
decode_rm_reg(state, get_operand_for_sse_entry_type(state, op_entry[2], 1),
get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2]),
get_operand_for_sse_entry_type(state, op_entry[1], 0),
get_reg_list_for_sse_entry_type(state, op_entry[1]), get_size_for_sse_entry_type(state, op_entry[1]))
if state.flags & DEC_FLAG_INC_OPERATION_FOR_64:
update_operation_for_sse_entry_type(state, op_entry[1])
update_operation_for_sse_entry_type(state, op_entry[2])
set_operand_to_imm8(state, state.result.operands[2])
def decode_sse_table_mem8(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
entry = SSETable[state.result.operation]
if mod_field == 3:
op_entry = entry[0][type]
else:
op_entry = entry[1][type]
state.result.operation = op_entry[0]
decode_rm_reg(state, get_operand_for_sse_entry_type(state, op_entry[2], 1),
get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2]),
get_operand_for_sse_entry_type(state, op_entry[1], 0),
get_reg_list_for_sse_entry_type(state, op_entry[1]), get_size_for_sse_entry_type(state, op_entry[1]))
if state.flags & DEC_FLAG_INC_OPERATION_FOR_64:
update_operation_for_sse_entry_type(state, op_entry[1])
update_operation_for_sse_entry_type(state, op_entry[2])
if state.operand0.operand == "mem":
state.operand0.size = 1
if state.operand1.operand == "mem":
state.operand1.size = 1
def decode_sse(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
state.result.operation = state.result.operation[type]
if mod_field == 3:
size = 16
else:
size = get_size_for_sse_type(type)
decode_rm_reg(state, state.operand1, XMMRegList, size, state.operand0, XMMRegList, 16)
def decode_sse_single(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
if (type == 1) or (type == 2):
state.invalid = True
else:
state.result.operation = state.result.operation[type & 1]
if mod_field == 3:
size = 16
else:
size = get_size_for_sse_type(type)
decode_rm_reg(state, state.operand1, XMMRegList, size, state.operand0, XMMRegList, 16)
def decode_sse_packed(state):
type = decode_sse_prefix(state)
if (type == 2) or (type == 3):
state.invalid = True
else:
state.result.operation = state.result.operation[type & 1]
decode_rm_reg(state, state.operand1, XMMRegList, 16, state.operand0, XMMRegList, 16)
def decode_mmx(state):
if state.op_prefix:
decode_rm_reg(state, state.operand1, XMMRegList, 16, state.operand0, XMMRegList, 16)
else:
decode_rm_reg(state, state.operand1, MMXRegList, 8, state.operand0, MMXRegList, 8)
def decode_mmx_sse_only(state):
if state.op_prefix:
decode_rm_reg(state, state.operand1, XMMRegList, 16, state.operand0, XMMRegList, 16)
else:
state.invalid = True
def decode_mmx_group(state):
if state.op_prefix:
reg_field = decode_rm(state, state.operand0, XMMRegList, 16)
state.result.operation = MMXGroupOperations[state.result.operation][reg_field][1]
else:
reg_field = decode_rm(state, state.operand0, MMXRegList, 8)
state.result.operation = MMXGroupOperations[state.result.operation][reg_field][0]
set_operand_to_imm8(state, state.operand1)
def decode_pinsrw(state):
type = decode_sse_prefix(state)
rm = peek8(state)
mod_field = (rm >> 6) & 3
entry = SSETable[state.result.operation]
if mod_field == 3:
op_entry = entry[0][type]
else:
op_entry = entry[1][type]
state.result.operation = op_entry[0]
decode_rm_reg(state, get_operand_for_sse_entry_type(state, op_entry[2], 1),
get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2]),
get_operand_for_sse_entry_type(state, op_entry[1], 0),
get_reg_list_for_sse_entry_type(state, op_entry[1]), get_size_for_sse_entry_type(state, op_entry[1]))
if state.flags & DEC_FLAG_INC_OPERATION_FOR_64:
update_operation_for_sse_entry_type(state, op_entry[1])
update_operation_for_sse_entry_type(state, op_entry[2])
set_operand_to_imm8(state, state.result.operands[2])
if state.operand1.operand == "mem":
state.operand1.size = 2
def decode_reg_cr(state):
if state.final_op_size == 2:
state.final_op_size = 4
reg_list = get_reg_list_for_final_op_size(state)
reg = read8(state)
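# A lock prefix on mov to/from a control register selects cr8-cr15 (the AMD
# convention), so treat it as an extra register-extension bit instead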
if state.result.flags & FLAG_LOCK:
state.result.flags &= ~FLAG_LOCK
state.rex_reg = True
if state.rex_rm1:
state.operand0.operand = reg_list[(reg & 7) + 8]
else:
state.operand0.operand = reg_list[(reg & 7)]
state.operand0.size = state.final_op_size
if state.rex_reg:
state.operand1.operand = state.result.operation[((reg >> 3) & 7) + 8]
else:
state.operand1.operand = state.result.operation[((reg >> 3) & 7)]
state.operand1.size = state.final_op_size
state.result.operation = "mov"
def decode_mov_sx_zx_8(state):
decode_rm_reg(state, state.operand1, get_byte_reg_list(state), 1, state.operand0,
get_reg_list_for_final_op_size(state), state.final_op_size)
def decode_mov_sx_zx_16(state):
decode_rm_reg(state, state.operand1, Reg16List, 2, state.operand0,
get_reg_list_for_final_op_size(state), state.final_op_size)
def decode_mem16(state):
decode_rm(state, state.operand0, Reg32List, 2)
if state.operand0.operand != "mem":
state.invalid = True
def decode_mem32(state):
decode_rm(state, state.operand0, Reg32List, 4)
if state.operand0.operand != "mem":
state.invalid = True
def decode_mem64(state):
decode_rm(state, state.operand0, Reg32List, 8)
if state.operand0.operand != "mem":
state.invalid = True
def decode_mem80(state):
decode_rm(state, state.operand0, Reg32List, 10)
if state.operand0.operand != "mem":
state.invalid = True
def decode_mem_float_env(state):
if state.final_op_size == 2:
decode_rm(state, state.operand0, Reg32List, 14)
else:
decode_rm(state, state.operand0, Reg32List, 28)
if state.operand0.operand != "mem":
state.invalid = True
def decode_mem_float_save(state):
if state.final_op_size == 2:
decode_rm(state, state.operand0, Reg32List, 94)
else:
decode_rm(state, state.operand0, Reg32List, 108)
if state.operand0.operand != "mem":
state.invalid = True
def decode_fpu_reg(state):
decode_rm(state, state.operand0, FPURegList, 10)
def decode_fpu_reg_st0(state):
decode_rm(state, state.operand0, FPURegList, 10)
state.operand1.operand = "st0"
state.operand1.size = 10
def decode_reg_group_no_operands(state):
rm_byte = read8(state)
state.result.operation = GroupOperations[state.result.operation][rm_byte & 7]
def decode_reg_group_ax(state):
rm_byte = read8(state)
state.result.operation = GroupOperations[state.result.operation][rm_byte & 7]
state.operand0.operand = "ax"
state.operand0.size = 2
def decode_cmpxch8b(state):
rm = peek8(state)
reg_field = (rm >> 3) & 7
if reg_field == 1:
if state.final_op_size == 2:
state.final_op_size = 4
elif state.final_op_size == 8:
state.result.operation = "cmpxch16b"
decode_rm(state, state.operand0, get_reg_list_for_final_op_size(state), state.final_op_size * 2)
elif reg_field == 6:
if state.op_prefix:
state.result.operation = "vmclear"
elif state.rep == "repe":
state.result.operation = "vmxon"
else:
state.result.operation = "vmptrld"
decode_rm(state, state.operand0, Reg64List, 8)
elif reg_field == 7:
state.result.operation = "vmptrst"
decode_rm(state, state.operand0, Reg64List, 8)
else:
state.invalid = True
if state.operand0.operand != "mem":
state.invalid = True
def decode_mov_nti(state):
if state.final_op_size == 2:
state.final_op_size = 4
decode_rm_reg(state, state.operand0, get_reg_list_for_final_op_size(state), state.final_op_size,
state.operand1, get_reg_list_for_final_op_size(state), state.final_op_size)
if state.operand0.operand != "mem":
state.invalid = True
def decode_crc32(state):
src_reg_list = get_reg_list_for_final_op_size(state)
if state.final_op_size == 8:
dest_reg_list = Reg64List
dest_size = 8
else:
dest_reg_list = Reg32List
dest_size = 4
decode_rm_reg(state, state.operand1, src_reg_list, state.final_op_size,
state.operand0, dest_reg_list, dest_size)
def decode_arpl(state):
if state.using64:
state.result.operation = "movsxd"
reg_list = get_reg_list_for_final_op_size(state)
decode_rm_reg(state, state.operand1, Reg32List, 4, state.operand0, reg_list, state.final_op_size)
else:
state.final_op_size = 2
reg_list = get_reg_list_for_final_op_size(state)
decode_rm_reg(state, state.operand0, reg_list, 2, state.operand1, reg_list, state.final_op_size)
Encoding = {
None : [invalid_decode, 0],
"two_byte" : [decode_two_byte, 0], "fpu" : [decode_fpu, 0],
"no_operands" : [decode_no_operands, 0], "op_size" : [decode_no_operands, DEC_FLAG_OPERATION_OP_SIZE],
"op_size_def64" : [decode_no_operands, DEC_FLAG_DEFAULT_TO_64BIT | DEC_FLAG_OPERATION_OP_SIZE],
"op_size_no64" : [decode_no_operands, DEC_FLAG_INVALID_IN_64BIT | DEC_FLAG_OPERATION_OP_SIZE],
"reg_rm_8" : [decode_reg_rm, DEC_FLAG_BYTE], "rm_reg_8" : [decode_reg_rm, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS],
"rm_reg_8_lock" : [decode_reg_rm, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_LOCK],
"rm_reg_16" : [decode_reg_rm, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_FORCE_16BIT],
"reg_rm_v" : [decode_reg_rm, 0], "rm_reg_v" : [decode_reg_rm, DEC_FLAG_FLIP_OPERANDS],
"rm_reg_v_lock" : [decode_reg_rm, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_LOCK],
"reg_rm2x_v" : [decode_reg_rm, DEC_FLAG_REG_RM_2X_SIZE], "reg_rm_imm_v" : [decode_reg_rm_imm, 0],
"reg_rm_immsx_v" : [decode_reg_rm_imm, DEC_FLAG_IMM_SX], "reg_rm_0" : [decode_reg_rm, DEC_FLAG_REG_RM_NO_SIZE],
"reg_rm_f" : [decode_reg_rm, DEC_FLAG_REG_RM_FAR_SIZE],
"rm_reg_def64" : [decode_reg_rm, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_DEFAULT_TO_64BIT],
"rm_reg_imm8_v" : [decode_rm_reg_imm8, 0], "rm_reg_cl_v" : [decode_rm_reg_cl, 0],
"eax_imm_8" : [decode_eax_imm, DEC_FLAG_BYTE], "eax_imm_v" : [decode_eax_imm, 0],
"push_pop_seg" : [decode_push_pop_seg, 0],
"op_reg_v" : [decode_op_reg, 0], "op_reg_v_def64" : [decode_op_reg, DEC_FLAG_DEFAULT_TO_64BIT],
"eax_op_reg_v" : [decode_eax_op_reg, 0], "op_reg_imm_8" : [decode_op_reg_imm, DEC_FLAG_BYTE],
"op_reg_imm_v" : [decode_op_reg_imm, 0], "nop" : [decode_nop, 0],
"imm_v_def64" : [decode_imm, DEC_FLAG_DEFAULT_TO_64BIT],
"immsx_v_def64" : [decode_imm, DEC_FLAG_IMM_SX | DEC_FLAG_DEFAULT_TO_64BIT],
"imm_8" : [decode_imm, DEC_FLAG_BYTE], "imm_16" : [decode_imm, DEC_FLAG_FORCE_16BIT],
"imm16_imm8" : [decode_imm16_imm8, 0],
"edi_dx_8_rep" : [decode_edi_dx, DEC_FLAG_BYTE | DEC_FLAG_REP],
"edi_dx_op_size_rep" : [decode_edi_dx, DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP],
"dx_esi_8_rep" : [decode_dx_esi, DEC_FLAG_BYTE | DEC_FLAG_REP],
"dx_esi_op_size_rep" : [decode_dx_esi, DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP],
"relimm_8_def64" : [decode_rel_imm, DEC_FLAG_BYTE | DEC_FLAG_DEFAULT_TO_64BIT],
"relimm_v_def64" : [decode_rel_imm, DEC_FLAG_DEFAULT_TO_64BIT],
"relimm_8_addr_size_def64" : [decode_rel_imm_addr_size, DEC_FLAG_BYTE | DEC_FLAG_DEFAULT_TO_64BIT],
"group_rm_8" : [decode_group_rm, DEC_FLAG_BYTE], "group_rm_v" : [decode_group_rm, 0],
"group_rm_8_lock" : [decode_group_rm, DEC_FLAG_BYTE | DEC_FLAG_LOCK],
"group_rm_0" : [decode_group_rm, DEC_FLAG_REG_RM_NO_SIZE],
"group_rm_imm_8" : [decode_group_rm_imm, DEC_FLAG_BYTE],
"group_rm_imm_8_lock" : [decode_group_rm_imm, DEC_FLAG_BYTE | DEC_FLAG_LOCK],
"group_rm_imm_8_no64_lock" : [decode_group_rm_imm, DEC_FLAG_BYTE | DEC_FLAG_INVALID_IN_64BIT | DEC_FLAG_LOCK],
"group_rm_imm8_v" : [decode_group_rm_imm8v, 0],
"group_rm_imm_v" : [decode_group_rm_imm, 0], "group_rm_imm_v_lock" : [decode_group_rm_imm, DEC_FLAG_LOCK],
"group_rm_immsx_v_lock" : [decode_group_rm_imm, DEC_FLAG_IMM_SX | DEC_FLAG_LOCK],
"group_rm_one_8" : [decode_group_rm_one, DEC_FLAG_BYTE], "group_rm_one_v" : [decode_group_rm_one, 0],
"group_rm_cl_8" : [decode_group_rm_cl, DEC_FLAG_BYTE], "group_rm_cl_v" : [decode_group_rm_cl, 0],
"group_f6" : [decode_group_f6_f7, DEC_FLAG_BYTE | DEC_FLAG_LOCK], "group_f7" : [decode_group_f6_f7, DEC_FLAG_LOCK],
"group_ff" : [decode_group_ff, DEC_FLAG_LOCK],
"group_0f00" : [decode_group_0f00, 0], "group_0f01" : [decode_group_0f01, 0], "group_0fae" : [decode_group_0fae, 0],
"_0fb8" : [decode_0fb8, 0],
"rm_sreg_v" : [decode_rm_sreg_v, 0], "sreg_rm_v" : [decode_rm_sreg_v, DEC_FLAG_FLIP_OPERANDS],
"rm_8" : [decode_rm8, 0], "rm_v_def64" : [decode_rm_v, DEC_FLAG_DEFAULT_TO_64BIT],
"far_imm_no64" : [decode_far_imm, DEC_FLAG_INVALID_IN_64BIT],
"eax_addr_8" : [decode_eax_addr, DEC_FLAG_BYTE], "eax_addr_v" : [decode_eax_addr, 0],
"addr_eax_8" : [decode_eax_addr, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS],
"addr_eax_v" : [decode_eax_addr, DEC_FLAG_FLIP_OPERANDS],
"edi_esi_8_rep" : [decode_edi_esi, DEC_FLAG_BYTE | DEC_FLAG_REP],
"edi_esi_op_size_rep" : [decode_edi_esi, DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP],
"esi_edi_8_repc" : [decode_edi_esi, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_REP_COND],
"esi_edi_op_size_repc" : [decode_edi_esi, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP_COND],
"edi_eax_8_rep" : [decode_edi_eax, DEC_FLAG_BYTE | DEC_FLAG_REP],
"edi_eax_op_size_rep" : [decode_edi_eax, DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP],
"eax_esi_8_rep" : [decode_eax_esi, DEC_FLAG_BYTE | DEC_FLAG_REP],
"eax_esi_op_size_rep" : [decode_eax_esi, DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP],
"eax_edi_8_repc" : [decode_edi_eax, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_REP_COND],
"eax_edi_op_size_repc" : [decode_edi_eax, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_OPERATION_OP_SIZE | DEC_FLAG_REP_COND],
"al_ebx_al" : [decode_al_ebx_al, 0],
"eax_imm8_8" : [decode_eax_imm8, DEC_FLAG_BYTE], "eax_imm8_v" : [decode_eax_imm8, 0],
"imm8_eax_8" : [decode_eax_imm8, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS],
"imm8_eax_v" : [decode_eax_imm8, DEC_FLAG_FLIP_OPERANDS],
"eax_dx_8" : [decode_eax_dx, DEC_FLAG_BYTE], "eax_dx_v" : [decode_eax_dx, 0],
"dx_eax_8" : [decode_eax_dx, DEC_FLAG_BYTE | DEC_FLAG_FLIP_OPERANDS],
"dx_eax_v" : [decode_eax_dx, DEC_FLAG_FLIP_OPERANDS], "_3dnow" : [decode_3dnow, 0],
"sse_table" : [decode_sse_table, 0], "sse_table_flip" : [decode_sse_table, DEC_FLAG_FLIP_OPERANDS],
"sse_table_imm_8" : [decode_sse_table_imm8, 0], "sse_table_imm_8_flip" : [decode_sse_table_imm8, DEC_FLAG_FLIP_OPERANDS],
"sse_table_incop64" : [decode_sse_table, DEC_FLAG_INC_OPERATION_FOR_64],
"sse_table_incop64_flip" : [decode_sse_table, DEC_FLAG_INC_OPERATION_FOR_64 | DEC_FLAG_FLIP_OPERANDS],
"sse_table_mem8" : [decode_sse_table_mem8, 0], "sse_table_mem8_flip" : [decode_sse_table_mem8, DEC_FLAG_FLIP_OPERANDS],
"sse" : [decode_sse, 0], "sse_single" : [decode_sse_single, 0], "sse_packed" : [decode_sse_packed, 0],
"mmx" : [decode_mmx, 0], "mmx_sseonly" : [decode_mmx_sse_only, 0],
"mmx_group" : [decode_mmx_group, 0], "pinsrw" : [decode_pinsrw, 0],
"reg_cr" : [decode_reg_cr, DEC_FLAG_DEFAULT_TO_64BIT | DEC_FLAG_LOCK],
"cr_reg" : [decode_reg_cr, DEC_FLAG_FLIP_OPERANDS | DEC_FLAG_DEFAULT_TO_64BIT | DEC_FLAG_LOCK],
"movsxzx_8" : [decode_mov_sx_zx_8, 0], "movsxzx_16" : [decode_mov_sx_zx_16, 0],
"mem_16" : [decode_mem16, 0], "mem_32" : [decode_mem32, 0], "mem_64" : [decode_mem64, 0], "mem_80" : [decode_mem80, 0],
"mem_floatenv" : [decode_mem_float_env, 0], "mem_floatsave" : [decode_mem_float_save, 0],
"fpureg" : [decode_fpu_reg, 0], "st0_fpureg" : [decode_fpu_reg_st0, DEC_FLAG_FLIP_OPERANDS],
"fpureg_st0" : [decode_fpu_reg_st0, 0],
"reggroup_no_operands" : [decode_reg_group_no_operands, 0], "reggroup_ax" : [decode_reg_group_ax, 0],
"cmpxch8b" : [decode_cmpxch8b, DEC_FLAG_LOCK], "movnti" : [decode_mov_nti, 0],
"crc32_8" : [decode_crc32, DEC_FLAG_BYTE], "crc32_v" : [decode_crc32, 0],
"arpl" : [decode_arpl, 0]
}
def x86_reg_size(reg):
if reg in Reg8List:
return 1
if reg in Reg8List64:
return 1
if reg in Reg16List:
return 2
if reg in Reg32List:
return 4
if reg in Reg64List:
return 8
if reg in MMXRegList:
return 8
if reg in XMMRegList:
return 16
return 10
def process_prefixes(state):
rex = 0
addr_prefix = False
while not state.invalid:
prefix = read8(state)
if (prefix >= 0x26) and (prefix <= 0x3e) and ((prefix & 7) == 6):
# Segment prefix
state.result.segment = ["es", "cs", "ss", "ds"][(prefix >> 3) - 4]
elif prefix == 0x64:
state.result.segment = "fs"
elif prefix == 0x65:
state.result.segment = "gs"
elif prefix == 0x66:
state.op_prefix = True
state.result.flags |= FLAG_OPSIZE
elif prefix == 0x67:
addr_prefix = True
state.result.flags |= FLAG_ADDRSIZE
elif prefix == 0xf0:
state.result.flags |= FLAG_LOCK
elif prefix == 0xf2:
state.rep = "repne"
elif prefix == 0xf3:
state.rep = "repe"
elif state.using64 and (prefix >= 0x40) and (prefix <= 0x4f):
# REX prefix
rex = prefix
continue
else:
# Not a prefix, continue instruction processing
state.opcode = chr(prefix) + state.opcode
state.opcode_offset -= 1
break
# Force ignore REX unless it is the last prefix
rex = 0
if state.op_prefix:
if state.op_size == 2:
state.op_size = 4
else:
state.op_size = 2
if addr_prefix:
if state.addr_size == 4:
state.addr_size = 2
else:
state.addr_size = 4
if rex != 0:
# REX prefix found before opcode
state.rex = True
state.rex_rm1 = (rex & 1) != 0
state.rex_rm2 = (rex & 2) != 0
state.rex_reg = (rex & 4) != 0
if (rex & 8) != 0:
state.op_size = 8
def finish_disassemble(state):
state.result.length = state.opcode_offset
for i in state.result.operands:
if i.rip_relative:
i.immediate += state.addr + state.result.length
if state.insufficient_length and (state.orig_len < 15):
state.result.flags |= FLAG_INSUFFICIENT_LENGTH
if state.invalid:
state.result.operation = None
state.result.finalize()
def disassemble16(opcode, addr):
state = DecodeState()
state.opcode = opcode
state.addr = addr
state.addr_size = 2
state.op_size = 2
state.using64 = False
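# x86 instructions are at most 15 bytes long, so extra input is truncated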
if len(state.opcode) > 15:
state.opcode = state.opcode[0:15]
state.orig_len = len(state.opcode)
process_prefixes(state)
process_opcode(state, MainOpcodeMap, read8(state))
finish_disassemble(state)
state.result.addr_size = state.addr_size
return state.result
def disassemble32(opcode, addr):
state = DecodeState()
state.opcode = opcode
state.addr = addr
state.addr_size = 4
state.op_size = 4
state.using64 = False
if len(state.opcode) > 15:
state.opcode = state.opcode[0:15]
state.orig_len = len(state.opcode)
process_prefixes(state)
process_opcode(state, MainOpcodeMap, read8(state))
finish_disassemble(state)
state.result.addr_size = state.addr_size
return state.result
def disassemble64(opcode, addr):
state = DecodeState()
state.opcode = opcode
state.addr = addr
state.addr_size = 8
state.op_size = 4
state.using64 = True
if len(state.opcode) > 15:
state.opcode = state.opcode[0:15]
state.orig_len = len(state.opcode)
process_prefixes(state)
process_opcode(state, MainOpcodeMap, read8(state))
finish_disassemble(state)
state.result.addr_size = state.addr_size
return state.result
def get_size_string(size):
if size == 1:
return "byte "
if size == 2:
return "word "
if size == 4:
return "dword "
if size == 6:
return "fword "
if size == 8:
return "qword "
if size == 10:
return "tword "
if size == 16:
return "oword "
return ""
def get_operand_string(type, scale, plus):
if plus:
result = "+"
else:
result = ""
result += type
if scale != 1:
result += "*%d" % scale
return result
def format_instruction_string(fmt, opcode, addr, instr):
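# Format directives: %a = address, %b = opcode bytes, %i = mnemonic (with
# lock/rep prefixes), %o = operand list; a decimal width (e.g. %16i) pads the field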
result = ""
i = 0
while i < len(fmt):
if fmt[i] == '%':
width = 0
i += 1
while i < len(fmt):
if fmt[i] == 'a':
if width == 0:
width = 8
result += ("%%.%dx" % width) % addr
break
elif fmt[i] == 'b':
for j in range(0, instr.length):
result += "%.2x" % ord(opcode[j])
for j in range(instr.length, width):
result += " "
break
elif fmt[i] == 'i':
operation = ""
if instr.flags & FLAG_LOCK:
operation += "lock "
if instr.flags & FLAG_ANY_REP:
operation += "rep"
if instr.flags & FLAG_REPNE:
operation += "ne"
elif instr.flags & FLAG_REPE:
operation += "e"
operation += " "
operation += instr.operation
for j in range(len(operation), width):
operation += " "
result += operation
break
elif fmt[i] == 'o':
for j in range(0, len(instr.operands)):
if j != 0:
result += ", "
if instr.operands[j].operand == "imm":
numfmt = "0x%%.%dx" % (instr.operands[j].size * 2)
result += numfmt % (instr.operands[j].immediate &
((1 << (instr.operands[j].size * 8)) - 1))
elif instr.operands[j].operand == "mem":
plus = False
result += get_size_string(instr.operands[j].size)
if (instr.segment != None) or (instr.operands[j].segment == "es"):
result += instr.operands[j].segment + ":"
result += '['
if instr.operands[j].components[0] != None:
result += instr.operands[j].components[0]
plus = True
if instr.operands[j].components[1] != None:
result += get_operand_string(instr.operands[j].components[1],
instr.operands[j].scale, plus)
plus = True
if (instr.operands[j].immediate != 0) or ((instr.operands[j].components[0] == None) and (instr.operands[j].components[1] == None)):
if plus and (instr.operands[j].immediate >= -0x80) and (instr.operands[j].immediate < 0):
result += '-'
result += "0x%.2x" % (-instr.operands[j].immediate)
elif plus and (instr.operands[j].immediate > 0) and (instr.operands[j].immediate <= 0x7f):
result += '+'
result += "0x%.2x" % instr.operands[j].immediate
elif (instr.flags & FLAG_64BIT_ADDRESS) != 0:
if plus:
result += '+'
result += "0x%.16x" % instr.operands[j].immediate
else:
if plus:
result += '+'
result += "0x%.8x" % (instr.operands[j].immediate & 0xffffffff)
result += ']'
else:
result += instr.operands[j].operand
break
elif (fmt[i] >= '0') and (fmt[i] <= '9'):
width = (width * 10) + (ord(fmt[i]) - 0x30)
else:
result += fmt[i]
break
i += 1
else:
result += fmt[i]
i += 1
return result
def disassemble16_to_string(fmt, opcode, addr):
instr = disassemble16(opcode, addr)
return format_instruction_string(fmt, opcode, addr, instr)
def disassemble32_to_string(fmt, opcode, addr):
instr = disassemble32(opcode, addr)
return format_instruction_string(fmt, opcode, addr, instr)
def disassemble64_to_string(fmt, opcode, addr):
instr = disassemble64(opcode, addr)
return format_instruction_string(fmt, opcode, addr, instr)
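# A minimal usage sketch (illustrative, not part of the original API surface;
# the byte string assumes the Python 2 str-based reader used throughout):
#
#   text = disassemble32_to_string("%a: %i %o", "\x89\xd8", 0x1000)
#   # -> "00001000: mov eax, ebx"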
# ==== train.py (LeiShenVictoria/Static-Dynamic-Attention-CNNRNN, MIT license) ====
] | null | null | null | import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
#from static_model import StaticModel
from CNNencoder import StaticModel
#from dyna_model import DynamicModel
from dynaMerge import DynamicModel
from data_utils import *
from pathlib import Path
username = Path.home().name
save_dir = Path(f'./data1/{username}/conversation/')
def init_command_line(argv):
from argparse import ArgumentParser
usage = "train"
description = ArgumentParser(usage)
description.add_argument("--w2v_file", type=str, default="./data/train_300e.w2v")
description.add_argument("--train_file", type=str, default="./data/train_cornell.txt")
description.add_argument("--max_context_size", type=int, default=9)
description.add_argument("--batch_size", type=int, default=80)
description.add_argument("--hidden_size", type=int, default=1024)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--type_model", type=int, default=1)
#description.add_argument('-kernel_sizes', type=str, default='2,3,4,5')
#description.add_argument('-kernel_num', type=int, default=256)
description.add_argument('-kernel_sizes', type=str, default='2,3')
description.add_argument('-kernel_num', type=int, default=512)
description.add_argument('-static', action='store_true', default=False)
description.add_argument("--lr", type=float, default=0.001)
description.add_argument("--weight_decay", type=float, default=1e-5)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--epochs", type=int, default=30)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--shuffle", type=int, default=1)
description.add_argument("--print_every", type=int, default=200)
description.add_argument("--save_model", type=int, default=1)
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" train_file:",opts.train_file)
print (" w2v_file:",opts.w2v_file)
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" hidden_size:",opts.hidden_size)
print (" max_senten_len:",opts.max_senten_len)
if opts.type_model:
print (" static model")
else:
print (" dynamic model")
print(" kernel_sizes:", opts.kernel_sizes)
print(" kernel_num:", opts.kernel_num)
print(" static embedding:", opts.static)
print (" learning rate:",opts.lr)
print (" weight_decay:",opts.weight_decay)
print (" dropout:",opts.dropout)
print (" epochs:",opts.epochs)
print (" teach_forcing:",opts.teach_forcing)
print (" shuffle:",opts.shuffle)
print (" print_every:",opts.print_every)
print (" save_model:",opts.save_model)
print (" weights:",opts.weights)
print ("")
opts.kernel_sizes = [int(k) for k in opts.kernel_sizes.split(',')]
print(" kernel_sizes_list:", opts.kernel_sizes)
def save_epoch_model(statedict, save_path, epoch):
    epoch = epoch + 1
    os.makedirs(save_path, exist_ok=True)  # creates parents too; safe if the dir already exists
    ckpt_path = os.path.join(save_path, f'{epoch}.pkl')
    print(f'Save parameters to {ckpt_path}')
    torch.save(statedict, ckpt_path)
def train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx):
loss = 0
model_optimizer.zero_grad()
list_pred = model(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,ini_idx)
    for idx, reply_tensor in enumerate(reply_tensor_batch):
        loss_s = criterion(list_pred[idx], reply_tensor.cuda())  # Variable wrapper is unnecessary since PyTorch 0.4
        loss += loss_s
    loss.backward()
    model_optimizer.step()
    return loss.item()  # .item() replaces the long-deprecated loss.data[0]
def train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,epochs,
batch_size,max_senten_len,max_context_size,print_every,save_model,shuffle):
print ("start training...")
model.train()
state_loss = 10000.0
for ei in range(epochs):
print ("Iteration {}: ".format(ei+1))
epoch_loss = 0
every_loss = 0
t0 = time.time()
pairs_batches,num_batches = buildingPairsBatch(corpus_pairs,batch_size,shuffle=shuffle)
print ("num_batches:",num_batches)
idx_batch = 0
for reply_tensor_batch, contexts_tensor_batch, pad_matrix_batch in getTensorsPairsBatch(word2index,pairs_batches,max_context_size):
loss = train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx)
epoch_loss += loss
every_loss += loss
            if (idx_batch+1) % print_every == 0:
                # running average over all batches so far (every_loss is never reset)
                every_avg_loss = every_loss/(max_senten_len*(idx_batch+1))
                print("{} batches finished, avg_loss:{}".format(idx_batch+1, every_avg_loss))
idx_batch += 1
epoch_avg_loss = epoch_loss/(max_senten_len*num_batches)
print ("epoch_avg_loss:",epoch_avg_loss)
        if save_model:  # optionally gate on `epoch_avg_loss < state_loss` to keep only the best model
            print("save model...")
            model_type = "static" if opts.type_model else "dynamic"
            if "cornell" in opts.train_file:
                corpus = "cornell"
            elif "ubuntu" in opts.train_file:
                corpus = "ubuntu"
            else:
                corpus = "opensubtitles"
            save_epoch_model(model.state_dict(), "./{}_{}_parameters".format(corpus, model_type), ei)
state_loss = epoch_avg_loss
print ("Iteration time:",time.time()-t0)
print ("=============================================" )
print ("")
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(opts.w2v_file,ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print ("")
ctable,corpus_pairs = readingData(ctable,opts.train_file,opts.max_senten_len,opts.max_context_size)
print (time.time()-t0)
print ("")
if opts.type_model:
# model = StaticModel(ctable.getDictSize(),ctable.getEmbSize(),opts.hidden_size,opts.batch_size,opts.dropout,
# opts.max_senten_len,opts.teach_forcing).cuda()
model = StaticModel(ctable.getDictSize(),ctable.getEmbSize(),opts.hidden_size,opts.batch_size,opts.dropout,opts.max_senten_len,opts.teach_forcing,opts.kernel_num,opts.kernel_sizes).cuda()
else:
model = DynamicModel(ctable.getDictSize(),ctable.getEmbSize(),opts.hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing, opts.kernel_num, opts.kernel_sizes).cuda()
    if opts.weights is not None:
print ("load weights...")
model.load_state_dict(torch.load(opts.weights))
else:
model.init_parameters(ctable.getEmbMatrix())
model_optimizer = optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
criterion = nn.NLLLoss()
print ("")
word2index = ctable.getWord2Index()
ini_idx = word2index[ini_char]
train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,opts.epochs,opts.batch_size,
opts.max_senten_len,opts.max_context_size,opts.print_every,opts.save_model,opts.shuffle)
print ("")
| 44.720588 | 195 | 0.676532 | 1,159 | 9,123 | 5.044003 | 0.160483 | 0.050291 | 0.079028 | 0.016421 | 0.413958 | 0.393774 | 0.371707 | 0.351865 | 0.314232 | 0.229046 | 0 | 0.0107 | 0.190727 | 9,123 | 203 | 196 | 44.940887 | 0.781119 | 0.136578 | 0 | 0.134615 | 0 | 0 | 0.136856 | 0.03781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.089744 | 0 | 0.128205 | 0.294872 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e4c314862be76fd6a5e4ccb10c67ae77671cff4 | 1,770 | py | Python | evaluation/experiment/sentence_classification/training.py | UKPLab/arxiv2018-xling-sentence-embeddings | 95305c1a3d6d3e8c5f5365db463ba11cc9bd33b1 | [
"Apache-2.0"
] | 196 | 2018-02-21T09:26:49.000Z | 2021-10-05T12:20:11.000Z | evaluation/experiment/sentence_classification/training.py | UKPLab/arxiv2018-xling-sentence-embeddings | 95305c1a3d6d3e8c5f5365db463ba11cc9bd33b1 | [
"Apache-2.0"
] | 3 | 2018-06-28T15:17:31.000Z | 2019-07-22T17:09:45.000Z | evaluation/experiment/sentence_classification/training.py | UKPLab/arxiv2018-xling-sentence-embeddings | 95305c1a3d6d3e8c5f5365db463ba11cc9bd33b1 | [
"Apache-2.0"
] | 24 | 2018-03-05T19:01:32.000Z | 2022-02-25T03:01:51.000Z | import math
import numpy as np
from experiment.utils.training import BatchedTraining
class SentenceClassificationBatchedTraining(BatchedTraining):
def __init__(self, config, config_global, logger):
super(SentenceClassificationBatchedTraining, self).__init__(config, config_global, logger)
self.n_batches = None
self.data = None
self.batch_i = 0
self.epoch_shuffle_indices = None
def get_feed_dict(self, model, data, sess):
batch_sents, batch_labels = self.get_next_batch(model, data, sess)
return {
model.input_sent: batch_sents,
model.input_label: batch_labels,
model.dropout_keep_prob: self.dropout_keep_prob
}
def prepare_next_epoch(self, model, data, sess, epoch):
self.epoch_learning_rate = self.initial_learning_rate
        if self.dynamic_learning_rate:
            # `epoch` is assumed to be 1-based; epoch 0 would divide by zero here
            self.epoch_learning_rate /= float(epoch)
self.n_batches = int(math.ceil(len(data.train) / float(self.batchsize)))
if self.data is None:
self.data = data.train
self.epoch_shuffle_indices = np.random.permutation(len(self.data))
self.batch_i = 0
def get_n_batches(self):
return self.n_batches
def get_next_batch(self, model, data, sess):
"""Return the training data for the next batch
:return: questions, good answers, bad answers
:rtype: list, list, list
"""
indices = self.epoch_shuffle_indices[self.batch_i * self.batchsize: (self.batch_i + 1) * self.batchsize]
batch_data = [self.data[i] for i in indices]
self.batch_i += 1
# transpose of zip(batch_data)
return zip(*batch_data)
component = SentenceClassificationBatchedTraining
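# Framework-free sketch of the shuffle-and-slice batching implemented above
# (toy data; the real class receives `data.train` from the experiment framework):
if __name__ == '__main__':
    toy_data = list(range(10))
    batchsize = 4
    shuffle_indices = np.random.permutation(len(toy_data))
    n_batches = int(math.ceil(len(toy_data) / float(batchsize)))
    for batch_i in range(n_batches):
        indices = shuffle_indices[batch_i * batchsize:(batch_i + 1) * batchsize]
        print([toy_data[i] for i in indices])  # e.g. [7, 2, 9, 0], [5, 1, 3, 8], [6, 4]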
| 32.777778 | 112 | 0.672316 | 222 | 1,770 | 5.112613 | 0.328829 | 0.035242 | 0.044053 | 0.060793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002981 | 0.241808 | 1,770 | 53 | 113 | 33.396226 | 0.842772 | 0.081921 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.088235 | 0.029412 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e4c5aa2e4e0404fffa88a1c14672d6de337f9d8 | 5,096 | py | Python | spec/explainers/random_attention.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | 2 | 2020-11-26T07:46:48.000Z | 2021-07-28T08:06:58.000Z | spec/explainers/random_attention.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | spec/explainers/random_attention.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | import torch
from spec import constants
from spec.explainers.explainer import Explainer
from spec.explainers.utils import filter_word_ids_with_non_zero_probability
class RandomAttentionExplainer(Explainer):
def __init__(self, fields_tuples, options):
super().__init__(fields_tuples)
self.words_vocab_size = len(self.fields_dict['words'].vocab)
self.explainer_attn_top_k = options.explainer_attn_top_k
self.message_type = options.message_type
# options.word_embeddings_size is updated in the classifier constructor
# when a path to pretrained embeddings is passed
self.emb_size = options.word_embeddings_size
self.random_type = options.explainer_random_type
self.valid_top_word_ids = None
def build_loss(self, loss_weights=None):
self._loss = None
def forward(self, batch, classifier):
# generate random attn_weights
if self.random_type == 'beta':
mask = torch.ne(batch.words, constants.PAD_ID)
beta = torch.distributions.beta.Beta(5.0, 5.0)
attn_weights = beta.sample(batch.words.shape)
attn_weights = attn_weights.squeeze(-1).to(batch.words.device)
attn_weights[mask == 0] = 0
elif self.random_type == 'uniform':
mask = torch.ne(batch.words, constants.PAD_ID)
attn_weights = torch.rand(batch.words.shape).to(batch.words.device)
attn_weights = attn_weights / attn_weights.sum(-1).unsqueeze(-1)
attn_weights[mask == 0] = 0
elif self.random_type == 'zero_max_out':
_ = classifier(batch)
            attn_weights = classifier.attn_weights.squeeze()  # note: squeeze() also drops the batch dim when batch size is 1
arange = torch.arange(attn_weights.shape[0]).to(attn_weights.device)
# maybe we can try zero out k max?
_, max_idxs = torch.topk(attn_weights, k=1, dim=-1)
attn_weights[arange, max_idxs.squeeze()] = 0
elif self.random_type == 'first_states':
mask = torch.ne(batch.words, constants.PAD_ID)
_ = classifier(batch)
bs, ts = batch.words.shape
attn_weights = torch.arange(ts, 0, -1).repeat(bs, 1).float()
attn_weights = attn_weights.to(batch.words.device)
attn_weights = attn_weights / ts
attn_weights[mask == 0] = 0
elif self.random_type == 'last_states':
mask = torch.ne(batch.words, constants.PAD_ID)
_ = classifier(batch)
bs, ts = batch.words.shape
attn_weights = torch.arange(1, ts + 1).repeat(bs, 1).float()
attn_weights = attn_weights.to(batch.words.device)
attn_weights = attn_weights / ts
attn_weights[mask == 0] = 0
elif self.random_type == 'mid_states':
mask = torch.ne(batch.words, constants.PAD_ID)
lengths = mask.int().sum(-1).tolist()
bs, ts = batch.words.shape
attn_weights = torch.zeros(bs, ts).to(batch.words.device)
for i, ell in enumerate(lengths):
attn_weight_left = torch.arange(1, ell // 2 + 1)
attn_weight_right = torch.arange(ell // 2, 0, -1)
w = [attn_weight_left]
if ell % 2 != 0:
attn_weight_mid = torch.tensor([(ell + 1) // 2])
w.append(attn_weight_mid)
w.append(attn_weight_right)
concat_tensors = torch.cat(w).to(attn_weights.device)
attn_weights[i, :ell] = concat_tensors
attn_weights = attn_weights.float()
else: # shuffle
_ = classifier(batch)
attn_weights = classifier.attn_weights.squeeze()
mask = torch.ne(batch.words, constants.PAD_ID)
lengths = mask.int().sum(-1).tolist()
for i in range(attn_weights.shape[0]):
valid_random_idx = torch.arange(attn_weights.shape[1])
idx = torch.randperm(lengths[i])
valid_random_idx[:lengths[i]] = idx
attn_weights[i] = attn_weights[i, valid_random_idx]
# find the topk attn weights using 1 < k < seq_len
k = min(self.explainer_attn_top_k, attn_weights.shape[-1])
top_probas, top_idxs = torch.topk(attn_weights, k, dim=-1)
# recover the word ids from the top indexes
top_word_ids = batch.words.gather(1, top_idxs)
        # top ids may map to pad ids, or to entries zeroed out by sparsity;
        # for now these entries are filtered out in pure Python
valid_top_word_ids = filter_word_ids_with_non_zero_probability(
top_word_ids, top_probas, pad_id=constants.PAD_ID
)
# save for getting the words later
self.valid_top_word_ids = valid_top_word_ids
# create the message
message = self.make_message(
valid_top_word_ids, top_probas, classifier.word_emb
)
# create a time dimension of size 1
message = message.unsqueeze(1)
return message
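# Standalone check of the 'mid_states' weighting in forward() above
# (illustrative sequence length):
#   ell = 5 -> cat([1, 2], [3], [2, 1]) = [1, 2, 3, 2, 1]
# i.e. the attention weight peaks at the middle token.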
| 43.186441 | 80 | 0.61617 | 664 | 5,096 | 4.486446 | 0.22741 | 0.155086 | 0.040282 | 0.05908 | 0.410876 | 0.338033 | 0.315206 | 0.291709 | 0.206781 | 0.181269 | 0 | 0.012386 | 0.287088 | 5,096 | 117 | 81 | 43.555556 | 0.807597 | 0.101452 | 0 | 0.287356 | 0 | 0 | 0.013363 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.045977 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e4e5a5c9c0e1e8ff2e56cee306ac39096e94fd8 | 6,529 | py | Python | shotmanager/vectorPathHandler.py | meee1/OpenSolo | 6f299639adbad1e8d573c8ae1135832711b600e4 | [
"Apache-2.0"
] | 68 | 2019-09-23T03:27:05.000Z | 2022-03-12T03:00:41.000Z | shotmanager/vectorPathHandler.py | meee1/OpenSolo | 6f299639adbad1e8d573c8ae1135832711b600e4 | [
"Apache-2.0"
] | 22 | 2019-10-26T20:15:56.000Z | 2022-02-12T05:41:56.000Z | shotmanager/vectorPathHandler.py | meee1/OpenSolo | 6f299639adbad1e8d573c8ae1135832711b600e4 | [
"Apache-2.0"
] | 33 | 2019-09-29T19:52:19.000Z | 2022-03-12T03:00:43.000Z | #
# Code common across shots to handle movement on paths
#
from pymavlink import mavutil
import location_helpers
import shotLogger
from pathHandler import PathHandler
from shotManagerConstants import *
import math
from vector3 import Vector3
logger = shotLogger.logger
#Path accel/decel constants
WPNAV_ACCEL = 200
WPNAV_ACCEL_Z = 160
# for 3D max speed
HIGH_PATH_SPEED = 5.0
LOW_PATH_SPEED = 1.5
MAX_PATH_SPEED = HIGH_PATH_SPEED + LOW_PATH_SPEED
# used to correct for drag or other factors
ERROR_P = .01
# special case of PathHandler
class VectorPathHandler(PathHandler):
def __init__(self, vehicle, shotManager, heading, pitch):
PathHandler.__init__(self, vehicle, shotManager)
# the initial reference position
self.initialLocation = vehicle.location.global_relative_frame
self.heading = heading
# creates a unit vector from telemetry data
self.unitVector = self.getUnitVectorFromHeadingAndTilt(heading, pitch)
# limit speed based on vertical component
# We can't go full speed vertically
# this section should be 2.0 to 8.0 m/s
# to generate a nice speed limiting curve we scale it.
# pitch is used to generate the vertical portion of the 3d Vector
pitch = min(pitch, 0) # level
pitch = max(pitch, -90) # down
accelXY = shotManager.getParam( "WPNAV_ACCEL", WPNAV_ACCEL ) / 100.0
accelZ = shotManager.getParam( "WPNAV_ACCEL_Z", WPNAV_ACCEL_Z ) / 100.0
cos_pitch = math.cos(math.radians(pitch))
self.maxSpeed = LOW_PATH_SPEED + (cos_pitch**3 * HIGH_PATH_SPEED)
self.maxSpeed = min(self.maxSpeed, MAX_PATH_SPEED)
self.accel = accelZ + (cos_pitch**3 * (accelXY - accelZ))
self.accel *= UPDATE_TIME
# the current distance from the intitial location
self.distance = 0.0
#for synthetic acceleration
self.currentSpeed = 0.0
self.desiredSpeed = 0.0
self.distError = 0.0
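        # Illustrative values of the speed-limit curve above:
        #   pitch =   0 deg: maxSpeed = 1.5 + 1.000 * 5.0 = 6.5 m/s (the cap)
        #   pitch = -45 deg: maxSpeed = 1.5 + 0.354 * 5.0 ~ 3.27 m/s
        #   pitch = -90 deg: maxSpeed = 1.5 m/s (vertical travel only)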
# given RC input, calculate a speed to move along vector
def move(self, channels):
# allows travel along the vector
# use the max of them
if abs(channels[ROLL]) > abs(channels[PITCH]):
userInput = channels[ROLL]
else:
userInput = -channels[PITCH]
# user controls speed
if self.cruiseSpeed == 0.0:
self.desiredSpeed = userInput * self.maxSpeed
# cruise control
else:
speed = abs(self.cruiseSpeed)
# if sign of stick and cruiseSpeed don't match then...
if math.copysign(1, userInput) != math.copysign(1, self.cruiseSpeed): # slow down
speed *= (1.0 - abs(userInput))
else: # speed up
speed += (self.maxSpeed - speed) * abs(userInput)
# carryover user input sign
if self.cruiseSpeed < 0:
speed = -speed
# limit speed
if speed > self.maxSpeed:
speed = self.maxSpeed
elif -speed > self.maxSpeed:
speed = -self.maxSpeed
self.desiredSpeed = speed
# Synthetic acceleration
if self.desiredSpeed > self.currentSpeed:
self.currentSpeed += self.accel
self.currentSpeed = min(self.currentSpeed, self.desiredSpeed)
elif self.desiredSpeed < self.currentSpeed:
self.currentSpeed -= self.accel
self.currentSpeed = max(self.currentSpeed, self.desiredSpeed)
else:
self.currentSpeed = self.desiredSpeed
# the distance to fly along the vectorPath
self.distance += self.currentSpeed * UPDATE_TIME
self.distance += self.distError * ERROR_P
# generate Guided mode commands to move the copter
self.travel()
# report speed output
return abs(self.currentSpeed)
def travel(self):
''' generate a new location from our distance offset and initial position '''
# the location of the vehicle in meters from the origin
offsetVector = self.unitVector * self.distance
# Scale unit vector by speed
velVector = self.unitVector * self.currentSpeed
        # NEU -> NED velocity conversion (currently disabled)
        #velVector.z = -velVector.z
# generate a new Location from our offset vector and initial location
loc = location_helpers.addVectorToLocation(self.initialLocation, offsetVector)
# calc dot product so we can assign a sign to the distance
vectorToTarget = location_helpers.getVectorFromPoints( self.initialLocation, self.vehicle.location.global_relative_frame)
dp = self.unitVector.x * vectorToTarget.x
dp += self.unitVector.y * vectorToTarget.y
dp += self.unitVector.z * vectorToTarget.z
self.actualDistance = location_helpers.getDistanceFromPoints3d(self.initialLocation, self.vehicle.location.global_relative_frame)
if (dp < 0):
self.actualDistance = -self.actualDistance
# We can now compare the actual vs vector distance
self.distError = self.actualDistance - self.distance
# formulate mavlink message for pos-vel controller
posVelMsg = self.vehicle.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
0, 1, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, # frame
0b0000110111000000, # type_mask - enable pos/vel
int(loc.lat * 10000000), # latitude (degrees*1.0e7)
int(loc.lon * 10000000), # longitude (degrees*1.0e7)
loc.alt, # altitude (meters)
velVector.x, velVector.y, velVector.z, # North, East, Down velocity (m/s)
0, 0, 0, # x, y, z acceleration (not used)
0, 0) # yaw, yaw_rate (not used)
# send pos-vel command to vehicle
self.vehicle.send_mavlink(posVelMsg)
def getUnitVectorFromHeadingAndTilt(self, heading, tilt):
''' generate a vector from the camera gimbal '''
angle = math.radians(90 - heading)
tilt = math.radians(tilt)
        # horizontal component of the unit vector (shrinks as the camera tilts down)
        x = math.cos(tilt)
# Rotate the vector
nx = x * math.cos(angle)
ny = x * math.sin(angle)
# Up
z = math.sin(tilt)
return Vector3(ny, nx, z)
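# Quick sanity check of the heading/tilt math above (illustrative values):
#   heading = 90 (East), tilt = 0 (level) -> angle = 0, x = 1,
#   giving Vector3(ny=0, nx=1, z=0) in this class's axis convention.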
| 35.483696 | 137 | 0.627202 | 763 | 6,529 | 5.281782 | 0.315858 | 0.051613 | 0.034739 | 0.021588 | 0.103226 | 0.094789 | 0.064516 | 0.064516 | 0.036228 | 0.036228 | 0 | 0.021763 | 0.296217 | 6,529 | 183 | 138 | 35.677596 | 0.855277 | 0.266656 | 0 | 0.041237 | 0 | 0 | 0.005084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041237 | false | 0 | 0.072165 | 0 | 0.14433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e51536d35690c069b3289446524366923e72f15 | 8,183 | py | Python | IntensivregisterUpdate.py | fe-lix-werner/covid19de_monitor | 3867676756a36f06c10b348e53afba125d244525 | [
"MIT"
] | 2 | 2021-05-06T21:00:09.000Z | 2021-06-26T11:51:19.000Z | IntensivregisterUpdate.py | fe-lix-werner/covid19de_monitor | 3867676756a36f06c10b348e53afba125d244525 | [
"MIT"
] | 3 | 2020-10-29T06:20:06.000Z | 2021-01-16T16:17:08.000Z | IntensivregisterUpdate.py | fe-lix-werner/covid19de_monitor | 3867676756a36f06c10b348e53afba125d244525 | [
"MIT"
] | 3 | 2020-10-28T16:45:23.000Z | 2020-12-09T13:35:09.000Z | #!/usr/bin/env python3
import requests
import json
import argparse
import datetime
import io
import threading
BL_API = 'https://www.intensivregister.de/api/public/reporting/laendertabelle'
LK_API = 'https://diviexchange.blob.core.windows.net/%24web/DIVI_Intensivregister_Auszug_pro_Landkreis.csv'
BL_DICT = {'BW': 'BADEN_WUERTTEMBERG','BY' : 'BAYERN','BE': 'BERLIN','BB': 'BRANDENBURG','HB': 'BREMEN','HH': 'HAMBURG','HE': 'HESSEN','MV': 'MECKLENBURG_VORPOMMERN','NI': 'NIEDERSACHSEN','NW': 'NORDRHEIN_WESTFALEN','RP': 'RHEINLAND_PFALZ','SL': 'SAARLAND','SN': 'SACHSEN','ST': 'SACHSEN_ANHALT','SH': 'SCHLESWIG_HOLSTEIN','TH': 'THUERINGEN'}
GS_DICT = {}
with open('ags-dict.json', encoding='utf-8') as json_file:
    GS_DICT = json.load(json_file)
class IntensivregisterUpdate:
def __init__(self):
self.prefix = ''
        # pass `target=` (not a call): the original invoked the methods eagerly
        # and handed their return value (None) to Thread, so the threads did nothing
        th_bl = threading.Thread(target=self.update_bl_data)
        th_lk = threading.Thread(target=self.update_lk_data)
        th_bl.start()
        th_lk.start()
        th_bl.join()
        th_lk.join()
def update_lk_data(self):
result = requests.get(LK_API)
self.lk_data = self.parse_csv_to_json(result.text)["data"]
def update_bl_data(self):
self.bl_data = self.get_data_as_json()
def get_data_as_json(self):
response = requests.get(BL_API)
return response.json()["data"]
def get_occupancy_by_bl_in_percent(self,bl):
bl_full = BL_DICT[bl]
for item in self.bl_data:
if item['bundesland'] == bl_full:
return item['bettenBelegtToBettenGesamtPercent']
    def get_occupancy_by_bl_in_percent_with_7d_emergency_beds_in_percent(self, bl):
        return round(self.get_all_occupied_beds_by_bl(bl)/(self.get_all_beds_by_bl(bl)+self.get_all_emergency_beds_7d_by_bl(bl)) * 100, 1)
def get_all_beds_by_bl(self,bl):
bl_full = BL_DICT[bl]
for item in self.bl_data:
if item['bundesland'] == bl_full:
return item['intensivBettenGesamt']
def get_all_occupied_beds_by_bl(self,bl):
bl_full = BL_DICT[bl]
for item in self.bl_data:
if item['bundesland'] == bl_full:
return item['intensivBettenBelegt']
def get_all_emergency_beds_7d_by_bl(self,bl):
bl_full = BL_DICT[bl]
for item in self.bl_data:
if item['bundesland'] == bl_full:
return item['intensivBettenNotfall7d']
def get_all_beds(self):
b_sum = 0
for item in self.bl_data:
b_sum += item['intensivBettenGesamt']
return b_sum
def get_all_occupied_beds(self):
bo_sum = 0
for item in self.bl_data:
bo_sum += item['intensivBettenBelegt']
return bo_sum
def get_all_emergency_beds_7d(self):
be_sum = 0
for item in self.bl_data:
be_sum += item['intensivBettenNotfall7d']
return be_sum
def get_overall_occupancy_in_percent(self):
return round(self.get_all_occupied_beds()/self.get_all_beds() * 100, 1)
def get_overall_occupancy_in_percent_with_emergency_beds(self):
return round(self.get_all_occupied_beds()/(self.get_all_beds() + self.get_all_emergency_beds_7d())* 100, 1)
def get_date(self):
for item in self.bl_data:
t = item['creationTimestamp']
return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
    def parse_csv_to_json(self, csv_as_string):
        # naive CSV handling: assumes no quoted fields containing commas
        csvfile = io.StringIO(csv_as_string)
arr=[]
headers = []
# Read in the headers/first row
for header in csvfile.readline().split(','):
headers.append(header)
# Extract the information into the "xx" : "yy" format.
for line in csvfile.readlines():
lineStr = '\n'
for i,item in enumerate(line.split(',')):
lineStr+='"'+headers[i].replace('\r\n','') +'" : "' + item.replace('\r\n','') + '",\n'
arr.append(lineStr)
csvfile.close()
#convert the array into a JSON string:
jsn = '{ "data":['
jsnEnd = ']}'
for i in range(len(arr)-1):
if i == len(arr)-2:
jsn+="{"+str(arr[i])[:-2]+"}"
else:
jsn+="{"+str(arr[i])[:-2]+"},"
jsn+=jsnEnd
return json.loads(jsn)
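    # The string assembled above parses into, e.g. (field names are
    # illustrative of the DIVI CSV):
    #   {"data": [{"gemeindeschluessel": "09162", "betten_frei": "10", ...}, ...]}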
def get_lk_data(self,lk_name):
gs = ""
        try:
            gs = GS_DICT[lk_name]
        except KeyError:
            return None
for entry in self.lk_data:
if int(entry["gemeindeschluessel"]) == gs:
return entry
def lk_data_formatted(self,lk_data):
if (lk_data == None):
return "Your Landkreis or Stadt isn't in the list. See -la to list all Landkreise and Städte."
fb = int(lk_data["betten_frei"])
ob = int(lk_data["betten_belegt"])
ab = fb + ob
rate = round(ob/ab*100,2)
return ("{percent}% ({ob}/{ab})").format(percent=rate, ob=ob ,ab=ab)
def lk_data_for_areas(self,areas):
result = ""
for area in areas:
BEZ = area["BEZ"]
GEN = area["GEN"]
if BEZ != "Landkreis":
BEZ = "Stadt"
result += "{gen} {bez}: {rate}\n".format(gen=GEN,bez=BEZ,rate=self.lk_data_formatted(self.get_lk_data(GEN + " " + BEZ)))
return result[:-1]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-lb", "--listbundeslander", help="Lists all available states and their abbreviations", action="store_true")
parser.add_argument("-lk", "--landkreis", help="Print Landkreis occupancy rate", type=str)
parser.add_argument("-s", "--stadt", help="Print Stadt occupancy rate", type=str)
parser.add_argument("-b", "--bundesland", help="Show the percentage of occupied beds in a specific state. Example: -b BY")
parser.add_argument("-d", "--deutschland", help="Show the Percentage of all occupied beds in Germany",action="store_true")
parser.add_argument("-dn", "--deutschlandwithemergency", help="Show the Percentage of all occupied beds in Germany including the 7 day emergency beds",action="store_true")
parser.add_argument("-bn", "--bundeslandwithemergency", help="Show the percentage of occupied beds in a specific state including the 7 day emergency beds. Example: -bn BY")
parser.add_argument("-p", "--prefix", help="Print given prefix as String before the actual number. Example: -p 'BY beds' -bn BY")
parser.add_argument("-la","--listareas", help="Prints all names of the Landreise and Städte",action="store_true")
parser.add_argument("-a","--areas", help="Receives JSON file with defined areas of interest.")
args = parser.parse_args()
iu = IntensivregisterUpdate()
if args.prefix:
iu.prefix = args.prefix
if args.listbundeslander:
print(json.dumps(BL_DICT,indent=4))
elif args.bundesland:
print(iu.prefix + str(iu.get_occupancy_by_bl_in_percent(args.bundesland)))
elif args.deutschland:
print(iu.prefix + str(iu.get_overall_occupancy_in_percent()))
elif args.deutschlandwithemergency:
print(iu.prefix + str(iu.get_overall_occupancy_in_percent_with_emergency_beds()))
elif args.bundeslandwithemergency:
        print(iu.prefix + str(iu.get_occupancy_by_bl_in_percent_with_7d_emergency_beds_in_percent(args.bundeslandwithemergency)))
elif args.landkreis:
result = iu.lk_data_formatted(iu.get_lk_data(args.landkreis + " Landkreis"))
        if result is not None:
print(iu.prefix + str(result))
elif args.stadt:
result = iu.lk_data_formatted(iu.get_lk_data(args.stadt + " Stadt"))
        if result is not None:
print(iu.prefix + str(result))
elif args.areas:
with open(args.areas) as json_file:
example_area = json.load(json_file)
result = iu.lk_data_for_areas(example_area)
print(iu.prefix + str(result))
elif args.listareas:
l = list(GS_DICT.keys())
l.sort()
for e in l:
print(e)
else:
print("Please use help to see your options (--help)")
| 39.723301 | 342 | 0.630331 | 1,104 | 8,183 | 4.442029 | 0.236413 | 0.022023 | 0.034666 | 0.021207 | 0.367251 | 0.347268 | 0.285073 | 0.235318 | 0.208605 | 0.208605 | 0 | 0.006249 | 0.237321 | 8,183 | 205 | 343 | 39.917073 | 0.779523 | 0.017231 | 0 | 0.14881 | 0 | 0.017857 | 0.216721 | 0.01891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113095 | false | 0 | 0.035714 | 0.017857 | 0.261905 | 0.059524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e527faed8095c3ea58150eacca0a778dfb4545f | 2,693 | py | Python | pyscrubber.py | sanketsaurav/nlm-scrubber-docker | dbd2529b8b2e453f0800e1b2a4126df026e45190 | [
"MIT"
] | 8 | 2019-04-11T17:37:38.000Z | 2021-07-26T17:03:30.000Z | pyscrubber.py | sanketsaurav/nlm-scrubber-docker | dbd2529b8b2e453f0800e1b2a4126df026e45190 | [
"MIT"
] | null | null | null | pyscrubber.py | sanketsaurav/nlm-scrubber-docker | dbd2529b8b2e453f0800e1b2a4126df026e45190 | [
"MIT"
] | 2 | 2020-01-29T00:37:25.000Z | 2020-09-26T06:27:04.000Z | #!/usr/bin/env python
import uuid
import os
import subprocess
import shutil
DOC_DELIMITER = '\n##### DOCUMENT #############################################################'
class Scrubber():
'''This class is a wrapper around the `nlm_scrubber` library.'''
def __init__(self, working_directory='/tmp/nlm_scrubber'):
self.working_directory = working_directory
def _setup(self, base_path):
if not os.path.exists(base_path):
os.makedirs(base_path)
input_path = '%s/input' % (base_path)
if not os.path.exists(input_path):
os.makedirs(input_path)
output_path = '%s/output' % (base_path)
if not os.path.exists(output_path):
os.makedirs(output_path)
def scrub(self, inputs, docker=True):
my_uuid = str(uuid.uuid4())
base_path = '%s/%s' % (self.working_directory, my_uuid)
self._setup(base_path)
if not docker:
self.config_file = '%s/config' % (base_path)
with open(self.config_file, 'w') as file:
file.write('ROOT1 = %s\n' % (base_path))
file.write('ClinicalReports_dir = ROOT1/input\n')
file.write('ClinicalReports_files = .*\\.txt\n')
file.write('nPHI_outdir = ROOT1/output\n')
for index, input in enumerate(inputs):
# Write string to disk
with open('%s/input/data_%s.txt' % (base_path, index), 'w') as file:
file.write(input)
        # Run scrubber with config
        if docker:
            input_path = '%s/input' % base_path
            output_path = '%s/output' % base_path
            # no -t flag: there is no TTY when launched via subprocess with captured output
            run = 'docker run --rm -v %s:/tmp/once_off/input -v %s:/tmp/once_off/output --env SCRUBBER_REGEX radaisystems/nlm-scrubber' % (input_path, output_path)
            # merge into os.environ: replacing the environment wholesale would drop PATH and break the docker call
            result = subprocess.run(run, capture_output=True, shell=True, env={**os.environ, 'SCRUBBER_REGEX': r'.*\.txt'})
else:
result = subprocess.run(['/opt/nlm_scrubber', self.config_file], capture_output=True)
outputs = []
for index, input in enumerate(inputs):
# Retrieve results
with open('%s/output/data_%s.nphi.txt' % (base_path, index)) as file:
output = file.read()
if DOC_DELIMITER in output:
output = output[:output.find(DOC_DELIMITER)]
outputs.append(output)
# Cleanup
shutil.rmtree(base_path)
return outputs
def scrub(inputs):
scrubber = Scrubber()
return scrubber.scrub(inputs)
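# Convenience wrapper above: lets callers do `from pyscrubber import scrub`
# without managing a Scrubber instance (or its working directory) themselves.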
if __name__ == "__main__":
print(scrub(['testing', 'My name is Robert Hafner.', 'This string is also a test. 1/19/1998']))
| 33.246914 | 167 | 0.57928 | 332 | 2,693 | 4.506024 | 0.304217 | 0.074866 | 0.026738 | 0.034759 | 0.183824 | 0.14639 | 0.083556 | 0 | 0 | 0 | 0 | 0.005615 | 0.272558 | 2,693 | 80 | 168 | 33.6625 | 0.75804 | 0.0557 | 0 | 0.038462 | 0 | 0.019231 | 0.210817 | 0.070272 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.211538 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e528039973bc6fa5f650d3dd268fb60bdd771de | 2,569 | py | Python | src/galaxy_crawler/models/helper.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | 2 | 2019-12-24T10:45:37.000Z | 2022-03-04T00:47:14.000Z | src/galaxy_crawler/models/helper.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | 2 | 2019-10-31T17:42:36.000Z | 2020-03-24T18:20:41.000Z | src/galaxy_crawler/models/helper.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | null | null | null | from typing import TYPE_CHECKING
import pandas as pd
from galaxy_crawler.models import utils
from galaxy_crawler.models import v1 as models
if TYPE_CHECKING:
from datetime import datetime
from typing import List, Optional
from sqlalchemy.engine import Engine
def get_roles_df(engine: 'Engine', except_role_types: 'Optional[List[int]]' = None):
"""
Obtain all roles with repository data as pandas.DataFrame
:param engine: Database engine for connection
:param except_role_types: Filtering role type based on given integers.
:return: pandas.DataFrame
"""
session = utils.get_scoped_session(engine)
get_all_role_query = str(session.query(models.Role, models.Repository) \
.join(models.Repository, models.Role.repository_id == models.Repository.repository_id))
role_df = pd.read_sql_query(get_all_role_query, engine, index_col=['roles_role_id'])
# Remove column name prefix `roles_`
role_df.rename(columns=lambda x: x[6:] if x.startswith("roles_") else x, inplace=True)
    if except_role_types is not None:
        # ~series.isin(values) keeps rows whose role_type_id is NOT in `values`
        role_df = role_df[~role_df["role_type_id"].isin(except_role_types)]
return role_df
def filter_roles_df_by_modified_date(roles: 'pd.DataFrame',
from_date: 'datetime',
to_date: 'datetime') -> 'pd.DataFrame':
"""
    Filter roles by their modified date.

    Returns only the roles whose `modified` timestamp lies between
    `from_date` and `to_date` (inclusive).
:param roles: Roles DataFrame
:param from_date: Lower threshold of modified datetime
:param to_date: Upper threshold of modified datetime
:return: pandas.DataFrame
"""
if to_date <= from_date:
to_date, from_date = from_date, to_date
masks = (roles["modified"] <= to_date) & (roles["modified"] >= from_date)
return roles.loc[masks]
def filter_roles_df_by_dl_percentile(roles: 'pd.DataFrame',
percentile: 'float' = 0.9) -> 'pd.DataFrame':
"""
    Filter roles by their download count.

    Returns only the roles whose `download_count` is at or above the given
    percentile of the distribution.
:param roles: Roles DataFrame
:param percentile: 0 <= N <= 1
:return: pandas.DataFrame
"""
assert 0 <= percentile <= 1, "Percentile should be 0 <= N <= 1."
threshold = roles['download_count'].quantile(percentile)
masks = roles["download_count"] >= threshold
return roles.loc[masks]
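# Usage sketch (engine URL and thresholds are illustrative):
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql://user:pass@localhost/galaxy')
#   roles = get_roles_df(engine)
#   popular = filter_roles_df_by_dl_percentile(roles, percentile=0.9)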
| 40.777778 | 116 | 0.679253 | 342 | 2,569 | 4.912281 | 0.339181 | 0.033333 | 0.035714 | 0.027381 | 0.191667 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0.00503 | 0.226158 | 2,569 | 62 | 117 | 41.435484 | 0.84004 | 0.309848 | 0 | 0.066667 | 0 | 0 | 0.120381 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.1 | false | 0 | 0.233333 | 0 | 0.433333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e52978a04dbb6b314c47e801d5498f9f38d6a4e | 2,065 | py | Python | frameworks/tc_scikit/features/dependency_distribution_spacy.py | Asteur/textclassification | 80222e99e1a195031cf8e98bc294a09e498c29a3 | [
"MIT"
] | 5 | 2018-08-12T19:30:30.000Z | 2022-03-04T15:27:31.000Z | frameworks/tc_scikit/features/dependency_distribution_spacy.py | Asteur/textclassification | 80222e99e1a195031cf8e98bc294a09e498c29a3 | [
"MIT"
] | null | null | null | frameworks/tc_scikit/features/dependency_distribution_spacy.py | Asteur/textclassification | 80222e99e1a195031cf8e98bc294a09e498c29a3 | [
"MIT"
] | 2 | 2018-07-13T02:06:48.000Z | 2020-12-10T13:35:17.000Z | import logging
from collections import OrderedDict
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
from frameworks.tc_scikit.models.tiger import TIGER_TAGSET_SPACY
def build_feature_selection(use_TIGER=True, k=5):
pipeline = Pipeline([('transformer',
DependencyDistributionSpacy(use_TIGER=use_TIGER)),
('feature_selection', SelectKBest(chi2, k=k)),
('normalizer', Normalizer())
])
return ('dependency_distribution_spacy', pipeline)
def build(use_TIGER=True):
pipeline = Pipeline([('transformer',
DependencyDistributionSpacy(use_TIGER=use_TIGER)),
('normalizer', Normalizer())
])
return ('dependency_distribution_spacy', pipeline)
dependency_black_list = ['ROOT', 'punct']
def get_dependency_histogram(pos_list, tag_set):
histogram = OrderedDict.fromkeys(tag_set, 0)
for entry in pos_list:
if entry and entry not in dependency_black_list and '||' not in entry:
histogram[entry] += 1
    # OrderedDict preserves the tag-set order, so the vector layout is stable
    histogram = np.array(list(histogram.values()), dtype=np.float64)
    return histogram
class DependencyDistributionSpacy(BaseEstimator):
def __init__(self, use_TIGER=True):
self.logger = logging.getLogger()
self.use_TIGER = use_TIGER
def fit(self, X, y):
return self
def transform(self, X):
return list(map(lambda x: self.transform_document(x), X))
def transform_document(self, document):
if self.use_TIGER:
dependency_list = list(map(lambda x: x.releation, document.dependencies))
distribution = get_dependency_histogram(dependency_list, TIGER_TAGSET_SPACY)
return distribution
else:
            raise NotImplementedError("only the TIGER tag set is supported")
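# Usage sketch (the `options` object and document format come from the
# surrounding framework; shown only to illustrate the pipeline wiring):
#   name, pipeline = build(use_TIGER=True)
#   features = pipeline.fit_transform(documents, labels)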
| 32.777778 | 88 | 0.659564 | 223 | 2,065 | 5.914798 | 0.35426 | 0.060652 | 0.027293 | 0.036391 | 0.198635 | 0.198635 | 0.198635 | 0.106141 | 0 | 0 | 0 | 0.004534 | 0.2523 | 2,065 | 62 | 89 | 33.306452 | 0.849741 | 0 | 0 | 0.212766 | 0 | 0 | 0.061985 | 0.028087 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148936 | false | 0 | 0.170213 | 0.042553 | 0.468085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e52f62008c27b479273629366f87f8875845031 | 4,728 | py | Python | structsolve/arc_length_riks.py | saullocastro/structsolve | 3c325068ca13e8632f506fb18c2ea5de495c581e | [
"BSD-2-Clause"
] | 1 | 2022-03-24T16:15:04.000Z | 2022-03-24T16:15:04.000Z | structsolve/arc_length_riks.py | saullocastro/structsolve | 3c325068ca13e8632f506fb18c2ea5de495c581e | [
"BSD-2-Clause"
] | null | null | null | structsolve/arc_length_riks.py | saullocastro/structsolve | 3c325068ca13e8632f506fb18c2ea5de495c581e | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from numpy import dot
from scipy.sparse import csr_matrix, vstack as spvstack, hstack as sphstack
from .static import solve
from .logger import msg, warn
def _solver_arc_length_riks(an, silent=False):
r"""Arc-Length solver using the Riks method
"""
msg('___________________________________________', level=1, silent=silent)
msg(' ', level=1, silent=silent)
msg('Arc-Length solver using Riks implementation', level=1, silent=silent)
msg('___________________________________________', level=1, silent=silent)
msg('Initializing...', level=1, silent=silent)
lbd = 0.
arc_length = an.initialInc
length = arc_length
dlbd = arc_length
max_arc_length = an.maxArcLength
modified_NR = an.modified_NR
kC = an.calc_kC(silent=True)
fext = an.calc_fext(inc=1., silent=True)
kT = kC
c = solve(kC, arc_length*fext, silent=True)
fint = kC*c
dc = c
c_last = 0 * c
step_num = 1
if modified_NR:
compute_NL_matrices = False
else:
compute_NL_matrices = True
    while step_num < 1000:  # hard cap on the number of load steps
msg('Step %d, lbd %1.5f, arc-length %1.5f' % (step_num, lbd, arc_length), level=1, silent=silent)
min_Rmax = 1.e6
prev_Rmax = 1.e6
converged = False
iteration = 0
varlbd = 0
varc = 0
phi = 1 # spheric arc-length
while True:
iteration += 1
if iteration > an.maxNumIter:
warn('Maximum number of iterations achieved!', level=2, silent=silent)
break
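            # Augmented Riks system assembled below: with R = fint - (lbd+dlbd)*q,
            #   [ K_T      -q                  ] [ varc   ]   [ -R ]
            #   [ 2*dc^T    2*phi^2*dlbd*(q.q) ] [ varlbd ] = [  A ]
            # where A enforces the spherical constraint
            #   dc.dc + phi^2*dlbd^2*(q.q) = arc_length^2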
q = fext
TMP = sphstack((kT, -q[:, None]), format='lil')
dcext = np.concatenate((dc, [0.]))
TMP = spvstack((TMP, 2*dcext[None, :]), format='lil')
TMP[-1, -1] = 2*phi**2*dlbd*np.dot(q, q)
TMP = TMP.tocsr()
right_vec = np.zeros(q.shape[0]+1, dtype=q.dtype)
R = fint - (lbd + dlbd)*q
A = - (np.dot(dc, dc) + phi**2*dlbd**2*np.dot(q, q) - arc_length**2)
right_vec[:-1] = -R
right_vec[-1] = A
solution = solve(TMP, right_vec, silent=True)
varc = solution[:-1]
varlbd = solution[-1]
dlbd = dlbd + varlbd
dc = dc + varc
msg('iter %d, lbd+dlbd %1.5f' % (iteration, lbd+dlbd), level=2, silent=silent)
# computing the Non-Linear matrices
if compute_NL_matrices:
kC = an.calc_kC(c=(c + dc), NLgeom=True, silent=True)
kG = an.calc_kG(c=(c + dc), NLgeom=True, silent=True)
kT = kC + kG
if modified_NR:
compute_NL_matrices = False
else:
if not modified_NR:
compute_NL_matrices = True
# calculating the residual
fint = an.calc_fint(c + dc, silent=True)
Rmax = np.abs((lbd + dlbd)*fext - fint).max()
if iteration >=2 and Rmax <= an.absTOL:
converged = True
break
if (Rmax > min_Rmax and Rmax > prev_Rmax and iteration > 3):
warn('Diverged - Rmax value significantly increased', level=2, silent=silent)
break
else:
min_Rmax = min(min_Rmax, Rmax)
change_rate_Rmax = abs(1 - Rmax/prev_Rmax)
if (iteration > 2 and change_rate_Rmax < an.too_slow_TOL):
warn('Diverged - convergence too slow', level=2, silent=silent)
break
prev_Rmax = Rmax
if converged:
step_num += 1
msg('Converged at lbd+dlbd of %1.5f, total length %1.5f' % (lbd + dlbd, length), level=2, silent=silent)
length += arc_length
lbd = lbd + dlbd
            arc_length *= 1.1111  # grow the arc length ~11% after a converged step
            dlbd = arc_length
c_last = c.copy()
c = c + dc
an.increments.append(lbd)
an.cs.append(c.copy())
else:
            msg('Resetting step with reduced arc-length', level=2, silent=silent)
            arc_length *= 0.90  # shrink the arc length by 10% and retry the step
if length >= max_arc_length:
msg('Maximum specified arc-length of %1.5f achieved' % max_arc_length, level=2, silent=silent)
break
dc = c - c_last
dlbd = arc_length
kC = an.calc_kC(c=c, NLgeom=True, silent=True)
kG = an.calc_kG(c=c, NLgeom=True, silent=True)
kT = kC + kG
fint = an.calc_fint(c=c, silent=True)
compute_NL_matrices = False
msg('Finished Non-Linear Static Analysis', silent=silent)
msg(' total arc-length %1.5f' % length, level=1, silent=silent)
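# Attributes the analysis object `an` is expected to provide (names taken
# from the code above): initialInc, maxArcLength, modified_NR, maxNumIter,
# absTOL, too_slow_TOL, calc_kC/calc_kG/calc_fext/calc_fint, increments, cs.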
| 34.014388 | 116 | 0.544628 | 611 | 4,728 | 3.95581 | 0.219313 | 0.08192 | 0.034754 | 0.052131 | 0.233347 | 0.149359 | 0.116674 | 0.092677 | 0.061233 | 0.061233 | 0 | 0.023548 | 0.344332 | 4,728 | 138 | 117 | 34.26087 | 0.756129 | 0.026227 | 0 | 0.207207 | 0 | 0 | 0.121845 | 0.018712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009009 | false | 0 | 0.045045 | 0 | 0.054054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e53c6858211b2819fa897c860639a3bd48a5cee | 3,245 | py | Python | interactive_map_tester/interactive_map_tester/visualizeInteractiveMap.py | antonikaras/thesis_ros2 | 36673cd8a4161b1cf4045e8bdda36275a2a337ce | [
"BSD-2-Clause"
] | 1 | 2021-06-27T02:01:22.000Z | 2021-06-27T02:01:22.000Z | interactive_map_tester/interactive_map_tester/visualizeInteractiveMap.py | antonikaras/thesis_ros2 | 36673cd8a4161b1cf4045e8bdda36275a2a337ce | [
"BSD-2-Clause"
] | 1 | 2021-09-30T01:56:04.000Z | 2021-09-30T10:26:13.000Z | interactive_map_tester/interactive_map_tester/visualizeInteractiveMap.py | antonikaras/thesis_ros2 | 36673cd8a4161b1cf4045e8bdda36275a2a337ce | [
"BSD-2-Clause"
] | 1 | 2021-09-30T01:52:28.000Z | 2021-09-30T01:52:28.000Z | # Import ROS2 libraries
import rclpy
from rclpy.node import Node
from cv_bridge import CvBridge, CvBridgeError
from rclpy.qos import QoSProfile
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
# Import message files
from sensor_msgs.msg import Image
from autonomous_exploration_msgs.msg import MapData
from nav_msgs.msg import OccupancyGrid
# Import other libraries
import numpy as np
import cv2 as cv
class VisualizeInteractiveMap(Node):
"""
Convert the map published from Unity to an image topic
"""
def __init__(self):
super().__init__("visualize_interactive_map")
# Initialize the variables
self.bridge = CvBridge()
qos = QoSProfile(depth=10)
# Create subscribers
## /rosbridge_msgs_unity/interactive_map
self.create_subscription(MapData, "rosbridge_msgs_unity/interactive_map", self._mapCallback, qos)
# Create publishers
## /interactive_map/image
self.interactiveMap_Imagepub = self.create_publisher(Image, "/interactive_map/image", qos)
## /interactive_map/map
self.interactiveMap_Mappub = self.create_publisher(OccupancyGrid, "/interactive_map/map", qos)
self.get_logger().info("Interactive map to image converter initiated")
def _mapCallback(self, data:MapData):
        # Store the map info; width/height are intentionally swapped so the
        # map displays correctly in Unity (see the rearranging below)
        width = data.height
        height = data.width
# Rearrange the data to be visible correctly on unity
map = np.array(data.map).reshape(width, height)
map = np.flip(map, 0)
map = map.flatten()
map_img = np.zeros((width * height, 3))
# Generate the colors randomly
colors = 255 * np.random.rand(max(map), 1, 3)
for i in range(max(map)):
map_img[map == (i + 1)] = colors[i, :, :]
# Reshape the map image to width * height * 3
map_img = np.reshape(map_img, (width, height, 3))
#map_img = np.flip(map_img, 1)
map_img = map_img.astype(np.uint8)
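        # Equivalent vectorized form of the coloring loop above (sketch):
        #   palette = np.vstack([[0, 0, 0], colors[:, 0, :]])  # region id 0 -> black
        #   map_img = palette[map].reshape(width, height, 3).astype(np.uint8)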
# Create the interactive map
intMap = OccupancyGrid()
intMap.header.frame_id = 'map'
        intMap.data = [int(el) for el in map]  # caution: OccupancyGrid data is int8, so region ids above 127 would overflow
intMap.info.resolution = data.resolution
intMap.info.width = width
intMap.info.height = height
intMap.info.origin.position.x = float(data.origin[0])
intMap.info.origin.position.y = float(data.origin[1])
# Publish the image
self.interactiveMap_Imagepub.publish(self.bridge.cv2_to_imgmsg(map_img, "rgb8"))
# Publish the map
self.interactiveMap_Mappub.publish(intMap)
###################################################################################################
def main(args=None):
rclpy.init(args=args)
VIM = VisualizeInteractiveMap()
    executor = MultiThreadedExecutor()  # created but currently unused: rclpy.spin() uses the default executor
try:
rclpy.spin(VIM)
except KeyboardInterrupt:
pass
    # Destroy the node explicitly (optional - otherwise it is done automatically
    # when the garbage collector destroys the node object)
    VIM.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main() | 32.45 | 105 | 0.646533 | 385 | 3,245 | 5.293506 | 0.374026 | 0.061825 | 0.019136 | 0.028459 | 0.054956 | 0.054956 | 0 | 0 | 0 | 0 | 0 | 0.008045 | 0.233898 | 3,245 | 100 | 106 | 32.45 | 0.811746 | 0.21171 | 0 | 0 | 0 | 0 | 0.067053 | 0.034354 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0.018868 | 0.207547 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e5512d26ff378d4e8524182eb1fa3deb226d67e | 1,027 | py | Python | b64.py | Orphan-Crippler/b64 | a93ab8bc96ae0b443911cb28a57ec94f4eab29bb | [
"BSD-3-Clause"
] | null | null | null | b64.py | Orphan-Crippler/b64 | a93ab8bc96ae0b443911cb28a57ec94f4eab29bb | [
"BSD-3-Clause"
] | null | null | null | b64.py | Orphan-Crippler/b64 | a93ab8bc96ae0b443911cb28a57ec94f4eab29bb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Libraries
from base64 import b64encode, b64decode
# Messages
warn = '!!!!!!!!!!!!!!!!!!!!!!!! WARNING: '
valid = '\nPlease Enter a Valid Entry!\n'
# Menu/Encode/Decode Logic
def app(st):
if st == '1':
code = input('Enter Stuff to Encode: ').encode()
print('\n')
        try:
            ans = b64encode(code)
            print('\n', ans.decode(), '\n')  # decode() instead of slicing the bytes repr
            return
        except Exception as x:
            print(warn, x, '\n')
elif st == '2':
code = input('Enter Stuff to Decode: ').encode()
print('\n')
        try:
            ans = b64decode(code)
            print('\n', ans.decode(errors='replace'), '\n')  # decoded bytes may not be valid UTF-8
            return
        except Exception as x:
            print(warn, x, '\n')
elif st == 'q':
exit()
else:
print(valid)
return
#Main Menu Loop
while True:
try:
app(input('\nEnter 1 to Encode\n\nEnter 2 to Decode\n\nEnter q to Quit\n'))
except KeyboardInterrupt:
break
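# Sample session (illustrative):
#   Enter 1 to Encode -> Enter Stuff to Encode: hello -> aGVsbG8=
#   Enter 2 to Decode -> Enter Stuff to Decode: aGVsbG8= -> hello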
| 23.883721 | 78 | 0.487829 | 125 | 1,027 | 4.008 | 0.432 | 0.047904 | 0.055888 | 0.075848 | 0.39521 | 0.239521 | 0.239521 | 0.239521 | 0.239521 | 0.239521 | 0 | 0.028401 | 0.348588 | 1,027 | 42 | 79 | 24.452381 | 0.720478 | 0.076923 | 0 | 0.4375 | 0 | 0.03125 | 0.211987 | 0.026637 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.15625 | 0.21875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e56257d1dcde4b6d5d35bb6f662c1c19842ef6e | 325 | py | Python | code/examples/04-adc/adc_pot_servo.py | yuanyanhui/intro-upy-esp32 | 5f066ca8b1804dc6032e8f0a5957acd6e36baffb | [
"MIT"
] | null | null | null | code/examples/04-adc/adc_pot_servo.py | yuanyanhui/intro-upy-esp32 | 5f066ca8b1804dc6032e8f0a5957acd6e36baffb | [
"MIT"
] | null | null | null | code/examples/04-adc/adc_pot_servo.py | yuanyanhui/intro-upy-esp32 | 5f066ca8b1804dc6032e8f0a5957acd6e36baffb | [
"MIT"
] | 1 | 2022-03-09T08:40:41.000Z | 2022-03-09T08:40:41.000Z | """
Control servo using potentiometer
"""
from machine import Pin, ADC, PWM
pot = ADC(Pin(32), atten = ADC.ATTN_11DB)  # potentiometer on ADC pin 32
servo = PWM(Pin(33), freq = 50)  # servo on PWM pin 33, 50 Hz
while True:
adc_value = pot.read()
    # map the ADC reading (0-4095) to a duty of 25-125, i.e. roughly a
    # 0.5-2.4 ms pulse at 50 Hz with the ESP32's 10-bit duty range
    pulse_width_value = (125 - 25)/4095 * adc_value + 25
servo.duty(int(pulse_width_value))
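    # With the pot at mid-travel (adc_value ~ 2048) the duty comes out ~75,
    # i.e. close to the servo's centre pulse of ~1.5 ms.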
| 23.214286 | 56 | 0.621538 | 48 | 325 | 4.0625 | 0.645833 | 0.082051 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077551 | 0.246154 | 325 | 14 | 57 | 23.214286 | 0.718367 | 0.144615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e56e1fd3a0ac1e94ebfe17d566b249e1081cc8e | 3,720 | py | Python | project7/project7-lp-single.py | karulont/combopt | 98ad05f574d1ce355fc653df60bedde8a3bb838b | [
"MIT"
] | 1 | 2016-12-23T08:38:57.000Z | 2016-12-23T08:38:57.000Z | project7/project7-lp-single.py | karulont/combopt | 98ad05f574d1ce355fc653df60bedde8a3bb838b | [
"MIT"
] | null | null | null | project7/project7-lp-single.py | karulont/combopt | 98ad05f574d1ce355fc653df60bedde8a3bb838b | [
"MIT"
] | null | null | null | from gurobipy import *
from sys import argv
import json
import math
import drawful
def read_lst(fn):
with open(fn, 'r') as f:
(n, tp) = json.load(f)
return (n, tp)
def write_lst(fn, lst):
with open(fn, 'w') as f:
json.dump(lst, f)
def distance(v1, v2):
return math.sqrt((v2[0] - v1[0]) ** 2 + (v2[1] - v1[1]) ** 2 + (v2[2] - v1[2]) ** 2)
def distance_squared(v1, v2):
return (v2[0] - v1[0]) ** 2 + (v2[1] - v1[1]) ** 2 + (v2[2] - v1[2]) ** 2
def get_permutation(edges, last_perm, last_frame, frame, n):
perm = [0] * n
for v1, v2 in edges:
v1i = last_frame.index(list(v1))
v2i = frame.index(list(v2))
# j = last_perm.index(v1i)
perm[v2i] = last_perm[v1i]
return perm
def main():
def optimize_single(f):
m = Model('project7')
print("Adding variables...")
edge_vars = {}
point_edges = {}
t1, f1 = frames[f]
t2, f2 = frames[f + 1]
for i in range(n):
v1 = tuple(f1[i])
point_edges[v1] = []
for j in range(n):
v2 = tuple(f2[j])
cost = distance_squared(v1, v2)
# if (v1, v2) in edge_vars[f]:
# print("Duplicate vertex!")
# return
edge_vars[v1, v2] = m.addVar(obj=cost, vtype=GRB.BINARY)
point_edges[v1].append(edge_vars[v1, v2])
m.update()
print("Adding constraints...")
'''
# There must be n edges from one frame to the next
for frame in edge_vars:
m.addConstr(quicksum(frame.values()) == n)
'''
        # There must be exactly one incoming edge per point in frame f+1
for v2 in frames[f + 1][1]:
v2 = tuple(v2)
v2_edges = []
for v1 in frames[f][1]:
v1 = tuple(v1)
v2_edges.append(edge_vars[v1, v2])
m.addConstr(quicksum(v2_edges) == 1)
        # There must be exactly one outgoing edge per point in frame f
for edges in point_edges:
m.addConstr(quicksum(point_edges[edges]) == 1)
m.optimize()
edges = m.getAttr('x', edge_vars).items()
selected = []
for edge, value in edges:
if value:
selected.append(edge)
# Calculate cost
cost = 0
for v1, v2 in selected:
cost += distance(v1, v2)
print("cost", f, ":", cost)
return get_permutation(selected, last_perm, frames[f][1], frames[f + 1][1], n)
# fn = 'data-n2-t3.json'
# fn = 'example-points.lst'
# fn = 'points-00125-0.lst'
# fn = 'points-10400-0.lst'
# fn = 'points-00125-0.lst'
# fn = 'new/points-00020-0.lst'
# fn = 'points-02500-0.lst'
fn = 'points_v-209-0.3.lst'
if len(argv) == 2:
fn = argv[1]
n, frames = read_lst(fn)
orig_frames = [[tuple(u) for u in ss[1]] for ss in frames]
nf = len(frames) - 1
print("n:", n)
print("frames: t0-t" + str(nf))
solution = [n]
    last_perm = list(range(n))
for f in range(nf):
last_perm = optimize_single(f)
solution.append(last_perm)
# print(solution)
write_lst(fn + '.sol', solution)
return (orig_frames, solution[1], solution[2])
if __name__ == '__main__':
import time
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    (orig_frames, solution1, solution2) = main()
    end = time.perf_counter()
print("time: {0:.3f} s".format(end - start))
drawful.drawWithIndices(orig_frames, solution1, solution2)
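# The per-step model solved above is a linear assignment problem:
#   minimize    sum_{i,j} d(v_i, w_j)^2 * x_ij
#   subject to  sum_i x_ij = 1 for all j,   sum_j x_ij = 1 for all i,
#               x_ij binary
# with d the Euclidean distance between point i of frame f and point j of
# frame f+1.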
| 27.555556 | 89 | 0.508065 | 511 | 3,720 | 3.606654 | 0.25636 | 0.023874 | 0.021704 | 0.026044 | 0.109061 | 0.070537 | 0.049919 | 0.026044 | 0.026044 | 0.026044 | 0 | 0.057875 | 0.349731 | 3,720 | 134 | 90 | 27.761194 | 0.70401 | 0.117742 | 0 | 0 | 0 | 0 | 0.039381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0 | 0.072289 | 0.024096 | 0.228916 | 0.072289 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e59427f48e64e36035d7a96d2e7bfa4cb4b20f1 | 4,515 | py | Python | src/main.py | darrenyaoyao/VR_motion_predict | 2039197d017a16460caefff57bfb117c0bd814bc | [
"MIT"
] | null | null | null | src/main.py | darrenyaoyao/VR_motion_predict | 2039197d017a16460caefff57bfb117c0bd814bc | [
"MIT"
] | null | null | null | src/main.py | darrenyaoyao/VR_motion_predict | 2039197d017a16460caefff57bfb117c0bd814bc | [
"MIT"
] | null | null | null | import select
import socket
import struct
import traceback
import logging
import time
import numpy as np
import queue
import random,threading,time
from translate import pose_predict
import csv
def health_check(s):
readable,writeable,err = select.select([s],[s],[s],0)
if len(readable)<1 or len(writeable)<1 or len(err)>0:
raise socket.error("discon")
def getbytes(s,num):
recv_num=0
recv_data=b""
while recv_num<num:
data = s.recv(num-recv_num)
recv_num += len(data)
recv_data += data
return recv_data
def receivepacket(s):
try:
bytes_received = getbytes(s,76)
_id = struct.unpack('<I', bytes_received[:4])[0]
pose = np.frombuffer(bytes_received[4:], dtype=np.float32) #converting into float array
return _id,pose
except Exception as e:
print("receiving packet error!")
def sending(s,_id,result):
try:
bytes_to_send=struct.pack('<I', _id)
for i in range(25):
for j in range(18):
bytes_to_send+=struct.pack('<f', result[i][j])
s.sendall(bytes_to_send) #sending back
except Exception as e:
logging.error(traceback.format_exc())
print("sending result error!")
def interpolation(data_queue, time_queue):
interpolated_data_queue = []
for i in range(len(data_queue)):
if i != 0 and (time_queue[i] - time_queue[i-1]) > 40:
interpolated_data_queue.append((data_queue[i]+data_queue[i-1])/2)
interpolated_data_queue.append(data_queue[i])
return interpolated_data_queue
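# Example: samples at t = [0, 30, 80] ms -> the 30->80 gap (50 ms > 40) gets a
# midpoint inserted, so the returned list holds 4 poses instead of 3.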
class MLService(threading.Thread):
def __init__(self, s, queue_map, queue_time_map, model):
threading.Thread.__init__(self,name="mlservice")
self.s=s
self.queue_map = queue_map
self.queue_time_map = queue_time_map
self.model = model
self.doRun = True
    def run(self):
        print("ML running!!\n")
        while self.doRun:
            if len(self.queue_map) == 2:
                for _id, data_queue in self.queue_map.items():
                    print("ml _id: ", _id, ", length: ", len(data_queue))
                    # pass this id's sample and timestamp queues (the original
                    # passed the whole maps, which interpolation cannot index)
                    interpolated_data_queue = interpolation(data_queue, self.queue_time_map[_id])
                    if len(interpolated_data_queue) == 100:
                        poses = np.array(interpolated_data_queue)
                        result = self.model.sample(poses)
                        sending(self.s, _id, result)
if __name__=="__main__":
# create model
model = pose_predict()
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.bind(("127.0.0.1", 60000))
print('socket created ')
s.listen()
queue_map = {}
queue_time_map = {}
print('socket listensing ... ')
while True: # for connect multiple times
try:
conn, addr = s.accept()
            print(addr[0] + ' connected!!')
mlservice = MLService(conn,queue_map,queue_time_map,model)
mlservice.start()
#handle one client!!
while True:
try:
# health_check(conn)
_id, input_pose = receivepacket(conn)
print("Input ")
print(_id)
if _id not in queue_map.keys():
queue_map[_id]=[]
data_ = queue_map[_id]
if(len(data_)==100):
data_[0:99] = data_[1:100]
data_[99] = input_pose
else:
data_.append(input_pose)
if _id not in queue_time_map.keys():
queue_time_map[_id]=[]
time_data_ = queue_time_map[_id]
if(len(time_data_)==100):
time_data_[0:99] = time_data_[1:100]
time_data_[99] = int(round(time.time() * 1000))
else:
time_data_.append(int(round(time.time() * 1000)))
            except Exception as e:
                logging.error(traceback.format_exc())
                queue_map.clear()
                queue_time_map.clear()  # keep both maps in sync on reset
                break
#end of handle client
mlservice.doRun=False
mlservice.join()
except socket.timeout:
pass | 31.137931 | 95 | 0.534219 | 530 | 4,515 | 4.307547 | 0.269811 | 0.055191 | 0.047306 | 0.032852 | 0.162067 | 0.096364 | 0.074463 | 0.04205 | 0.04205 | 0 | 0 | 0.024619 | 0.36124 | 4,515 | 145 | 96 | 31.137931 | 0.76699 | 0.033666 | 0 | 0.132743 | 0 | 0 | 0.038108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061947 | false | 0.00885 | 0.106195 | 0 | 0.20354 | 0.079646 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e61401fb8829929663164716c5983c2dde332cf | 2,421 | py | Python | lib/models/model_factory.py | iShohei220/kaggle-pku-autonomous-driving | 647f1c48044f0c2cebcc5cb71854cb39ace0078c | [
"MIT"
] | 21 | 2020-01-22T05:18:52.000Z | 2021-09-28T15:55:10.000Z | lib/models/model_factory.py | iShohei220/kaggle-pku-autonomous-driving | 647f1c48044f0c2cebcc5cb71854cb39ace0078c | [
"MIT"
] | null | null | null | lib/models/model_factory.py | iShohei220/kaggle-pku-autonomous-driving | 647f1c48044f0c2cebcc5cb71854cb39ace0078c | [
"MIT"
] | 10 | 2020-01-30T14:25:50.000Z | 2020-08-25T02:03:50.000Z | import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import pretrainedmodels
from . import resnet_fpn
from . import dla
def get_model(name, heads, head_conv=128, num_filters=[256, 256, 256],
dcn=False, gn=False, ws=False, freeze_bn=False, **kwargs):
if 'res' in name and 'fpn' in name:
backbone = '_'.join(name.split('_')[:-1])
model = resnet_fpn.ResNetFPN(backbone, heads, head_conv, num_filters,
dcn=dcn, gn=gn, ws=ws, freeze_bn=freeze_bn)
elif 'dla' in name:
pretrained = '_'.join(name.split('_')[1:])
model = dla.get_dla34(heads, pretrained, head_conv, num_filters,
gn=gn, ws=ws, freeze_bn=freeze_bn)
else:
raise NotImplementedError
return model
def get_pose_model(model_name='resnet18', num_outputs=None, pretrained=True,
freeze_bn=False, dropout_p=0, **kwargs):
if 'densenet' in model_name:
model = models.__dict__[model_name](num_classes=1000,
pretrained=pretrained)
in_features = model.classifier.in_features
model.classifier = nn.Linear(in_features, num_outputs)
else:
pretrained = 'imagenet' if pretrained else None
model = pretrainedmodels.__dict__[model_name](num_classes=1000,
pretrained=pretrained)
if 'dpn' in model_name:
in_channels = model.last_linear.in_channels
model.last_linear = nn.Conv2d(in_channels, num_outputs,
kernel_size=1, bias=True)
else:
if 'resnet' in model_name:
model.avgpool = nn.AdaptiveAvgPool2d(1)
else:
model.avg_pool = nn.AdaptiveAvgPool2d(1)
in_features = model.last_linear.in_features
if dropout_p == 0:
model.last_linear = nn.Linear(in_features, num_outputs)
else:
model.last_linear = nn.Sequential(
nn.Dropout(p=dropout_p),
nn.Linear(in_features, num_outputs),
)
if freeze_bn:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.requires_grad = False
m.bias.requires_grad = False
return model
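# Usage sketch (hypothetical arguments; pretrained weights must be reachable
# when pretrained=True):
#   backbone = get_model('resnet18_fpn', heads={'hm': 1, 'reg': 2})
#   pose_net = get_pose_model('resnet18', num_outputs=7, pretrained=False)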
| 37.246154 | 80 | 0.576208 | 283 | 2,421 | 4.696113 | 0.293286 | 0.047404 | 0.056433 | 0.040632 | 0.242287 | 0.176072 | 0.155004 | 0.106847 | 0 | 0 | 0 | 0.021672 | 0.33292 | 2,421 | 64 | 81 | 37.828125 | 0.801238 | 0 | 0 | 0.169811 | 0 | 0 | 0.019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.113208 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e63134eca33ec86bdf84b7c00c96006e7888c50 | 16,775 | py | Python | src/moderations/slack.py | definitelysecure/shipwrecked | 3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a | [
"MIT"
] | null | null | null | src/moderations/slack.py | definitelysecure/shipwrecked | 3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a | [
"MIT"
] | null | null | null | src/moderations/slack.py | definitelysecure/shipwrecked | 3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
import json
import re
import requests
from django.http import HttpResponse
from accounts.models import AuthToken
from moderations.models import Moderation, ModerationAction
from moderations.utils import timedelta_to_str
class SlackSdk(object):
@staticmethod
def get_channel_data(channel):
auth_token_object = AuthToken.objects.filter(
service_name='slack', service_entity_auth_name=channel
).first()
if auth_token_object:
channel_id = auth_token_object.service_entity_auth_id
token = auth_token_object.service_auth_token
return token, channel_id
else:
return None, None
@staticmethod
def post_moderation(text):
attachments = [
{
'fallback': "Moderator actions",
'callback_id': 'mod-inbox',
'attachment_type': 'default',
'actions': [
{
'name': 'approve',
'text': "Approve",
'type': 'button',
'value': 'approve',
'style': 'primary'
},
{
'name': 'reject',
'text': "Reject",
'type': 'button',
'value': 'reject'
}
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
if channel_id:
response = SlackSdk.create_message(token,
channel_id, text, attachments)
return response.json()
else:
data = {
'success': False,
'message': "{} is not a valid channel or "
"was not previously authorized".format(channel_id)
}
return data
@staticmethod
def post_leaderboard(leaderboard):
"""
leaderboard = [
{'@jared': 12,345},
]
"""
def render_board(leaderboard, title):
text = '┌----------------------┬----------------------┐\n'
text += '│ {0: <20} | {1: <20} │\n'.format('Mod', title)
sorted_leaderboard = sorted(leaderboard.items(),
key=lambda x: x[1],
reverse=True)
for k, v in sorted_leaderboard:
if k:
text += '├----------------------┼----------------------┤\n'
text += '│ {0: <20} │ {1: <20} │\n'.format(k, v)
text += '└----------------------┴----------------------┘\n'
return text
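        # e.g. render_board({'@jared': 12345, '@ann': 7}, 'All Time') returns a
        # small ASCII table with 'Mod' / 'All Time' headers and one row per
        # moderator, sorted by score descending.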
def avg(a, b):
if b > 0.0:
return a/float(b) * 100.0
else:
return 0
text = (
"LEADERBOARD as of {date}\n"
"```\n"
"{all_time}\n"
"{seven_days}\n"
"```\n"
)
text = text.format(
date=datetime.utcnow(),
all_time=render_board(leaderboard['all_time'], 'All Time'),
seven_days=render_board(leaderboard['seven_days'], 'Last 7 Days')
)
text += 'MOD TEAM SPEED REPORT AS OF {} UTC\n'.format(datetime.utcnow())
text += '```\n'
text += 'Average time to first mod review (all-time): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['all_time']['review'][0]),
leaderboard['avg']['all_time']['review'][1])
text += 'Average time to first mod review (last 7 days): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['seven_days']['review'][0]),
leaderboard['avg']['seven_days']['review'][1])
text += 'Average time to first mod resolution (all-time): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['all_time']['resolution'][0]),
leaderboard['avg']['all_time']['resolution'][1])
text += 'Average time to first mod resolution (last 7 days): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['seven_days']['resolution'][0]),
leaderboard['avg']['seven_days']['resolution'][1])
text += '```\n'
text += 'CONTENT QUALITY REPORT AS OF {} UTC\n'.format(datetime.utcnow())
counts = leaderboard['counts']
text += '```\n'
text += 'Past 7 days content: %i\n' \
% counts['total']
text += 'Past 7 days flagged by mods: %i (%.2f%%)\n' \
% (counts['total_flagged'],
avg(counts['total_flagged'], counts['total']))
text += 'Reason: Off topic: %i (%.2f%% of flags)\n' \
% (counts['off_topic'],
avg(counts['off_topic'], counts['total_flagged']))
text += 'Reason: Inappropriate: %i (%.2f%% of flags)\n' \
% (counts['inappropriate'],
avg(counts['inappropriate'], counts['total_flagged']))
text += 'Reason: Contact info: %i (%.2f%% of flags)\n' \
% (counts['contact_info'],
avg(counts['contact_info'], counts['total_flagged']))
text += 'Reason: Other: %i (%.2f%% of flags)\n' \
% (counts['other'],
avg(counts['other'], counts['total_flagged']))
text += '```\n'
token, channel_id = SlackSdk.get_channel_data('#mod-leaderboard')
return SlackSdk.create_message(token, channel_id,
text, [], in_channel=True)
@staticmethod
def create_message(access_token, channel_id,
text='', attachments=[], in_channel=False):
is_image = False
if 'https://res.cloudinary.com/' in text:
is_image = True
if len(text) >= 3500:
            search_text = re.findall(
                r'^(.* posted the) <(https://.*)\|(.*)>.*:\n',
                text
            )
if search_text:
new_content_text = search_text[0][0]
link = search_text[0][1]
new_content_type = search_text[0][2]
text = '%s %s. WARNING: this content cannot be displayed, ' \
'please read the complete content <%s|HERE>' \
% (new_content_text, new_content_type, link)
params = {
'token': access_token,
'channel': channel_id,
'text': text,
'attachments': json.dumps(attachments),
'unfurl_links': False,
'unfurl_media': is_image,
}
if in_channel:
params['response_type'] = 'in_channel'
return requests.get(
url='https://slack.com/api/chat.postMessage',
params=params
)
@staticmethod
def delete_message(access_token, channel_id, ts):
return requests.get(
url='https://slack.com/api/chat.delete',
params={
'token': access_token,
'ts': ts,
'channel': channel_id,
}
)
@staticmethod
def update_message(access_token, channel_id, ts,
text='', attachments=[]):
return requests.get(
url='https://slack.com/api/chat.update',
params={
'token': access_token,
'ts': ts,
'channel': channel_id,
'text': text,
'attachments': json.dumps(attachments),
'parse': 'none',
}
)
def mod_inbox_approved(data, moderation):
original_message = data.get('original_message')
text = original_message.get('text')
approved_by = data.get('user').get('name')
approved_time = float(data.get('action_ts').split('.')[0])
approved_time = datetime.utcfromtimestamp(approved_time)
approved_time = approved_time.strftime('%Y-%m-%d %I:%M%p')
ts = data.get('message_ts')
attachments = [
{
"fallback": "Please moderate this.",
"text": ":white_check_mark: _Approved by @%s %s UTC_" %
(approved_by, approved_time),
"callback_id": "mod-approved",
"attachment_type": "default",
"mrkdwn_in": [
"text"
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-approved')
response = SlackSdk.create_message(token, channel_id, text, attachments)
if response.status_code == 200:
data = response.json()
if data.get('ok'):
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
save_moderation_action(moderation, approved_by, channel_id,
'approve', data.get('ts'))
            SlackSdk.delete_message(token, channel_id, ts)
return HttpResponse('')
def mod_inbox_reject(data, moderation):
original_message = data.get('original_message')
text = original_message.get('text')
ts = data.get('message_ts')
attachments = [
{
"fallback": "Moderator actions",
"text": "_Reject: Select a reason_",
"callback_id": "mod-inbox",
"attachment_type": "default",
"mrkdwn_in": [
"text"
],
"actions": [
{
"name": "Off topic",
"text": "Off topic",
"type": "button",
"value": "off_topic",
"style": "danger"
},
{
"name": "Inappropriate",
"text": "Inappropriate",
"type": "button",
"value": "inappropriate",
"style": "danger"
},
{
"name": "Contact info",
"text": "Contact info",
"type": "button",
"value": "contact_info",
"style": "danger"
},
{
"name": "Other",
"text": "Other",
"type": "button",
"value": "other",
"style": "danger"
},
{
"name": "Undo",
"text": "Undo",
"type": "button",
"value": "undo"
}
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
response = SlackSdk.update_message(token, channel_id, ts,
text=text, attachments=attachments)
data = response.json()
return HttpResponse('')
def mod_inbox_reject_undo(data):
original_message = data.get('original_message')
text = original_message.get('text')
ts = data.get('message_ts')
attachments = [
{
"fallback": "Moderator actions",
"callback_id": "mod-inbox",
"attachment_type": "default",
"actions": [
{
"name": "approve",
"text": "Approve",
"type": "button",
"value": "approve",
"style": "primary"
},
{
"name": "reject",
"text": "Reject",
"type": "button",
"value": "reject"
}
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
SlackSdk.update_message(token, channel_id,
ts, text=text, attachments=attachments)
return HttpResponse('')
def mod_inbox_reject_reason(data, moderation):
original_message = data.get('original_message')
text = original_message.get('text')
rejected_by = data.get('user').get('name')
rejected_time = float(data.get('action_ts').split('.')[0])
rejected_time = datetime.utcfromtimestamp(rejected_time)
rejected_time = rejected_time.strftime('%Y-%m-%d %I:%M%p')
rejected_reason = data.get('actions')[0]['value']
ts = data.get('message_ts')
attachments = [
{
"fallback": "Moderator actions",
"text": "_%s UTC: @%s rejected this with the reason: \"%s\"_" %
(rejected_time, rejected_by, rejected_reason),
"callback_id": "mod-flagged",
"attachment_type": "default",
"mrkdwn_in": [
"text"
],
"actions": [
{
"name": "Resolve",
"text": "Mark resolved",
"type": "button",
"value": "resolve",
"style": "primary"
}
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-flagged')
response = SlackSdk.create_message(token, channel_id,
text=text, attachments=attachments)
if response.status_code == 200:
data = response.json()
if data.get('ok'):
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
save_moderation_action(moderation, rejected_by,
channel_id, rejected_reason, data.get('ts'))
SlackSdk.delete_message(token, channel_id, ts)
return HttpResponse('')
def mod_inbox(data, action, moderation):
if action == 'approve':
return mod_inbox_approved(data, moderation)
elif action == 'reject':
return mod_inbox_reject(data, moderation)
elif action == 'undo':
return mod_inbox_reject_undo(data)
elif (action == 'off_topic') or (action == 'inappropriate') \
or (action == 'contact_info') or (action == 'other'):
return mod_inbox_reject_reason(data, moderation)
def mod_flagged_resolve(data, moderation):
original_message = data.get('original_message')
text = original_message.get('text')
resolved_by = data.get('user').get('name')
resolved_time = float(data.get('action_ts').split('.')[0])
resolved_time = datetime.utcfromtimestamp(resolved_time)
resolved_time = resolved_time.strftime('%Y-%m-%d %I:%M%p')
rejected_reason = original_message.get('attachments')[0]['text']
message_ts = data.get('message_ts')
attachments = [
{
"fallback": "Please moderate this.",
"text": "%s\n_%s UTC: @%s marked this \"Resolved\"_" %
(rejected_reason, resolved_time, resolved_by),
"callback_id": "mod-resolved",
"attachment_type": "default",
"mrkdwn_in": [
"text"
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-resolved')
response = SlackSdk.create_message(token, channel_id, text=text,
attachments=attachments)
if response.status_code == 200:
data = response.json()
if data.get('ok'):
token, channel_id = SlackSdk.get_channel_data('#mod-flagged')
ts = data.get('ts')
save_moderation_action(moderation, resolved_by, channel_id,
'resolve', ts)
SlackSdk.delete_message(token, channel_id, message_ts)
return HttpResponse('')
def mod_flagged(data, action, moderation):
if action == 'resolve':
return mod_flagged_resolve(data, moderation)
assert False, action
def save_moderation_action(moderation, username, channel_id,
action, message_id):
moderation.status = channel_id
moderation.status_reason = action
moderation.message_id = message_id
moderation.save()
ModerationAction.objects.create(moderation=moderation,
action=action,
action_author_id=username)
def moderate(data):
"""
"""
data = data.get('payload')
data = json.loads(data)
if data:
action = data.get('actions')[0].get('value')
message_id = data.get('message_ts')
moderation = Moderation.objects.get_by_message_id(message_id)
callback_id = data.get('callback_id')
if callback_id == 'mod-inbox':
return mod_inbox(data, action, moderation)
elif callback_id == 'mod-flagged':
return mod_flagged(data, action, moderation)
return HttpResponse(json.dumps(data, indent=4))
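# For reference, a minimal interactive-message payload (a sketch inferred from
# the fields read above; real Slack payloads carry more keys):
#   {"payload": json.dumps({
#       "callback_id": "mod-inbox",
#       "message_ts": "1530000000.000100",
#       "actions": [{"name": "approve", "value": "approve"}],
#       "user": {"name": "jared"},
#       "original_message": {"text": "..."}})}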
| 33.086785 | 102 | 0.497466 | 1,611 | 16,775 | 5.002483 | 0.139665 | 0.039087 | 0.041693 | 0.027299 | 0.528353 | 0.452289 | 0.414816 | 0.398312 | 0.347065 | 0.285023 | 0 | 0.006559 | 0.363756 | 16,775 | 506 | 103 | 33.152174 | 0.747213 | 0.003815 | 0 | 0.289216 | 0 | 0.009804 | 0.209304 | 0.008824 | 0 | 0 | 0 | 0 | 0.002451 | 1 | 0.041667 | false | 0 | 0.019608 | 0.004902 | 0.122549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e651a2fe83d07fb048afcf923ef6a66a310e429 | 2,814 | py | Python | APS.py | PabloGramos/APS | a7825628f8ce7ef46da413948c40d03c8118717e | [
"MIT"
] | null | null | null | APS.py | PabloGramos/APS | a7825628f8ce7ef46da413948c40d03c8118717e | [
"MIT"
] | null | null | null | APS.py | PabloGramos/APS | a7825628f8ce7ef46da413948c40d03c8118717e | [
"MIT"
] | null | null | null | def soma():
r=1
while r>0:
n1 = int(input("Valor: "))
n2 = int(input(f"{n1} + "))
soma = n1 + n2
print(f"\n{n1} + {n2} = {soma}\n")
r=int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r=0
def sub():
r = 1
while r > 0:
n1 = int(input("Valor: "))
n2 = int(input(f"{n1} - "))
sub = n1 - n2
print(f"\n{n1} - {n2} = {sub}\n")
r = int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r = 0
def mult():
r = 1
while r > 0:
n1 = int(input("Valor: "))
n2 = int(input(f"{n1} X "))
mult = n1 * n2
print(f"\n{n1} X {n2} = {mult}\n")
r = int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r = 0
def div():
r = 1
while r > 0:
n1 = int(input("Valor: "))
n2 = int(input(f"{n1}/ "))
if n2 == 0:
print("Não existe divisão por 0! ")
break
div = n1 / n2
print("\n{} / {} = {:.2f}\n".format(n1, n2, div))
r = int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r = 0
def raiz():
import math
r = 1
while r > 0:
n = int(input("Digite o valor: "))
if n < 0:
print("Não existe raiz de números negativos!")
break
raiz = math.sqrt(n)
print("\nRaiz de {} = {:.2f}\n".format(n, raiz))
r = int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r = 0
def sct():
import math
r = 1
while r > 0:
n = float(input("Digite o valor: "))
seno = math.sin(math.radians(n))
cosseno = math.cos(math.radians(n))
tang = math.tan(math.radians(n))
print("\nO valor {} possui Seno = {:.2f}, Cosseno = {:.2f} e Tangente = {:.2f}\n".format(n, seno, cosseno, tang))
r = int(input("1-Continuar 0-Sair: "))
if r > 1:
print("Opção Inválida!....Saindo")
r = 0
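# Example: for an input of 30 (degrees), this prints Sine = 0.50,
# Cosine = 0.87 and Tangent = 0.58.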
print("""
-------------CALCULATOR---------------
-----------Pablo--Vinícius------------
""")
corpo=True
while corpo == True:
print("""
MENU
    1 - Addition
    2 - Subtraction
    3 - Multiplication
    4 - Division
    5 - Square Root
    6 - Sine, Cosine, Tangent
    0 - Exit
    """)
    op = int(input("Choose an operation: "))
if op == 1:
soma()
elif op == 2:
sub()
elif op == 3:
mult()
elif op == 4:
div()
elif op == 5:
raiz()
elif op == 6:
sct()
elif op == 0:
break
else:
print("Opção inválida!") | 26.299065 | 121 | 0.429638 | 367 | 2,814 | 3.294278 | 0.201635 | 0.105873 | 0.104218 | 0.039702 | 0.473945 | 0.473945 | 0.463193 | 0.438379 | 0.405294 | 0.405294 | 0 | 0.049743 | 0.378465 | 2,814 | 107 | 122 | 26.299065 | 0.641509 | 0 | 0 | 0.457944 | 0 | 0.009346 | 0.314387 | 0.026998 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056075 | false | 0 | 0.018692 | 0 | 0.074766 | 0.158879 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e675938f3d093c0365d3aa398c262cefa7433e0 | 3,357 | py | Python | home-assistant-backup.py | scaarup/home-assistant-backup | 1310054ebd41550292d45329411500cb08b369a1 | [
"MIT"
] | null | null | null | home-assistant-backup.py | scaarup/home-assistant-backup | 1310054ebd41550292d45329411500cb08b369a1 | [
"MIT"
] | null | null | null | home-assistant-backup.py | scaarup/home-assistant-backup | 1310054ebd41550292d45329411500cb08b369a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Created by Søren Christian Aarup, sc@aarup.org
# https://github.com/scaarup/home-assistant-backup
# api ref.: https://developers.home-assistant.io/docs/api/supervisor/endpoints
import requests, datetime, gzip, sys  # deduplicated; json was imported but never used
from datetime import timedelta, date
token = 'Bearer <token>'
host = '<url>'
retention = 12 # In days, how many backups do you want to keep on Home Assistant (normally in /backup).
backupname = 'hassio_backup_full-'
date_string = datetime.datetime.now().strftime('%Y%m%d')
_d = date.today() - timedelta(retention)
oldestbackup = backupname+_d.strftime('%Y%m%d')+'.tar.gz'
name = backupname+date_string+'.tar.gz'
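# Example: with retention = 12 and today = 2021-06-15, oldestbackup resolves to
# 'hassio_backup_full-20210603.tar.gz' and name to 'hassio_backup_full-20210615.tar.gz'.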
debug = 1
def debuglog(msg):
if debug == 1:
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' DEBUG: '+msg)
def log(msg):
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' INFO: '+msg)
# Ping Supervisor, quit if fail:
response = requests.get(host+'/api/hassio/supervisor/ping', headers={'authorization': token})
json_response = response.json()
if not json_response['result'] == 'ok':
log('Supervisor not responding ok to our ping! '+str(response.status_code)+' '+str(response.content))
sys.exit(1)
##
def listBackups(name):
debuglog('Looping through backups on HA, looking for '+name)
response = requests.get(
host+'/api/hassio/backups',
headers={'authorization': token}
)
json_response = response.json()
backups = json_response['data']['backups']
for backup in backups:
debuglog('\t'+backup['name']+' '+backup['slug'])
if (backup['name'] == name):
debuglog('Found our backup on HA:')
return backup['slug']
def createBackupFull(name):
debuglog('Creating backup '+name)
response = requests.post(
host+'/api/hassio/backups/new/full',
json={'name': name},
headers={'authorization': token,'content-type': 'application/json'}
)
debuglog(str(response.status_code)+' '+str(response.content))
json_response = response.json()
debuglog('Create backup response: '+json_response['result'])
return json_response['data']['slug']
def removeBackup(name,slug):
debuglog('Removing backup '+name+' on server')
response = requests.delete(
host+'/api/hassio/backups/'+slug,
headers={'authorization': token,
'content-type': 'application/json'}
)
debuglog(str(response.status_code)+' '+str(response.content))
json_response = response.json()
def getBackup(name,slug):
log('Downloading backup '+name)
response = requests.get(
host+'/api/hassio/backups/'+slug+'/download',
headers={'authorization': token}
)
    with gzip.open(name, 'wb') as output:  # restores the commented-out try/finally as a context manager
        output.write(response.content)
if response.status_code == 200:
debuglog('Download ok')
else:
debuglog('Download response '+str(response.status_code)+' '+str(response.content))
# Create the backup, get the slug:
slug = createBackupFull(name)
# Download the backup:
getBackup(name,slug)
# Remove our oldest backup, according to retention
slug = listBackups(oldestbackup)
if slug is not None:
debuglog('Calling removeBackup for '+oldestbackup+' with slug '+slug)
removeBackup(name,slug)
else:
debuglog('Did not find a backup to delete.')
| 34.96875 | 105 | 0.670539 | 422 | 3,357 | 5.2891 | 0.329384 | 0.043011 | 0.029122 | 0.019713 | 0.301075 | 0.28853 | 0.274194 | 0.1819 | 0.143369 | 0.143369 | 0 | 0.002874 | 0.170688 | 3,357 | 95 | 106 | 35.336842 | 0.798851 | 0.129282 | 0 | 0.16 | 0 | 0 | 0.243643 | 0.0189 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.026667 | 0 | 0.133333 | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e6cd67c9f0d05ca91537b7f522e588f70c9a9c4 | 1,319 | py | Python | src/features/build_features.py | mikolajsacha/tweetsclassification | 33756cf6877f9cec328f08a3c728b26bf123bc8f | [
"MIT"
] | 4 | 2016-11-22T11:26:06.000Z | 2017-02-22T12:56:45.000Z | src/features/build_features.py | mikolajsacha/tweetsclassification | 33756cf6877f9cec328f08a3c728b26bf123bc8f | [
"MIT"
] | 26 | 2016-11-08T20:04:37.000Z | 2017-02-18T13:51:39.000Z | src/features/build_features.py | mikolajsacha/tweetsclassification | 33756cf6877f9cec328f08a3c728b26bf123bc8f | [
"MIT"
] | null | null | null | """
Contains class FeatureBuilder for building feature set from given data set and word embedding
"""
import numpy as np
class FeatureBuilder(object):
"""
Class used for building feature matrix.
Field "labels" is a list of categories of sentences
Field "features" is a features matrix of shape (training set sixe, vector_length)
"""
def __init__(self):
self.labels = np.empty(0, dtype=np.uint8)
self.features = np.empty(0, dtype=float)
self.labels.flags.writeable = False
self.features.flags.writeable = False
def build(self, sentence_embedding, labels, sentences):
"""
:param sentence_embedding: instance of sentence embedding class implementing ISentenceEmbedding interface
:param labels: a numpy vector of labels of sentences
:param sentences: a numpy matrix of sentences (rows = sentences, columns = words)
"""
self.labels = labels
sentences_vectors_length = sentence_embedding.target_vector_length
self.features = np.empty((sentences.shape[0], sentences_vectors_length), dtype=float)
        for i in range(sentences.shape[0]):  # xrange exists only on Python 2
self.features[i] = sentence_embedding[sentences[i]]
self.labels.flags.writeable = False
self.features.flags.writeable = False
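# Usage sketch (hypothetical names; any object implementing the
# ISentenceEmbedding interface mentioned above, i.e. exposing
# target_vector_length and __getitem__, will do):
#   builder = FeatureBuilder()
#   builder.build(embedding, labels, sentences)
#   X, y = builder.features, builder.labels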
| 36.638889 | 113 | 0.690675 | 162 | 1,319 | 5.530864 | 0.37037 | 0.066964 | 0.084821 | 0.029018 | 0.133929 | 0.133929 | 0.133929 | 0.133929 | 0.133929 | 0.133929 | 0 | 0.004892 | 0.225171 | 1,319 | 35 | 114 | 37.685714 | 0.87182 | 0.38514 | 0 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e6d28bc002be9af0e517b72024d00a394efa949 | 1,624 | py | Python | json/conanfile.py | kapilsh/conan-scripts | 31c55397a2d721c80da5dbd6a6c738accfdbb241 | [
"MIT"
] | null | null | null | json/conanfile.py | kapilsh/conan-scripts | 31c55397a2d721c80da5dbd6a6c738accfdbb241 | [
"MIT"
] | null | null | null | json/conanfile.py | kapilsh/conan-scripts | 31c55397a2d721c80da5dbd6a6c738accfdbb241 | [
"MIT"
] | null | null | null | import os
from conans import ConanFile
from conans.tools import download, check_sha256
class NlohmannJsonConan(ConanFile):
name = "json"
with open(os.path.join(os.path.dirname(os.path.realpath(
__file__)), "VERSION.txt"), 'r') as version_file:
version = version_file.read()
settings = {}
description = "JSON for Modern C++"
generators = "cmake", "virtualenv"
exports = "VERSION.txt"
url = "https://github.com/nlohmann/json"
license = "https://github.com/nlohmann/json/blob/v2.1.0/LICENSE.MIT"
options = {'no_exceptions': [True, False]}
default_options = 'no_exceptions=False'
def config(self):
self.options.remove("os")
self.options.remove("compiler")
self.options.remove("shared")
self.options.remove("build_type")
self.options.remove("arch")
def source(self):
download_url = 'https://github.com/nlohmann/json/releases/' \
'download/v{!s}/json.hpp'.format(self.version)
download(download_url, 'json.hpp')
check_sha256('json.hpp',
'a571dee92515b685784fd527e38405cf3f5e13e96edbfe3f03d6df2e'
'363a767b')
def build(self):
return # Nothing to do. Header Only
def package(self):
self.copy(pattern='json.hpp', dst='include/nlohmann', src=".")
def package_info(self):
if self.options.no_exceptions:
self.cpp_info.defines.append('JSON_NOEXCEPTION=1')
self.cpp_info.includedirs = ['include']
self.env_info.CPATH.append("{}/include".format(self.package_folder))
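# Note: the package is header-only (hence the no-op build() above); consumers
# just `#include <nlohmann/json.hpp>` thanks to the include/nlohmann layout.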
| 33.142857 | 79 | 0.633621 | 187 | 1,624 | 5.390374 | 0.481283 | 0.065476 | 0.084325 | 0.065476 | 0.083333 | 0.05754 | 0 | 0 | 0 | 0 | 0 | 0.039075 | 0.227833 | 1,624 | 48 | 80 | 33.833333 | 0.764753 | 0.01601 | 0 | 0 | 0 | 0.026316 | 0.260652 | 0.049499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0.026316 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e6d68261b931f6d3c99896fa9c575feee129b51 | 5,958 | py | Python | projects/seeker/tasks/dialogue.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | null | null | null | projects/seeker/tasks/dialogue.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | null | null | null | projects/seeker/tasks/dialogue.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | 1 | 2022-01-24T13:22:18.000Z | 2022-01-24T13:22:18.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
SeeKeR Dialogue Tasks.
"""
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.teachers import MultiTaskTeacher
import parlai.tasks.convai2.agents as convai2
import parlai.tasks.blended_skill_talk.agents as bst
import parlai.tasks.empathetic_dialogues.agents as ed
import parlai.tasks.wizard_of_internet.agents as woi
import parlai.tasks.wizard_of_wikipedia.agents as wow
import parlai.tasks.msc.agents as msc
import parlai.tasks.ms_marco.agents as ms_marco
import parlai.utils.logging as logging
import projects.seeker.tasks.mutators # type: ignore # noqa: F401
class WoiDialogueTeacher(woi.DefaultTeacher):
def __init__(self, opt, shared=None):
mutators = '+'.join(
[
'flatten',
'woi_pop_documents_mutator',
'woi_filter_no_passage_used',
'woi_add_checked_sentence_to_input',
'skip_retrieval_mutator',
]
)
if opt.get('mutators'):
mutators = '+'.join([mutators, opt['mutators']])
logging.warning(f'overriding mutators to {mutators}')
opt['mutators'] = mutators
super().__init__(opt, shared)
self.id = "WoiDialogueTeacher"
class WowDialogueTeacher(wow.DefaultTeacher):
def __init__(self, opt, shared=None):
opt['add_missing_turns'] = 'all'
mutators = '+'.join(
[
'flatten',
'wow_filter_no_passage_used',
'wow_add_checked_sentence_to_input',
'skip_retrieval_mutator',
'wow_to_woi',
'woi_pop_documents_mutator',
]
)
if opt.get('mutators'):
mutators = '+'.join([mutators, opt['mutators']])
logging.warning(f'overriding mutators to {mutators}')
opt['mutators'] = mutators
super().__init__(opt, shared)
self.id = "WowDialogueTeacher"
class MsMarcoDialogueTeacher(ms_marco.DefaultTeacher):
def __init__(self, opt, shared=None):
mutators = '+'.join(
[
'ms_marco_filter_has_answer',
'ms_marco_create_fid_docs',
'ms_marco_find_selected_sentence_for_response',
'woi_pop_documents_mutator',
'skip_retrieval_mutator',
]
)
if opt.get('mutators'):
mutators = '+'.join([mutators, opt['mutators']])
logging.warning(f'overriding mutators to {mutators}')
opt['mutators'] = mutators
super().__init__(opt, shared)
self.id = "MsMarcoDialogueTeacher"
def get_dialogue_task_mutators(opt: Opt) -> str:
"""
Set the mutators appropriately for the dialogue tasks.
"""
mutators = '+'.join(
['flatten', 'extract_entity_for_response_model', 'skip_retrieval_mutator']
)
if opt.get('mutators'):
mutators = '+'.join([mutators, opt['mutators']])
logging.warning(f'overriding mutators to {mutators}')
return mutators
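# e.g. with no extra --mutators this resolves to
# 'flatten+extract_entity_for_response_model+skip_retrieval_mutator'.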
class Convai2DialogueTeacher(convai2.NormalizedTeacher):
def __init__(self, opt, shared=None):
opt['mutators'] = get_dialogue_task_mutators(opt)
opt['task'] += ':no_cands'
super().__init__(opt, shared)
self.id = 'Convai2DialogueTeacher'
class EDDialogueTeacher(ed.DefaultTeacher):
def __init__(self, opt, shared=None):
opt['mutators'] = get_dialogue_task_mutators(opt)
super().__init__(opt, shared)
self.id = 'EDDialogueTeacher'
class BSTDialogueTeacher(bst.DefaultTeacher):
def __init__(self, opt, shared=None):
opt['mutators'] = get_dialogue_task_mutators(opt)
super().__init__(opt, shared)
self.id = 'BSTDialogueTeacher'
class MSCDialogueTeacher(msc.DefaultTeacher):
def __init__(self, opt, shared=None):
opt['mutators'] = get_dialogue_task_mutators(opt)
opt['include_session1'] = False
super().__init__(opt, shared)
self.id = 'MSCDialogueTeacher'
class MSCDialogueOverlapTeacher(msc.DefaultTeacher):
def __init__(self, opt, shared=None):
opt['mutators'] = '+'.join(
['flatten', 'msc_find_selected_sentence_response', 'skip_retrieval_mutator']
)
opt['include_session1'] = False
super().__init__(opt, shared)
self.id = 'MSCDialogueOverlapTeacher'
class DialogueTeacher(MultiTaskTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
WoiDialogueTeacher.add_cmdline_args(parser, partial_opt)
WowDialogueTeacher.add_cmdline_args(parser, partial_opt)
MsMarcoDialogueTeacher.add_cmdline_args(parser, partial_opt)
Convai2DialogueTeacher.add_cmdline_args(parser, partial_opt)
EDDialogueTeacher.add_cmdline_args(parser, partial_opt)
BSTDialogueTeacher.add_cmdline_args(parser, partial_opt)
MSCDialogueTeacher.add_cmdline_args(parser, partial_opt)
MSCDialogueOverlapTeacher.add_cmdline_args(parser, partial_opt)
return parser
def __init__(self, opt, shared=None):
tasks = [
f"projects.seeker.tasks.dialogue:{teacher}"
for teacher in [
'WoiDialogueTeacher',
'WowDialogueTeacher',
'MsMarcoDialogueTeacher',
'Convai2DialogueTeacher',
'EDDialogueTeacher',
'BSTDialogueTeacher',
'MSCDialogueTeacher',
'MSCDialogueOverlapTeacher',
]
]
opt['task'] = ','.join(tasks)
super().__init__(opt, shared)
class DefaultTeacher(DialogueTeacher):
pass
| 34.241379 | 88 | 0.643337 | 609 | 5,958 | 5.981938 | 0.228243 | 0.044469 | 0.027175 | 0.034587 | 0.444688 | 0.430963 | 0.343947 | 0.343947 | 0.313478 | 0.286028 | 0 | 0.002912 | 0.250755 | 5,958 | 173 | 89 | 34.439306 | 0.813172 | 0.049513 | 0 | 0.375 | 0 | 0 | 0.2126 | 0.114108 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080882 | false | 0.022059 | 0.095588 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e6d9f56ad67c28ab101dfa720b2f55910ca38c7 | 350 | py | Python | chrispile/util.py | FNNDSC/chrispile | 9eb688b17bd3392c23b5cc2a1e11470d78d6029a | [
"MIT"
] | null | null | null | chrispile/util.py | FNNDSC/chrispile | 9eb688b17bd3392c23b5cc2a1e11470d78d6029a | [
"MIT"
] | null | null | null | chrispile/util.py | FNNDSC/chrispile | 9eb688b17bd3392c23b5cc2a1e11470d78d6029a | [
"MIT"
] | null | null | null | import abc
from argparse import ArgumentParser, Namespace
from .config import get_config
class CommandProvider(abc.ABC):
def __init__(self, parser: ArgumentParser):
self.config = get_config()
parser.set_defaults(func=self)
@abc.abstractmethod
def __call__(self, options: Namespace):
raise NotImplementedError()
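# A concrete command would subclass this, e.g. (a sketch with a hypothetical
# command name):
#   class GreetCommand(CommandProvider):
#       def __call__(self, options: Namespace):
#           print('hello from', self.config)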
| 25 | 47 | 0.728571 | 39 | 350 | 6.25641 | 0.564103 | 0.07377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.191429 | 350 | 13 | 48 | 26.923077 | 0.862191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e6f1afc0f744ac9404e2211aba6de066e7ef17c | 297 | py | Python | Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_1_beginner/07_type_dictionary/rozwiazania/exercise_1.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | 1 | 2021-02-20T08:30:56.000Z | 2021-02-20T08:30:56.000Z |
# Create a dictionary whose keys are different school subjects
# and whose values are the grades obtained in those subjects
grades = {
"Matematyka": [4, 2, 6, 5, 3],
"Fizyka": [5, 5, 2, 4, 3],
"Chemia": [4, 1, 4, 5, 4],
"Biologia": [3, 5, 5, 2, 5],
}
print("Przedmioty i oceny", grades)
| 24.75 | 65 | 0.599327 | 46 | 297 | 3.869565 | 0.630435 | 0.022472 | 0.033708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.087336 | 0.228956 | 297 | 11 | 66 | 27 | 0.689956 | 0.373737 | 0 | 0 | 0 | 0 | 0.263736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e7040e11ae9ee1dfa29a9acc88975d2a9c16bff | 703 | py | Python | paper2/figures/calibration_default.py | dfm/mapping_stellar_surfaces | 52d4ba1a726c65868e4a1290a801fe046fb2155f | [
"MIT"
] | 10 | 2021-01-21T17:03:26.000Z | 2021-12-19T17:49:28.000Z | paper2/figures/calibration_default.py | dfm/mapping_stellar_surfaces | 52d4ba1a726c65868e4a1290a801fe046fb2155f | [
"MIT"
] | 10 | 2021-01-21T15:55:53.000Z | 2021-03-30T14:35:16.000Z | paper2/figures/calibration_default.py | dfm/mapping_stellar_surfaces | 52d4ba1a726c65868e4a1290a801fe046fb2155f | [
"MIT"
] | 2 | 2021-01-21T15:41:58.000Z | 2021-01-25T16:26:15.000Z | from starry_process import calibrate
import numpy as np
import os
import shutil
# Utility funcs to move figures to this directory
abspath = lambda *args: os.path.join(
os.path.dirname(os.path.abspath(__file__)), *args
)
copy = lambda name, src, dest: shutil.copyfile(
abspath("data", name, src), abspath(dest)
)
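# e.g. copy("default", "data.pdf", "calibration_default_data.pdf") copies
# data/default/data.pdf (resolved next to this script) into this directory.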
# Run
calibrate.run(path=abspath("data/default"), ncols=7, clip=True)
# Copy output to this directory
copy("default", "data.pdf", "calibration_default_data.pdf")
copy("default", "corner_transformed.pdf", "calibration_default_corner.pdf")
copy("default", "latitude.pdf", "calibration_default_latitude.pdf")
copy("default", "inclination.pdf", "calibration_default_inclination.pdf")
| 31.954545 | 75 | 0.755334 | 97 | 703 | 5.329897 | 0.443299 | 0.085106 | 0.162476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001592 | 0.106686 | 703 | 21 | 76 | 33.47619 | 0.821656 | 0.11522 | 0 | 0 | 0 | 0 | 0.365696 | 0.237864 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e7169ec55a244a64b91496630b7d2210f0c8139 | 5,922 | py | Python | python/ad/spectral_outlier.py | rislam/ad_examples | 20e6dd2dbfd111ed5f69a9018180f7ef5ab627f6 | [
"MIT"
] | 1 | 2019-02-21T02:28:34.000Z | 2019-02-21T02:28:34.000Z | python/ad/spectral_outlier.py | kinect59/ad_examples | bf0bb75faa3f713a2efef04b6b093e6a313825af | [
"MIT"
] | null | null | null | python/ad/spectral_outlier.py | kinect59/ad_examples | bf0bb75faa3f713a2efef04b6b093e6a313825af | [
"MIT"
] | null | null | null | import numpy.random as rnd
from sklearn import manifold
from sklearn.ensemble import IsolationForest
from common.gen_samples import *
"""
pythonw -m ad.spectral_outlier
"""
def euclidean_dist(x1, x2):
dist = np.sqrt(np.sum((x1 - x2) ** 2))
return dist
class LabelDiffusion(object):
"""
IMPORTANT: The results from Python's Scikit-Learn MDS API are significantly
different (and sub-optimal) from R. Strongly recommend R's isoMDS for the last
step of converting pair-wise distances to 2D coordinates.
"""
def __init__(self, n_neighbors=10, k2=0.5, alpha=0.99,
n_components=2, eigen_solver='auto',
tol=0., max_iter=None, n_jobs=1, metric=True):
self.n_neighbors = n_neighbors
self.k2 = k2
self.alpha = alpha
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.n_jobs = n_jobs
self.metric = metric
self.alphas_ = None
self.lambdas_ = None
def fit_transform(self, x_in):
n = nrow(x_in)
x = normalize_and_center_by_feature_range(x_in)
dists = np.zeros(shape=(n, n), dtype=float)
for i in range(n):
for j in range(i, n):
dists[i, j] = euclidean_dist(x[i, :], x[j, :])
dists[j, i] = dists[i, j]
logger.debug(dists[0, 0:10])
neighbors = np.zeros(shape=(n, self.n_neighbors), dtype=int)
for i in range(n):
neighbors[i, :] = np.argsort(dists[i, :])[0:self.n_neighbors]
logger.debug(neighbors[0, 0:10])
W = np.zeros(shape=(n, n))
for i in range(n):
for j in neighbors[i, :]:
# diagonal elements of W will be zeros
if i != j:
W[i, j] = np.exp(-(dists[i, j] ** 2) / self.k2)
W[j, i] = W[i, j]
D = W.sum(axis=1)
# logger.debug(str(list(D[0:10])))
iDroot = np.diag(np.sqrt(D) ** (-1))
S = iDroot.dot(W.dot(iDroot))
# logger.debug("S: %s" % str(list(S[0, 0:10])))
B = np.eye(n) - self.alpha * S
# logger.debug("B: %s" % str(list(B[0, 0:10])))
A = np.linalg.inv(B)
tdA = np.diag(np.sqrt(np.diag(A)) ** (-1))
A = tdA.dot(A.dot(tdA))
# logger.debug("A: %s" % str(list(A[0, 0:10])))
d = 1 - A
# logger.debug("d: %s" % str(list(d[0, 0:10])))
# logger.debug("min(d): %f, max(d): %f" % (np.min(d), np.max(d)))
mds = manifold.MDS(self.n_components,
metric=self.metric, dissimilarity='precomputed')
# using abs below because some zeros are represented as -0; other values are positive.
embedding = mds.fit_transform(np.abs(d))
return embedding
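# In short: fit_transform builds a Gaussian affinity W over each point's
# n_neighbors nearest neighbours, symmetrically normalises it
# (S = D^-1/2 W D^-1/2), diffuses via A = (I - alpha*S)^-1, rescales A to unit
# self-similarity, and feeds the dissimilarities d = 1 - A to metric MDS.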
if __name__ == "__main__":
logger = logging.getLogger(__name__)
args = get_command_args(debug=True, debug_args=["--debug",
"--plot",
"--log_file=temp/spectral_outlier.log"])
# print "log file: %s" % args.log_file
configure_logger(args)
# sample_type = "4_"
# sample_type = "donut_"
sample_type = "face_"
rnd.seed(42)
x, y = get_demo_samples(sample_type)
n = x.shape[0]
xx = yy = x_grid = Z = scores = None
if args.plot:
plot_sample(x, y, pdfpath="temp/spectral_%ssamples.pdf" % sample_type)
n_neighbors = 10
n_components = 2
method = "standard" # ['standard', 'ltsa', 'hessian', 'modified']
# embed_type = "se"
# embed_type = "tsne"
# embed_type = "isomap"
# embed_type = "mds"
# embed_type = "lle_%s" % method
embed_type = "diffusion"
if embed_type == "se":
embed = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors)
elif embed_type == "tsne":
embed = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
elif embed_type.startswith("lle_"):
embed = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=n_components,
eigen_solver='auto', method=method)
elif embed_type == "isomap":
embed = manifold.Isomap(n_neighbors=n_neighbors, n_components=n_components)
elif embed_type == "mds":
embed = manifold.MDS(n_components=n_components)
elif embed_type == "diffusion":
embed = LabelDiffusion(n_neighbors=n_neighbors, n_components=n_components, metric=True)
else:
raise ValueError("invalid embed type %s" % embed_type)
x_tr = embed.fit_transform(x)
logger.debug(x_tr)
if args.plot:
plot_sample(x_tr, y, pdfpath="temp/spectral_%s%s.pdf" % (sample_type, embed_type))
ad_type = 'ifor'
outliers_fraction = 0.1
ad = IsolationForest(max_samples=256, contamination=outliers_fraction, random_state=None)
ad.fit(x_tr)
scores = -ad.decision_function(x_tr)
top_anoms = np.argsort(-scores)[np.arange(10)]
if args.plot:
# to plot probability contours
xx, yy = np.meshgrid(np.linspace(np.min(x_tr[:, 0]), np.max(x_tr[:, 0]), 50),
np.linspace(np.min(x_tr[:, 1]), np.max(x_tr[:, 1]), 50))
x_grid = np.c_[xx.ravel(), yy.ravel()]
Z = -ad.decision_function(x_grid)
Z = Z.reshape(xx.shape)
pdfpath = "temp/spectral_%scontours_%s_%s.pdf" % (sample_type, ad_type, embed_type)
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
pl = dp.get_next_plot()
pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet'))
dp.plot_points(x_tr, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
pl.scatter(x_tr[top_anoms, 0], x_tr[top_anoms, 1], marker='o', s=35,
edgecolors='red', facecolors='none')
dp.close()
| 33.647727 | 99 | 0.577676 | 833 | 5,922 | 3.92557 | 0.285714 | 0.057187 | 0.026911 | 0.047095 | 0.10948 | 0.088073 | 0.06422 | 0.049541 | 0 | 0 | 0 | 0.020872 | 0.279973 | 5,922 | 175 | 100 | 33.84 | 0.746013 | 0.148261 | 0 | 0.055046 | 0 | 0 | 0.05136 | 0.023968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027523 | false | 0 | 0.036697 | 0 | 0.091743 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e71e3d05682a0aebe1f4b9f321ce88d5da677b1 | 5,042 | py | Python | simchain/vm.py | Frank-gh/simchain | 4dec42b6039730e4dcc0068209dd90200ee6b3d3 | [
"Unlicense"
] | 74 | 2018-11-14T02:36:13.000Z | 2022-03-11T08:24:17.000Z | simchain/vm.py | Frank-gh/simchain | 4dec42b6039730e4dcc0068209dd90200ee6b3d3 | [
"Unlicense"
] | null | null | null | simchain/vm.py | Frank-gh/simchain | 4dec42b6039730e4dcc0068209dd90200ee6b3d3 | [
"Unlicense"
] | 35 | 2019-01-16T04:18:24.000Z | 2022-03-21T09:05:12.000Z |
from .logger import logger
from .ecc import convert_pubkey_to_addr,VerifyingKey,sha256d
class Stack(list):
push = list.append
def peek(self):
return self[-1]
class LittleMachine(object):
def __init__(self):
self.stack = Stack()
self._map = {
"OP_ADD": self.add,
"OP_MINUS": self.minus,
"OP_MUL": self.mul,
"OP_EQ": self.equal_check,
"OP_EQUAL" : self.equal,
"OP_CHECKSIG": self.check_sig,
"OP_ADDR": self.calc_addr,
"OP_DUP" : self.dup,
"OP_NDUP" : self.ndup,
"OP_CHECKMULSIG" : self.check_mulsig,
"OP_MULHASH": self.calc_mulhash,
}
def set_script(self,script,message = b''):
self.clear()
self.result = True
self.pointer = 0
self.message = message
self.script = script
def clear(self):
self.stack.clear()
def peek(self):
return self.stack.peek()
def pop(self):
return self.stack.pop()
def push(self,value):
self.stack.push(value)
def evaluate(self,op):
if op in self._map:
self._map[op]()
elif isinstance(op,str) or\
isinstance(op,bytes)or\
isinstance(op,int) or\
isinstance(op,bool):
self.push(op)
else:
            logger.info('Unknown opcode: {}'.format(op))
def add(self):
self.push(self.pop() + self.pop())
def minus(self):
last = self.pop()
self.push(self.pop() - last)
def mul(self):
self.push(self.pop() * self.pop())
def dup(self):
self.push(self.peek())
def ndup(self):
n = self.pop()
for val in self.stack[-n:]:
self.push(val)
self.push(n)
def equal_check(self):
flag = self.pop() == self.pop()
if not flag:
self.result = False
def equal(self):
self.push(self.pop()==self.pop())
def calc_mulhash(self):
n = self.pop()
pk_strs = [self.pop() for _ in range(n)]
s = b''
for val in pk_strs[::-1]:
s += val
self.push(sha256d(s))
def check_sig(self):
pk_str = self.pop()
sig = self.pop()
verifying_key = VerifyingKey.from_bytes(pk_str)
try:
flag = verifying_key.verify(sig,self.message)
except Exception:
flag = False
self.push(flag)
def check_mulsig(self):
n = self.pop()
pk_strs = [self.pop() for _ in range(n)]
m = self.pop()
sigs = [self.pop() for _ in range(m)]
pk_strs = pk_strs[-m:]
for i in range(m):
verifying_key = VerifyingKey.from_bytes(pk_strs[i])
try:
flag = verifying_key.verify(sigs[i],self.message)
except Exception:
flag = False
            if not flag:
                break
self.push(flag)
def calc_addr(self):
pk_str = self.pop()
self.push(convert_pubkey_to_addr(pk_str))
def run(self):
while (self.pointer < len(self.script)):
op = self.script[self.pointer]
self.pointer += 1
self.evaluate(op)
if not self.result:
return False
else:
return self.peek()
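# Quick illustration (a sketch): a script is a flat list of operands and
# opcodes evaluated left to right on the stack, e.g.
#   m = LittleMachine()
#   m.set_script([1, 2, 'OP_ADD', 3, 'OP_EQUAL'])
#   m.run()   # -> True, since (1 + 2) == 3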
if __name__ == "__main__":
from datatype import Vin,Vout
from ecc import SigningKey,convert_pubkey_to_addr
## k = 12356
## k1 = 23464
## sk = SigningKey.from_number(k)
## pk = sk.get_verifying_key()
##
## sk1 = SigningKey.from_number(k1)
## pk1 = sk1.get_verifying_key()
## addr = convert_pubkey_to_addr(pk.to_bytes())
## addr1 = convert_pubkey_to_addr(pk1.to_bytes())
##
## m1 = b'hello'
## m2 = b'go away'
## sig = sk.sign(m1)
## sig1 = sk1.sign(m2)
## vin = Vin(None,sig1,pk1.to_bytes())
## vout = Vout(addr,10)
##
## sig_script = [vin.sig_script[:64],vin.sig_script[64:]]
## pubkey_script = vout.pubkey_script.split(' ')
kA = 3453543
kB = 2349334
skA = SigningKey.from_number(kA)
skB = SigningKey.from_number(kB)
pkA = skA.get_verifying_key()
pkB = skB.get_verifying_key()
message = b'I love blockchain'
sigA = skA.sign(message)
sigB = skB.sign(message)
Hash = sha256d(pkA.to_bytes()+pkB.to_bytes())
sig_script = [sigA,sigB,2,pkA.to_bytes(),pkB.to_bytes(),2]
pubkey_script = ['OP_NDUP','OP_MULHASH',Hash,'OP_EQ',2,'OP_CHECKMULSIG']
script = sig_script + pubkey_script
machine = LittleMachine()
machine.set_script(script,message)
print (machine.run())
## script = [a,1,2,'OP_DUP','OP_ADD','OP_EQ']
## machine = LittleMachine()
## machine.set_script(script)
## print(machine.run())
| 24.837438 | 76 | 0.527172 | 624 | 5,042 | 4.086538 | 0.214744 | 0.054902 | 0.025882 | 0.037255 | 0.237647 | 0.165882 | 0.062353 | 0.062353 | 0.028235 | 0.028235 | 0 | 0.018987 | 0.341928 | 5,042 | 202 | 77 | 24.960396 | 0.749548 | 0.12733 | 0 | 0.162791 | 0 | 0 | 0.037489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155039 | false | 0 | 0.031008 | 0.023256 | 0.248062 | 0.007752 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e73ac16adb060cc06fc6f0d2d05cbe18736f6a0 | 5,556 | py | Python | bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | [
"Apache-2.0"
] | null | null | null | bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | [
"Apache-2.0"
] | 10 | 2020-05-12T06:58:15.000Z | 2022-02-26T23:59:35.000Z | bgx/validator-bgx/sawtooth_validator/journal/consensus/consensus_factory.py | DGT-Network/DGT-Mississauga | 52b5f1f4015db2aa7196e727a25b399de5fbf3c3 | [
"Apache-2.0"
] | 1 | 2021-01-12T21:38:01.000Z | 2021-01-12T21:38:01.000Z | # Copyright 2017 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import importlib
import logging
from sawtooth_validator.exceptions import UnknownConsensusModuleError
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.state.settings_view import SettingsView
LOGGER = logging.getLogger(__name__)
PROXY = '_proxy_'
class ConsensusFactory(object):
"""ConsensusFactory returns consensus modules by short name.
"""
@staticmethod
def get_consensus_module(module_name):
"""Returns a consensus module by name.
Args:
module_name (str): The name of the module to load.
Returns:
module: The consensus module.
Raises:
UnknownConsensusModuleError: Raised if the given module_name does
not correspond to a consensus implementation.
"""
module_package = module_name
if module_name == 'genesis':
module_package = (
'sawtooth_validator.journal.consensus.genesis.'
'genesis_consensus'
)
elif module_name == 'devmode':
module_package = (
'sawtooth_validator.journal.consensus.dev_mode.'
'dev_mode_consensus'
)
elif module_name == PROXY:
module_package = (
'sawtooth_validator.journal.consensus.proxy.'
'proxy_consensus'
)
elif module_name == 'poet':
module_package = 'sawtooth_poet.poet_consensus'
elif module_name == 'pbft':
module_package = 'pbft.bgx_pbft.consensus'
try:
return importlib.import_module(module_package)
except ImportError:
raise UnknownConsensusModuleError(
'Consensus module "{}" does not exist.'.format(module_name))
@staticmethod
def try_configured_proxy_consensus():
"""Returns the proxy onsensus_module based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
block_id (str): the block id associated with the current state_view
state_view (:obj:`StateView`): the current state view to use for
setting values
Raises:
UnknownConsensusModuleError: Thrown when an invalid consensus
module has been configured.
"""
LOGGER.debug("ConsensusFactory::try_configured_proxy_consensus")
try:
mod = ConsensusFactory.get_consensus_module(PROXY)
except UnknownConsensusModuleError:
mod = None
return mod
@staticmethod
def try_configured_consensus_module(block_id, state_view):
"""Returns the consensus_module based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
block_id (str): the block id associated with the current state_view
state_view (:obj:`StateView`): the current state view to use for
setting values
Raises:
UnknownConsensusModuleError: Thrown when an invalid consensus
module has been configured.
"""
settings_view = SettingsView(state_view)
default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'
consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus)
consensus_version = settings_view.get_setting('bgx.consensus.version', default_value='0.1')
LOGGER.debug("ConsensusFactory::try_configured_consensus_module consensus_module_name=%s ver=%s",consensus_module_name,consensus_version)
try:
mod = ConsensusFactory.get_consensus_module(consensus_module_name)
except UnknownConsensusModuleError:
mod = None
return mod,(consensus_module_name,consensus_version)
@staticmethod
def get_configured_consensus_module(block_id, state_view):
"""Returns the consensus_module based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
block_id (str): the block id associated with the current state_view
state_view (:obj:`StateView`): the current state view to use for
setting values
Raises:
UnknownConsensusModuleError: Thrown when an invalid consensus
module has been configured.
"""
settings_view = SettingsView(state_view)
default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'
consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus)
LOGGER.debug("ConsensusFactory::get_configured_consensus_module consensus_module_name=%s",consensus_module_name)
return ConsensusFactory.get_consensus_module(consensus_module_name)
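# Typical call site (a sketch): the journal resolves the engine for the current
# chain head via
#   mod, (name, ver) = ConsensusFactory.try_configured_consensus_module(block_id, state_view)
# which defaults to 'genesis' before any block exists and to 'devmode' afterwards.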
| 42.090909 | 145 | 0.667207 | 600 | 5,556 | 5.96 | 0.245 | 0.11745 | 0.047819 | 0.031879 | 0.549217 | 0.510067 | 0.4217 | 0.36689 | 0.36689 | 0.36689 | 0 | 0.002417 | 0.2554 | 5,556 | 131 | 146 | 42.412214 | 0.861977 | 0.384989 | 0 | 0.322581 | 0 | 0 | 0.19457 | 0.14415 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.112903 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e7810efd3616472cfac0aa367ce42b73363d1b5 | 4,162 | py | Python | nicos_sinq/zebra/setups/monochromator.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-03-26T10:30:45.000Z | 2021-03-26T10:30:45.000Z | nicos_sinq/zebra/setups/monochromator.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_sinq/zebra/setups/monochromator.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-08-04T18:35:05.000Z | 2021-04-16T11:22:08.000Z | description = 'Devices for the ZEBRA monochromator'
mota = 'SQ:ZEBRA:mota:'
motb = 'SQ:ZEBRA:motb:'
motd = 'SQ:ZEBRA:motd:'
devices = dict(
mtvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Lower monochromator vertical translation',
motorpv = mota + 'MTVL',
errormsgpv = mota + 'MTVL-MsgTxt',
precision = 0.5,
),
mtpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
        description = 'Lower monochromator parallel translation',
motorpv = mota + 'MTPL',
errormsgpv = mota + 'MTPL-MsgTxt',
precision = 0.5,
),
mgvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Lower monochromator vertical goniometer',
motorpv = mota + 'MGVL',
errormsgpv = mota + 'MGVL-MsgTxt',
precision = 0.5,
),
mgpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
        description = 'Lower monochromator parallel goniometer',
motorpv = mota + 'MGPL',
errormsgpv = mota + 'MGPL-MsgTxt',
precision = 0.5,
),
moml = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Lower monochromator omega',
motorpv = mota + 'MOML',
errormsgpv = mota + 'MOML-MsgTxt',
precision = 0.5,
),
mtvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Upper monochromator vertical translation',
motorpv = mota + 'MTVU',
errormsgpv = mota + 'MTVU-MsgTxt',
precision = 0.5,
),
mtpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
        description = 'Upper monochromator parallel translation',
motorpv = mota + 'MTPU',
errormsgpv = mota + 'MTPU-MsgTxt',
precision = 0.5,
),
mgvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Upper monochromator vertical goniometer',
motorpv = mota + 'MGVU',
errormsgpv = mota + 'MGVU-MsgTxt',
precision = 0.5,
),
mgpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
        description = 'Upper monochromator parallel goniometer',
motorpv = mota + 'MGPU',
errormsgpv = mota + 'MGPU-MsgTxt',
precision = 0.5,
),
momu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Upper monochromator omega',
motorpv = mota + 'MOMU',
errormsgpv = mota + 'MOMU-MsgTxt',
precision = 0.5,
),
mcvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Lower monochromator curvature',
motorpv = mota + 'MCVL',
errormsgpv = mota + 'MCVL-MsgTxt',
precision = 0.5,
),
mcvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Upper monochromator curvature',
motorpv = motb + 'MCVU',
errormsgpv = motb + 'MCVU-MsgTxt',
precision = 0.5,
),
mexz = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Monochromator lift',
motorpv = motb + 'MEXZ',
errormsgpv = motb + 'MEXZ-MsgTxt',
precision = 0.5,
),
wavelength = device('nicos_sinq.zebra.devices.zebrawl.ZebraWavelength',
description = 'Wavelength for ZEBRA',
unit = 'A-1',
lift = 'mexz'
),
cex1 = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'First collimator drum',
motorpv = motd + 'CEX1',
errormsgpv = motd + 'CEX1-MsgTxt',
precision = 0.5,
),
cex2 = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Second collimator drum',
motorpv = motd + 'CEX2',
errormsgpv = motd + 'CEX2-MsgTxt',
precision = 0.5,
),
)
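
# Illustrative sketch: once this setup is loaded, the devices above behave as
# ordinary NICOS motors. `maw` (move-and-wait) and `read` are standard NICOS
# user commands; the target value below is made up.
#
#     maw(mexz, 100.0)     # drive the monochromator lift
#     read(wavelength)     # wavelength derived from the lift position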
| 34.97479 | 75 | 0.592023 | 419 | 4,162 | 5.842482 | 0.143198 | 0.071895 | 0.085784 | 0.128676 | 0.589052 | 0.518791 | 0.518791 | 0.518791 | 0.518791 | 0.518791 | 0 | 0.022401 | 0.281355 | 4,162 | 118 | 76 | 35.271186 | 0.796055 | 0 | 0 | 0.396552 | 0 | 0 | 0.351273 | 0.155694 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e79db4019af9551976d2be7470794d639b1bb48 | 15,886 | py | Python | scripts/loading/ontology/psimi.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 5 | 2015-11-24T23:09:46.000Z | 2019-11-06T17:48:13.000Z | scripts/loading/ontology/psimi.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 188 | 2017-08-28T22:39:03.000Z | 2022-03-02T14:53:46.000Z | scripts/loading/ontology/psimi.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 7 | 2018-05-13T01:58:07.000Z | 2021-06-25T19:08:33.000Z | import urllib.request, urllib.parse, urllib.error
import logging
import os
from datetime import datetime
import sys
import importlib
importlib.reload(sys) # Reload does the trick!
from src.helpers import upload_file
from scripts.loading.database_session import get_session
from scripts.loading.ontology import read_owl
from src.models import Source, Ro, Edam, Dbentity, Filedbentity, \
Psimod, Psimi, PsimiUrl, PsimiAlias, PsimiRelation
__author__ = 'sweng66'
## Created on March 2018
## This script is used to update PSI-MI ontology in NEX2.
log_file = 'scripts/loading/ontology/logs/psimi.log'
ontology = 'PSIMI'
src = 'PSI'
CREATED_BY = os.environ['DEFAULT_USER']
logging.basicConfig(format='%(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO)
log.info("PSI-MI Ontology Loading Report:\n")
def load_ontology(ontology_file):
nex_session = get_session()
log.info(str(datetime.now()))
log.info("Getting data from database...")
source_to_id = dict([(x.display_name, x.source_id) for x in nex_session.query(Source).all()])
psimiid_to_psimi = dict([(x.psimiid, x) for x in nex_session.query(Psimi).all()])
term_to_ro_id = dict([(x.display_name, x.ro_id) for x in nex_session.query(Ro).all()])
roid_to_ro_id = dict([(x.roid, x.ro_id) for x in nex_session.query(Ro).all()])
edam_to_id = dict([(x.format_name, x.edam_id) for x in nex_session.query(Edam).all()])
psimi_id_to_alias = {}
for x in nex_session.query(PsimiAlias).all():
aliases = []
if x.psimi_id in psimi_id_to_alias:
aliases = psimi_id_to_alias[x.psimi_id]
aliases.append((x.display_name, x.alias_type))
psimi_id_to_alias[x.psimi_id] = aliases
psimi_id_to_parent = {}
for x in nex_session.query(PsimiRelation).all():
parents = []
if x.child_id in psimi_id_to_parent:
parents = psimi_id_to_parent[x.child_id]
parents.append((x.parent_id, x.ro_id))
psimi_id_to_parent[x.child_id] = parents
####################################
fw = open(log_file, "w")
log.info("Reading data from ontology file...")
data = read_owl(ontology_file, ontology)
log.info("Updating psimi ontology data in the database...")
[update_log, to_delete_list] = load_new_data(nex_session, data,
source_to_id,
psimiid_to_psimi,
term_to_ro_id['is a'],
roid_to_ro_id,
psimi_id_to_alias,
psimi_id_to_parent,
fw)
# log.info("Uploading file to S3...")
# update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id)
log.info("Writing loading summary...")
write_summary_and_send_email(fw, update_log, to_delete_list)
nex_session.close()
fw.close()
log.info(str(datetime.now()))
log.info("Done!\n\n")
def load_new_data(nex_session, data, source_to_id, psimiid_to_psimi, ro_id, roid_to_ro_id, psimi_id_to_alias, psimi_id_to_parent, fw):
active_psimiid = []
update_log = {}
for count_name in ['updated', 'added', 'deleted']:
update_log[count_name] = 0
relation_just_added = {}
alias_just_added = {}
for x in data:
psimi_id = None
if "MI:" not in x['id']:
continue
if x['id'] in psimiid_to_psimi:
## in database
y = psimiid_to_psimi[x['id']]
psimi_id = y.psimi_id
            if y.is_obsolete is True:
                fw.write("The is_obsolete for " + x['id'] + " has been updated from " + str(y.is_obsolete) + " to " + 'False' + "\n")
                y.is_obsolete = '0'
                nex_session.add(y)
                nex_session.flush()
                update_log['updated'] = update_log['updated'] + 1
if x['term'] != y.display_name.strip():
## update term
fw.write("The display_name for " + x['id'] + " has been updated from " + y.display_name + " to " + x['term'] + "\n")
y.display_name = x['term']
# nex_session.add(y)
# nex_session.flush()
update_log['updated'] = update_log['updated'] + 1
# print "UPDATED: ", y.psimiid, ":"+y.display_name+ ":" + ":"+x['term']+":"
# else:
# print "SAME: ", y.psimiid, y.display_name, x['definition'], x['aliases'], x['parents'], x['other_parents']
active_psimiid.append(x['id'])
else:
fw.write("NEW entry = " + x['id'] + " " + x['term'] + "\n")
this_x = Psimi(source_id = source_to_id[src],
format_name = x['id'],
psimiid = x['id'],
display_name = x['term'],
description = x['definition'],
obj_url = '/psimi/' + x['id'],
is_obsolete = '0',
created_by = CREATED_BY)
nex_session.add(this_x)
nex_session.flush()
psimi_id = this_x.psimi_id
update_log['added'] = update_log['added'] + 1
# print "NEW: ", x['id'], x['term'], x['definition']
link_id = x['id'].replace(':', '_')
insert_url(nex_session, source_to_id['Ontobee'], 'Ontobee', psimi_id,
'http://www.ontobee.org/ontology/MI?iri=http://purl.obolibrary.org/obo/'+link_id,
fw)
# insert_url(nex_session, source_to_id['BioPortal'], 'BioPortal', psimi_id,
# 'http://bioportal.bioontology.org/ontologies/MI/?p=classes&conceptid=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
# fw)
insert_url(nex_session, source_to_id['OLS'], 'OLS', psimi_id,
'http://www.ebi.ac.uk/ols/ontologies/mi/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
fw)
## add RELATIONS
for parent_psimiid in x['parents']:
parent = psimiid_to_psimi.get(parent_psimiid)
if parent is not None:
parent_id = parent.psimi_id
child_id = psimi_id
insert_relation(nex_session, source_to_id[src], parent_id,
child_id, ro_id, relation_just_added, fw)
for (parent_psimiid, roid) in x['other_parents']:
parent = psimiid_to_psimi.get(parent_psimiid)
if parent is not None:
parent_id = parent.psimi_id
child_id = psimi_id
this_ro_id = roid_to_ro_id.get(roid)
if this_ro_id is None:
log.info("The ROID:" + str(roid) + " is not found in the database")
continue
insert_relation(nex_session, source_to_id[src], parent_id,
child_id, this_ro_id, relation_just_added, fw)
## add ALIASES
for (alias, alias_type) in x['aliases']:
            if alias_type != 'EXACT':
continue
insert_alias(nex_session, source_to_id[src], alias,
alias_type, psimi_id, alias_just_added, fw)
## update RELATIONS
curr_parents = psimi_id_to_parent.get(psimi_id)
if curr_parents is None:
curr_parents = []
update_relations(nex_session, psimi_id, curr_parents, x['parents'],
x['other_parents'], roid_to_ro_id,
source_to_id[src], psimiid_to_psimi, ro_id, relation_just_added, fw)
## update ALIASES
update_aliases(nex_session, psimi_id, psimi_id_to_alias.get(psimi_id), x['aliases'],
source_to_id[src], psimiid_to_psimi, alias_just_added, fw)
to_delete = []
for psimiid in psimiid_to_psimi:
if psimiid in active_psimiid:
continue
x = psimiid_to_psimi[psimiid]
if psimiid.startswith('NTR'):
continue
to_delete.append((psimiid, x.display_name))
        if x.is_obsolete is False:
            fw.write("The is_obsolete for " + x.psimiid + " has been updated from " + str(x.is_obsolete) + " to " + 'True' + "\n")
            x.is_obsolete = '1'
            nex_session.add(x)
            nex_session.flush()
            update_log['updated'] = update_log['updated'] + 1
nex_session.commit()
# nex_session.rollback()
return [update_log, to_delete]
def update_aliases(nex_session, psimi_id, curr_aliases, new_aliases, source_id, psimiid_to_psimi, alias_just_added, fw):
# print "ALIAS: ", curr_aliases, new_aliases
# return
if curr_aliases is None:
curr_aliases = []
for (alias, type) in new_aliases:
if type != 'EXACT':
continue
if (alias, type) not in curr_aliases:
insert_alias(nex_session, source_id, alias, type, psimi_id, alias_just_added, fw)
for (alias, type) in curr_aliases:
if(alias, type) not in new_aliases:
to_delete = nex_session.query(PsimiAlias).filter_by(psimi_id=psimi_id, display_name=alias, alias_type=type).first()
nex_session.delete(to_delete)
fw.write("The old alias = " + alias + " has been deleted for psimi_id = " + str(psimi_id) + "\n")
def update_relations(nex_session, child_id, curr_parent_ids, new_parents, other_parents, roid_to_ro_id, source_id, psimiid_to_psimi, ro_id, relation_just_added, fw):
# print "RELATION: ", curr_parent_ids, new_parents, other_parents
# return
new_parent_ids = []
for parent_psimiid in new_parents:
parent = psimiid_to_psimi.get(parent_psimiid)
if parent is not None:
parent_id = parent.psimi_id
new_parent_ids.append((parent_id, ro_id))
if (parent_id, ro_id) not in curr_parent_ids:
insert_relation(nex_session, source_id, parent_id, child_id,
ro_id, relation_just_added, fw)
for (parent_psimiid, roid) in other_parents:
parent = psimiid_to_psimi.get(parent_psimiid)
if parent is not None:
parent_id = parent.psimi_id
this_ro_id = roid_to_ro_id.get(roid)
if this_ro_id is None:
log.info("The ROID:" + str(roid) + " is not found in the database")
continue
new_parent_ids.append((parent_id, this_ro_id))
if (parent_id, this_ro_id) not in curr_parent_ids:
insert_relation(nex_session, source_id, parent_id, child_id,
this_ro_id, relation_just_added, fw)
for (parent_id, ro_id) in curr_parent_ids:
if (parent_id, ro_id) not in new_parent_ids:
## remove the old one
to_delete = nex_session.query(PsimiRelation).filter_by(child_id=child_id, parent_id=parent_id, ro_id=ro_id).first()
nex_session.delete(to_delete)
fw.write("The old parent: parent_id = " + str(parent_id) + " has been deleted for psimi_id = " + str(child_id)+ "\n")
def insert_url(nex_session, source_id, display_name, psimi_id, url, fw, url_type=None):
# print display_name, psimi_id, url
# return
if url_type is None:
url_type = display_name
x = PsimiUrl(display_name = display_name,
url_type = url_type,
source_id = source_id,
psimi_id = psimi_id,
obj_url = url,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new URL: " + url + " for psimi_id = " + str(psimi_id) + "\n")
def insert_alias(nex_session, source_id, display_name, alias_type, psimi_id, alias_just_added, fw):
# print display_name, alias_type
# return
if (psimi_id, display_name, alias_type) in alias_just_added:
return
alias_just_added[(psimi_id, display_name, alias_type)] = 1
x = PsimiAlias(display_name = display_name,
alias_type = alias_type,
source_id = source_id,
psimi_id = psimi_id,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new ALIAS: " + display_name + " for psimi_id = " + str(psimi_id) + "\n")
def insert_relation(nex_session, source_id, parent_id, child_id, ro_id, relation_just_added, fw):
# print "PARENT/CHILD: ", parent_id, child_id
# return
if (parent_id, child_id) in relation_just_added:
return
relation_just_added[(parent_id, child_id)] = 1
x = PsimiRelation(parent_id = parent_id,
child_id = child_id,
source_id = source_id,
ro_id = ro_id,
created_by = CREATED_BY)
nex_session.add(x)
nex_session.flush()
fw.write("Added new PARENT: parent_id = " + str(parent_id) + " for psimi_id = " + str(child_id) + "\n")
def update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id):
gzip_file = ontology_file + ".gz"
import gzip
import shutil
with open(ontology_file, 'rb') as f_in, gzip.open(gzip_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
local_file = open(gzip_file, mode='rb')
    import hashlib
    # hash the file contents, not the file name, so duplicate uploads are detected
    with open(ontology_file, 'rb') as md5_in:
        psimi_md5sum = hashlib.md5(md5_in.read()).hexdigest()
psimi_row = nex_session.query(Filedbentity).filter_by(md5sum = psimi_md5sum).one_or_none()
if psimi_row is not None:
return
nex_session.query(Dbentity).filter_by(display_name=gzip_file, dbentity_status='Active').update({"dbentity_status": 'Archived'})
nex_session.commit()
data_id = edam_to_id.get('EDAM:2353') ## data:2353 Ontology data
topic_id = edam_to_id.get('EDAM:0089') ## topic:0089 Ontology and terminology
format_id = edam_to_id.get('EDAM:3262') ## format:3262 OWL/XML
from sqlalchemy import create_engine
from src.models import DBSession
engine = create_engine(os.environ['NEX2_URI'], pool_recycle=3600)
DBSession.configure(bind=engine)
upload_file(CREATED_BY, local_file,
filename=gzip_file,
file_extension='gz',
description='PSI-MI Ontology in OWL RDF/XML format',
display_name=gzip_file,
data_id=data_id,
format_id=format_id,
topic_id=topic_id,
status='Active',
is_public='0',
is_in_spell='0',
is_in_browser='0',
file_date=datetime.now(),
source_id=source_to_id['SGD'],
md5sum=psimi_md5sum)
def write_summary_and_send_email(fw, update_log, to_delete_list):
summary = "Updated: " + str(update_log['updated'])+ "\n"
summary = summary + "Added: " + str(update_log['added']) + "\n"
summary_4_email = summary
if len(to_delete_list) > 0:
summary = summary + "The following PSI-MI terms are not in the current release:\n"
for (psimiid, term) in to_delete_list:
summary = summary + "\t" + psimiid + " " + term + "\n"
fw.write(summary)
log.info(summary_4_email)
if __name__ == "__main__":
url_path = 'http://purl.obolibrary.org/obo/'
mi_owl_file = 'mi.owl'
urllib.request.urlretrieve(url_path + mi_owl_file, mi_owl_file)
load_ontology(mi_owl_file)
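
# Illustrative sketch: the loader can also be pointed at a local copy of the
# ontology when iterating on the parsing logic; the module path below depends
# on how the scripts package is installed and is an assumption.
#
#     from scripts.loading.ontology.psimi import load_ontology
#     load_ontology('mi.owl')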
| 39.715 | 165 | 0.585295 | 2,080 | 15,886 | 4.153846 | 0.115865 | 0.046181 | 0.017361 | 0.015625 | 0.480208 | 0.41875 | 0.357407 | 0.306829 | 0.267361 | 0.244907 | 0 | 0.006428 | 0.304671 | 15,886 | 399 | 166 | 39.814536 | 0.775756 | 0.074153 | 0 | 0.230496 | 0 | 0.007092 | 0.094981 | 0.002667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031915 | false | 0 | 0.056738 | 0 | 0.102837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e7e9c74fb34d9d539ee2c2a737c83639c165ce7 | 1,765 | py | Python | aito/utils/_file_utils.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 6 | 2019-10-16T02:35:06.000Z | 2021-02-03T13:39:43.000Z | aito/utils/_file_utils.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 23 | 2020-03-17T13:16:02.000Z | 2021-04-23T15:09:51.000Z | aito/utils/_file_utils.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | null | null | null | import gzip
import json
import os
import shutil
from os import PathLike
from pathlib import Path
from typing import Dict, List
import ndjson
def check_file_is_gzipped(file_path: PathLike):
    """Return True if the path ends in '.ndjson.gz'"""
    file_path = Path(file_path)
    return file_path.suffixes[-2:] == ['.ndjson', '.gz']
def gzip_file(input_path: PathLike, output_path: PathLike = None, keep=True):
input_path = Path(input_path)
if input_path.name.endswith('.gz'):
raise ValueError(f'{input_path} is already gzipped')
output_path = Path(output_path) if output_path else input_path.parent / f"{input_path.name}.gz"
with input_path.open('rb') as f_in, gzip.open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if not keep:
os.unlink(input_path)
def read_json_gz_file(input_path: PathLike, decoder='utf-8'):
input_path = Path(input_path)
with gzip.open(input_path, 'rb') as in_f:
json_bytes = in_f.read()
return json.loads(json_bytes.decode(decoder))
def read_ndjson_gz_file(input_path: PathLike, decoder='utf-8'):
input_path = Path(input_path)
records = []
with gzip.open(input_path, 'rb') as in_f:
line = in_f.readline()
while line:
records.append(json.loads(line.decode(decoder)))
line = in_f.readline()
return records
def write_to_ndjson_gz_file(data: List[Dict], output_file: PathLike):
output_file = Path(output_file)
if not output_file.name.endswith(".ndjson.gz"):
raise ValueError("Output file must end with .ndjson.gz")
ndjson_file = output_file.parent / output_file.stem
with ndjson_file.open('w') as f:
ndjson.dump(data, f)
gzip_file(ndjson_file, output_file, keep=False)
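
# Illustrative usage sketch (the paths below are made up): round-trip a list
# of records through a gzipped NDJSON file using the helpers above.
#
#     records = [{'id': 1}, {'id': 2}]
#     write_to_ndjson_gz_file(records, Path('out/records.ndjson.gz'))
#     assert read_ndjson_gz_file(Path('out/records.ndjson.gz')) == records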
| 30.964912 | 99 | 0.689518 | 268 | 1,765 | 4.309701 | 0.25 | 0.132468 | 0.033766 | 0.054545 | 0.164502 | 0.145455 | 0.145455 | 0.145455 | 0.145455 | 0.09697 | 0 | 0.00212 | 0.1983 | 1,765 | 56 | 100 | 31.517857 | 0.814134 | 0 | 0 | 0.155556 | 0 | 0 | 0.073088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.177778 | 0 | 0.377778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e8a599c94ab88b6f514655190020c0bde169a1f | 586 | py | Python | data/data.py | owrior/snakeMach | 1af8ca51badd3e23201ef5cc873e9179ee01c058 | [
"MIT"
] | null | null | null | data/data.py | owrior/snakeMach | 1af8ca51badd3e23201ef5cc873e9179ee01c058 | [
"MIT"
] | null | null | null | data/data.py | owrior/snakeMach | 1af8ca51badd3e23201ef5cc873e9179ee01c058 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import normalize
class TestData:
def __init__(self, dimensions, points) -> None:
self.dimensions = dimensions
self.points = points
def linearly_separable(self) -> np.array:
x, y = make_blobs(
n_samples=self.points,
centers=2,
n_features=self.dimensions,
center_box=(0, 1),
)
        # min-max scale each feature column; the parentheses ensure the
        # subtraction happens before the division
        for d in range(self.dimensions):
            x[:, d] = (x[:, d] - np.min(x[:, d])) / (np.max(x[:, d]) - np.min(x[:, d]))
return x, y
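
# Illustrative usage sketch: generate 200 linearly separable points in two
# dimensions; `x` has shape (200, 2) and `y` holds the two blob labels.
#
#     data = TestData(dimensions=2, points=200)
#     x, y = data.linearly_separable()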
| 25.478261 | 70 | 0.583618 | 78 | 586 | 4.25641 | 0.512821 | 0.03012 | 0.036145 | 0.042169 | 0.054217 | 0.054217 | 0 | 0 | 0 | 0 | 0 | 0.007335 | 0.302048 | 586 | 22 | 71 | 26.636364 | 0.804401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e8d37dec89478528db6577f4a7c15427ede6234 | 7,013 | py | Python | sahara/tests/unit/plugins/mapr/utils/test_func_utils.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | null | null | null | sahara/tests/unit/plugins/mapr/utils/test_func_utils.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | null | null | null | sahara/tests/unit/plugins/mapr/utils/test_func_utils.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.util.func_utils as fu
import sahara.tests.unit.base as b
class PredicatesTest(b.SaharaTestCase):
def test_true_predicate(self):
self.assertTrue(fu.true_predicate(None))
def test_false_predicate(self):
self.assertFalse(fu.false_predicate(None))
def test_not_predicate(self):
self.assertFalse(fu.not_predicate(fu.true_predicate)(None))
self.assertTrue(fu.not_predicate(fu.false_predicate)(None))
def test_and_predicate(self):
true_p = fu.true_predicate
false_p = fu.false_predicate
and_p = fu.and_predicate
self.assertTrue(and_p(true_p, true_p)(None))
self.assertFalse(and_p(false_p, true_p)(None))
self.assertFalse(and_p(true_p, false_p)(None))
self.assertFalse(and_p(false_p, false_p)(None))
def test_or_predicate(self):
true_p = fu.true_predicate
false_p = fu.false_predicate
or_p = fu.or_predicate
self.assertTrue(or_p(true_p, true_p)(None))
self.assertTrue(or_p(false_p, true_p)(None))
self.assertTrue(or_p(true_p, false_p)(None))
self.assertFalse(or_p(false_p, false_p)(None))
def test_field_equals_predicate(self):
field_equals_p = fu.field_equals_predicate
arg = {'a': 'a', 'b': 'b'}
self.assertTrue(field_equals_p('a', 'a')(arg))
self.assertFalse(field_equals_p('b', 'a')(arg))
def test_like_predicate(self):
like_p = fu.like_predicate
arg = {'a': 'a', 'b': 'b', 'c': 'c'}
self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'c'})(arg))
self.assertTrue(like_p({'a': 'a', 'b': 'b'})(arg))
self.assertTrue(like_p({'a': 'a'})(arg))
self.assertTrue(like_p({'a': 'a'}, ['a'])(arg))
self.assertTrue(like_p({})(arg))
self.assertTrue(like_p({'a': 'a', 'b': 'b', 'c': 'a'}, ['c'])(arg))
self.assertFalse(like_p({'a': 'a', 'b': 'b', 'c': 'a'})(arg))
self.assertFalse(like_p({'a': 'a', 'c': 'a'})(arg))
self.assertFalse(like_p({'c': 'a'}, ['a'])(arg))
def test_in_predicate(self):
in_p = fu.in_predicate
arg = {'a': 'a', 'b': 'b'}
self.assertTrue(in_p('a', ['a', 'b'])(arg))
self.assertFalse(in_p('a', ['c', 'b'])(arg))
self.assertFalse(in_p('a', [])(arg))
class FunctionsTest(b.SaharaTestCase):
def test_copy_function(self):
copy_f = fu.copy_function
arg = {'a': 'a'}
actual = copy_f()(arg)
expected = {'a': 'a'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
def test_append_field_function(self):
append_field_f = fu.append_field_function
arg = {'a': 'a'}
actual = append_field_f('b', 'b')(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
def test_append_fields_function(self):
append_fields_f = fu.append_fields_function
arg = {'a': 'a'}
actual = append_fields_f({'b': 'b', 'c': 'c'})(arg)
expected = {'a': 'a', 'b': 'b', 'c': 'c'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
actual = append_fields_f({'b': 'b'})(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
actual = append_fields_f({})(arg)
expected = {'a': 'a'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
def test_get_values_pair_function(self):
get_values_pair_f = fu.get_values_pair_function
arg = {'a': 'a', 'b': 'b'}
actual = get_values_pair_f('a', 'b')(arg)
expected = ('a', 'b')
self.assertEqual(expected, actual)
def test_get_field_function(self):
get_field_f = fu.get_field_function
arg = {'a': 'a', 'b': 'b'}
actual = get_field_f('a')(arg)
expected = ('a', 'a')
self.assertEqual(expected, actual)
def test_get_fields_function(self):
get_fields_f = fu.get_fields_function
arg = {'a': 'a', 'b': 'b'}
actual = get_fields_f(['a', 'b'])(arg)
expected = [('a', 'a'), ('b', 'b')]
self.assertEqual(expected, actual)
actual = get_fields_f(['a'])(arg)
expected = [('a', 'a')]
self.assertEqual(expected, actual)
def test_extract_fields_function(self):
extract_fields_f = fu.extract_fields_function
arg = {'a': 'a', 'b': 'b'}
actual = extract_fields_f(['a', 'b'])(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
actual = extract_fields_f(['a'])(arg)
expected = {'a': 'a'}
self.assertEqual(expected, actual)
def test_get_value_function(self):
get_value_f = fu.get_value_function
arg = {'a': 'a', 'b': 'b'}
actual = get_value_f('a')(arg)
expected = 'a'
self.assertEqual(expected, actual)
def test_set_default_value_function(self):
set_default_value_f = fu.set_default_value_function
arg = {'a': 'a'}
actual = set_default_value_f('b', 'b')(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
actual = set_default_value_f('a', 'b')(arg)
expected = {'a': 'a'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
def test_set_default_values_function(self):
set_default_values_f = fu.set_default_values_function
arg = {'a': 'a'}
actual = set_default_values_f({'a': 'b', 'c': 'c'})(arg)
expected = {'a': 'a', 'c': 'c'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
actual = set_default_values_f({'b': 'b'})(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
actual = set_default_values_f({})(arg)
expected = {'a': 'a'}
self.assertEqual(expected, actual)
self.assertIsNot(actual, arg)
def test_values_pair_to_dict_function(self):
values_pair_to_dict_f = fu.values_pair_to_dict_function
arg = ('a', 'b')
actual = values_pair_to_dict_f('a', 'b')(arg)
expected = {'a': 'a', 'b': 'b'}
self.assertEqual(expected, actual)
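
# Illustrative sketch: the predicates compose into arbitrary filters, e.g.
# "field 'a' equals 'a' and field 'b' is one of ['b', 'c']":
#
#     p = fu.and_predicate(fu.field_equals_predicate('a', 'a'),
#                          fu.in_predicate('b', ['b', 'c']))
#     assert p({'a': 'a', 'b': 'b'})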
| 34.546798 | 76 | 0.592756 | 950 | 7,013 | 4.148421 | 0.114737 | 0.020299 | 0.015986 | 0.020299 | 0.642223 | 0.580817 | 0.541233 | 0.445572 | 0.336717 | 0.317178 | 0 | 0.001502 | 0.240696 | 7,013 | 202 | 77 | 34.717822 | 0.738592 | 0.081135 | 0 | 0.37415 | 0 | 0 | 0.027527 | 0 | 0 | 0 | 0 | 0 | 0.367347 | 1 | 0.129252 | false | 0 | 0.013605 | 0 | 0.156463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e8e056676584d6baf6b9485f76064371025e1cf | 8,405 | py | Python | docs/rips/tests/test_cases.py | OPM/ResInsight-UserDocumentation | 2af2c3a5ef297c0061d842944360a83bf8e49c36 | [
"MIT"
] | 1 | 2020-04-25T21:24:45.000Z | 2020-04-25T21:24:45.000Z | docs/rips/tests/test_cases.py | OPM/ResInsight-UserDocumentation | 2af2c3a5ef297c0061d842944360a83bf8e49c36 | [
"MIT"
] | 7 | 2020-02-11T07:42:10.000Z | 2020-09-28T17:18:01.000Z | docs/rips/tests/test_cases.py | OPM/ResInsight-UserDocumentation | 2af2c3a5ef297c0061d842944360a83bf8e49c36 | [
"MIT"
] | 2 | 2020-04-02T09:33:45.000Z | 2020-04-09T19:44:53.000Z | import sys
import os
import math
import pytest
import grpc
import tempfile
sys.path.insert(1, os.path.join(sys.path[0], "../../"))
import rips
import dataroot
def test_Launch(rips_instance, initialize_test):
assert rips_instance is not None
def test_EmptyProject(rips_instance, initialize_test):
cases = rips_instance.project.cases()
    assert len(cases) == 0
def test_OneCase(rips_instance, initialize_test):
case = rips_instance.project.load_case(
dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
)
assert case.name == "TEST10K_FLT_LGR_NNC"
assert case.id == 0
cases = rips_instance.project.cases()
    assert len(cases) == 1
def test_BoundingBox(rips_instance, initialize_test):
case = rips_instance.project.load_case(
dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
)
assert case.name == "TEST10K_FLT_LGR_NNC"
boundingbox = case.reservoir_boundingbox()
assert math.isclose(3382.90, boundingbox.min_x, abs_tol=1.0e-1)
assert math.isclose(5850.48, boundingbox.max_x, abs_tol=1.0e-1)
assert math.isclose(4157.45, boundingbox.min_y, abs_tol=1.0e-1)
assert math.isclose(7354.93, boundingbox.max_y, abs_tol=1.0e-1)
assert math.isclose(-4252.61, boundingbox.min_z, abs_tol=1.0e-1)
assert math.isclose(-4103.60, boundingbox.max_z, abs_tol=1.0e-1)
min_depth, max_depth = case.reservoir_depth_range()
assert math.isclose(4103.60, min_depth, abs_tol=1.0e-1)
assert math.isclose(4252.61, max_depth, abs_tol=1.0e-1)
def test_MultipleCases(rips_instance, initialize_test):
case_paths = []
case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID")
case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID")
case_paths.append(dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID")
case_names = []
for case_path in case_paths:
case_name = os.path.splitext(os.path.basename(case_path))[0]
case_names.append(case_name)
rips_instance.project.load_case(path=case_path)
cases = rips_instance.project.cases()
assert len(cases) == len(case_names)
for i, case_name in enumerate(case_names):
assert case_name == cases[i].name
def get_cell_index_with_ijk(cell_info, i, j, k):
for (idx, cell) in enumerate(cell_info):
if cell.local_ijk.i == i and cell.local_ijk.j == j and cell.local_ijk.k == k:
return idx
return -1
def check_corner(actual, expected):
assert math.isclose(actual.x, expected[0], abs_tol=0.1)
assert math.isclose(actual.y, expected[1], abs_tol=0.1)
assert math.isclose(actual.z, expected[2], abs_tol=0.1)
def test_10k(rips_instance, initialize_test):
case_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
case = rips_instance.project.load_case(path=case_path)
assert len(case.grids()) == 2
cell_count_info = case.cell_count()
assert cell_count_info.active_cell_count == 11125
assert cell_count_info.reservoir_cell_count == 316224
time_steps = case.time_steps()
assert len(time_steps) == 9
days_since_start = case.days_since_start()
assert len(days_since_start) == 9
cell_info = case.cell_info_for_active_cells()
assert len(cell_info) == cell_count_info.active_cell_count
# Check an active cell (found in resinsight ui)
cell_index = get_cell_index_with_ijk(cell_info, 23, 44, 19)
assert cell_index != -1
cell_centers = case.active_cell_centers()
assert len(cell_centers) == cell_count_info.active_cell_count
# Check the cell center for the specific cell
assert math.isclose(3627.17, cell_centers[cell_index].x, abs_tol=0.1)
assert math.isclose(5209.75, cell_centers[cell_index].y, abs_tol=0.1)
assert math.isclose(4179.6, cell_centers[cell_index].z, abs_tol=0.1)
cell_corners = case.active_cell_corners()
assert len(cell_corners) == cell_count_info.active_cell_count
# Expected values from ResInsight UI
expected_corners = [
[3565.22, 5179.02, 4177.18],
[3655.67, 5145.34, 4176.63],
[3690.07, 5240.69, 4180.02],
[3599.87, 5275.16, 4179.32],
[3564.13, 5178.61, 4179.75],
[3654.78, 5144.79, 4179.23],
[3688.99, 5239.88, 4182.7],
[3598.62, 5274.48, 4181.96],
]
check_corner(cell_corners[cell_index].c0, expected_corners[0])
check_corner(cell_corners[cell_index].c1, expected_corners[1])
check_corner(cell_corners[cell_index].c2, expected_corners[2])
check_corner(cell_corners[cell_index].c3, expected_corners[3])
check_corner(cell_corners[cell_index].c4, expected_corners[4])
check_corner(cell_corners[cell_index].c5, expected_corners[5])
check_corner(cell_corners[cell_index].c6, expected_corners[6])
check_corner(cell_corners[cell_index].c7, expected_corners[7])
# No coarsening info for this case
coarsening_info = case.coarsening_info()
assert len(coarsening_info) == 0
def test_PdmObject(rips_instance, initialize_test):
case_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
case = rips_instance.project.load_case(path=case_path)
assert case.id == 0
    assert case.address() != 0
assert case.__class__.__name__ == "EclipseCase"
@pytest.mark.skipif(
sys.platform.startswith("linux"),
reason="Brugge is currently exceptionally slow on Linux",
)
def test_brugge_0010(rips_instance, initialize_test):
case_path = dataroot.PATH + "/Case_with_10_timesteps/Real10/BRUGGE_0010.EGRID"
case = rips_instance.project.load_case(path=case_path)
assert len(case.grids()) == 1
cellCountInfo = case.cell_count()
assert cellCountInfo.active_cell_count == 43374
assert cellCountInfo.reservoir_cell_count == 60048
time_steps = case.time_steps()
assert len(time_steps) == 11
days_since_start = case.days_since_start()
assert len(days_since_start) == 11
@pytest.mark.skipif(
sys.platform.startswith("linux"),
reason="Brugge is currently exceptionally slow on Linux",
)
def test_replaceCase(rips_instance, initialize_test):
project = rips_instance.project.open(
dataroot.PATH + "/TEST10K_FLT_LGR_NNC/10KWithWellLog.rsp"
)
case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
case = project.case(case_id=0)
assert case is not None
assert case.name == "TEST10K_FLT_LGR_NNC"
assert case.id == 0
cases = rips_instance.project.cases()
    assert len(cases) == 1
case.replace(new_grid_file=case_path)
# Check that the case object has been changed
assert case.name == "BRUGGE_0000"
assert case.id == 0
cases = rips_instance.project.cases()
    assert len(cases) == 1
# Check that retrieving the case object again will yield the changed object
case = project.case(case_id=0)
assert case.name == "BRUGGE_0000"
assert case.id == 0
def test_loadNonExistingCase(rips_instance, initialize_test):
case_path = "Nonsense/Nonsense/Nonsense"
with pytest.raises(grpc.RpcError):
assert rips_instance.project.load_case(case_path)
@pytest.mark.skipif(
sys.platform.startswith("linux"),
reason="Brugge is currently exceptionally slow on Linux",
)
def test_exportFlowCharacteristics(rips_instance, initialize_test):
case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
case = rips_instance.project.load_case(case_path)
with tempfile.TemporaryDirectory(prefix="rips") as tmpdirname:
print("Temporary folder: ", tmpdirname)
file_name = tmpdirname + "/exportFlowChar.txt"
case.export_flow_characteristics(
time_steps=8, producers=[], injectors="I01", file_name=file_name
)
def test_selected_cells(rips_instance, initialize_test):
case = rips_instance.project.load_case(
dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
)
assert case.name == "TEST10K_FLT_LGR_NNC"
selected_cells = case.selected_cells()
assert len(selected_cells) == 0
time_step_info = case.time_steps()
for (tidx, timestep) in enumerate(time_step_info):
        # Try to read SOIL for the time step (will be empty since nothing is selected)
soil_results = case.selected_cell_property("DYNAMIC_NATIVE", "SOIL", tidx)
assert len(soil_results) == 0
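
# Illustrative sketch: outside the pytest harness the same calls run against a
# live ResInsight instance (the grid file path below is made up):
#
#     instance = rips.Instance.find()
#     case = instance.project.load_case('/path/to/CASE.EGRID')
#     print(case.cell_count().active_cell_count)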
| 37.690583 | 87 | 0.723974 | 1,241 | 8,405 | 4.625302 | 0.205479 | 0.058537 | 0.047561 | 0.058537 | 0.529617 | 0.516376 | 0.447038 | 0.405226 | 0.366725 | 0.319512 | 0 | 0.057278 | 0.167043 | 8,405 | 222 | 88 | 37.86036 | 0.762605 | 0.04188 | 0 | 0.287356 | 0 | 0 | 0.112865 | 0.071473 | 0 | 0 | 0 | 0 | 0.304598 | 1 | 0.08046 | false | 0 | 0.045977 | 0 | 0.137931 | 0.005747 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e903b0c067785052b3d3529823e93eb770c9d80 | 2,959 | py | Python | tests/test_to_human.py | AleCandido/human_dates | 56bb10587b69e84b27a27117b2ecb3b1df09a028 | [
"MIT"
] | 1 | 2020-05-11T12:47:23.000Z | 2020-05-11T12:47:23.000Z | tests/test_to_human.py | AleCandido/human_dates | 56bb10587b69e84b27a27117b2ecb3b1df09a028 | [
"MIT"
] | 9 | 2020-04-30T13:43:30.000Z | 2020-10-19T15:32:54.000Z | tests/test_to_human.py | AleCandido/human_dates | 56bb10587b69e84b27a27117b2ecb3b1df09a028 | [
"MIT"
] | null | null | null | import datetime as dt
import pytest
import human_dates
class TestTimeAgoInWords:
"""
test time_ago_in_words function
"""
@pytest.fixture(autouse=True)
def _import_templates(self, templates):
"""
import templates from conftest local plugin
"""
self.templates = templates
@pytest.fixture(autouse=True)
def _run_time_ago_comparison(self):
"""
        runs automatically after each test method has set up its data,
        performing the actual comparison
"""
yield
for date, expected in zip(self.dates, self.expected):
result = human_dates.time_ago_in_words(dt.datetime.now() + date)
assert expected == result
def test_time_years(self):
self.dates = [-dt.timedelta(days=366 * 4), dt.timedelta(days=366 * 4)]
self.expected = [
self.templates.past % "4 years",
self.templates.future % "4 years",
]
def test_time_months(self):
self.dates = [-dt.timedelta(days=31 * 3), dt.timedelta(days=31 * 3)]
self.expected = [
self.templates.past % "3 months",
self.templates.future % "3 months",
]
def test_time_weeks(self):
self.dates = [-dt.timedelta(days=7 * 3 + 1), dt.timedelta(days=7 * 3 + 1)]
self.expected = [
self.templates.past % "3 weeks",
self.templates.future % "3 weeks",
]
def test_time_days(self):
self.dates = [-dt.timedelta(days=5.1), dt.timedelta(days=5.1)]
self.expected = [
self.templates.past % "5 days",
self.templates.future % "5 days",
]
def test_time_one_day(self):
self.dates = [-dt.timedelta(hours=24.1), dt.timedelta(hours=24.5)]
self.expected = ["yesterday", "tomorrow"]
def test_time_hours(self):
self.dates = [
-dt.timedelta(hours=17.1),
dt.timedelta(hours=5.1),
-dt.timedelta(minutes=75),
]
self.expected = [
self.templates.past % "17 hours",
self.templates.future % "5 hours",
self.templates.past % "an hour",
]
def test_time_minutes(self):
self.dates = [
-dt.timedelta(minutes=41.3),
dt.timedelta(minutes=26.3),
dt.timedelta(seconds=67),
]
self.expected = [
self.templates.past % "41 minutes",
self.templates.future % "26 minutes",
self.templates.future % "a minute",
]
def test_time_seconds(self):
self.dates = [-dt.timedelta(seconds=19.3), dt.timedelta(seconds=45.8)]
self.expected = [
self.templates.past % "19 seconds",
self.templates.future % "45 seconds",
]
def test_time_now(self):
self.dates = [-dt.timedelta(seconds=3.7), dt.timedelta(seconds=8.1)]
self.expected = ["just now"] * 2
| 30.505155 | 82 | 0.559649 | 348 | 2,959 | 4.663793 | 0.229885 | 0.135551 | 0.060998 | 0.083179 | 0.36907 | 0.211337 | 0 | 0 | 0 | 0 | 0 | 0.036783 | 0.310916 | 2,959 | 96 | 83 | 30.822917 | 0.759196 | 0.053734 | 0 | 0.15493 | 0 | 0 | 0.055637 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 1 | 0.15493 | false | 0 | 0.056338 | 0 | 0.225352 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ce8437b0fec0991f67a078f2016c53fe445b831 | 24,331 | py | Python | security_monkey/auditor.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | security_monkey/auditor.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | security_monkey/auditor.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditor
:platform: Unix
:synopsis: This class is subclassed to add audit rules.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com>
"""
import datastore
from security_monkey import app, db
from security_monkey.watcher import ChangeItem
from security_monkey.common.jinja import get_jinja_env
from security_monkey.datastore import User, AuditorSettings, Item, ItemAudit, Technology, Account, ItemAuditScore, AccountPatternAuditScore
from security_monkey.common.utils import send_email
from security_monkey.account_manager import get_account_by_name
from security_monkey.alerters.custom_alerter import report_auditor_changes
from sqlalchemy import and_
from collections import defaultdict
auditor_registry = defaultdict(list)
class AuditorType(type):
def __init__(cls, name, bases, attrs):
super(AuditorType, cls).__init__(name, bases, attrs)
if cls.__name__ != 'Auditor' and cls.index:
# Only want to register auditors explicitly loaded by find_modules
if not '.' in cls.__module__:
found = False
for auditor in auditor_registry[cls.index]:
if auditor.__module__ == cls.__module__ and auditor.__name__ == cls.__name__:
found = True
break
if not found:
app.logger.debug("Registering auditor {} {}.{}".format(cls.index, cls.__module__, cls.__name__))
auditor_registry[cls.index].append(cls)
class Auditor(object):
"""
This class (and subclasses really) run a number of rules against the configurations
and look for any violations. These violations are saved with the object and a report
is made available via the Web UI and through email.
"""
index = None # Should be overridden
i_am_singular = None # Should be overridden
i_am_plural = None # Should be overridden
__metaclass__ = AuditorType
support_auditor_indexes = []
support_watcher_indexes = []
def __init__(self, accounts=None, debug=False):
self.datastore = datastore.Datastore()
self.accounts = accounts
self.debug = debug
self.items = []
self.team_emails = app.config.get('SECURITY_TEAM_EMAIL', [])
self.emails = []
self.current_support_items = {}
self.override_scores = None
self.current_method_name = None
if type(self.team_emails) in (str, unicode):
self.emails.append(self.team_emails)
elif type(self.team_emails) in (list, tuple):
self.emails.extend(self.team_emails)
else:
app.logger.info("Auditor: SECURITY_TEAM_EMAIL contains an invalid type")
for account in self.accounts:
users = User.query.filter(User.daily_audit_email==True).filter(User.accounts.any(name=account)).all()
self.emails.extend([user.email for user in users])
def add_issue(self, score, issue, item, notes=None):
"""
Adds a new issue to an item, if not already reported.
:return: The new issue
"""
if notes and len(notes) > 1024:
notes = notes[0:1024]
if not self.override_scores:
query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index)
self.override_scores = query.all()
# Check for override scores to apply
score = self._check_for_override_score(score, item.account)
for existing_issue in item.audit_issues:
if existing_issue.issue == issue:
if existing_issue.notes == notes:
if existing_issue.score == score:
app.logger.debug(
"Not adding issue because it was already found:{}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
return existing_issue
app.logger.debug("Adding issue: {}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
new_issue = datastore.ItemAudit(score=score,
issue=issue,
notes=notes,
justified=False,
justified_user_id=None,
justified_date=None,
justification=None)
item.audit_issues.append(new_issue)
return new_issue
def prep_for_audit(self):
"""
To be overridden by child classes who
need a way to prepare for the next run.
"""
pass
def audit_these_objects(self, items):
"""
Only inspect the given items.
"""
app.logger.debug("Asked to audit {} Objects".format(len(items)))
self.prep_for_audit()
self.current_support_items = {}
query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index)
self.override_scores = query.all()
methods = [getattr(self, method_name) for method_name in dir(self) if method_name.find("check_") == 0]
app.logger.debug("methods: {}".format(methods))
for item in items:
for method in methods:
self.current_method_name = method.func_name
# If the check function is disabled by an entry on Settings/Audit Issue Scores
# the function will not be run and any previous issues will be cleared
if not self._is_current_method_disabled():
method(item)
self.items = items
self.override_scores = None
def _is_current_method_disabled(self):
"""
Determines whether this method has been marked as disabled based on Audit Issue Scores
settings.
"""
for override_score in self.override_scores:
if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')':
return override_score.disabled
return False
def audit_all_objects(self):
"""
Read all items from the database and inspect them all.
"""
self.items = self.read_previous_items()
self.audit_these_objects(self.items)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
arn=item.arn,
new_config=item_revision.config)
new_item.audit_issues = []
new_item.db_item = item
prev_list.append(new_item)
return prev_list
def read_previous_items_for_account(self, index, account):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
prev = self.datastore.get_all_ctype_filtered(tech=index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
arn=item.arn,
new_config=item_revision.config)
new_item.audit_issues = []
new_item.db_item = item
prev_list.append(new_item)
return prev_list
def save_issues(self):
"""
Save all new issues. Delete all fixed issues.
"""
app.logger.debug("\n\nSaving Issues.")
        # Workaround for an issue where previous gets may cause the commit to fail
db.session.rollback()
for item in self.items:
changes = False
loaded = False
if not hasattr(item, 'db_item'):
loaded = True
item.db_item = self.datastore._get_item(item.index, item.region, item.account, item.name)
existing_issues = list(item.db_item.issues)
new_issues = item.audit_issues
for issue in item.db_item.issues:
if not issue.auditor_setting:
self._set_auditor_setting_for_issue(issue)
# Add new issues
old_scored = ["{} -- {} -- {} -- {} -- {}".format(
old_issue.auditor_setting.auditor_class,
old_issue.issue,
old_issue.notes,
old_issue.score,
self._item_list_string(old_issue)) for old_issue in existing_issues]
for new_issue in new_issues:
nk = "{} -- {} -- {} -- {} -- {}".format(self.__class__.__name__,
new_issue.issue,
new_issue.notes,
new_issue.score,
self._item_list_string(new_issue))
if nk not in old_scored:
changes = True
app.logger.debug("Saving NEW issue {}".format(nk))
item.found_new_issue = True
item.confirmed_new_issues.append(new_issue)
item.db_item.issues.append(new_issue)
else:
for issue in existing_issues:
if issue.issue == new_issue.issue and issue.notes == new_issue.notes and issue.score == new_issue.score:
item.confirmed_existing_issues.append(issue)
break
key = "{}/{}/{}/{}".format(item.index, item.region, item.account, item.name)
app.logger.debug("Issue was previously found. Not overwriting.\n\t{}\n\t{}".format(key, nk))
# Delete old issues
new_scored = ["{} -- {} -- {} -- {}".format(new_issue.issue,
new_issue.notes,
new_issue.score,
self._item_list_string(new_issue)) for new_issue in new_issues]
for old_issue in existing_issues:
ok = "{} -- {} -- {} -- {}".format(old_issue.issue,
old_issue.notes,
old_issue.score,
self._item_list_string(old_issue))
old_issue_class = old_issue.auditor_setting.auditor_class
if old_issue_class is None or (old_issue_class == self.__class__.__name__ and ok not in new_scored):
changes = True
app.logger.debug("Deleting FIXED or REPLACED issue {}".format(ok))
item.confirmed_fixed_issues.append(old_issue)
item.db_item.issues.remove(old_issue)
if changes:
db.session.add(item.db_item)
else:
if loaded:
db.session.expunge(item.db_item)
db.session.commit()
self._create_auditor_settings()
report_auditor_changes(self)
def email_report(self, report):
"""
Given a report, send an email using SES.
"""
if not report:
app.logger.info("No Audit issues. Not sending audit email.")
return
subject = "Security Monkey {} Auditor Report".format(self.i_am_singular)
send_email(subject=subject, recipients=self.emails, html=report)
def create_report(self):
"""
Using a Jinja template (jinja_audit_email.html), create a report that can be emailed.
:return: HTML - The output of the rendered template.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_audit_email.html')
# This template expects a list of items that have been sorted by total score in
# descending order.
for item in self.items:
item.totalscore = 0
for issue in item.db_item.issues:
item.totalscore = item.totalscore + issue.score
sorted_list = sorted(self.items, key=lambda item: item.totalscore)
sorted_list.reverse()
report_list = []
for item in sorted_list:
if item.totalscore > 0:
report_list.append(item)
else:
break
if len(report_list) > 0:
return template.render({'items': report_list})
else:
return False
def applies_to_account(self, account):
"""
Placeholder for custom auditors which may only want to run against
certain types of accounts
"""
return True
def _create_auditor_settings(self):
"""
Checks to see if an AuditorSettings entry exists for each issue.
If it does not, one will be created with disabled set to false.
"""
app.logger.debug("Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index))
query = ItemAudit.query
query = query.join((Item, Item.id == ItemAudit.item_id))
query = query.join((Technology, Technology.id == Item.tech_id))
query = query.filter(Technology.name == self.index)
issues = query.filter(ItemAudit.auditor_setting_id == None).all()
for issue in issues:
self._set_auditor_setting_for_issue(issue)
db.session.commit()
app.logger.debug("Done Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index))
def _set_auditor_setting_for_issue(self, issue):
auditor_setting = AuditorSettings.query.filter(
and_(
AuditorSettings.tech_id == issue.item.tech_id,
AuditorSettings.account_id == issue.item.account_id,
AuditorSettings.issue_text == issue.issue,
AuditorSettings.auditor_class == self.__class__.__name__
)
).first()
if auditor_setting:
auditor_setting.issues.append(issue)
db.session.add(auditor_setting)
return auditor_setting
auditor_setting = AuditorSettings(
tech_id=issue.item.tech_id,
account_id=issue.item.account_id,
disabled=False,
issue_text=issue.issue,
auditor_class=self.__class__.__name__
)
auditor_setting.issues.append(issue)
db.session.add(auditor_setting)
db.session.commit()
db.session.refresh(auditor_setting)
app.logger.debug("Created AuditorSetting: {} - {} - {}".format(
issue.issue,
self.index,
issue.item.account.name))
return auditor_setting
def _check_cross_account(self, src_account_number, dest_item, location):
account = Account.query.filter(Account.identifier == src_account_number).first()
account_name = None
if account is not None:
account_name = account.name
src = account_name or src_account_number
dst = dest_item.account
if src == dst:
return None
notes = "SRC [{}] DST [{}]. Location: {}".format(src, dst, location)
if not account_name:
tag = "Unknown Cross Account Access"
self.add_issue(10, tag, dest_item, notes=notes)
elif account_name != dest_item.account and not account.third_party:
tag = "Friendly Cross Account Access"
self.add_issue(0, tag, dest_item, notes=notes)
elif account_name != dest_item.account and account.third_party:
tag = "Friendly Third Party Cross Account Access"
self.add_issue(0, tag, dest_item, notes=notes)
def _check_cross_account_root(self, source_item, dest_arn, actions):
if not actions:
return None
account = Account.query.filter(Account.name == source_item.account).first()
source_item_account_number = account.identifier
if source_item_account_number == dest_arn.account_number:
return None
tag = "Cross-Account Root IAM"
notes = "ALL IAM Roles/users/groups in account {} can perform the following actions:\n"\
.format(dest_arn.account_number)
notes += "{}".format(actions)
self.add_issue(6, tag, source_item, notes=notes)
def get_auditor_support_items(self, auditor_index, account):
for index in self.support_auditor_indexes:
if index == auditor_index:
audited_items = self.current_support_items.get(account + auditor_index)
if audited_items is None:
audited_items = self.read_previous_items_for_account(auditor_index, account)
if not audited_items:
app.logger.info("{} Could not load audited items for {}/{}".format(self.index, auditor_index, account))
self.current_support_items[account+auditor_index] = []
else:
self.current_support_items[account+auditor_index] = audited_items
return audited_items
raise Exception("Auditor {} is not configured as an audit support auditor for {}".format(auditor_index, self.index))
def get_watcher_support_items(self, watcher_index, account):
for index in self.support_watcher_indexes:
if index == watcher_index:
items = self.current_support_items.get(account + watcher_index)
if items is None:
items = self.read_previous_items_for_account(watcher_index, account)
# Only the item contents should be used for watcher support
# config. This prevents potentially stale issues from being
# used by the auditor
for item in items:
item.db_item.issues = []
if not items:
app.logger.info("{} Could not load support items for {}/{}".format(self.index, watcher_index, account))
self.current_support_items[account+watcher_index] = []
else:
self.current_support_items[account+watcher_index] = items
return items
raise Exception("Watcher {} is not configured as a data support watcher for {}".format(watcher_index, self.index))
def link_to_support_item_issues(self, item, sub_item, sub_issue_message=None, issue_message=None, issue=None, score=None):
"""
Creates a new issue that is linked to an issue in a support auditor
"""
matching_issues = []
for sub_issue in sub_item.issues:
if not sub_issue_message or sub_issue.issue == sub_issue_message:
matching_issues.append(sub_issue)
if len(matching_issues) > 0:
for matching_issue in matching_issues:
if issue is None:
if issue_message is None:
if sub_issue_message is not None:
issue_message = sub_issue_message
else:
issue_message = "UNDEFINED"
if score is not None:
issue = self.add_issue(score, issue_message, item)
else:
issue = self.add_issue(matching_issue.score, issue_message, item)
else:
if score is not None:
issue.score = score
else:
issue.score = issue.score + matching_issue.score
issue.sub_items.append(sub_item)
return issue
def link_to_support_item(self, score, issue_message, item, sub_item, issue=None):
"""
Creates a new issue that is linked a support watcher item
"""
if issue is None:
issue = self.add_issue(score, issue_message, item)
issue.sub_items.append(sub_item)
return issue
def _item_list_string(self, issue):
"""
Use by save_issue to generate a unique id for an item
"""
item_ids = []
for sub_item in issue.sub_items:
item_ids.append(sub_item.id)
item_ids.sort()
return str(item_ids)
def _check_for_override_score(self, score, account):
"""
Return an override to the hard coded score for an issue being added. This could either
be a general override score for this check method or one that is specific to a particular
field in the account.
:param score: the hard coded score which will be returned back if there is
no applicable override
:param account: The account name, used to look up the value of any pattern
based overrides
:return:
"""
for override_score in self.override_scores:
            # Look for an override entry that applies to this check method
if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')':
# Check for account pattern override where a field in the account matches
# one configured in Settings/Audit Issue Scores
account = get_account_by_name(account)
for account_pattern_score in override_score.account_pattern_scores:
if getattr(account, account_pattern_score.account_field, None):
# Standard account field, such as identifier or notes
account_pattern_value = getattr(account, account_pattern_score.account_field)
else:
# If there is no attribute, this is an account custom field
account_pattern_value = account.getCustom(account_pattern_score.account_field)
if account_pattern_value is not None:
# Override the score based on the matching pattern
if account_pattern_value == account_pattern_score.account_pattern:
app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, account_pattern_score.score))
score = account_pattern_score.score
break
else:
# No specific override pattern found; use the generic override score
app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, override_score.score))
score = override_score.score
return score
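# Illustrative sketch (not part of the original class): override entries are
# keyed on "method_name (ClassName)", so an override configured for a check
# method named check_policy on an auditor class named IAMAuditor (hypothetical
# names) would have to be stored under the key:
#
#     "check_policy (IAMAuditor)"
#
# which is exactly what self.current_method_name + ' (' + self.__class__.__name__ + ')'
# produces in the comparison above.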
| 43.063717 | 211 | 0.586371 | 2,781 | 24,331 | 4.910464 | 0.147429 | 0.01406 | 0.014353 | 0.013474 | 0.332088 | 0.277827 | 0.244508 | 0.183875 | 0.170548 | 0.155316 | 0 | 0.001662 | 0.332498 | 24,331 | 564 | 212 | 43.140071 | 0.839172 | 0.15433 | 0 | 0.276139 | 0 | 0 | 0.065035 | 0.003447 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061662 | false | 0.002681 | 0.02681 | 0 | 0.16622 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ce9c84ffdd6672839d34427b02aa2894b9eec7a | 16,473 | py | Python | rst2reveal/Parser.py | rartino/rst2reveal | c31a0939275f26219aaa19ce4e55c3c08491aac8 | [
"MIT"
] | null | null | null | rst2reveal/Parser.py | rartino/rst2reveal | c31a0939275f26219aaa19ce4e55c3c08491aac8 | [
"MIT"
] | null | null | null | rst2reveal/Parser.py | rartino/rst2reveal | c31a0939275f26219aaa19ce4e55c3c08491aac8 | [
"MIT"
] | null | null | null | try:
import locale
locale.setlocale(locale.LC_ALL, '')
except Exception:  # locale configuration is optional; ignore failures
pass
import os, sys, codecs
import docutils.core
from .RevealTranslator import RST2RevealTranslator, RST2RevealWriter
# Import custom directives
from .TwoColumnsDirective import *
from .PygmentsDirective import *
from .VideoDirective import *
from .PlotDirective import *
from .SmallRole import *
from .VspaceRole import *
from .ClassDirective import *
from .ClearDirective import *
from .TemplateDirective import *
class Parser:
"""Class converting a stand-alone reST file into a Reveal.js-powered HTML5 file, using the provided options."""
def __init__(self, input_file, output_file='', theme='default', transition = 'default', stylesheet='',
mathjax_path='', pygments_style='', vertical_center=False,
horizontal_center=False, title_center=False, footer=False, page_number=False,
controls=False, firstslide_template='', footer_template='', init_html=False, reveal_root='reveal'):
""" Constructor of the Parser class.
``create_slides()`` must then be called to actually produce the presentation.
Arguments:
* input_file : name of the reST file to be processed (obligatory).
* output_file: name of the HTML file to be generated (default: same as input_file, but with a .html extension).
* theme: the name of the theme to be used ({**default**, beige, night}).
* transition: the transition between slides ({**default**, cube, page, concave, zoom, linear, fade, none}).
* stylesheet: a custom CSS file which extends or replaces the used theme.
* mathjax_path: URL or path to the MathJax library (default: http://cdn.mathjax.org/mathjax/latest/MathJax.js).
* pygments_style: the style to be used for syntax color-highlighting using Pygments. The list depends on your Pygments version, type::
from pygments.styles import STYLE_MAP
print(STYLE_MAP.keys())
* vertical_center: boolean stating if the slide content should be vertically centered (default: False).
* horizontal_center: boolean stating if the slide content should be horizontally centered (default: False).
* title_center: boolean stating if the title of each slide should be horizontally centered (default: False).
* footer: boolean stating if the footer line should be displayed (default: False).
* page_number: boolean stating if the slide number should be displayed (default: False).
* controls: boolean stating if the control arrows should be displayed (default: False).
* firstslide_template: template string defining how the first slide will be rendered in HTML.
* footer_template: template string defining how the footer will be rendered in HTML.
The ``firstslide_template`` and ``footer_template`` can use the following substitution variables:
* %(title)s : will be replaced by the title of the presentation.
* %(subtitle)s : subtitle of the presentation (either a level-2 header or the :subtitle: field, if any).
* %(author)s : :author: field (if any).
* %(institution)s : :institution: field (if any).
* %(email)s : :email: field (if any).
* %(date)s : :date: field (if any).
* %(is_author)s : the '.' character if the :author: field is defined, '' otherwise.
* %(is_subtitle)s : the '-' character if the subtitle is defined, '' otherwise.
* %(is_institution)s : the '-' character if the :institution: field is defined, '' otherwise.
You can also use your own fields in the templates.
"""
# Input/Output files
self.input_file = input_file
self.output_file = output_file
# Style
self.theme = theme
self.stylesheet = stylesheet
self.transition = transition
self.vertical_center=vertical_center
self.horizontal_center = horizontal_center
self.title_center = title_center
self.write_footer=footer
self.page_number=page_number
self.controls=controls
# MathJax
if mathjax_path =='':
self.mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js'
else:
self.mathjax_path = mathjax_path
# Pygments
self.pygments_style = pygments_style
# Template for the first slide
self.firstslide_template = firstslide_template
# Template for the footer
self.footer_template = footer_template
# Initialization HTML for reveal.js
self.init_html = init_html
# Root path to reveal.js
self.reveal_root = reveal_root
def create_slides(self):
"""Creates the HTML5 presentation based on the arguments given to the constructor."""
# Copy the reveal library in the current directory
self._copy_reveal()
# Create the writer and retrieve the parts
self.html_writer = RST2RevealWriter()
self.html_writer.translator_class = RST2RevealTranslator
with codecs.open(self.input_file, 'r', 'utf8') as infile:
self.parts = docutils.core.publish_parts(source=infile.read(), writer=self.html_writer)
# Produce the html file
self._produce_output()
def _copy_reveal(self):
curr_dir = os.path.dirname(os.path.realpath(self.output_file))
cwd = os.getcwd()
# Copy the reveal subfolder
#if not os.path.isdir(curr_dir+'/reveal'):
# sources_dir = os.path.abspath(os.path.dirname(__file__)+'/reveal')
# import shutil
# shutil.copytree(sources_dir, curr_dir+'/reveal')
# Copy the rst2reveal.css
if not os.path.exists(curr_dir+'/rst2reveal.css'):
source_file = os.path.abspath(os.path.dirname(__file__)+'/reveal/css/rst2reveal.css')
import shutil
shutil.copyfile(source_file, curr_dir+'/rst2reveal.css')
# Generate the Pygments CSS file
self.is_pygments = False
if not self.pygments_style == '':
# Check if Pygments is installed
try:
import pygments
self.is_pygments = True
except ImportError:
print('Warning: Pygments is not installed, the code will not be highlighted.')
print('You should install it with `pip install pygments`')
return
os.chdir(curr_dir)
import shutil
os.system("pygmentize -S "+self.pygments_style+" -f html -O bg=light > pygments.css")
# Fix the bug where the literal color goes to math blocks...
with codecs.open('pygments.css', 'r', 'utf8') as infile:
with codecs.open('pygments.css.tmp', 'w', 'utf8') as outfile:
for aline in infile:
outfile.write('.highlight '+aline)
shutil.move('pygments.css.tmp', 'pygments.css')
os.chdir(cwd)
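# Alternative sketch (assumption, not part of the original module): the same
# stylesheet can be generated in-process with Pygments' public API instead of
# shelling out through os.system:
#
#     from pygments.formatters import HtmlFormatter
#     css = HtmlFormatter(style=self.pygments_style).get_style_defs('.highlight')
#
# get_style_defs('.highlight') already prefixes every rule, which would also
# make the ".highlight " rewrite above unnecessary.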
def _produce_output(self):
self.title = self.parts['title']
self._analyse_metainfo()
header = self._generate_header()
body = self._generate_body()
footer = self._generate_footer()
document_content = header + body + footer
with codecs.open(self.output_file, 'w', 'utf8') as wfile:
wfile.write(document_content)
def _generate_body(self):
body = """
<body>
<div class="static-content"></div>
<div class="reveal">
<div class="slides">
%(titleslide)s
%(body)s
</div>
</div>
""" % {'body': self.parts['body'],
'titleslide' : self.titleslide}
return body
def _analyse_metainfo(self):
def clean(text):
import re
if len(re.findall(r'<paragraph>', text)) > 0:
text = re.findall(r'<paragraph>(.+)</paragraph>', text)[0]
if len(re.findall(r'<author>', text)) > 0:
text = re.findall(r'<author>(.+)</author>', text)[0]
if len(re.findall(r'<date>', text)) > 0:
text = re.findall(r'<date>(.+)</date>', text)[0]
if len(re.findall(r'<reference', text)) > 0:
text = re.findall(r'<reference refuri="mailto:(.+)">', text)[0]
return text
self.meta_info ={'author': ''}
texts=self.parts['metadata'].split('\n')
for t in texts:
if not t == '':
name=t.split('=')[0]
content=t.replace(name+'=', '')
content=clean(content)
self.meta_info[name]= content
self._generate_titleslide()
def _generate_titleslide(self):
if self.parts['title'] != '': # A title has been given
self.meta_info['title'] = self.parts['title']
elif not 'title' in self.meta_info.keys():
self.meta_info['title'] = ''
if self.parts['subtitle'] != '': # defined with a underlined text instead of :subtitle:
self.meta_info['subtitle'] = self.parts['subtitle']
elif not 'subtitle' in self.meta_info.keys():
self.meta_info['subtitle'] = ''
if not 'email' in self.meta_info.keys():
self.meta_info['email'] = ''
if not 'institution' in self.meta_info.keys():
self.meta_info['institution'] = ''
if not 'date' in self.meta_info.keys():
self.meta_info['date'] = ''
# Separators
self.meta_info['is_institution'] = '-' if self.meta_info['institution'] != '' else ''
self.meta_info['is_author'] = '.' if self.meta_info['author'] != '' else ''
self.meta_info['is_subtitle'] = '.' if self.meta_info['subtitle'] != '' else ''
if self.firstslide_template == "":
self.firstslide_template = """
<section class="titleslide">
<h1>%(title)s</h1>
<h3>%(subtitle)s</h3>
<br>
<p><a href="mailto:%(email)s">%(author)s</a> %(is_institution)s %(institution)s</p>
<p><small>%(email)s</small></p>
<p>%(date)s</p>
</section>
"""
self.titleslide=self.firstslide_template % self.meta_info
if self.footer_template=="":
self.footer_template = """<b>%(title)s %(is_subtitle)s %(subtitle)s.</b> %(author)s%(is_institution)s %(institution)s. %(date)s"""
if self.write_footer:
self.footer_html = """<footer id=\"footer\">""" + self.footer_template % self.meta_info + """<b id=\"slide_number\" style=\"padding: 1em;\"></b></footer>"""
elif self.page_number:
self.footer_html = """<footer><b id=\"slide_number\"></b></footer>"""
else:
self.footer_html = ""
def _generate_header(self):
header="""<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>%(title)s</title>
<meta name="description" content="%(title)s">
%(meta)s
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, user-scalable=no">
<link rel="stylesheet" href="%(reveal_root)s/css/reveal.css">
%(pygments)s
<link rel="stylesheet" href="rst2reveal.css">
<!--link rel="stylesheet" href="%(reveal_root)s/css/theme/default.css" id="theme"-->
<link rel="stylesheet" href="%(reveal_root)s/css/theme/%(theme)s.css" id="theme">
<link rel="stylesheet" href="%(reveal_root)s/css/print/pdf.css" type="text/css" media="print">
<script type="text/javascript" src="%(mathjax_path)s?config=TeX-AMS-MML_HTMLorMML"></script>
<!-- Extra styles -->
<style>
.reveal section {
text-align: %(horizontal_center)s;
}
.reveal h2{
text-align: %(title_center)s;
}
</style>
%(custom_stylesheet)s
<!--[if lt IE 9]>
<script src="%(reveal_root)s/lib/js/html5shiv.js"></script>
<![endif]-->
</head>
"""%{'title': self.title,
'meta' : self.parts['meta'],
'theme': self.theme,
'reveal_root' : self.reveal_root,
'pygments': '<link rel="stylesheet" href="pygments.css">' if self.is_pygments else '',
'mathjax_path': self.mathjax_path,
'horizontal_center': 'center' if self.horizontal_center else 'left',
'title_center': 'center' if self.title_center else 'left',
'custom_stylesheet' : '<link rel="stylesheet" href="%s">' % self.stylesheet if self.stylesheet != '' else ''}
return header
def _generate_footer(self):
if self.page_number:
script_page_number = """
<script>
// Fires each time a new slide is activated
Reveal.addEventListener( 'slidechanged', function( event ) {
if(event.indexh > 0) {
if(event.indexv > 0) {
val = event.indexh + ' - ' + event.indexv
document.getElementById('slide_number').innerHTML = val;
}
else{
document.getElementById('slide_number').innerHTML = event.indexh;
}
}
else {
document.getElementById('slide_number').innerHTML = '';
}
} );
</script>"""
else:
script_page_number = ""
if self.init_html:
footer = self.init_html
else:
footer="""
<script src="%(reveal_root)s/lib/js/head.min.js"></script>
<script src="%(reveal_root)s/js/reveal.min.js"></script>
<script>
// Full list of configuration options available here:
// https://github.com/hakimel/reveal.js#configuration
Reveal.initialize({
controls: %(controls)s,
progress: false,
history: true,
overview: true,
keyboard: true,
loop: false,
touch: true,
rtl: false,
center: %(vertical_center)s,
mouseWheel: true,
fragments: true,
rollingLinks: false,
transition: '%(transition)s'
});
</script>"""
footer+="""
%(script_page_number)s
%(footer)s
</body>
</html>"""
footer = footer % {'transition' : self.transition,
'footer' : self.footer_html,
'mathjax_path': self.mathjax_path,
'reveal_root' : self.reveal_root,
'script_page_number' : script_page_number,
'vertical_center' : 'true' if self.vertical_center else 'false',
'controls': 'true' if self.controls else 'false'}
return footer
if __name__ == '__main__':
# Create the object
parser = Parser(input_file='index.rst')
# Create the slides
parser.create_slides()
| 40.375 | 169 | 0.541249 | 1,755 | 16,473 | 4.954416 | 0.216524 | 0.020242 | 0.030362 | 0.016906 | 0.190339 | 0.118689 | 0.077056 | 0.064405 | 0.025877 | 0.010351 | 0 | 0.00368 | 0.340193 | 16,473 | 407 | 170 | 40.474201 | 0.796301 | 0.215868 | 0 | 0.084615 | 0 | 0.030769 | 0.402674 | 0.087863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.003846 | 0.065385 | 0 | 0.126923 | 0.011538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cee8d9dc6ecef33153612d1cf40e03aa8fb60af | 10,656 | py | Python | toqnets/nn/nltl/functional.py | C-SUNSHINE/TOQ-Nets-PyTorch-Release | 05e06bf633fb3c6b610dda9a5126ecd7af1db02f | [
"MIT"
] | 6 | 2021-08-24T21:46:01.000Z | 2022-03-09T14:34:05.000Z | toqnets/nn/nltl/functional.py | vacancy/TOQ-Nets-PyTorch-Release | 53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1 | [
"MIT"
] | null | null | null | toqnets/nn/nltl/functional.py | vacancy/TOQ-Nets-PyTorch-Release | 53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1 | [
"MIT"
] | 2 | 2021-08-23T03:06:20.000Z | 2021-09-30T14:17:14.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : functional.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 04/15/2020
#
# This file is part of TOQ-Nets-PyTorch.
# Distributed under terms of the MIT license.
from typing import List
import jactorch
import torch
from jacinle.utils.enum import JacEnum
__all__ = [
'TemporalPoolingImplementation', 'TemporalPoolingReduction', 'backward_pooling_1d1d',
'temporal_pooling_1d', 'temporal_pooling_2d', 'interval_pooling',
'matrix_from_diags', 'matrix_remove_diag'
]
class TemporalPoolingImplementation(JacEnum):
BROADCAST = 'broadcast'
FORLOOP = 'forloop'
class TemporalPoolingReduction(JacEnum):
MAX = 'max'
MIN = 'min'
SOFTMAX = 'softmax'
SOFTMIN = 'softmin'
def masked_min(input, mask, dim, inf=1e9):
mask = mask.type(input.dtype)
input = input * mask + inf * (1 - mask)
return input.min(dim)[0]
def masked_max(input, mask, dim, inf=1e9):
mask = mask.type(input.dtype)
input = input * mask + inf * (mask - 1)
return input.max(dim)[0]
def backward_pooling_1d1d(input, implementation='forloop', reduction='max'):
"""
:param input: [batch, nr_frames, nr_frames, hidden_dim]
"""
implementation = TemporalPoolingImplementation.from_string(implementation)
nr_frames = input.size(1)
if implementation == TemporalPoolingImplementation.BROADCAST:
indices = torch.arange(nr_frames, device=input.device)
indices_i, indices_j = jactorch.meshgrid(indices, dim=0)
mask = indices_i <= indices_j
mask = jactorch.add_dim_as_except(mask, input, 1, 2)
if reduction == 'max':
return masked_max(input, mask, dim=2)
elif reduction == 'min':
return masked_min(input, mask, dim=2)
else:
raise ValueError()
elif implementation == TemporalPoolingImplementation.FORLOOP:
all_tensors = list()
for i in range(nr_frames):
if reduction == 'max':
all_tensors.append(input[:, i, i:].max(dim=1)[0])
elif reduction == 'min':
all_tensors.append(input[:, i, i:].min(dim=1)[0])
else:
raise ValueError()
return torch.stack(all_tensors, dim=1)
else:
raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation))
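# Usage sketch (shapes assumed): for x of shape [batch, T, T, H], the result
# out[b, i] reduces x[b, i, i:] over the second time axis, giving [batch, T, H]:
#
#     out = backward_pooling_1d1d(torch.randn(2, 8, 8, 16))  # -> [2, 8, 16]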
def temporal_pooling_1d(input, implementation='forloop'):
implementation = TemporalPoolingImplementation.from_string(implementation)
nr_frames = input.size(1)
if implementation is TemporalPoolingImplementation.BROADCAST:
indices = torch.arange(nr_frames, device=input.device)
indices_i, indices_j = jactorch.meshgrid(indices, dim=0)
input = jactorch.add_dim(input, 1, nr_frames)
mask = indices_i <= indices_j
mask = jactorch.add_dim_as_except(mask, input, 1, 2)
return torch.cat((masked_min(input, mask, dim=2), masked_max(input, mask, dim=2)), dim=-1)
elif implementation is TemporalPoolingImplementation.FORLOOP:
all_tensors = list()
for i in range(nr_frames):
all_tensors.append(torch.cat((input[:, i:].min(dim=1)[0], input[:, i:].max(dim=1)[0]), dim=-1))
return torch.stack(all_tensors, dim=1)
else:
raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation))
def temporal_pooling_2d(input, implementation='forloop'):
implementation = TemporalPoolingImplementation.from_string(implementation)
nr_frames = input.size(1)
indices = torch.arange(nr_frames, device=input.device)
if implementation is TemporalPoolingImplementation.BROADCAST:
indices_i, indices_j, indices_k = (
jactorch.add_dim(jactorch.add_dim(indices, 1, nr_frames), 2, nr_frames),
jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 1, nr_frames),
jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 2, nr_frames)
)
input = jactorch.add_dim(input, 0, nr_frames) # input[batch, i, k, j] = input[batch, k, j]
mask = (indices_i <= indices_k) & (indices_k <= indices_j)  # chained comparisons are invalid on tensors
mask = jactorch.add_dim_as_except(mask, input, 1, 2, 3)
return torch.cat((
masked_min(input, mask, dim=2),
masked_max(input, mask, dim=2)
), dim=-1)
elif implementation is TemporalPoolingImplementation.FORLOOP:
all_tensors = list()
for i in range(nr_frames):
mask = indices >= i
mask = jactorch.add_dim_as_except(mask, input, 1)
all_tensors.append(torch.cat((
masked_min(input, mask, dim=1),
masked_max(input, mask, dim=1)
), dim=-1))
return torch.stack(all_tensors, dim=1)
else:
raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation))
def interval_pooling(input, implementation='forloop', reduction='max', beta=None):
"""
Args:
input (torch.Tensor): 3D tensor of [batch_size, nr_frames, hidden_dim]
implementation (Union[TemporalPoolingImplementation, str]): the implementation. Currently only FORLOOP is supported.
reduction (Union[TemporalPoolingReduction, str]): reduction method. One of MAX, MIN, SOFTMAX or SOFTMIN.
beta (float): log-scale temperature for the SOFTMAX/SOFTMIN reductions (the scale is exp(beta)); ignored for MAX/MIN.
Return:
output (torch.Tensor): 4D tensor of [batch_size, nr_frames, nr_frames, hidden_dim], where
```
output[:, i, j, :] = reduction of input[:, k, :] over i <= k <= j
```
the k is cyclic-indexed.
"""
implementation = TemporalPoolingImplementation.from_string(implementation)
reduction = TemporalPoolingReduction.from_string(reduction)
batch_size, nr_frames = input.size()[:2]
if implementation is TemporalPoolingImplementation.FORLOOP:
if reduction is TemporalPoolingReduction.MAX or reduction is TemporalPoolingReduction.MIN:
input_doubled = torch.cat((input, input), dim=1) # repeat the input at dim=1.
output_tensors = list()
output_tensors.append(input)
for length in range(2, nr_frames + 1):
last_tensor = output_tensors[-1]
last_elems = input_doubled[:, length - 1:length - 1 + nr_frames]
if reduction is TemporalPoolingReduction.MAX:
this_tensor = torch.max(last_tensor, last_elems)
elif reduction is TemporalPoolingReduction.MIN:
this_tensor = torch.min(last_tensor, last_elems)
else:
raise ValueError('Wrong value {}.'.format(reduction))
output_tensors.append(this_tensor)
return matrix_from_diags(output_tensors, dim=1, triu=True)
else:
from math import exp
scale = exp(beta)
input_doubled = torch.cat((input, input), dim=1) # repeat the input at dim=1.
output_tensors = list()
if reduction is TemporalPoolingReduction.SOFTMIN:
scale = -scale
else:
assert reduction is TemporalPoolingReduction.SOFTMAX
input_arg = torch.exp(input / scale)
output_tensors.append((input * input_arg, input_arg))
for length in range(2, nr_frames + 1):
last_tensor, last_argsum = output_tensors[-1]
last_elems = input_doubled[:, length - 1:length - 1 + nr_frames]
last_elems_arg = torch.exp(last_elems / scale)
output_tensors.append((
last_tensor + last_elems * last_elems_arg,
last_argsum + last_elems_arg
))
output2 = matrix_from_diags([x[0] / x[1] for x in output_tensors], dim=1, triu=True)
# Test:
# X, Y = torch.meshgrid(torch.arange(length), torch.arange(length))
# upper = (X < Y).float().view(1, length, length, 1).to(output.device)
# print((((output - output2) ** 2) * upper).sum())
# exit()
return output2
else:
raise NotImplementedError('Unknown interval pooling implementation: {}.'.format(implementation))
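# Usage sketch (shapes assumed):
#
#     x = torch.randn(2, 8, 16)                   # [batch, nr_frames, hidden_dim]
#     out = interval_pooling(x, reduction='max')  # [2, 8, 8, 16]
#
# out[:, i, j] is the reduction over frames i..j (cyclic-indexed, per the docstring).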
def matrix_from_diags(diags: List[torch.Tensor], dim: int = 1, triu: bool = False):
"""
Construct an N by N matrix from N diags of the matrix.
Args:
diags (List[torch.Tensor]): N length-N vectors regarding the 1st, 2nd, ... diags of the output matrix.
They can also be same-dimensional tensors, where the matrix will be created at the dim and dim+1 axes.
dim (int): the matrix will be created at dim and dim+1.
triu (bool): use only the upper triangle of the matrix.
Return:
output: torch.Tensor
"""
if dim < 0:
dim += diags[0].dim()
size = diags[0].size()
diags.append(torch.zeros_like(diags[0]))
output = torch.cat(diags, dim=dim) # [..., (f+1)*f, ...]
output = output.reshape(size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:])
output = output.transpose(dim, dim + 1)
output = output.reshape(
size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:]) # use to reshape for auto-contiguous.
if triu:
return output.narrow(dim, 0, size[dim])
output = torch.cat((
output.narrow(dim, 0, 1),
matrix_remove_diag(output.narrow(dim, 1, size[dim]), dim=dim, move_up=True)
), dim=dim)
return output
def matrix_remove_diag(matrix: torch.Tensor, dim: int = 1, move_up: bool = False):
"""
Remove the first diag of the input matrix. The result is an N x (N-1) matrix.
Args:
matrix (torch.Tensor): the input matrix. It can be a tensor where the dim and dim+1 axes form a matrix.
dim (int): the matrix is at dim and dim+1.
move_up (bool): if True, the output matrix will be of shape (N-1) x N.
In the move_left (default, move_up=False) mode, the left triangle will stay in its position and the upper triangle will move 1 element left.
While in the move_up mode, the upper triangle will stay in its position, and the left triangle will move 1 element up.
"""
if dim < 0:
dim += matrix.dim()
if move_up:
matrix = matrix.transpose(dim, dim + 1)
size = matrix.size()
n = size[dim]
matrix = matrix.reshape(size[:dim] + (n * n,) + size[dim + 2:])
matrix = matrix.narrow(dim, 1, n * n - 1)
matrix = matrix.reshape(size[:dim] + (n - 1, n + 1) + size[dim + 2:])
matrix = matrix.narrow(dim + 1, 0, n)
matrix = matrix.reshape(size[:dim] + (n, n - 1) + size[dim + 2:])
if move_up:
matrix = matrix.transpose(dim, dim + 1)
return matrix
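# Example sketch: removing the main diag of a matrix at dim=1 turns an
# N x N block into N x (N-1):
#
#     m = torch.arange(9.).reshape(1, 3, 3)
#     matrix_remove_diag(m, dim=1).shape  # torch.Size([1, 3, 2])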
| 41.142857 | 152 | 0.62866 | 1,351 | 10,656 | 4.831236 | 0.151739 | 0.020224 | 0.025739 | 0.01195 | 0.54175 | 0.448905 | 0.386701 | 0.368316 | 0.341811 | 0.325264 | 0 | 0.017163 | 0.256381 | 10,656 | 258 | 153 | 41.302326 | 0.806537 | 0.201858 | 0 | 0.366279 | 0 | 0 | 0.052291 | 0.008875 | 0 | 0 | 0 | 0 | 0.005814 | 1 | 0.046512 | false | 0 | 0.02907 | 0 | 0.203488 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf060a195dbf7d7e608526fbe61c86808f684c4 | 598 | py | Python | etcdb/execute/dml/use.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 12 | 2016-10-25T18:03:49.000Z | 2019-06-27T13:20:22.000Z | etcdb/execute/dml/use.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 30 | 2016-10-20T23:27:09.000Z | 2018-12-06T17:23:59.000Z | etcdb/execute/dml/use.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 4 | 2016-10-20T23:24:48.000Z | 2022-03-01T09:59:29.000Z | """Implement USE query."""
from pyetcd import EtcdKeyNotFound
from etcdb import OperationalError
def use_database(etcd_client, tree):
"""
Return database name if it exists or raise exception.
:param etcd_client: etcd client
:type etcd_client: pyetcd.client.Client
:param tree: Parsing tree.
:type tree: SQLTree
:return: Database name
:raise OperationalError: if database doesn't exist.
"""
try:
etcd_client.read('/%s' % tree.db)
return tree.db
except EtcdKeyNotFound:
raise OperationalError("Unknown database '%s'" % tree.db)
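# Usage sketch (assumes a connected pyetcd client and a parsed SQLTree whose
# .db attribute names the target database):
#
#     db_name = use_database(etcd_client, tree)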
| 26 | 65 | 0.682274 | 73 | 598 | 5.520548 | 0.479452 | 0.124069 | 0.08933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.22408 | 598 | 22 | 66 | 27.181818 | 0.868534 | 0.449833 | 0 | 0 | 0 | 0 | 0.084507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf0ea4689aa7bc6979159d92505dd9ac4c6f33a | 598 | py | Python | main.py | dminglv/covid19 | 4753f1574c9035c5780c6669e5a9bd3812a4bc10 | [
"MIT"
] | null | null | null | main.py | dminglv/covid19 | 4753f1574c9035c5780c6669e5a9bd3812a4bc10 | [
"MIT"
] | null | null | null | main.py | dminglv/covid19 | 4753f1574c9035c5780c6669e5a9bd3812a4bc10 | [
"MIT"
] | null | null | null | from libs.apis import getCountryInfo, getCountries, getCountriesNames
from libs.charts import visualize
def main():
arr = []
number = 10
# Get top 10 countries
countries = getCountries(number)
countries_names = getCountriesNames(number)
# Pair each country code with its display name
for country, country_name in zip(countries, countries_names):
country_info = getCountryInfo(country)
arr.append({
'country': country_name,
'info': country_info
})
visualize(arr)
if __name__ == "__main__":
main()
| 19.290323 | 69 | 0.623746 | 62 | 598 | 5.790323 | 0.467742 | 0.044568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009368 | 0.285953 | 598 | 30 | 70 | 19.933333 | 0.831382 | 0.033445 | 0 | 0 | 0 | 0 | 0.032986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf285d186a6317622d28fa8ce936054a9456a47 | 4,158 | py | Python | app/server.py | DavidRalph/search-mendeley | 64cb3aa353d4a5571db0fb46a5b46b928af1c6b0 | [
"Apache-2.0"
] | 2 | 2020-05-15T02:06:46.000Z | 2020-05-15T02:14:52.000Z | app/server.py | DavidRalph/search-mendeley | 64cb3aa353d4a5571db0fb46a5b46b928af1c6b0 | [
"Apache-2.0"
] | 1 | 2018-05-16T12:55:14.000Z | 2018-05-18T14:29:14.000Z | app/server.py | DavidRalph/search-mendeley | 64cb3aa353d4a5571db0fb46a5b46b928af1c6b0 | [
"Apache-2.0"
] | 1 | 2020-05-15T02:14:55.000Z | 2020-05-15T02:14:55.000Z | from flask import Flask, redirect, render_template, request, session
import yaml
from mendeley import Mendeley
from mendeley.session import MendeleySession
with open('config.yml') as f:
config = yaml.safe_load(f)  # safe_load avoids arbitrary object construction and needs no explicit Loader
REDIRECT_URI = 'http://localhost:5000/oauth'
app = Flask(__name__)
app.debug = True
app.secret_key = config['clientSecret']
mendeley = Mendeley(config['clientId'], config['clientSecret'], REDIRECT_URI)
@app.route('/')
def login():
# TODO Check for token expiry
# if 'token' in session:
# return redirect('/library')
auth = mendeley.start_authorization_code_flow()
session['state'] = auth.state
return redirect(auth.get_login_url())
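# Sketch for the TODO above (assumption: the stored token dict carries an
# 'expires_at' timestamp, as oauthlib-style tokens usually do):
#
#     import time
#     if 'token' in session and session['token'].get('expires_at', 0) > time.time():
#         return redirect('/library')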
@app.route('/oauth')
def auth_return():
auth = mendeley.start_authorization_code_flow(state=session['state'])
mendeley_session = auth.authenticate(request.url)
session.clear()
session['token'] = mendeley_session.token
return redirect('/library')
@app.route('/library')
def list_documents():
if 'token' not in session:
return redirect('/')
query = request.args.get('query') or ''
titleQuery = request.args.get('titleQuery') or ''
authorQuery = request.args.get('authorQuery') or ''
sourceQuery = request.args.get('sourceQuery') or ''
abstractQuery = request.args.get('abstractQuery') or ''
noteQuery = request.args.get('noteQuery') or ''
advancedSearch = request.args.get('advancedSearch')
mendeley_session = get_session_from_cookies()
docs = []
# Get iterator for user's document library
if advancedSearch and (titleQuery or authorQuery or sourceQuery or abstractQuery):
docsIter = mendeley_session.documents.advanced_search(
title=titleQuery,
author=authorQuery,
source=sourceQuery,
abstract=abstractQuery,
view='client').iter()
elif query:
docsIter = mendeley_session.documents.search(
query, view='client').iter()
else:
docsIter = mendeley_session.documents.iter(view='client')
# Accumulate all the documents
for doc in docsIter:
docs.append(doc)
# Apply filter for annotations
if noteQuery:
nq = noteQuery.lower()
noteDocIDs = set()
# Find the IDs of all documents with at least one matching annotation
for note in mendeley_session.annotations.iter():
if (note.text):
text = note.text.lower()
if (text.find(nq) > -1):
noteDocIDs.add(note.document().id)
# Filter the document list
docs = [doc for doc in docs if doc.id in noteDocIDs]
# Render results
return render_template(
'library.html',
docs=docs,
query=query,
titleQuery=titleQuery,
authorQuery=authorQuery,
sourceQuery=sourceQuery,
abstractQuery=abstractQuery,
noteQuery=noteQuery,
advancedSearch=advancedSearch)
@app.route('/document')
def get_document():
if 'token' not in session:
return redirect('/')
mendeley_session = get_session_from_cookies()
document_id = request.args.get('document_id')
doc = mendeley_session.documents.get(document_id)
return render_template('details.html', doc=doc)
@app.route('/detailsLookup')
def details_lookup():
if 'token' not in session:
return redirect('/')
mendeley_session = get_session_from_cookies()
doi = request.args.get('doi')
doc = mendeley_session.catalog.by_identifier(doi=doi)
return render_template('details.html', doc=doc)
@app.route('/download')
def download():
if 'token' not in session:
return redirect('/')
mendeley_session = get_session_from_cookies()
document_id = request.args.get('document_id')
doc = mendeley_session.documents.get(document_id)
doc_file = doc.files.list().items[0]
return redirect(doc_file.download_url)
@app.route('/logout')
def logout():
session.pop('token', None)
return redirect('/')
def get_session_from_cookies():
return MendeleySession(mendeley, session['token'])
if __name__ == '__main__':
app.run()
| 26.316456 | 86 | 0.664262 | 478 | 4,158 | 5.631799 | 0.267782 | 0.083581 | 0.052006 | 0.042719 | 0.219168 | 0.219168 | 0.177563 | 0.165305 | 0.165305 | 0.131872 | 0 | 0.001847 | 0.218615 | 4,158 | 157 | 87 | 26.484076 | 0.826716 | 0.069264 | 0 | 0.186275 | 0 | 0 | 0.088342 | 0 | 0 | 0 | 0 | 0.006369 | 0 | 1 | 0.078431 | false | 0 | 0.039216 | 0.009804 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf2e08da44d6148a770dc0050be540bbf3f5a61 | 3,025 | py | Python | util/mathUtil.py | herougan/TradeHunter | 1270a1d9807d1f2107db6bc78b98b584431840cc | [
"MIT"
] | null | null | null | util/mathUtil.py | herougan/TradeHunter | 1270a1d9807d1f2107db6bc78b98b584431840cc | [
"MIT"
] | null | null | null | util/mathUtil.py | herougan/TradeHunter | 1270a1d9807d1f2107db6bc78b98b584431840cc | [
"MIT"
] | 1 | 2022-02-09T08:45:05.000Z | 2022-02-09T08:45:05.000Z | from math import floor
import talib
from util.dataRetrievalUtil import try_stdev
from util.langUtil import try_mean, try_int
def quartile_out(quartile, data):
"""Takes out extremities"""
pass
def moving_average(period, data):
avg = []
if len(data) < period:
return avg
for i in range(period - 1, len(data)):
avg.append(try_mean(data[i - period + 1:i + 1]))  # include the current element so each window holds `period` values
return avg
def moving_stddev(period, data):
avg = []
if len(data) < period:
return avg
for i in range(period - 1, len(data)):
avg.append(try_stdev(data[i - period + 1:i + 1]))  # include the current element so each window holds `period` values
return avg
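# Example sketch (with period-sized windows, assuming try_mean behaves like
# statistics.mean):
#
#     moving_average(3, [1, 2, 3, 4, 5])  # -> [2, 3, 4]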
def adjusted_dev(period, data, order=1):
# Bands at +/- (order * stddev) around the data.
# Note: this still assumes talib.STDDEV accepts this data structure.
above, below = data.copy(), data.copy()
stdev_data = talib.STDDEV(data, period)
for i, row in above.iterrows():
above.iloc[i].data += stdev_data.iloc[i].data * order
for u, row in below.iterrows():
below.iloc[u].data -= stdev_data.iloc[u].data * order
return above, below
def index_arr_to_date(date_index, index):
"""Given an index, return date from date_index."""
if index < 0 or index >= len(date_index):
return 0
return date_index.iloc[index]
def date_to_index_arr(index, dates_index, dates):
"""Given an index that corresponds to a date_array, find the relative index of input date."""
try:
_dates = []
for date in dates:
_dates.append(index[list(dates_index).index(date)])
return _dates
except (ValueError, IndexError):
print('Error! Date cannot be found. Continuing with 0.')
return [0 for date in dates]
def is_integer(x):
y = try_int(x)
if y is None or y - x != 0:  # `is None` avoids rejecting 0, assuming try_int returns None on failure
return False
return True
def get_scale_colour(col1, col2, val):
"""Takes in two colours and the val (between 1 and 0) to decide
the colour value in the continuum from col1 to col2.
col1 and col2 must be named colours."""
pass
def to_candlestick(ticker_data, interval: str, inc=False):
"""Convert tick data into candlesticks for the given interval (not yet implemented)."""
pass
def get_scale_grey(val):
hexa = (15 * 16 + 15) * val  # scale val in [0, 1] up to 255
first_digit = hexa//16
second_digit = hexa - first_digit * 16
hexa = F'{to_single_hex(first_digit)}{to_single_hex(second_digit)}'
return F'#{hexa}{hexa}{hexa}'
def get_inverse_single_hex(val):
"""Hex digit of (16 - val % 16); returns None when val % 16 == 0."""
_val = 16 - (try_int(val) % 16)
if _val < 10:
return str(_val)
if _val < 16:
return '0123456789ABCDEF'[_val]
return None
def to_single_hex(val):
"""Map an integer to its single hex digit (0-F)."""
return '0123456789ABCDEF'[try_int(val) % 16]
return None | 23.632813 | 97 | 0.597686 | 454 | 3,025 | 3.823789 | 0.264317 | 0.020737 | 0.020737 | 0.017281 | 0.360599 | 0.360599 | 0.360599 | 0.360599 | 0.331797 | 0.331797 | 0 | 0.040056 | 0.290248 | 3,025 | 128 | 98 | 23.632813 | 0.768514 | 0.106116 | 0 | 0.505376 | 0 | 0 | 0.050448 | 0.0213 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0.032258 | 0.043011 | 0 | 0.473118 | 0.010753 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf5f3ec8ad78eb84a1e2c101567b1b3b4dc3a79 | 5,057 | py | Python | 09-gui/terremoto_antiguo.py | Agc96/matplotlib-examples | bc2db2d14c1822b05f99356ebf538ebcd14f262a | [
"MIT"
] | null | null | null | 09-gui/terremoto_antiguo.py | Agc96/matplotlib-examples | bc2db2d14c1822b05f99356ebf538ebcd14f262a | [
"MIT"
] | null | null | null | 09-gui/terremoto_antiguo.py | Agc96/matplotlib-examples | bc2db2d14c1822b05f99356ebf538ebcd14f262a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 18:38:21 2019
@author: Agutierrez
"""
"""
GUI for the harmonic motion of a building, similar to an earthquake.
"""
import numpy as np
import tkinter as tk
from matplotlib.animation import FuncAnimation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from tkinter.messagebox import showerror
# Initialize the window
window = tk.Tk()
window.title("Movimiento armónico de un edificio")
window.geometry("800x600")
# Initialize the data-entry frame
frame = tk.Frame(window)
frame.pack(side=tk.LEFT)
# Declare the default values
base = 0.75
altura = 5.71
masa = 164200
radio = 5.76
amplitud = 10
periodo = 2
# Helper to build a labeled input field
def generar_dato_entrada(frame, text, index, default=None):
variable = tk.DoubleVar(value=default)
# Configure the label for the field
label = tk.Label(frame, text=text)
label.grid(row=index, column=0, padx=5, pady=5)
# Configure the entry widget for the field
entry = tk.Entry(frame, textvariable=variable, justify="right")
entry.grid(row=index, column=1, padx=5, pady=5)
return variable
# Initialize the input fields
base_var = generar_dato_entrada(frame, "Semi-base (m):", 0, base)
altura_var = generar_dato_entrada(frame, "Semi-altura (m):", 1, altura)
masa_var = generar_dato_entrada(frame, "Masa (kg):", 2, masa)
radio_var = generar_dato_entrada(frame, "Radio (m):", 3, radio)
amplitud_var = generar_dato_entrada(frame, "Amplitud (m):", 4, amplitud)
periodo_var = generar_dato_entrada(frame, "Periodo (s):", 5, periodo)
# The simulation also reads an elastic constant and a viscosity coefficient,
# so they need input fields as well (these default values are assumptions):
elastica = 1.0e6
viscosidad = 1.0e4
elastica_var = generar_dato_entrada(frame, "Const. elástica (N/m):", 6, elastica)
viscosidad_var = generar_dato_entrada(frame, "Coef. viscosidad (kg/s):", 7, viscosidad)
def calcular_posicion(tiempo, masa, amplitud, elastica, viscosidad):
"""
Simulate the position of a damped harmonic motion using the building data.
"""
parte1 = -viscosidad/(2*masa) # Amplitude decay constant
parte2 = np.sqrt(elastica/masa - parte1**2) # Angular frequency
return amplitud * np.exp(parte1*tiempo) * np.cos(parte2*tiempo)
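# Underdamped oscillator sketch: the function above implements
#     x(t) = A * exp(-(b / 2m) * t) * cos(omega * t),  omega = sqrt(k/m - (b/2m)^2)
# where parte1 is -(b / 2m) and parte2 is omega.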
# Build the main plot
principal_fig = Figure(figsize=(5, 2))
principal_ax = principal_fig.gca(xlim=(-100, 100), ylim=(0, 10))
principal_ax.grid(True)
principal_canvas = FigureCanvasTkAgg(principal_fig, master=window)
principal_canvas.draw()
principal_canvas.get_tk_widget().pack(side=tk.RIGHT)  # the root window already uses pack; mixing grid here would raise a TclError
def calcular_aceleracion(tiempo, masa, amplitud, elastica, viscosidad):
"""
Simulate the second derivative of the position (i.e. the acceleration) of
a damped harmonic motion using the building data.
"""
parte1 = -viscosidad/(2*masa) # Amplitude decay constant
parte2 = np.sqrt(elastica/masa - parte1**2) # Angular frequency
parte3 = (parte1**2 - parte2**2)*np.cos(parte2*tiempo)
parte4 = (2*parte1*parte2)*np.sin(parte2*tiempo)
return amplitud * np.exp(parte1*tiempo) * (parte3 - parte4)
def obtener_valor(variable, mensaje_error):
try:
return variable.get()
except Exception as ex:
raise AssertionError(mensaje_error) from ex
def iniciar_simulacion():
try:
base = obtener_valor(base_var, "La semibase no es válida.")
altura = obtener_valor(altura_var, "La semialtura no es válida.")
masa = obtener_valor(masa_var, "La masa no es válida.")
radio = obtener_valor(radio_var, "El radio no es válido.")
amplitud = obtener_valor(amplitud_var, "La amplitud no es válida.")
elastica = obtener_valor(elastica_var, "La const. elástica no es válida.")
viscosidad = obtener_valor(viscosidad_var, "El coef. viscosidad no es válido.")
# Compute the angle between the base and the height
assert altura != 0, "La altura no puede ser 0."
alfa = np.arctan(base/altura)
# Verify that the motion is underdamped
msg = ("Los datos para el movimiento amortiguado no son correctos. "
"Debe cumplirse que b^2 < 4*k*m, donde:\n"
"- b es el coeficiente de viscosidad\n"
"- k es la constante elástica\n"
"- m es la masa del edificio.")
assert viscosidad**2 < 4*elastica*masa, msg
# Plot the results
frames = np.linspace(0, 100, 1001)
posiciones = calcular_posicion(frames, masa, amplitud, elastica,
viscosidad)
principal_ax.plot(frames, posiciones, '-o')
principal_canvas.draw()  # redraw the embedded canvas so the new curve appears
print(posiciones)
except Exception as ex:
showerror("Error", str(ex))
def detener_simulacion():
"""Stop the simulation (not yet implemented)."""
pass
# Initialize the buttons (row 8, below the input fields added above)
btn_start = tk.Button(frame, text="Iniciar", command=iniciar_simulacion)
btn_start.grid(row=8, column=0)
btn_stop = tk.Button(frame, text="Detener", command=detener_simulacion)
btn_stop.grid(row=8, column=1)
"""
# Mostrar los gráficos
frames = np.linspace(0, 100, 1001)
posiciones = calcular_posicion(frames, masa, amplitud, elastica,
viscosidad)
principal_ax.plot(frames, posiciones, '-o')
"""
# Run the Tk main loop
window.mainloop()
| 36.381295 | 87 | 0.693692 | 683 | 5,057 | 5.045388 | 0.313324 | 0.027858 | 0.036564 | 0.046721 | 0.276262 | 0.228671 | 0.193268 | 0.167731 | 0.167731 | 0.167731 | 0 | 0.028304 | 0.196559 | 5,057 | 138 | 88 | 36.644928 | 0.819838 | 0.167886 | 0 | 0.097561 | 0 | 0 | 0.142708 | 0 | 0 | 0 | 0 | 0 | 0.036585 | 1 | 0.073171 | false | 0.012195 | 0.073171 | 0 | 0.195122 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf67afad445851293cf259134cd16fdc9dcfa88 | 1,914 | py | Python | app/tests/test_questions.py | Gichia/questioner-v2 | b93ffdc521e364c191b770bf1bcb93964e7fa1f3 | [
"MIT"
] | null | null | null | app/tests/test_questions.py | Gichia/questioner-v2 | b93ffdc521e364c191b770bf1bcb93964e7fa1f3 | [
"MIT"
] | 6 | 2019-01-22T17:35:28.000Z | 2022-01-13T01:01:48.000Z | app/tests/test_questions.py | Gichia/questioner-v2 | b93ffdc521e364c191b770bf1bcb93964e7fa1f3 | [
"MIT"
] | null | null | null | """File to test all meetup endpoints"""
import os
import psycopg2 as pg2
import json
from app.tests.basetest import BaseTest
data = {
"title": "Test Title",
"body": "body"
}
comment = {
"comment": "Comment 1"
}
class TestQuestions(BaseTest):
""" Class to test all user endpoints """
def test_post_question(self):
"""Method to test post meetup endpoint"""
url = "http://localhost:5000/api/questions/1"
response = self.post(url, data)
result = json.loads(response.data.decode("UTF-8"))
self.assertEqual(result["status"], 201)
self.assertEqual(result["message"], "Succesfully added!")
def test_get_questions(self):
"""Test all meetups questions"""
url = "http://localhost:5000/api/questions/8"
response = self.get_items(url)
result = json.loads(response.data.decode("UTF-8"))
self.assertEqual(result["status"], 200)
def test_meetup_not_found(self):
"""Test correct response for question not found"""
url = "http://localhost:5000/api/questions/0"
response = self.post(url, data)
result = json.loads(response.data.decode("UTF-8"))
self.assertEqual(result["message"], "Meetup not found!")
def test_bad_question_url(self):
"""Test correct response for wrong question url endpoint"""
url = "http://localhost:5000/api/question/0"
response = self.post(url, data)
result = json.loads(response.data.decode("UTF-8"))
self.assertEqual(result["message"], "Resource not found!")
def test_comment_question(self):
"""Method to test comment question endpoint"""
url = "http://localhost:5000/api/comments/1"
response = self.post(url, comment)
result = json.loads(response.data.decode("UTF-8"))
self.assertEqual(result["status"], 201)
self.delete_comment("Comment 1")
| 28.147059 | 67 | 0.636886 | 237 | 1,914 | 5.084388 | 0.261603 | 0.074689 | 0.104564 | 0.082988 | 0.575934 | 0.475519 | 0.337759 | 0.337759 | 0.337759 | 0.337759 | 0 | 0.028782 | 0.219436 | 1,914 | 67 | 68 | 28.567164 | 0.777778 | 0.141066 | 0 | 0.25641 | 0 | 0 | 0.217175 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.128205 | false | 0 | 0.102564 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cf7844c0843b1293636cf8069df2f14c752925e | 392 | py | Python | coinbase_commerce/aio/api_resources/base/create_api_resource.py | nkoshell/coinbase-commerce-python | 94dc57951ac897ffbc7861dc909f413028d6a0b9 | [
"Apache-2.0"
] | null | null | null | coinbase_commerce/aio/api_resources/base/create_api_resource.py | nkoshell/coinbase-commerce-python | 94dc57951ac897ffbc7861dc909f413028d6a0b9 | [
"Apache-2.0"
] | null | null | null | coinbase_commerce/aio/api_resources/base/create_api_resource.py | nkoshell/coinbase-commerce-python | 94dc57951ac897ffbc7861dc909f413028d6a0b9 | [
"Apache-2.0"
] | null | null | null | from coinbase_commerce import util
from . import APIResource
__all__ = (
'CreateAPIResource',
)
class CreateAPIResource(APIResource):
"""
Create operations mixin
"""
@classmethod
async def create(cls, **params):
response = await cls._api_client.post(cls.RESOURCE_PATH, data=params)
return util.convert_to_api_object(response, cls._api_client, cls)
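# Usage sketch (assumption: a concrete subclass defines RESOURCE_PATH and the
# API client has been configured elsewhere; the names below are hypothetical):
#
#     class Charge(CreateAPIResource):
#         RESOURCE_PATH = 'charges'
#
#     charge = await Charge.create(name='Test', pricing_type='no_price')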
| 21.777778 | 77 | 0.706633 | 44 | 392 | 6 | 0.659091 | 0.045455 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19898 | 392 | 17 | 78 | 23.058824 | 0.840764 | 0.058673 | 0 | 0 | 0 | 0 | 0.048159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cfa55d58decf3e1c5433b4c930ab763da369af0 | 392 | py | Python | api/app.py | loudest/vision_zero | 91b094d864fabedbaa56cb9d1639aa75aa19bb00 | [
"MIT"
] | 2 | 2015-03-25T00:51:45.000Z | 2015-06-18T10:54:24.000Z | api/app.py | loudest/vision_zero | 91b094d864fabedbaa56cb9d1639aa75aa19bb00 | [
"MIT"
] | null | null | null | api/app.py | loudest/vision_zero | 91b094d864fabedbaa56cb9d1639aa75aa19bb00 | [
"MIT"
] | null | null | null | #!flask/bin/python
from flask import Flask, jsonify
import requests
app = Flask(__name__)
@app.route('/')
def index():
return "Hello, World!"
@app.route('/signed_data', methods=['GET'])
def signed_map():
r = requests.get('http://data.seattle.gov/resource/kb3s-zi3s.json', timeout=10)  # avoid hanging on a slow upstream
json_data = r.json()
return jsonify({'data': json_data})
if __name__ == '__main__':
app.run(debug=True)
| 18.666667 | 69 | 0.683673 | 56 | 392 | 4.5 | 0.589286 | 0.063492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005865 | 0.130102 | 392 | 20 | 70 | 19.6 | 0.733138 | 0.043367 | 0 | 0 | 0 | 0 | 0.236559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0.076923 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cfc0222b944d024264d6196a63452889c5cce0e | 5,482 | py | Python | Modules/scan.py | mafiamasterhere/EvilNet | 5b93d69ff9b6b16edfd3053f1f56857173b59eb1 | [
"MIT"
] | 91 | 2020-06-19T22:08:32.000Z | 2022-03-28T08:27:10.000Z | scan.py | lunnar211/CRACK_WIFI | 654af29306dd6582bf3ece38e9dd2de196f09aab | [
"MIT"
] | null | null | null | scan.py | lunnar211/CRACK_WIFI | 654af29306dd6582bf3ece38e9dd2de196f09aab | [
"MIT"
] | 22 | 2020-06-29T13:19:40.000Z | 2021-11-26T11:22:40.000Z | import nmap3
from colored import fg, bg, attr
import colored
import socket as sock
from Modules import intro
class nmap3_Scan():
def __init__(self):
self.angry1 = colored.fg("green") + colored.attr("bold")
self.angry = colored.fg("white") + colored.attr("bold")
print(f"""{self.angry1}
1 - Os
2 - Top PORT
3- Xmas Scan
4 - Fin Scan
5 - Dns brute
6 - UDP Scan
7 - TCP Scan
99 - back
""")
self.number = str(input("[?]>>"))
if self.number == str(1) or "use os" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
self.OS(self.Host,self.Timing)
if self.number == str(2) or "use top port" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
if self.Timing is None:
self.Top_port(self.Host)
else:
self.Top_port(self.Host,self.Timing)
if self.number == str(3) or "use xmas" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
if self.Timing is None:
self.Xmas_Scan(self.Host)
else:
self.Xmas_Scan(self.Host,self.Timing)
if self.number == str(4) or "use fin" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
if self.Timing is None:
self.Fin_Scan(self.Host)
else:
self.Fin_Scan(self.Host,self.Timing)
if self.number == str(5) or "use brute dns" in self.number :
self.Host = str(input("%s[*] Domain >>"%(self.angry1)))
self.Dns_Brute(self.Host)
if self.number == str(6) or "use udp" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
if self.Timing is None:
self.UDP_Scan(self.Host)
else:
self.UDP_Scan(self.Host,self.Timing)
if self.number == str(7) or "use tcp" in self.number :
self.Host = str(input("%s[*] Host >>"%(self.angry1)))
self.Timing = int(input("[*] Timing >>"))
if self.Timing is None:
self.TCP_Scan(self.Host)
else:
self.TCP_Scan(self.Host,self.Timing)
if self.number == str(99) or "back" in self.number :
intro.main()
def OS(self,Host,Timing=4):
self.Host = Host
self.Timing = Timing
try :
print("Loading ........................................")
HOST_lib = nmap3.Nmap()
System=HOST_lib.nmap_os_detection(str(self.Host),args=f"-T{self.Timing} -vv")
for i in System:
print(f"System:{i['name']} CPE : {i['cpe']} ")
except Exception:
pass  # swallow scan/parse failures so the menu keeps running
def Top_port (self,Host,Timing=4):
print("Loading ........................................")
self.Host = sock.gethostbyname(self.Host)
HOST_lib = nmap3.Nmap()
System = HOST_lib.scan_top_ports(self.Host,self.Timing)
for z in System[self.Host]:
print(z['portid'],z['service']['name'],z['state'])
def Dns_Brute(self,Host,Timing=4):
print("Loading ........................................")
HOST_lib = nmap3.NmapHostDiscovery()
System = HOST_lib.nmap_dns_brute_script(self.Host)
for output in System:
print(" "+output['address']," "+output['hostname']+self.angry)
def Xmas_Scan (self,Host,Timing=4):
print("Loading ........................................")
self.Host = sock.gethostbyname(self.Host)
HOST_lib = nmap3.NmapHostDiscovery()
System=HOST_lib.nmap_portscan_only(str(self.Host),args=f" -sX -T{self.Timing} -vv")
for z in System[self.Host]:
print(z['portid'],z['service']['name'],z['state']+self.angry)
def Fin_Scan(self,Host,Timing=4):
print("Loading ........................................")
self.Host = sock.gethostbyname(self.Host)
HOST_lib = nmap3.NmapHostDiscovery()
System=HOST_lib.nmap_portscan_only(str(self.Host),args=f" -sF -T{self.Timing} -vv")
for z in System[self.Host]:
print(z['portid'],z['service']['name'],z['state']+self.angry)
def UDP_Scan(self,Host,Timing=4):
print("Loading ........................................")
self.Host = sock.gethostbyname(self.Host)
HOST_lib = nmap3.NmapScanTechniques()
System=HOST_lib.nmap_udp_scan(str(self.Host),args=f"-T{self.Timing} -vv")
for z in System[self.Host]:
print(z['portid'],z['service']['name'],z['state']+self.angry)
def TCP_Scan(self,Host,Timing=4):
print("Loading ........................................")
self.Host = sock.gethostbyname(self.Host)
HOST_lib = nmap3.NmapScanTechniques()
System=HOST_lib.nmap_tcp_scan(str(self.Host),args=f"-T{self.Timing} -vv")
for z in System[self.Host]:
print(z['portid'],z['service']['name'],z['state']+self.angry)
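# Note (assumption about the environment): python3-nmap (nmap3) shells out to
# the system nmap binary, so nmap must be installed and on PATH for any of the
# scans above to return results.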
| 42.169231 | 91 | 0.506202 | 669 | 5,482 | 4.071749 | 0.13154 | 0.143906 | 0.052863 | 0.044053 | 0.733113 | 0.68025 | 0.670338 | 0.653451 | 0.601689 | 0.547357 | 0 | 0.011082 | 0.292229 | 5,482 | 129 | 92 | 42.496124 | 0.690979 | 0 | 0 | 0.431034 | 0 | 0 | 0.191901 | 0.051076 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0.008621 | 0.043103 | 0 | 0.12069 | 0.12931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cfea876092666973d9916499f12af7785c199a1 | 1,524 | py | Python | api/serializers.py | Wholefolio/marketmanager | 5a8314707806a6790c507e1bd817891e8dc88811 | [
"Apache-2.0"
] | null | null | null | api/serializers.py | Wholefolio/marketmanager | 5a8314707806a6790c507e1bd817891e8dc88811 | [
"Apache-2.0"
] | null | null | null | api/serializers.py | Wholefolio/marketmanager | 5a8314707806a6790c507e1bd817891e8dc88811 | [
"Apache-2.0"
] | null | null | null | """Serializers module."""
from rest_framework import serializers
from django_celery_results.models import TaskResult
from api import models
class ExchangeSerializer(serializers.ModelSerializer):
"""Serializer to map the Model instance into JSON format."""
class Meta:
"""Meta class to map serializer's fields with the model fields."""
model = models.Exchange
fields = ('id', 'name', 'created', 'updated', "url", "api_url",
"volume", "top_pair", "top_pair_volume", "interval",
"enabled", "last_data_fetch", "logo")
read_only_fields = ('created', 'updated')
def get_type(self, obj):
return obj.get_type_display()
class MarketSerializer(serializers.ModelSerializer):
class Meta:
model = models.Market
fields = ("id", "name", "exchange", "volume", "last", "bid", "ask",
"base", "quote", "updated")
class ExchangeStatusSerializer(serializers.ModelSerializer):
"""Serializer to map the Model instance into JSON format."""
class Meta:
"""Meta class to map serializer's fields with the model fields."""
model = models.ExchangeStatus
fields = ('id', 'exchange', 'last_run', 'last_run_id',
'last_run_status', 'time_started', 'running')
class TaskResultSerializer(serializers.ModelSerializer):
class Meta:
model = TaskResult
fields = ("id", "date_done", "meta", "status", "result",
"traceback", "task_id")
| 31.75 | 75 | 0.631234 | 163 | 1,524 | 5.766871 | 0.435583 | 0.110638 | 0.076596 | 0.080851 | 0.382979 | 0.297872 | 0.297872 | 0.297872 | 0.297872 | 0.297872 | 0 | 0 | 0.238189 | 1,524 | 47 | 76 | 32.425532 | 0.809647 | 0.164698 | 0 | 0.148148 | 0 | 0 | 0.207698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0.037037 | 0.481481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cfff87f1a992e437041fea9fa36fffc753143d6 | 3,228 | py | Python | invenio_oaiserver/views/server.py | ParthS007/invenio-oaiserver | 6fa5d2e2a770377ffe34a44bc60b0a817853da95 | [
"MIT"
] | null | null | null | invenio_oaiserver/views/server.py | ParthS007/invenio-oaiserver | 6fa5d2e2a770377ffe34a44bc60b0a817853da95 | [
"MIT"
] | null | null | null | invenio_oaiserver/views/server.py | ParthS007/invenio-oaiserver | 6fa5d2e2a770377ffe34a44bc60b0a817853da95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OAI-PMH 2.0 server."""
from flask import Blueprint, make_response
from invenio_pidstore.errors import PIDDoesNotExistError
from itsdangerous import BadSignature
from lxml import etree
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import use_args
from .. import response as xml
from ..errors import OAINoRecordsMatchError
from ..verbs import make_request_validator
blueprint = Blueprint(
'invenio_oaiserver',
__name__,
static_folder='../static',
template_folder='../templates',
)
@blueprint.errorhandler(ValidationError)
@blueprint.errorhandler(422)
def validation_error(exception):
"""Return formatter validation error."""
messages = getattr(exception, 'messages', None)
if messages is None:
messages = getattr(exception, 'data', {'messages': None})['messages']
def extract_errors():
"""Extract errors from exception."""
if isinstance(messages, dict):
for field, message in messages.items():
if field == 'verb':
yield 'badVerb', '\n'.join(message)
else:
yield 'badArgument', '\n'.join(message)
else:
for field in exception.field_names:
if field == 'verb':
yield 'badVerb', '\n'.join(messages)
else:
yield 'badArgument', '\n'.join(messages)
if not exception.field_names:
yield 'badArgument', '\n'.join(messages)
return (etree.tostring(xml.error(extract_errors())),
422,
{'Content-Type': 'text/xml'})
@blueprint.errorhandler(PIDDoesNotExistError)
def pid_error(exception):
"""Handle PID Exceptions."""
return (etree.tostring(xml.error([('idDoesNotExist',
'No matching identifier')])),
422,
{'Content-Type': 'text/xml'})
@blueprint.errorhandler(BadSignature)
def resumptiontoken_error(exception):
"""Handle resumption token exceptions."""
return (etree.tostring(xml.error([(
'badResumptionToken',
'The value of the resumptionToken argument is invalid or expired.')
])), 422, {'Content-Type': 'text/xml'})
@blueprint.errorhandler(OAINoRecordsMatchError)
def no_records_error(exception):
"""Handle no records match Exceptions."""
return (etree.tostring(xml.error([('noRecordsMatch',
'')])),
422,
{'Content-Type': 'text/xml'})
@blueprint.route('/oai2d', methods=['GET', 'POST'])
@use_args(make_request_validator)
def response(args):
"""Response endpoint."""
e_tree = getattr(xml, args['verb'].lower())(**args)
response = make_response(etree.tostring(
e_tree,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8',
))
response.headers['Content-Type'] = 'text/xml'
return response
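# A minimal registration sketch (the app setup below is an assumption for
# illustration; an Invenio application normally registers this blueprint
# itself):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(blueprint)  # serves GET/POST /oai2d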
| 31.339806 | 77 | 0.629492 | 340 | 3,228 | 5.891176 | 0.405882 | 0.052421 | 0.037444 | 0.044933 | 0.218173 | 0.161258 | 0.090864 | 0 | 0 | 0 | 0 | 0.013109 | 0.243804 | 3,228 | 102 | 78 | 31.647059 | 0.807456 | 0.145601 | 0 | 0.188406 | 0 | 0 | 0.141805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.289855 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf01e9ecb22b1e70b4470ec1161d194bd76c4e67 | 6,847 | py | Python | searching/models.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | searching/models.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | 13 | 2019-12-12T03:07:55.000Z | 2022-03-07T12:59:27.000Z | searching/models.py | netvigator/auctions | f88bcce800b60083a5d1a6f272c51bb540b8342a | [
"MIT"
] | null | null | null | from django.db import models
from core.utils import getReverseWithUpdatedQuery
from ebayinfo.models import EbayCategory
from categories.models import Category
from core.dj_import import get_user_model
User = get_user_model()
from searching import ALL_PRIORITIES
from pyPks.Time.Output import getIsoDateTimeFromDateTime
# ### models can be FAT but not too FAT! ###
class Search(models.Model):
cTitle = models.CharField( 'short description',
help_text = 'This is just a short description -- ebay will not search for this<br>'
'you must have a) key word(s) and/or b) an ebay category',
max_length = 38, null = True )
cKeyWords = models.TextField(
'key words -- search for these (maximum length 350 characters)',
max_length = 350, null = True, blank = True,
help_text = 'What you type here will go into the ebay search box '
'-- multiple terms will result in an AND search '
'(ebay will look for all terms).<br>'
'search for red OR green handbags as follows: '
'handbags (red,green)<br>'
'TIPS: to exclude words, put a - in front '
'(without any space),<br>'
'search handbags but exclude red as follows: '
'handbags -red<br>'
'search for handbags but '
'exclude red and green as follows: handbags -red -green' )
# max length for a single key word is 98
iEbayCategory = models.ForeignKey( EbayCategory,
on_delete=models.CASCADE,
verbose_name = 'ebay category',
null = True, blank = True,
help_text = 'Limit search to items listed in this category' )
# ### after updating ebay categories, check whether ###
# ### searches that were connected are still connected !!! ###
iDummyCategory = models.PositiveIntegerField( 'ebay category number',
null = True, blank = True,
help_text = 'Limit search to items listed in this category<br>'
'copy the category number from ebay and paste here!!! (sorry)' )
cPriority = models.CharField( 'processing priority',
max_length = 2, null = True,
choices = ALL_PRIORITIES,
help_text = 'high priority A1 A2 A3 ... Z9 low priority' )
bGetBuyItNows = models.BooleanField(
"also get 'Buy It Nows' (fixed price non auctions)?",
help_text = 'You may get an avalanche of useless junk '
'if you turn this on -- be careful!',
blank = True, null = True,
default = False )
bInventory = models.BooleanField(
"also get 'Store Inventory' "
"(fixed price items in ebay stores)?",
help_text = 'You may get an avalanche of useless junk '
'if you turn this on -- be careful!',
blank = True, null = True,
default = False )
iMyCategory = models.ForeignKey( Category,
on_delete=models.DO_NOTHING,
verbose_name = 'my category that matches ebay category',
null = True, blank = True,
help_text = 'Example: if you have a category for "Manuals" and '
'this search is in the ebay category "Vintage Manuals" '
'put your "Manuals" category here.<br>If you have a '
'category "Widgets" and this search finds an item '
'with "Widget Manual" in the title, the bot will know '
'this item is for a manual, NOT a widget.')
tBegSearch = models.DateTimeField( 'last search started',
null = True )
tEndSearch = models.DateTimeField( 'last search completed',
null = True )
cLastResult = models.TextField( 'last search outcome', null = True )
iUser = models.ForeignKey( User, on_delete=models.CASCADE,
verbose_name = 'Owner' )
tCreate = models.DateTimeField( 'created on', auto_now_add= True )
tModify = models.DateTimeField( 'updated on', auto_now = True )
def __str__(self):
return self.cTitle
class Meta:
verbose_name_plural = 'searches'
db_table = 'searching'
unique_together = ( ( 'iUser', 'cPriority' ),
( 'iUser', 'cTitle' ),
( 'iUser', 'cKeyWords', 'iEbayCategory',) )
ordering = ('cTitle',)
def get_absolute_url(self):
#
return getReverseWithUpdatedQuery(
'searching:detail',
kwargs = { 'pk': self.pk, 'tModify': self.tModify } )
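# A minimal usage sketch (all field values are illustrative assumptions,
# not taken from this project; 'some_user' is a hypothetical User instance):
# search = Search.objects.create(
#     cTitle='Vintage radio manuals',
#     cKeyWords='radio manual -reprint',
#     cPriority='A1',
#     iUser=some_user)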
class SearchLog(models.Model):
iSearch = models.ForeignKey( Search, on_delete=models.CASCADE,
verbose_name = 'Search that first found this item' )
tBegSearch = models.DateTimeField( 'search started',
db_index = True )
tEndSearch = models.DateTimeField( 'search completed',
null = True )
tBegStore = models.DateTimeField( 'processing started',
null = True )
tEndStore = models.DateTimeField( 'processing completed',
null = True )
iItems = models.PositiveIntegerField( 'items found',
null = True )
iStoreItems = models.PositiveIntegerField( 'items stored',
null = True )
iStoreUsers = models.PositiveIntegerField( 'stored for owner',
null = True )
iItemHits = models.PositiveIntegerField(
'have category, brand & model',
null = True )
cResult = models.TextField( 'search outcome', null = True )
cStoreDir = models.CharField( 'search files directory',
max_length = 10,
null = True, blank = True )
def __str__(self):
sSayDir = ( self.cStoreDir
if self.cStoreDir
else getIsoDateTimeFromDateTime( self.tBegSearch ) )
return '%s - %s' % ( sSayDir, self.iSearch.cTitle )
class Meta:
verbose_name_plural = 'searchlogs'
db_table = verbose_name_plural
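# For a log row with no store directory, __str__ yields something like
# '2022-01-01T00:00:00 - Vintage radio manuals' (values illustrative; the
# exact timestamp format comes from getIsoDateTimeFromDateTime).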
| 47.548611 | 91 | 0.535417 | 668 | 6,847 | 5.411677 | 0.33982 | 0.04426 | 0.017981 | 0.023513 | 0.17538 | 0.151591 | 0.100415 | 0.100415 | 0.08686 | 0.08686 | 0 | 0.004033 | 0.384402 | 6,847 | 143 | 92 | 47.881119 | 0.853618 | 0.035636 | 0 | 0.205128 | 0 | 0 | 0.282781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.059829 | 0.017094 | 0.358974 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf025b07d576f1c46ac2887dea5c3dde0c945bf5 | 367 | py | Python | Maths/fibonacciSeries.py | baiyongzhen/python | a8f367d2136f1aaeab63345e160e59fe16d62a11 | [
"MIT"
] | 1 | 2018-10-16T13:41:06.000Z | 2018-10-16T13:41:06.000Z | Maths/fibonacciSeries.py | baiyongzhen/python | a8f367d2136f1aaeab63345e160e59fe16d62a11 | [
"MIT"
] | null | null | null | Maths/fibonacciSeries.py | baiyongzhen/python | a8f367d2136f1aaeab63345e160e59fe16d62a11 | [
"MIT"
] | 2 | 2018-10-03T15:47:30.000Z | 2019-10-23T16:35:48.000Z | # Fibonacci Sequence Using Recursion
def recur_fibo(n):
if n <= 1:
return n
else:
return recur_fibo(n-1) + recur_fibo(n-2)
limit = int(input("How many terms to include in Fibonacci series:"))
if limit <= 0:
print("Plese enter a positive integer")
else:
print("Fibonacci series:")
for i in range(limit):
print(recur_fibo(i))
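# The naive recursion above revisits subproblems and is exponential in n;
# an iterative variant (a sketch, producing the same values for n >= 0)
# runs in linear time:
def iter_fibo(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a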
| 21.588235 | 67 | 0.640327 | 56 | 367 | 4.125 | 0.607143 | 0.155844 | 0.12987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 0.237057 | 367 | 16 | 68 | 22.9375 | 0.810714 | 0.092643 | 0 | 0.166667 | 0 | 0 | 0.277946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf02f2ac21df464b0428c1c4c3f886070ec8055f | 6,319 | py | Python | extract_tokens.py | anuprulez/similar_galaxy_tools | 19eefa567fdb56781dc5f42a0bea8af0969f5978 | [
"MIT"
] | 2 | 2018-02-02T18:52:12.000Z | 2018-02-03T08:36:44.000Z | extract_tokens.py | anuprulez/similar_galaxy_tools | 19eefa567fdb56781dc5f42a0bea8af0969f5978 | [
"MIT"
] | null | null | null | extract_tokens.py | anuprulez/similar_galaxy_tools | 19eefa567fdb56781dc5f42a0bea8af0969f5978 | [
"MIT"
] | 1 | 2018-02-03T08:36:57.000Z | 2018-02-03T08:36:57.000Z | """
Extract useful tokens from multiple attributes of Galaxy tools
"""
import os
import numpy as np
import pandas as pd
import operator
import json
import utils
class ExtractTokens:
@classmethod
def __init__( self, tools_data_path ):
self.tools_data_path = tools_data_path
@classmethod
def _read_file( self ):
"""
Read the CSV file containing the description of all tools
"""
return pd.read_csv( self.tools_data_path )
@classmethod
def _extract_tokens( self, file, tokens_source ):
"""
Extract tokens from the description of all tools
"""
tools_tokens_source = dict()
for source in tokens_source:
tools_tokens = dict()
for row in file.iterrows():
tokens = self._get_tokens_from_source( row[ 1 ], source )
tools_tokens[ row[ 1 ][ "id" ] ] = tokens
tools_tokens_source[ source ] = tools_tokens
return tools_tokens_source
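# Resulting shape (illustrative): one token string per tool id, keyed by
# source, e.g. { 'input_output': { 'tool_id': 'fastq bam ...' }, ... }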
@classmethod
def _get_tokens_from_source( self, row, source ):
"""
Fetch tokens from different sources, namely input and output file types,
tool names and descriptions, EDAM topics, and help text
"""
tokens = ''
if source == 'input_output':
# remove duplicate file types individually from the input and output file types, then merge them
input_tokens = utils._restore_space( utils._get_text( row, "inputs" ) )
input_tokens = utils._remove_duplicate_file_types( input_tokens )
output_tokens = utils._restore_space( utils._get_text( row, "outputs" ) )
output_tokens = utils._remove_duplicate_file_types( output_tokens )
if input_tokens != "" and output_tokens != "":
tokens = input_tokens + ' ' + output_tokens
elif output_tokens != "":
tokens = output_tokens
elif input_tokens != "":
tokens = input_tokens
elif source == 'name_desc_edam':
tokens = utils._restore_space( utils._get_text( row, "name" ) ) + ' '
tokens += utils._restore_space( utils._get_text( row, "description" ) ) + ' '
tokens += utils._get_text( row, "edam_topics" )
elif source == "help_text":
tokens = utils._get_text( row, "help" )
return utils._remove_special_chars( tokens )
@classmethod
def _refine_tokens( self, tokens ):
"""
Refine the set of tokens by removing stop words (e.g. 'to', 'with') and ranking the rest with BM25 scores
"""
k = 1.75
b = 0.75
stop_words_file = "stop_words.txt"
all_stopwords = list()
refined_tokens_sources = dict()
# collect all the stopwords
with open( stop_words_file ) as file:
lines = file.read()
all_stopwords = lines.split( "\n" )
for source in tokens:
refined_tokens = dict()
files = dict()
inverted_frequency = dict()
file_id = -1
total_file_length = 0
for item in tokens[ source ]:
file_id += 1
file_tokens = tokens[ source ][ item ].split(" ")
if source in "name_desc_edam" or source in "help_text":
file_tokens = utils._clean_tokens( file_tokens, all_stopwords )
total_file_length += len( file_tokens )
term_frequency = dict()
for token in file_tokens:
if token != '':
file_ids = list()
if token not in inverted_frequency:
file_ids.append( file_id )
else:
file_ids = inverted_frequency[ token ]
if file_id not in file_ids:
file_ids.append( file_id )
inverted_frequency[ token ] = file_ids
# for term frequency
if token not in term_frequency:
term_frequency[ token ] = 1
else:
term_frequency[ token ] += 1
files[ item ] = term_frequency
N = len( files )
average_file_length = float( total_file_length ) / N
# compute the BM25 score for each token of each tool; it measures how
# important each word is to the tool relative to the other tools
for item in files:
file_item = files[ item ]
file_length = len( file_item )
for token in file_item:
tf = file_item[ token ]
# normalize the term freq of token for each document
tf = float( tf ) / file_length
idf = np.log2( N / len( inverted_frequency[ token ] ) )
alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )
tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )
tf_idf = tf_star * idf
file_item[ token ] = tf_idf
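# In formula terms, the score above is a BM25 variant (a restatement of
# the code, not an external addition):
#   idf(t)    = log2( N / df(t) )
#   alpha     = (1 - b) + b * |d| / avgdl
#   tf*(t, d) = tf(t, d) * (k + 1) / (k * alpha + tf(t, d))
#   score     = tf*(t, d) * idf(t)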
# rank tokens by their BM25 scores -- not all tokens are equally important
for item in files:
file_tokens = files[ item ]
tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]
sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )
refined_tokens[ item ] = sorted_tokens
tokens_file_name = 'tokens_' + source + '.txt'
token_file_path = os.path.join( os.path.dirname( self.tools_data_path ), tokens_file_name )
with open( token_file_path, 'w' ) as file:
file.write( json.dumps( refined_tokens ) )
refined_tokens_sources[ source ] = refined_tokens
return refined_tokens_sources
@classmethod
def get_tokens( self, data_source ):
"""
Get refined tokens
"""
print( "Extracting tokens..." )
dataframe = self._read_file()
tokens = self._extract_tokens( dataframe, data_source )
return dataframe, self._refine_tokens( tokens )
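# A minimal driver sketch (the CSV path is an assumption; the source names
# match the branches handled in _get_tokens_from_source):
# extractor = ExtractTokens( 'data/processed_tools.csv' )
# dataframe, refined_tokens = extractor.get_tokens(
#     [ 'input_output', 'name_desc_edam', 'help_text' ] )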
| 42.126667 | 110 | 0.549929 | 704 | 6,319 | 4.673295 | 0.21733 | 0.027356 | 0.021885 | 0.027356 | 0.159574 | 0.084498 | 0.046201 | 0.046201 | 0 | 0 | 0 | 0.005304 | 0.373477 | 6,319 | 149 | 111 | 42.409396 | 0.825714 | 0.118531 | 0 | 0.106195 | 0 | 0 | 0.028713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053097 | false | 0 | 0.053097 | 0 | 0.159292 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |