hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
97849e29d8bc30894785e486625de3eacbf655df | 570 | py | Python | src/odontology/person/migrations/0008_patient_date_created.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 2 | 2016-06-23T15:35:29.000Z | 2022-01-11T00:55:21.000Z | src/odontology/person/migrations/0008_patient_date_created.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 27 | 2016-06-24T12:28:01.000Z | 2022-01-13T00:37:25.000Z | src/odontology/person/migrations/0008_patient_date_created.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('person', '0007_auto_20160214_2019'),
]
operations = [
migrations.AddField(
model_name='patient',
name='date_created',
field=models.DateField(default=datetime.datetime(2016, 2, 15, 1, 6, 14, 723509, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
]
| 24.782609 | 124 | 0.64386 | 63 | 570 | 5.619048 | 0.746032 | 0.056497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078886 | 0.24386 | 570 | 22 | 125 | 25.909091 | 0.742459 | 0.036842 | 0 | 0 | 0 | 0 | 0.087751 | 0.042048 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97856bbd18162d533ea54eb1abdafde5f0b28d54 | 1,358 | py | Python | Sensor_Debug/TestCase/test_01_Openapp.py | sdwfclcyk1/AutoTestCase | 63a6a6a4acf2a9dc572bd917b186638eae65aee7 | [
"MIT"
] | 1 | 2018-09-28T11:35:07.000Z | 2018-09-28T11:35:07.000Z | Sensor_Debug/TestCase/test_01_Openapp.py | sdwfclcyk1/AutoTestCase | 63a6a6a4acf2a9dc572bd917b186638eae65aee7 | [
"MIT"
] | null | null | null | Sensor_Debug/TestCase/test_01_Openapp.py | sdwfclcyk1/AutoTestCase | 63a6a6a4acf2a9dc572bd917b186638eae65aee7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/25 18:59
# @Author : Kay
# @Site :
# @File : test_01_openapp.py
# @Software: PyCharm Community Edition
import uiautomator2 as u2
import time
import unittest
from Public.Decorator import *
from Public.BasePage import BasePage
from Public.ReadConfig import ReadConfig
from Public.JugementSensorData import JugementSensorData
from TestSuit_SenSorData.ExpectResult.OpenApp import OpenApp_Expection
event_name = ReadConfig().get_testEvent("打开App")
apkpage = ReadConfig().get_pkg_name()
apkActivity = ReadConfig().get_pkg_activity()
class OpenApp(unittest.TestCase, BasePage):
@classmethod
@setupclass
def setUpClass(cls):
cls.set_fastinput_ime()
cls.unlock_device()
cls.d.app_stop_all()
@classmethod
@setupclass
def tearDownClass(cls):
cls.d.app_stop(apkpage)
@testcase
def test_01_coldapp(self):
self.d.app_start(apkpage,apkActivity)
server = OpenApp_Expection()
JugementSensorData.JugementData("test_01_coldapp",server)
@testcase
def test_01_hotapp(self):
self.d.app_start(apkpage, apkActivity)
time.sleep(5)
self.d.app_stop(apkpage)
self.d.app_start(apkpage, apkActivity)
server = OpenApp_Expection()
JugementSensorData.JugementData("test_01_hotapp",server)
| 24.690909 | 71 | 0.715022 | 162 | 1,358 | 5.808642 | 0.450617 | 0.025505 | 0.034006 | 0.041445 | 0.230606 | 0.230606 | 0.230606 | 0.18916 | 0.18916 | 0.18916 | 0 | 0.022563 | 0.184094 | 1,358 | 54 | 72 | 25.148148 | 0.826715 | 0.119293 | 0 | 0.323529 | 0 | 0 | 0.02874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.205882 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978a831843efed4c9a546c08310214668dcc7a6e | 7,009 | py | Python | docs/database_tables.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 2 | 2021-07-28T08:46:13.000Z | 2022-01-19T17:05:48.000Z | docs/database_tables.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 3 | 2020-11-10T23:34:17.000Z | 2021-03-31T16:19:21.000Z | docs/database_tables.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | null | null | null | """Read database information from Django models and create a HTML table for the
documentation.
"""
import collections
import os
import sys
import django
from loguru import logger
DOCS_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(DOCS_DIR)
sys.path.insert(0, BASE_DIR)
os.environ["DJANGO_SETTINGS_MODULE"] = "app.settings"
django.setup()
from django_extensions.management.modelviz import generate_graph_data # noqa: E402
from database_tables_config import ( # noqa: E402
APP_LABELS,
HTML_TOP,
HTML_BOTTOM,
FILENAME,
)
def tabulate(head, rows, head_classes=None, row_classes=None):
head_classes = head_classes or {}
row_classes = row_classes or {}
th = []
for i, item in enumerate(head):
attrs = []
if i in head_classes:
attrs.append(f'class="{head_classes[i]}"')
th.append(f"<th{' ' if attrs else ''}{' '.join(attrs)}>{item}</th>")
tr = []
for row in rows:
td = []
for i, item in enumerate(row):
attrs = []
if i in row_classes:
attrs.append(f'class="{row_classes[i]}"')
td.append(f"<td{' ' if attrs else ''}{' '.join(attrs)}>{item}</td>")
tr.append(f"<tr>{' '.join(td)}</tr>")
head_html = f"<thead><tr>{' '.join(th)}</tr></thead>"
body_html = "<tbody>" + "\n".join(tr) + "</tbody>"
return f"<table>{head_html}\n{body_html}</table>"
def build_html( # noqa: C901
app_labels: list = APP_LABELS,
html_top: str = HTML_TOP,
html_bottom: str = HTML_BOTTOM,
) -> str:
"""Create an HTML page with a series of html tables for each table in the database.
Args:
app_labels (list): List of Django apps to include in HTML page. Defaults to APP_LABELS.
html_top (str): HTML code to insert above generated table. Defaults to HTML_TOP.
html_bottom (str): HTML code to insert below generated table. Defaults to HTML_BOTTOM.
Returns:
str: HTML page.
"""
# generate a dict with the table names as keys
output_table_dict = {}
for label in app_labels:
# read basic data with django_extensions.management.modelviz
data = generate_graph_data([label])
for db_table in data["graphs"][0]["models"]:
# generate data for each table (include help_text if present, if not use verbose_name)
table_fields = []
for field in db_table["fields"]:
if field["help_text"]:
description = field["help_text"]
else:
description = field["verbose_name"]
data_type = f'<code>{field["db_type"]}</code>'
if field["relation"]:
field_type = field["internal_type"]
field_type = field_type.replace("ForeignKey", "FK")
data_type = f"{data_type} (<b>{field_type}</b>)"
# elif field["type"] == "AutoField":
# data_type = f'{data_type}<br/><b>{field["type"]}</b>'
nullable = "✅" if field["null"] else "❌"
unique = "✅" if field["unique"] else "❌"
table_fields.append(
[
f"<code>{field['column_name']}</code>",
data_type,
unique,
nullable,
description,
]
)
# only include tables that are stored in db
if (
db_table["fields"][0]["name"] == "id"
and db_table["fields"][0]["type"] == "AutoField"
):
# create table info text from docstring
docstring_html = db_table["docstring"].replace("\n\n", "<br />\n")
info_text = f"<p>{docstring_html}</p>"
# if table uses foreign keys: create a list of foreign keys with links
if db_table["relations"]:
relation_text = ""
for relation in db_table["relations"]:
if relation["type"] == "ForeignKey":
relation_text += f'<li><a href="#{relation["target"]}"><code>{relation["target_table_name"]}</code></a> via <code>{relation["column_name"]}</code></li>'
# elif relation["type"] == "ManyToManyField":
# relation_text += f'<li><code>{relation["column_name"]}</code> aus der Tabelle <a href="#{relation["target"]}">{relation["target_table_name"]}</a> (ManyToMany)</li>'
if relation_text:
if db_table["is_m2m_table"]:
info_text += "<p>Sie verbindet die folgenden Tabellen:</p>"
else:
info_text += "<p>Diese Tabelle hat folgende Relationen zu anderen Tabellen:</p>"
info_text += "<ul>"
info_text += relation_text
info_text += "</ul>"
if db_table["unique_together"]:
info_text += "Für die Tabelle sind die folgenden <code>UNIQUE</code> Constraints definiert: <ul>"
for tup in db_table["unique_together"]:
info_text += f"<li>{', '.join(f'<code>{field}</code>' for field in tup)}</li>"
info_text += "</ul>"
# combine table name, table info text, table fields, and Django model name
output_table_dict[db_table["db_table_name"]] = [
info_text,
table_fields,
db_table["name"],
]
# sort dict of database tables alphabetically
output_sorted = collections.OrderedDict(sorted(output_table_dict.items()))
# collect HTML items in a string
html_tables = ""
for table_name, table_infos in output_sorted.items():
# convert output table to HTML
html_tables += f"<a name='{table_infos[2]}'></a>" # For backwards compatibility
html_tables += (
f"<h3><a name='{table_name}' href='#{table_name}'>{table_name}</a></h3>"
+ f"<div class='docstring'>{table_infos[0]}</div>"
+ "\n"
+ tabulate(
["Name", "Type", "UNIQUE", "NULL", "Beschreibung"],
table_infos[1],
head_classes={2: "mono", 3: "mono"},
row_classes={2: "hcenter vcenter", 3: "hcenter vcenter"},
)
+ "\n"
)
return str(html_top + html_tables + html_bottom)
if __name__ == "__main__":
# generate html page (based on constants from database_tables_config)
html_page = build_html(APP_LABELS, HTML_TOP, HTML_BOTTOM)
# write output to file
filepath = os.path.join(DOCS_DIR, FILENAME)
with open(filepath, "wt") as output_file:
output_file.write(html_page)
logger.success("Data written to {}", filepath)
| 38.092391 | 194 | 0.542017 | 826 | 7,009 | 4.417676 | 0.245763 | 0.024938 | 0.015073 | 0.017539 | 0.135928 | 0.067964 | 0 | 0 | 0 | 0 | 0 | 0.004869 | 0.326009 | 7,009 | 183 | 195 | 38.300546 | 0.766723 | 0.206449 | 0 | 0.066116 | 0 | 0.016529 | 0.238502 | 0.101073 | 0.008264 | 0 | 0 | 0 | 0 | 1 | 0.016529 | false | 0 | 0.057851 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978c454e2c28df828f082014fc81cd881d17aa27 | 2,497 | py | Python | device/container/src/baseline_device/service/jobs/sample2.py | MartinMReed/aws-iot-baseline | 61bdc51708e6f4480d0117a43f0adde5f6a63506 | [
"MIT"
] | 1 | 2021-12-31T05:05:30.000Z | 2021-12-31T05:05:30.000Z | device/container/src/baseline_device/service/jobs/sample2.py | nelsestu/thing-expert | 2e105d718c386258d8efdb329ea60da1072ffbe8 | [
"MIT"
] | null | null | null | device/container/src/baseline_device/service/jobs/sample2.py | nelsestu/thing-expert | 2e105d718c386258d8efdb329ea60da1072ffbe8 | [
"MIT"
] | 1 | 2021-04-05T23:44:12.000Z | 2021-04-05T23:44:12.000Z | import json
import logging
import os
import sys
import threading
import time
import paho.mqtt.client as paho
import paho.mqtt.publish as paho_publish
from baseline_device.util.config import config
from baseline_device.util.mqtt import MqttLoggingHandler
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
client_id = os.environ['BASELINE_CLIENT_ID']
program = 'sample2'
connected = threading.Event()
def on_connect(client: paho.Client, userdata: dict, flags: dict, rc: int) -> None:
# The value of rc determines success or not:
# 0: Connection successful
# 1: Connection refused - incorrect protocol version
# 2: Connection refused - invalid client identifier
# 3: Connection refused - server unavailable
# 4: Connection refused - bad username or password
# 5: Connection refused - not authorised
# 6-255: Currently unused.
if rc == 0: connected.set()
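# Illustrative note on the flow below, as written in this script: paho invokes
# on_connect once connect_async() completes, and rc == 0 sets the threading.Event
# that the `while not connected.is_set()` loop polls before the job logic starts.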
client = None
job_id = None
try:
with open(f'/tmp/{config.app_name}/jobs/{program}', 'r') as f:
execution = json.load(f)
job_id = execution['jobId']
client = paho.Client(clean_session=True)
client.on_connect = on_connect
client.enable_logger(logger)
logger.addHandler(MqttLoggingHandler(client, f'$aws/rules/{config.topic_prefix}/things/{client_id}/log'))
client.connect_async('localhost')
client.loop_start()
while not connected.is_set():
time.sleep(1)
logger.info('Job started!')
time.sleep(30)
logger.info('Job complete!')
client.publish(f'$aws/things/{client_id}/jobs/{job_id}/update', qos=2, payload=json.dumps({
'status': 'SUCCEEDED',
'expectedVersion': execution['versionNumber'],
'executionNumber': execution['executionNumber']
}))
sys.exit(0)
except SystemExit as e:
raise e
except:
logger.critical('Fatal shutdown...', exc_info=True)
if job_id:
try:
client_publish = client.publish if client.is_connected() else paho_publish.single
client_publish(f'$aws/things/{client_id}/jobs/{job_id}/update', qos=2, payload=json.dumps({
'status': 'FAILED',
'expectedVersion': execution['versionNumber'],
'executionNumber': execution['executionNumber']
}))
except:
logger.warning('Unable to send job status as FAILED', exc_info=True)
sys.exit(1)
finally:
if client:
client.loop_stop()
client.disconnect()
| 26.284211 | 109 | 0.673208 | 308 | 2,497 | 5.344156 | 0.435065 | 0.024301 | 0.025516 | 0.026731 | 0.17983 | 0.17983 | 0.087485 | 0.087485 | 0.087485 | 0.087485 | 0 | 0.009635 | 0.210252 | 2,497 | 94 | 110 | 26.56383 | 0.825051 | 0.132559 | 0 | 0.169492 | 0 | 0 | 0.203987 | 0.083449 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.169492 | 0 | 0.186441 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978cde5321a929429de3c79977503bd4e3e5a2f0 | 12,183 | py | Python | odoo-13.0/addons/sale/tests/test_onchange.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 12 | 2021-03-26T08:39:40.000Z | 2022-03-16T02:20:10.000Z | odoo-13.0/addons/sale/tests/test_onchange.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 13 | 2020-12-20T16:00:21.000Z | 2022-03-14T14:55:30.000Z | odoo-13.0/addons/sale/tests/test_onchange.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 17 | 2020-08-31T11:18:49.000Z | 2022-02-09T05:57:31.000Z | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import Form
from odoo.tests.common import TransactionCase
class TestOnchangeProductId(TransactionCase):
"""Test that when an included tax is mapped by a fiscal position, the included tax must be
    subtracted from the price of the product.
"""
def setUp(self):
super(TestOnchangeProductId, self).setUp()
self.fiscal_position_model = self.env['account.fiscal.position']
self.fiscal_position_tax_model = self.env['account.fiscal.position.tax']
self.tax_model = self.env['account.tax']
self.so_model = self.env['sale.order']
self.po_line_model = self.env['sale.order.line']
self.res_partner_model = self.env['res.partner']
self.product_tmpl_model = self.env['product.template']
self.product_model = self.env['product.product']
self.product_uom_model = self.env['uom.uom']
self.supplierinfo_model = self.env["product.supplierinfo"]
self.pricelist_model = self.env['product.pricelist']
def test_onchange_product_id(self):
uom_id = self.product_uom_model.search([('name', '=', 'Units')])[0]
pricelist = self.pricelist_model.search([('name', '=', 'Public Pricelist')])[0]
partner_id = self.res_partner_model.create(dict(name="George"))
tax_include_id = self.tax_model.create(dict(name="Include tax",
amount='21.00',
price_include=True,
type_tax_use='sale'))
tax_exclude_id = self.tax_model.create(dict(name="Exclude tax",
amount='0.00',
type_tax_use='sale'))
product_tmpl_id = self.product_tmpl_model.create(dict(name="Voiture",
list_price=121,
taxes_id=[(6, 0, [tax_include_id.id])]))
product_id = product_tmpl_id.product_variant_id
fp_id = self.fiscal_position_model.create(dict(name="fiscal position", sequence=1))
fp_tax_id = self.fiscal_position_tax_model.create(dict(position_id=fp_id.id,
tax_src_id=tax_include_id.id,
tax_dest_id=tax_exclude_id.id))
# Create the SO with one SO line and apply a pricelist and fiscal position on it
order_form = Form(self.env['sale.order'].with_context(tracking_disable=True))
order_form.partner_id = partner_id
order_form.pricelist_id = pricelist
order_form.fiscal_position_id = fp_id
with order_form.order_line.new() as line:
line.name = product_id.name
line.product_id = product_id
line.product_uom_qty = 1.0
line.product_uom = uom_id
sale_order = order_form.save()
# Check the unit price of SO line
        self.assertEqual(100, sale_order.order_line[0].price_unit, "The included tax must be subtracted from the price")
def test_pricelist_application(self):
""" Test different prices are correctly applied based on dates """
support_product = self.env.ref('product.product_product_2')
support_product.list_price = 100
partner = self.res_partner_model.create(dict(name="George"))
christmas_pricelist = self.env['product.pricelist'].create({
'name': 'Christmas pricelist',
'item_ids': [(0, 0, {
'date_start': "2017-12-01",
'date_end': "2017-12-24",
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 20,
'applied_on': '3_global',
'name': 'Pre-Christmas discount'
}), (0, 0, {
'date_start': "2017-12-25",
'date_end': "2017-12-31",
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 50,
'applied_on': '3_global',
'name': 'Post-Christmas super-discount'
})]
})
# Create the SO with pricelist based on date
order_form = Form(self.env['sale.order'].with_context(tracking_disable=True))
order_form.partner_id = partner
order_form.date_order = '2017-12-20'
order_form.pricelist_id = christmas_pricelist
with order_form.order_line.new() as line:
line.product_id = support_product
so = order_form.save()
# Check the unit price and subtotal of SO line
self.assertEqual(so.order_line[0].price_unit, 80, "First date pricelist rule not applied")
        self.assertEqual(so.order_line[0].price_subtotal, so.order_line[0].price_unit * so.order_line[0].product_uom_qty, 'Total of SO line should be a multiplication of unit price and ordered quantity')
# Change order date of the SO and check the unit price and subtotal of SO line
with Form(so) as order:
order.date_order = '2017-12-30'
with order.order_line.edit(0) as line:
line.product_id = support_product
self.assertEqual(so.order_line[0].price_unit, 50, "Second date pricelist rule not applied")
        self.assertEqual(so.order_line[0].price_subtotal, so.order_line[0].price_unit * so.order_line[0].product_uom_qty, 'Total of SO line should be a multiplication of unit price and ordered quantity')
def test_pricelist_uom_discount(self):
""" Test prices and discounts are correctly applied based on date and uom"""
computer_case = self.env.ref('product.product_product_16')
computer_case.list_price = 100
partner = self.res_partner_model.create(dict(name="George"))
categ_unit_id = self.ref('uom.product_uom_categ_unit')
goup_discount_id = self.ref('product.group_discount_per_so_line')
self.env.user.write({'groups_id': [(4, goup_discount_id, 0)]})
new_uom = self.env['uom.uom'].create({
'name': '10 units',
'factor_inv': 10,
'uom_type': 'bigger',
'rounding': 1.0,
'category_id': categ_unit_id
})
christmas_pricelist = self.env['product.pricelist'].create({
'name': 'Christmas pricelist',
'discount_policy': 'without_discount',
'item_ids': [(0, 0, {
'date_start': "2017-12-01",
'date_end': "2017-12-30",
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 10,
'applied_on': '3_global',
'name': 'Christmas discount'
})]
})
so = self.env['sale.order'].create({
'partner_id': partner.id,
'date_order': '2017-12-20',
'pricelist_id': christmas_pricelist.id,
})
order_line = self.env['sale.order.line'].new({
'order_id': so.id,
'product_id': computer_case.id,
})
# force compute uom and prices
order_line.product_id_change()
order_line.product_uom_change()
order_line._onchange_discount()
self.assertEqual(order_line.price_subtotal, 90, "Christmas discount pricelist rule not applied")
        self.assertEqual(order_line.discount, 10, "Christmas discount not equal to 10%")
order_line.product_uom = new_uom
order_line.product_uom_change()
order_line._onchange_discount()
self.assertEqual(order_line.price_subtotal, 900, "Christmas discount pricelist rule not applied")
        self.assertEqual(order_line.discount, 10, "Christmas discount not equal to 10%")
def test_pricelist_based_on_other(self):
""" Test price and discount are correctly applied with a pricelist based on an other one"""
computer_case = self.env.ref('product.product_product_16')
computer_case.list_price = 100
partner = self.res_partner_model.create(dict(name="George"))
goup_discount_id = self.ref('product.group_discount_per_so_line')
self.env.user.write({'groups_id': [(4, goup_discount_id, 0)]})
first_pricelist = self.env['product.pricelist'].create({
'name': 'First pricelist',
'discount_policy': 'without_discount',
'item_ids': [(0, 0, {
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 10,
'applied_on': '3_global',
'name': 'First discount'
})]
})
second_pricelist = self.env['product.pricelist'].create({
'name': 'Second pricelist',
'discount_policy': 'without_discount',
'item_ids': [(0, 0, {
'compute_price': 'formula',
'base': 'pricelist',
'base_pricelist_id': first_pricelist.id,
'price_discount': 10,
'applied_on': '3_global',
'name': 'Second discount'
})]
})
so = self.env['sale.order'].create({
'partner_id': partner.id,
'date_order': '2018-07-11',
'pricelist_id': second_pricelist.id,
})
order_line = self.env['sale.order.line'].new({
'order_id': so.id,
'product_id': computer_case.id,
})
# force compute uom and prices
order_line.product_id_change()
order_line._onchange_discount()
self.assertEqual(order_line.price_subtotal, 81, "Second pricelist rule not applied")
self.assertEqual(order_line.discount, 19, "Second discount not applied")
def test_pricelist_with_other_currency(self):
""" Test prices are correctly applied with a pricelist with an other currency"""
computer_case = self.env.ref('product.product_product_16')
computer_case.list_price = 100
partner = self.res_partner_model.create(dict(name="George"))
categ_unit_id = self.ref('uom.product_uom_categ_unit')
other_currency = self.env['res.currency'].create({'name': 'other currency',
'symbol': 'other'})
self.env['res.currency.rate'].create({'name': '2018-07-11',
'rate': 2.0,
'currency_id': other_currency.id,
'company_id': self.env.company.id})
self.env['res.currency.rate'].search(
[('currency_id', '=', self.env.company.currency_id.id)]
).unlink()
new_uom = self.env['uom.uom'].create({
'name': '10 units',
'factor_inv': 10,
'uom_type': 'bigger',
'rounding': 1.0,
'category_id': categ_unit_id
})
# This pricelist doesn't show the discount
first_pricelist = self.env['product.pricelist'].create({
'name': 'First pricelist',
'currency_id': other_currency.id,
'discount_policy': 'with_discount',
'item_ids': [(0, 0, {
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 10,
'applied_on': '3_global',
'name': 'First discount'
})]
})
so = self.env['sale.order'].create({
'partner_id': partner.id,
'date_order': '2018-07-12',
'pricelist_id': first_pricelist.id,
})
order_line = self.env['sale.order.line'].new({
'order_id': so.id,
'product_id': computer_case.id,
})
# force compute uom and prices
order_line.product_id_change()
self.assertEqual(order_line.price_unit, 180, "First pricelist rule not applied")
order_line.product_uom = new_uom
order_line.product_uom_change()
self.assertEqual(order_line.price_unit, 1800, "First pricelist rule not applied")
| 44.790441 | 204 | 0.583682 | 1,442 | 12,183 | 4.690014 | 0.136616 | 0.050569 | 0.019518 | 0.023658 | 0.672926 | 0.607275 | 0.577406 | 0.538518 | 0.508206 | 0.46296 | 0 | 0.025983 | 0.298695 | 12,183 | 271 | 205 | 44.95572 | 0.765566 | 0.075187 | 0 | 0.568182 | 0 | 0 | 0.238723 | 0.024336 | 0 | 0 | 0 | 0 | 0.059091 | 1 | 0.027273 | false | 0 | 0.009091 | 0 | 0.040909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978e392b87cb419fb505337ab4cee06b350097b0 | 8,390 | py | Python | IEX_29id/utils/folders.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | null | null | null | IEX_29id/utils/folders.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | 1 | 2021-11-10T02:00:41.000Z | 2021-11-11T03:02:23.000Z | IEX_29id/utils/folders.py | kellyjelly0904/macros_29id | 573946d13eee7f85da049ac666b5dd2d18d19bb1 | [
"MIT"
] | 2 | 2021-09-28T21:19:47.000Z | 2021-10-12T20:51:43.000Z | from epics import caput
from IEX_29id.utils.exp import Check_run, BL_Mode_Set, BL_ioc
from IEX_29id.mda.file import MDA_CurrentDirectory
from IEX_29id.mda.file import MDA_CurrentRun
import os
import re
import time
def Make_DataFolder(run,folder,UserName,scanIOC,ftp): #JM was here ->print full crontab command and change permissions on kip -still needs work!
"""
Creates the User Folder on the dserv
if ftp = True: creates the folders on kip (ftp server) and modifies the cronjob
"""
crontime={
'mda2ascii':'0,30 * * * * ',
'chmod':'1,31 * * * * ',
'data_other':'2,32 * * * * ',
'notebook':'*/3 * * * * ',
}
    if (folder == 'c' or folder == 'd'):
if ftp:
print('-------------------------------------------------------------')
#mda2ascii
MyPath_kip_run='/net/kip/sftp/pub/29id'+folder+'ftp/files/'+run+'/'
MyPath_kip='/net/kip/sftp/pub/29id'+folder+'ftp/files/'+run+'/'+UserName+'/'
cmd_mda2ascii=crontime['mda2ascii']+' /net/s29dserv/APSshare/bin/mda2ascii -d '+MyPath_kip+'ascii '+MyPath_kip+'mda/*.mda'
print(cmd_mda2ascii)
#chmode
cmd_chmod=crontime['chmod']+' chmod 664 '+MyPath_kip+'ascii/*.asc'
print(cmd_chmod)
#notebooks
cmd_notebook=crontime['notebook']+' /usr/bin/rsync -av --exclude=core /home/beams22/29IDUSER/Documents/User_Folders/'+UserName+'/* kip:'+MyPath_kip+'notebook > /home/beams22/29ID/cronfiles/cptoftp-currrun-d-User.log 2>&1'
print(cmd_notebook)
print('-------------------------------------------------------------\n\n')
#making folders
print("\n\n")
print(MyPath_kip)
print(MyPath_kip+"ascii")
if not (os.path.exists(MyPath_kip_run)):
os.mkdir(MyPath_kip_run)
os.chmod(MyPath_kip_run, 0o775)
if not (os.path.exists(MyPath_kip)):
os.mkdir(MyPath_kip)
os.chmod(MyPath_kip, 0o775)
if not (os.path.exists(MyPath_kip+"ascii")):
os.mkdir(MyPath_kip+'ascii')
os.chmod(MyPath_kip+'ascii', 0o775)
if not (os.path.exists(MyPath_kip+"notebook")):
os.mkdir(MyPath_kip+"notebook")
os.chmod(MyPath_kip+"notebook", 0o775)
else:
print("To create ftp folders & update contrab, you need to run the following as 29id:")
print("\tFolder_"+str(scanIOC)+"('"+str(run)+"','"+str(UserName)+"',ftp=True)")
MyPath_File='/home/beams/29IDUSER/Documents/User_Folders/'+UserName
UserName = "/"+UserName
if not (os.path.exists(MyPath_File)):
os.mkdir(MyPath_File)
#if folder == 'd':
#MyPath_File_hkl='/home/beams/29IDUSER/Documents/User_Folders/'+UserName+'/hkl'
#if not(os.path.exists(MyPath_File_hkl)):
# os.mkdir(MyPath_File_hkl)
if folder == 'b':
UserName = ''
#MyPath_run='/net/s29data/export/data_29id'+folder+'/'+run
MyPath_run=os.path.dirname(_userDataFolder(UserName,scanIOC))
if not (os.path.exists(MyPath_run)):
os.mkdir(MyPath_run)
#MyPath_Data=MyPath_run+UserName
MyPath_Data=_userDataFolder(UserName,scanIOC)
if not (os.path.exists(MyPath_Data)):
os.mkdir(MyPath_Data)
def _userDataFolder(userName,scanIOC,**kwargs):
"""
Returns the path to a user folder
dataFolder='/net/s29data/export/data_29id'+folder+'/'+run+'/'+userName
kwargs:
run: Check_run(); unless specified
BLmode: Staff / User; based on userName unless specified
folder: determined by UserName and scanIOC
folder = b (Staff)
folder = c (User and ARPES)
folder = d (User and Kappa)
"""
kwargs.setdefault('run',Check_run())
folder=""
run=kwargs['run']
if userName == 'Staff':
folder="b"
if "BLmode" in kwargs:
BL_Mode_Set(kwargs["BLmode"])
else:
BL_Mode_Set("Staff")
else:
BL_Mode_Set("User")
if scanIOC=="ARPES":
folder="c"
if scanIOC=="Kappa":
folder="d"
dataFolder='/net/s29data/export/data_29id'+folder+'/'+run+'/'+userName
return dataFolder
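# Example with hypothetical arguments: _userDataFolder('Smith', 'ARPES', run='2021_2')
# sets the beamline to User mode and returns
# '/net/s29data/export/data_29idc/2021_2/Smith' (folder 'c' for ARPES users).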
def _filename_key(filename):
return (len(filename), filename)
def Folder_mda(run,folder,UserName,scanIOC):
"""
For Staff: folder='b', UserName='Staff'
For ARPES: folder ='c'
For Kappa or RSoXS: folder = 'd'
"""
FilePrefix=scanIOC
if UserName == 'Staff':
UserName=""
else:
UserName=UserName+"/"
MyPath="/net/s29data/export/data_29id"+folder+"/"+run+"/"+UserName+"mda"
print("\nMDA folder: " + MyPath)
if not (os.path.exists(MyPath)):
os.mkdir(MyPath)
FileNumber=1
else:
FileNumber=getNextFileNumber(MyPath,FilePrefix)
if scanIOC=="Test" or scanIOC=="Kappa" or scanIOC=="ARPES" or scanIOC=="RSoXS":
caput("29id"+scanIOC+":saveData_fileSystem","/net/s29data/export/data_29id"+folder+"/"+run)
        time.sleep(0.25) #needed so that it has time to write
caput("29id"+scanIOC+":saveData_subDir","/"+UserName+"mda")
else:
caput("29id"+scanIOC+":saveData_fileSystem","//s29data/export/data_29id"+folder+"/"+run)
        time.sleep(0.25)
caput("29id"+scanIOC+":saveData_subDir",UserName+"mda")
caput("29id"+scanIOC+":saveData_baseName",FilePrefix+"_")
caput("29id"+scanIOC+":saveData_scanNumber",FileNumber)
def getNextFileNumber(data_dir, file_prefix,**kwargs):
"""
gets the next file number for the pattern
data_dir/file_prefix_filenum
kwargs:
debug = False (default); if True then print lo
q = True (default); if False then prints next file number
"""
kwargs.setdefault("debug",False)
kwargs.setdefault("q",True)
    onlyfiles = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f)) and f[:len(file_prefix)] == file_prefix]
sortedfiles = sorted(onlyfiles, key=_filename_key)
pattern = re.compile('(.*)_(.*)\.(.*)')
try:
lastFile = sortedfiles[-1]
except IndexError as errObj:
nextFileNumber = 1
if kwargs["debug"]:
print("Data directory = ", data_dir)
print("File prefix =", file_prefix)
print("File number =", None)
print("File extension =", "TBD")
print("Next File number =", nextFileNumber)
else:
matchObj = pattern.match(lastFile)
nextFileNumber = int(matchObj.group(2)) + 1
if kwargs["debug"]:
print("Data directory = ", data_dir)
print("File prefix =", matchObj.group(1))
print("File number =", matchObj.group(2))
print("File extension =", matchObj.group(3))
print("Next File number =", nextFileNumber)
if kwargs["q"] == False:
print("Next File Number: ",nextFileNumber)
return nextFileNumber
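# Example with hypothetical directory contents: if data_dir holds 'ARPES_0001.mda'
# and 'ARPES_0002.mda', getNextFileNumber(data_dir, 'ARPES') parses the highest
# numbered match and returns 3; an empty directory yields 1 via the IndexError branch.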
def Check_Staff_Directory(**kwargs):
"""
    Switches to the staff directory for the current run,
    using Folder_ARPES or Folder_Kappa depending on the scan IOC.
"""
kwargs.setdefault("scanIOC",BL_ioc())
kwargs.setdefault("run",Check_run())
scanIOC=kwargs["scanIOC"]
run= kwargs["run"]
directory = MDA_CurrentDirectory(scanIOC)
current_run = MDA_CurrentRun(scanIOC)
if directory.find('data_29idb') < 1 or current_run != run:
print('You are not currently saving in the Staff directory and/or the desired run - REPLY "yes" to switch folder.\nThis will only work if the run directory already exists.\nOtherwise, you must open ipython as 29id to create a new run directory using:\n\tFolder_'+scanIOC+'(run,\'Staff\')')
foo=input('\nAre you ready to switch to the '+run+' Staff directory? >')
if foo == 'Y' or foo == 'y' or foo == 'yes'or foo == 'YES':
print('Switching directory...')
if scanIOC=='ARPES':
Folder_ARPES('Staff',mdaOnly=True,**kwargs)
elif scanIOC=='Kappa':
Folder_Kappa('Staff',create_only=False)
else:
print('\nFolder not set.')
else:
print('Staff directory OK.')
directory = MDA_CurrentDirectory(scanIOC)
print('\nCurrent directory: '+directory)
| 40.143541 | 297 | 0.590703 | 998 | 8,390 | 4.846693 | 0.229459 | 0.037213 | 0.013025 | 0.020467 | 0.251809 | 0.198677 | 0.193922 | 0.123217 | 0.095514 | 0.038867 | 0 | 0.018856 | 0.254112 | 8,390 | 208 | 298 | 40.336538 | 0.754075 | 0.148272 | 0 | 0.128378 | 0 | 0.02027 | 0.249498 | 0.066495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.040541 | 0.006757 | 0.101351 | 0.182432 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978f1fcb3bef9348f27a0824ad9894eb219d2595 | 2,932 | py | Python | sceance/set_watchlist.py | sjmignot/film-to-cal | 82d5e96b65197ff96522324d6527fca6f18cc76b | [
"MIT"
] | 6 | 2020-02-05T21:31:57.000Z | 2020-03-08T00:35:16.000Z | sceance/set_watchlist.py | sjmignot/film-to-cal | 82d5e96b65197ff96522324d6527fca6f18cc76b | [
"MIT"
] | null | null | null | sceance/set_watchlist.py | sjmignot/film-to-cal | 82d5e96b65197ff96522324d6527fca6f18cc76b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Starts a Firefox headless browser to see if movies on your watchlist are playing
at any of your favorite theaters.
Favorite theaters are taken from a txt file (extracted from "theaters.txt").
These showtimes are compared to a watchlist (extracted from "watchlist.txt")
-samuel mignot-
'''
# ------------------- #
# imports #
# ------------------- #
import configparser
import os
from os import listdir
from os.path import isfile, join
# internal
import file_helpers
# ------------------- #
# constants #
# ------------------- #
THIS_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = '/data'
WATCHLIST_DIR = '/watchlists'
OSCARS_WATCHLIST = 'oscar-winners-watchlist.txt'
ERROR_MESSAGE = {
"watchlist_select": "response must be an integer in the range 1-{max_show}",
"yes_no": "response must be one of the following: 'yes', 'y', 'no', 'n'"
}
def robert_easter_eggers(original_choice):
res = get_input(
f"Are you sure you want to set your default watchlist to...the oscars..? [y/n]: ",
{'y', 'yes', 'no', 'n'},
ERROR_MESSAGE['yes_no']
)
if res in {'y', 'yes'}:
res = get_input(
f"The awards that gave Best Picture to Forrest Gump over Pulp fiction..? [y/n]: ",
{'y', 'yes', 'no', 'n'},
ERROR_MESSAGE['yes_no']
)
if res in {'y', 'yes'}:
print('... Aight...')
return original_choice
if res in {'n', 'no'}:
return set_watchlist(no_oscars=True)
def get_watchlists():
mypath = THIS_DIRECTORY+DATA_DIR+WATCHLIST_DIR
return [f for f in listdir(mypath) if isfile(join(mypath, f))]
def get_input(question, response_format, error_message):
''' loops until a response that is in response_format is met'''
while True:
res = input(question)
if res in response_format:
return res
print(error_message)
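# Example with a hypothetical prompt: get_input("continue? [y/n]: ", {'y', 'n'},
# "response must be 'y' or 'n'") keeps re-prompting until the user types
# 'y' or 'n', then returns that string.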
def set_watchlist(no_oscars=False):
'''for each movie left filtering, asks the user if they want to watch it and provides showtimes to pick from'''
watchlist_files = get_watchlists()
if(no_oscars): watchlist_files = list(filter(lambda x: x!=OSCARS_WATCHLIST, watchlist_files))
for i, watchlist in enumerate(watchlist_files, start=1):
print(f"[{i}]: {watchlist}")
res = get_input(
f"select your watchlist [1-{len(watchlist_files)} or n to cancel]: ",
set(map(str, range(1, len(watchlist_files)+1)))|{'n', 'no'},
ERROR_MESSAGE['watchlist_select'].format(max_show=str(len(watchlist_files)))
)
print()
if res in {'n', 'no'}:
return None
chosen_watchlist = watchlist_files[int(res)-1]
if(not no_oscars):
if chosen_watchlist == OSCARS_WATCHLIST:
chosen_watchlist = robert_easter_eggers(chosen_watchlist)
return chosen_watchlist
if __name__ == "__main__":
set_watchlist()
| 32.21978 | 115 | 0.62824 | 391 | 2,932 | 4.531969 | 0.365729 | 0.063205 | 0.019752 | 0.020316 | 0.059819 | 0.059819 | 0.041761 | 0.041761 | 0.041761 | 0.041761 | 0 | 0.00308 | 0.224761 | 2,932 | 90 | 116 | 32.577778 | 0.776507 | 0.206003 | 0 | 0.186441 | 0 | 0 | 0.216558 | 0.022658 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.084746 | 0 | 0.254237 | 0.067797 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
978fd61709e98a6a4163084a70975c96d6b7f512 | 21,143 | py | Python | ARHMM-code/olfactory_search_xval.py | Smear-Lab/Olfactory_Search | 92ea57cdd49b9c1d88ffe5d7b18a0be2cd73f0ff | [
"MIT"
] | null | null | null | ARHMM-code/olfactory_search_xval.py | Smear-Lab/Olfactory_Search | 92ea57cdd49b9c1d88ffe5d7b18a0be2cd73f0ff | [
"MIT"
] | null | null | null | ARHMM-code/olfactory_search_xval.py | Smear-Lab/Olfactory_Search | 92ea57cdd49b9c1d88ffe5d7b18a0be2cd73f0ff | [
"MIT"
] | null | null | null | #Misc
import os, time, argparse
import h5py, json
import glob, fnmatch,pdb
from tqdm import tqdm
import multiprocessing
#Base
import numpy as np
import pandas as pd
import scipy.stats as st
from sklearn.model_selection import StratifiedKFold
#Plotting
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
#State-Space Modeling
#S.Linderman
import ssm
#M.Johnson
from pyhsmm.util.text import progprint_xrange
from pybasicbayes.distributions import Gaussian, AutoRegression
import autoregressive.models as pyhmm
#User Modules
import utilities as util
import plotting_YAA as plots_YAA
##===== Run Command =====##
# OMP_NUM_THREADS=1 python olfactory_search_xval.py --model_type "ARHMM_MJ" --Kmin 12 --Kmax 20
##===== ============================ =====##
##===== Parse Command Line Arguments =====##
parser = argparse.ArgumentParser(description='ARHMM Mouse')
parser.add_argument('--save',type=bool, default=1,
help='Save Results?')
parser.add_argument('--json_dir', type=str,
help='Directory Path of model parameter json file; not required if using other arguments')
##===== Data Options =====##
parser.add_argument('--mID',type=str, default='all_mice',
help='mouse to fit model to')
parser.add_argument('--condition', type=str, default='all_conds',
help='trial condition type')
parser.add_argument('--data_type', type=str, default='BHNx',
help='BHNx vs BHNxv vs EgoAllo_xv')
parser.add_argument('--HMM_inputs', type=str, default='BHNx',
help='BHNx vs BHNxv')
parser.add_argument('--x_units', type=str, default='pixels',
help='pixels or arena_length')
##===== Model Type =====##
parser.add_argument('--model_type', type=str, default='ARHMM_MJ',
help='ARHMM_SL or ARHMM_MJ')
parser.add_argument('--robust', type=bool, default=0,
help='autoregressive(0) or robust_autoregressive(1)')
parser.add_argument('--sticky', type=bool, default=0,
help='standard(0) or sticky(1) ARHMM')
parser.add_argument('--inputdriven', type=bool, default=0,
help='HMM transitions dependent on some input in addition to previous HMM state')
##===== Model Parameters =====##
parser.add_argument('--kappa', type=float, default=1e5,
help='sticky arhmm kappa')
parser.add_argument('--AR_lags', type=str, default=1,
help='Autoregressive lags')
parser.add_argument('--l2_penalty_A', type=float, default=0,
help='AR l2_penalty_A')
parser.add_argument('--l2_penalty_b', type=float, default=0,
help='AR l2_penalty_b')
parser.add_argument('--l2_penalty_V', type=float, default=0,
help='AR l2_penalty_V')
parser.add_argument('--MAP_threshold', type=float, default=0.80,
help='MAP threshold')
parser.add_argument('--nGibbs', type=int, default=200,
help='number of iterations to run the Gibbs sampler')
parser.add_argument('--burn_fraction', type=float, default=0.66,
                    help='fraction of Gibbs samples discarded as burn-in before computing the MAP sequence; with nGibbs = 400 and burn_fraction = 0.66, the first 264 samples are burned')
##===== Run Options =====##
parser.add_argument('--Kmin', type=int, default=80,
help='minimum number of HMM states')
parser.add_argument('--Kmax', type=int, default=100,
help='maximum number of HMM states')
parser.add_argument('--kXval', type=int, default=5,
help='number of kfold')
parser.add_argument('--EM_tolerance', type=float, default=1e-5,
help='SSM EM algorithm tolerance')
parser.add_argument('--EM_iters', type=int, default=200,
help='EM Iterations')
parser.add_argument('--max_processes', type=int, default=18,
help='max # of parallel processes to run')
args = parser.parse_args()
def set_arhmm_hyperparams(opt,K):
D_obs = opt['D_obs']
Mobs = 0
#Autoregressive keyword arguments
ar_kwargs = dict(
# l2_penalty_A= args_dic['l2_penalty_A'],
# l2_penalty_b= args_dic['l2_penalty_b'],
# l2_penalty_V= args_dic['l2_penalty_V'],
lags = opt['AR_lags']
)
#HMM Transition parameters
trans_kwargs = dict(
# alpha= args_dic['alpha'],
)
#Gaussian or t-distribution
if not opt['robust']:
observation_type = "autoregressive"
else:
observation_type = "robust_autoregressive"
#What model are we going to run?
if not opt['inputdriven']:
M = 0
if not opt['sticky']:
if opt['model_type'] == 'ARHMM_MJ':
print('Bayesian ARHMM')
else:
print('Vanilla ARHMM')
transition_type = "standard"
else:
print('sticky ARHMM')
transition_type = "sticky"
trans_kwargs['kappa'] = opt['kappa']
else:
M = D_obs
# trans_kwargs['l2_penalty'] = args_dic['l2_penalty_W'] #coeff of l2-regul penalty on W (weights of logistic regression)
transition_type = "inputdriven"
if not opt['sticky']:
print('input-driven ARHMM')
else:
print('input-driven sticky ARHMM')
trans_kwargs['kappa'] = opt['kappa']
#If we're using matt Johnsons code, most of the above parameters don't matter
#Initialize Observation distribution and set it to ar_kwargs
if opt['model_type'] == 'ARHMM_MJ':
affine = True
dynamics_hypparams = \
dict(nu_0=D_obs + 2,
S_0=np.eye(D_obs),
M_0=np.hstack((np.eye(D_obs), np.zeros((D_obs,int(affine))))),
K_0=np.eye(D_obs + affine),
affine=affine)
# Initialize a list of autorgressive objects given the size of the
# observations and number of max discrete states
ar_kwargs = [AutoRegression(A=np.column_stack((0.99 * np.eye(D_obs),\
np.zeros((D_obs, int(affine))))),sigma=np.eye(D_obs),\
**dynamics_hypparams) for _ in range(K)]
return D_obs, M, Mobs, observation_type, ar_kwargs, transition_type, trans_kwargs
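# Example of the returned configuration for hypothetical options: with
# model_type='ARHMM_SL', robust=0, sticky=0, inputdriven=0, and AR_lags=1, this
# yields observation_type='autoregressive', transition_type='standard', and M=0.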
def make_hyperparams_dic(opt, K, M, trans_kwargs, ar_kwargs):
hyperparams = opt.copy()
del hyperparams['Kmin'], hyperparams['Kmax']
hyperparams['K'] = K
hyperparams['M'] = M
# hyperparams['Mobs'] = Mobs
hyperparams['trans_kwargs'] = trans_kwargs
if opt['model_type'] == 'ARHMM_SL':
hyperparams['ar_kwargs'] = ar_kwargs
return hyperparams
def arhmm_bayesian_fit(arhmm, data_train, data_test, opt, i_fold):
# Add test data to ARHMM
for data in data_train:
# Add data per trial
arhmm.add_data(data)
#Create data structures to contain gibb samples
nGibbs = opt['nGibbs']
nTrials = len(data_train)
K = arhmm.num_states; D_obs = arhmm.D;
stateseq_smpls = [[] for i in range(nTrials)]
AB_smpls = np.zeros((nGibbs,K,D_obs,D_obs+1))
sqrt_sigmas_smpls = np.zeros((nGibbs,K,D_obs,D_obs))
trans_matrix_smpls = np.zeros((nGibbs,K,K))
GibbsLLs = np.zeros((nGibbs))
# Loop over samples
for iSample in tqdm(range(nGibbs)):
# Sample Model
arhmm.resample_model()
#keep track of model log_likelihood's as a check for "convergence"
GibbsLLs[iSample] = arhmm.log_likelihood()
# Append each Gibbs sample for each trial
for iTrial in range(len(arhmm.states_list)):
stateseq_smpls[iTrial].append(arhmm.states_list[iTrial].stateseq.copy())
# Append the ARHMM matrix A and transition matrix for this sample
for state in range(K):
AB_smpls[iSample,state] = arhmm.obs_distns[state].A.copy()
sqrt_sigmas_smpls[iSample,state] = np.linalg.cholesky(arhmm.obs_distns[state].sigma)
trans_matrix_smpls[iSample] = arhmm.trans_distn.trans_matrix.copy()
# Calculate the mean A, B, and transition matrix for all
burn = opt['burn_fraction']
ABs_mean = np.mean(AB_smpls[int(burn*nGibbs):],axis=0)
As = ABs_mean[:,:,:D_obs]; Bs = ABs_mean[:,:,D_obs]
sqrt_Sigmas = np.mean(sqrt_sigmas_smpls[int(burn*nGibbs):],axis=0)
obs = {'ABs': ABs_mean, 'As': As,'Bs': Bs, 'sqrt_Sigmas': sqrt_Sigmas}
log_mean_transition_matrix = np.log(np.mean(trans_matrix_smpls[int(burn*nGibbs):,:,:],axis=0))
trans = {'log_Ps': log_mean_transition_matrix}
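    # Worked example of the burn-in averaging above (hypothetical settings): with
    # nGibbs = 200 and burn_fraction = 0.66, int(burn*nGibbs) = 132, so parameter
    # means are taken over the last 68 Gibbs samples only.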
init = {'P0': arhmm.init_state_distn.pi_0}
param_dict = {}
param_dict['transitions'] = trans
param_dict['observations'] = obs
param_dict['init_state_distn'] = init
#llhood of heldout
ll_heldout = arhmm.log_likelihood(data=data_test)
state_usage = arhmm.state_usages
#Lists to contain import stuffff
trMAPs = []
trPosteriors = []
trMasks = []
#Plot convergence here
SaveDir, fname_sffx = util.make_sub_dir(K, opt, i_fold)
plots_YAA.plot_model_convergence(stateseq_smpls, AB_smpls, trans_matrix_smpls, GibbsLLs, sorted(arhmm.used_states), SaveDir, fname='-'.join(('Model_convergence',fname_sffx))+'.pdf')
#All of the data has been used to fit the model
#All of the data is contained with the ARHMM object already
if i_fold == -1:
#Calculate the MAP estimate
for iTrial in range(nTrials):
# Take the gibbs samples after the burn fraction to construct MAP
z_smpls = np.array(stateseq_smpls[iTrial][int(burn*nGibbs):])
state_probs_trial = []
for state in range(K):
state_occurances = np.isin(z_smpls,state)
state_probs_trial.append(np.sum(state_occurances,axis=0)/z_smpls.shape[0])
#Save the maximum posterior probability for each time step
pprob = np.vstack((np.zeros((1,K)),np.array(state_probs_trial).T))
trPosteriors.append(pprob)
mask = np.max(pprob,axis=1) < opt['MAP_threshold']
trMasks.append(mask)
#Use the maximum posterior probability to determine a robust MAP State sequence
MAP = np.hstack(([-1],np.ndarray.flatten(st.mode(z_smpls)[0])))
#Add MAP to list
trMAPs.append(MAP)
#Else this is a fold of the x-validation
else:
#Get the state sequences and state marginal distributions of the heldout data
for data in data_test:
#Get state marginals
state_marginals = arhmm.heldout_state_marginals(data)
trPosteriors.append(state_marginals)
#Create mask
mask = np.max(state_marginals,axis=1) < opt['MAP_threshold']
trMasks.append(mask)
#Get the state sequence with the max probability
stateseq = np.argmax(state_marginals,axis=1)
trMAPs.append(stateseq)
return trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict, GibbsLLs
def map_seq_n_usage(arhmm, data_test, opt, inputs=None):
"""
Compute the local MAP state (arg-max of marginal state probabilities at each time step)
and overall state usages.
thresh: if marginal probability of MAP state is below threshold, replace with np.nan
(or rather output a mask array with nan's in those time steps)
Also output average state usages and the marginal state probabilities
"""
T = 0; ll_heldout = 0
state_usage = np.zeros(arhmm.K)
trMAPs = []
trPosteriors = []
trMasks = []
#Loop over data to obtain MAP sequence for each trial
for index, data in enumerate(data_test):
#Get state probabilities and log-likelihood
if opt['inputdriven']:
inputdata = inputs[index]
Ez, _, ll = arhmm.expected_states(data,input=inputdata)
else:
Ez, _, ll = arhmm.expected_states(data)
#Update number of data points, state usage, and llood of data
T += Ez.shape[0]
state_usage += Ez.sum(axis=0)
ll_heldout += ll
#maximum a posteriori probability estimate of states
map_seq = np.argmax(Ez,axis=1)
max_prob = Ez[np.r_[0:Ez.shape[0]],map_seq]
#Save sequences
trMAPs.append(map_seq)
trPosteriors.append(Ez)
trMasks.append(max_prob < opt['MAP_threshold'])
#Normalize
state_usage /= T
#Get parameters from ARHMM object
param_dict = util.params_to_dict(arhmm.params, HMM_INPUTS = opt['inputdriven'], ROBUST = opt['robust'])
return trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict
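# Worked example of the masking in map_seq_n_usage, with hypothetical marginals:
# for a time step with Ez = [0.15, 0.70, 0.15] and MAP_threshold = 0.80, the MAP
# state is 1, but the mask flags the step because its max marginal 0.70 < 0.80.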
def fit_arhmm_get_llhood(data_list, trsum, K, opt, train_inds=None, test_inds=None, i_fold=-1):
#Go!
startTime = time.time()
#Separate the data into a training and test set based on the indices given
if train_inds is not None and test_inds is not None:
data_train = [data_list[ii] for ii in train_inds]
data_test = [data_list[ii] for ii in test_inds]
trsum_test = trsum.iloc[test_inds]
else:
#fit model on all data
data_train = data_list
data_test = data_list
trsum_test = trsum
#adding 10 so i_fold == -1 case doesn't give error
np.random.seed(10+i_fold)
# set hyperparameters
D_obs, M, Mobs, observation_type, ar_kwargs, transition_type, trans_kwargs = set_arhmm_hyperparams(opt,K)
##===== Create the ARHMM object either from Scott's package =====##
if opt['model_type'] == 'ARHMM_SL':
arhmm = ssm.HMM(K, D_obs, M=M,
observations=observation_type, observation_kwargs=ar_kwargs,
transitions=transition_type, transition_kwargs=trans_kwargs)
if opt['inputdriven']:
#Separate inputs from the data_list into training and test sets
raise Exception('TODO: Separate inputs from the data_list into training and test sets')
else:
inputs_train = None
inputs_test = None
##===== Fit on training data =====##
model_convergence = arhmm.fit(data_train, inputs=inputs_train, method="em", num_em_iters=opt['EM_iters'], tolerance=opt['EM_tolerance'])
#Get MAP sequences for heldout data (or all of the data if this isn't part of the xval)
trMAPs, trPosteriors, trMasks, state_usage, ll_heldout_old, param_dict = map_seq_n_usage(arhmm, data_test, opt, inputs_test)
#Calculate loglikehood of the test and training data
ll_heldout = arhmm.log_likelihood(data_test)
ll_training = arhmm.log_likelihood(data_train, inputs=inputs_train)
##===== Or from Matt Johnson's packages =====##
else:
#Sticky or Standard ARHMM
if opt['sticky']:
# Create AR-HDP-HMM Object
arhmm = pyhmm.ARWeakLimitStickyHDPHMM(
init_state_distn='uniform',
                init_emission_distn=init_distn,  # NOTE: init_distn is not defined in this script; the sticky branch assumes it is supplied elsewhere
obs_distns=ar_kwargs,
alpha=1.0, kappa=opt['kappa'], gamma=3.0)
else:
#Vanilla ARHMM
arhmm = pyhmm.ARHMM(
alpha=4.,
init_state_distn='uniform',
obs_distns=ar_kwargs)
##===== Fit on training data =====##
trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict, model_convergence = \
arhmm_bayesian_fit(arhmm, data_train, data_test, opt, i_fold)
#Calculate loglikehood of training data
ll_training = arhmm.log_likelihood()
#Sort based on state-usage
trMAPs, trPosteriors, state_usage, state_perm = util.sort_states_by_usage(state_usage, trMAPs, trPosteriors)
##===== Calculate Log-likelihood =====##
#Count total number of time steps in data
tTest = sum(map(len, data_test))
ll_heldout_perstep = ll_heldout/tTest
#For Training
tTrain = sum(map(len, data_train))
ll_training_perstep = ll_training/tTrain
llhood_tuple = (ll_heldout,ll_heldout_perstep,ll_training,ll_training_perstep)
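    # Worked example with hypothetical numbers: ll_heldout = -5000.0 over
    # tTest = 2500 held-out time steps gives ll_heldout_perstep = -2.0, which makes
    # folds with different trial lengths directly comparable.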
##===== Save & Plot =====##
#Create subdirectory under base directory for kfold
SaveDir, fname_sffx = util.make_sub_dir(K, opt, i_fold)
#Convert hyperparameters into dictionary for save process
hyperparams = make_hyperparams_dic(opt, K, M, trans_kwargs, ar_kwargs)
RunTime = time.perf_counter() - startTime
#Plot model parameters
plots_YAA.save_plot_parameters(SaveDir, fname_sffx, llhood_tuple, state_usage, hyperparams, param_dict, state_perm, model_convergence, RunTime)
#if this is fit on all data (which i_fold==-1 signifies) plot and save MAP seqs (and state-posteriors)
# if i_fold == -1:
# plots_YAA.save_plot_MAPseqs(SaveDir, fname_sffx, trsum, trMAPs, trPosteriors, trMasks, state_usage, opt, K, state_perm)
return ll_training_perstep, ll_heldout_perstep, K
##===== ===== =====##
##===== Start =====##
if __name__ == "__main__":
#GO!
startTime = time.time()
#Convert arguments into dictionary; opt <-> options
opt = args.__dict__
#Create base folder for saved results
SaveDirRoot = util.make_base_dir(opt['model_type'],opt['data_type'],opt['mID'])
#Save script options in JSON file
opt['SaveDirRoot'] = SaveDirRoot
opt['json_dir'] = SaveDirRoot
js_fname = 'ARHMM_hyperparameters.json'
if opt['save']:
with open(os.path.join(SaveDirRoot, js_fname), 'w') as jsfile:
json.dump(opt, jsfile, indent=4)
##====== ======================== ======##
##====== Read in Observation Data ======##
data_list, trsum, angle_list = util.read_data(opt['mID'],opt['condition'],opt['data_type'])
# Number of obserations per time step
D_obs = data_list[0].shape[1]
opt.update(D_obs = D_obs)
# Total Trials
nTrials = len(trsum)
#Save which trials are being used to fit the ARHMM
if opt['save']:
trsum.to_csv(os.path.join(SaveDirRoot,'inputted_trials.txt'),header=False,index=False,sep='\t',float_format='%.4f')
##===== ==================== =====##
##===== Perform X-validation =====##
k_fold = StratifiedKFold(n_splits=opt['kXval'])
#Stratify data per mice and per condition for kfolds
include = ['{}_C{}'.format(i,j) for i,j in zip(list(trsum['mID']),list(trsum['cond']))]
# Creates parallel processes
pool = multiprocessing.Pool(processes=opt['max_processes'])
#Preallocate matrix for cross-validation llhood values
Ks = np.arange(opt['Kmin'],opt['Kmax']+1,10)
ll_heldout = np.zeros((len(Ks),opt['kXval']+1))
ll_training = np.zeros((len(Ks),opt['kXval']+1))
model_fullfit = []
process_outputs = []
#Loop over number of HMM states
for index, K in enumerate(np.arange(opt['Kmin'],opt['Kmax']+1,20)):
#Fit the model to all of the data, and then for each kfold of x-validation
model_fullfit.append(pool.apply_async(fit_arhmm_get_llhood, args=(data_list,trsum,K,opt)))
#Loop over kfolds
kfold_outputs = []
for iK, (train_indices, test_indices) in enumerate(k_fold.split(data_list, include)):
kfold_outputs.append(pool.apply_async(fit_arhmm_get_llhood, args= \
(data_list, trsum, K, opt, train_indices, test_indices, iK)))
process_outputs.append(kfold_outputs)
##===== =========== =====##
##===== Get results =====##
#Extract log_likelihood results from parallel kfold processing
for index, results in enumerate(process_outputs):
ll_training[index,:-1] = np.array([iFold.get()[0] for iFold in results])
ll_heldout[index,:-1] = np.array([iFold.get()[1] for iFold in results])
Ks[index] = results[0].get()[2]
time
#For full fit
Ks_ff = Ks.copy()
for index, results in enumerate(model_fullfit):
ll_training[index,-1] = results.get()[0]
ll_heldout[index,-1] = results.get()[1]
Ks_ff = results.get()[2]
#Close Parallel pool
pool.close()
#Total Run Time
RunTime = time.perf_counter() - startTime
opt.update(RunTime = RunTime)
hrs=int(RunTime//3600); mins=int((RunTime%3600)//60); secs=int(RunTime - hrs*3600 - mins*60)
print('\tTotal run time = {:02d}:{:02d}:{:02d} for {} total trials and {} K\'s\n'.format(hrs,mins,secs,nTrials,opt['Kmax']+1-opt['Kmin']))
# Save summary data of all x-validation results
plots_YAA.save_plot_xval_lls(ll_training, ll_heldout, Ks,opt)
| 40.895551 | 185 | 0.62328 | 2,755 | 21,143 | 4.602541 | 0.189837 | 0.017744 | 0.033517 | 0.006703 | 0.188801 | 0.138722 | 0.108438 | 0.092666 | 0.074054 | 0.060252 | 0 | 0.00998 | 0.255924 | 21,143 | 516 | 186 | 40.974806 | 0.796021 | 0.21714 | 0 | 0.132686 | 0 | 0.003236 | 0.119868 | 0.006425 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016181 | false | 0 | 0.064725 | 0 | 0.097087 | 0.022654 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9793545d6d0e47925c766734e2f8f75b3b0595d3 | 1,402 | py | Python | src/pyeff_io.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | 3 | 2019-06-24T08:04:25.000Z | 2020-05-26T03:45:45.000Z | src/pyeff_io.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | null | null | null | src/pyeff_io.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 PyEFF developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
def write_cfg(f_name, types, chemical_symbols, spins, x):
    # write a new cfg file based on the x vector
    o = open(f_name, 'w')
    o.write('@params\n')
    o.write('calc = minimize\n')
    o.write('@nuclei\n')
    idx = 0
    for p in range(len(types)):
        if types[p] == 'nuclei':
            o.write(str(x[idx+0])+' '+str(x[idx+1])+' '+str(x[idx+2])+' '+str(chemical_symbols[p])+'\n')
            idx = idx + 3
    o.write('@electrons\n')
    for p in range(len(types)):
        if types[p] == 'electron':
            o.write(str(x[idx+0])+' '+str(x[idx+1])+' '+str(x[idx+2])+' '+str(spins[p])+' '+str(np.exp(x[idx+3]))+'\n')
            idx = idx + 4
    o.close()
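# A minimal usage sketch (illustrative only; the coordinates, symbol, and spin
# below are made up, and the electron radius is stored as log(r) in x, which
# matches the np.exp() applied above):
#
#   types = ['nuclei', 'electron']
#   chemical_symbols = ['H', None]
#   spins = [None, 1]
#   x = [0.0, 0.0, 0.0,           # nucleus position
#        0.1, 0.0, 0.0, -0.5]     # electron position + log-radius
#   write_cfg('test.cfg', types, chemical_symbols, spins, x)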
| 40.057143 | 131 | 0.574893 | 209 | 1,402 | 3.832536 | 0.478469 | 0.034956 | 0.052434 | 0.03995 | 0.149813 | 0.149813 | 0.149813 | 0.149813 | 0.149813 | 0.082397 | 0 | 0.017893 | 0.282454 | 1,402 | 34 | 132 | 41.235294 | 0.77833 | 0.437946 | 0 | 0.117647 | 0 | 0 | 0.095052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
979375047a16643d63291a0d8aad8d8fc63735f2 | 22,196 | py | Python | declarations_site/catalog/data/mapping_chesno.py | li-ar/declarations.com.ua | 343cd86cc5a4bd895f2859ed896728f6416ac223 | [
"MIT"
] | 32 | 2015-04-01T15:17:35.000Z | 2021-05-02T20:46:33.000Z | declarations_site/catalog/data/mapping_chesno.py | li-ar/declarations.com.ua | 343cd86cc5a4bd895f2859ed896728f6416ac223 | [
"MIT"
] | 52 | 2015-03-23T21:37:04.000Z | 2022-02-10T07:27:13.000Z | declarations_site/catalog/data/mapping_chesno.py | li-ar/declarations.com.ua | 343cd86cc5a4bd895f2859ed896728f6416ac223 | [
"MIT"
] | 18 | 2015-03-16T22:10:44.000Z | 2021-11-01T12:56:12.000Z | from collections import namedtuple
SubDocument = namedtuple("SubDocument", ["path_prefix", "mapping"])
NumericOperation = namedtuple("NumericOperation", ["path_prefix", "field", "operation"])
JoinOperation = namedtuple("JoinOperation", ["paths", "separator"])
MAPPING = {
"_id": "details/id",
"intro": {
"isnotdeclaration": "",
"declaration_year": "details/year"
},
"general": {
"full_name": "full_name",
"last_name": "last_name",
"name": "first_name",
"patronymic": "second_name",
"name_hidden": "",
"name_unclear": "",
"last_name_hidden": "",
"last_name_unclear": "",
"patronymic_hidden": "",
"patronymic_unclear": "",
"inn": "",
"addresses": [
{
"place": "",
"place_district": "",
"place_city": "",
"place_city_type": "",
"place_address": "",
"place_address_type": ""
}
],
"addresses_raw": "",
"addresses_raw_hidden": "",
"post": {
"region": "office/region",
"office": "office/office",
"post": "office/position"
},
"post_raw": "office/position",
"family": SubDocument(
"details/fields/4.0/items",
{
"relations": "",
"family_name": "family_member",
"inn": ""
}
),
"family_raw": ""
},
"income": {
"5": {
"value": "details/fields/5.0/items/0/value",
"comment": "",
"family": "details/fields/5.1/items/0/value",
"family_comment": ""
},
"6": {
"value": "details/fields/6.0/items/0/value",
"comment": "",
"family": "details/fields/6.1/items/0/value",
"family_comment": ""
},
"7": {
"value": "details/fields/7.0/items/0/value",
"comment": "",
"family": "details/fields/7.1/items/0/value",
"family_comment": "",
"source_name": "details/fields/7.0/items/0/comment"
},
"8": {
"value": "details/fields/8.0/items/0/value",
"comment": "",
"family": "details/fields/8.1/items/0/value",
"family_comment": ""
},
"9": {
"value": "details/fields/9.0/items/0/value",
"comment": "",
"family": "details/fields/9.1/items/0/value",
"family_comment": ""
},
"10": {
"value": "details/fields/10.0/items/0/value",
"comment": "",
"family": "details/fields/10.1/items/0/value",
"family_comment": ""
},
"11": {
"value": "details/fields/11.0/items/0/value",
"comment": "",
"family": "details/fields/11.1/items/0/value",
"family_comment": ""
},
"12": {
"value": "details/fields/12.0/items/0/value",
"comment": "",
"family": "details/fields/12.1/items/0/value",
"family_comment": ""
},
"13": {
"value": "details/fields/13.0/items/0/value",
"comment": "",
"family": "details/fields/13.1/items/0/value",
"family_comment": ""
},
"14": {
"value": "details/fields/14.0/items/0/value",
"comment": "",
"family": "details/fields/14.1/items/0/value",
"family_comment": ""
},
"15": {
"value": "details/fields/15.0/items/0/value",
"comment": "",
"family": "details/fields/15.1/items/0/value",
"family_comment": ""
},
"16": {
"value": "details/fields/16.0/items/0/value",
"comment": "",
"family": "details/fields/16.1/items/0/value",
"family_comment": ""
},
"17": {
"value": "details/fields/17.0/items/0/value",
"comment": "",
"family": "details/fields/17.1/items/0/value",
"family_comment": ""
},
"18": {
"value": "details/fields/18.0/items/0/value",
"comment": "",
"family": "details/fields/18.1/items/0/value",
"family_comment": ""
},
"19": {
"value": "details/fields/19.0/items/0/value",
"comment": "",
"family": "details/fields/19.1/items/0/value",
"family_comment": ""
},
"20": {
"value": "details/fields/20.0/items/0/value",
"comment": "",
"family": "details/fields/20.1/items/0/value",
"family_comment": ""
},
"21": SubDocument(
"details/fields/21.0/items",
{
"country": "",
"country_comment": "",
"cur": "",
"uah_equal": "value"
}
),
"22": SubDocument(
"details/fields/22.0/items",
{
"country": "",
"country_comment": "",
"cur": "",
"uah_equal": "value"
}
)
},
"estate": {
"23": SubDocument(
"details/fields/23.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"24": SubDocument(
"details/fields/24.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"25": SubDocument(
"details/fields/25.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"26": SubDocument(
"details/fields/26.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"27": SubDocument(
"details/fields/27.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"28": SubDocument(
"details/fields/28.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs": "purchase",
"costs_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"29": SubDocument(
"details/fields/29.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"30": SubDocument(
"details/fields/30.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"31": SubDocument(
"details/fields/31.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"32": SubDocument(
"details/fields/32.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"33": SubDocument(
"details/fields/33.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
),
"34": SubDocument(
"details/fields/34.0/items",
{
"location_raw": "comment",
"region": "",
"address": "",
"space": "value",
"space_units": "",
"space_comment": "",
"costs_property": "purchase",
"costs_property_comment": "",
"costs_rent": "rent",
"costs_rent_comment": ""
}
)
},
"vehicle": {
"35": SubDocument(
"details/fields/35.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"36": SubDocument(
"details/fields/36.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"37": SubDocument(
"details/fields/37.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"38": SubDocument(
"details/fields/38.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"39": SubDocument(
"details/fields/39.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"40": SubDocument(
"details/fields/40.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"41": SubDocument(
"details/fields/41.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"42": SubDocument(
"details/fields/42.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"43": SubDocument(
"details/fields/43.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
),
"44": SubDocument(
"details/fields/44.0/items",
{
"brand": JoinOperation(("mark", "model", "description"), ' '),
"brand_info": "",
"year": "year",
"sum": "purchase",
"sum_comment": "",
"sum_rent": "rent",
"sum_rent_comment": "",
"brand_hidden": ""
}
)
},
"banks": {
"45": [{
"sum": NumericOperation("details/fields/45.0/items", "value", sum),
"sum_units": "details/fields/45.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/45.1/items", "value", sum),
"sum_foreign_units": "details/fields/45.1/units",
"sum_foreign_comment": ""
}],
"46": [{
"sum": NumericOperation("details/fields/46.0/items", "value", sum),
"sum_units": "details/fields/46.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/46.1/items", "value", sum),
"sum_foreign_units": "details/fields/46.1/units",
"sum_foreign_comment": ""
}],
"47": [{
"sum": NumericOperation("details/fields/47.0/items", "value", sum),
"sum_units": "details/fields/47.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/47.1/items", "value", sum),
"sum_foreign_units": "details/fields/47.1/units",
"sum_foreign_comment": ""
}],
"48": [{
"sum": NumericOperation("details/fields/48.0/items", "value", sum),
"sum_units": "details/fields/48.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/48.1/items", "value", sum),
"sum_foreign_units": "details/fields/48.1/units",
"sum_foreign_comment": ""
}],
"49": [{
"sum": NumericOperation("details/fields/49.0/items", "value", sum),
"sum_units": "details/fields/49.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/49.1/items", "value", sum),
"sum_foreign_units": "details/fields/49.1/units",
"sum_foreign_comment": ""
}],
"50": [{
"sum": NumericOperation("details/fields/50.0/items", "value", sum),
"sum_units": "details/fields/50.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/50.1/items", "value", sum),
"sum_foreign_units": "details/fields/50.1/units",
"sum_foreign_comment": ""
}],
"51": [{
"sum": NumericOperation("details/fields/51.0/items", "value", sum),
"sum_units": "details/fields/51.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/51.1/items", "value", sum),
"sum_foreign_units": "details/fields/51.1/units",
"sum_foreign_comment": ""
}],
"52": [{
"sum": NumericOperation("details/fields/52.0/items", "value", sum),
"sum_units": "details/fields/52.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/52.1/items", "value", sum),
"sum_foreign_units": "details/fields/52.1/units",
"sum_foreign_comment": ""
}],
"53": [{
"sum": NumericOperation("details/fields/53.0/items", "value", sum),
"sum_units": "details/fields/53.0/units",
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/53.1/items", "value", sum),
"sum_foreign_units": "details/fields/53.1/units",
"sum_foreign_comment": ""
}]
},
"liabilities": {
"54": {
"sum": NumericOperation("details/fields/54.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/54.1/items", "value", sum),
"sum_foreign_comment": ""
},
"55": {
"sum": NumericOperation("details/fields/55.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/55.1/items", "value", sum),
"sum_foreign_comment": ""
},
"56": {
"sum": NumericOperation("details/fields/56.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/56.1/items", "value", sum),
"sum_foreign_comment": ""
},
"57": {
"sum": NumericOperation("details/fields/57.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/57.1/items", "value", sum),
"sum_foreign_comment": ""
},
"58": {
"sum": NumericOperation("details/fields/58.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/58.1/items", "value", sum),
"sum_foreign_comment": ""
},
"59": {
"sum": NumericOperation("details/fields/59.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/59.1/items", "value", sum),
"sum_foreign_comment": ""
},
"60": {
"sum": NumericOperation("details/fields/60.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/60.1/items", "value", sum),
"sum_foreign_comment": ""
},
"61": {
"sum": NumericOperation("details/fields/61.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/61.1/items", "value", sum),
"sum_foreign_comment": ""
},
"62": {
"sum": NumericOperation("details/fields/62.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/62.1/items", "value", sum),
"sum_foreign_comment": ""
},
"63": {
"sum": NumericOperation("details/fields/63.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/63.1/items", "value", sum),
"sum_foreign_comment": ""
},
"64": {
"sum": NumericOperation("details/fields/64.0/items", "value", sum),
"sum_comment": "",
"sum_foreign": NumericOperation("details/fields/64.1/items", "value", sum),
"sum_foreign_comment": ""
}
},
"declaration": {
"date": "",
"notfull": "",
"notfull_lostpages": "",
"additional_info": "details/comment",
"additional_info_text": "details/comment",
"needs_scancopy_check": "",
"url": "details/url",
"link": "details/link",
"source": "%CHESNO%"
}
}
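# A minimal sketch (not part of the original module) of how the marker types
# above might be consumed by a mapping walker; `source` and `resolve` are
# hypothetical names used only to illustrate the intent of the namedtuples,
# and the real resolver lives elsewhere in the project:
#
#   def resolve(source, spec):
#       if isinstance(spec, JoinOperation):
#           return spec.separator.join(str(source.get(p, '')) for p in spec.paths)
#       if isinstance(spec, NumericOperation):
#           items = source.get(spec.path_prefix, [])
#           return spec.operation(item.get(spec.field, 0) for item in items)
#       return source.get(spec, '')  # plain slash-separated path string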
| 34.573209 | 88 | 0.421157 | 1,760 | 22,196 | 5.128409 | 0.082955 | 0.167073 | 0.128518 | 0.070906 | 0.699092 | 0.676158 | 0.627964 | 0.590184 | 0.487924 | 0.381675 | 0 | 0.03649 | 0.402415 | 22,196 | 641 | 89 | 34.627145 | 0.643999 | 0 | 0 | 0.486614 | 0 | 0 | 0.388674 | 0.148045 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.001575 | 0 | 0.001575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9793ec4954370ea87c1224757843713b7976c1e3 | 3,179 | py | Python | common/threads/thread_pool.py | ziizhuwy/cify | 627ae74f6a27d803521df213e8644366dbba183f | [
"Apache-2.0"
] | 8 | 2018-10-11T16:05:14.000Z | 2020-12-30T08:21:15.000Z | common/threads/thread_pool.py | keven1z/cify | 627ae74f6a27d803521df213e8644366dbba183f | [
"Apache-2.0"
] | 1 | 2020-04-22T03:36:59.000Z | 2020-06-11T06:42:42.000Z | common/threads/thread_pool.py | ziizhuwy/cify | 627ae74f6a27d803521df213e8644366dbba183f | [
"Apache-2.0"
] | 4 | 2019-07-10T06:51:45.000Z | 2020-04-19T09:52:09.000Z |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import threading
import traceback
from data.config import *
from common.log.log_util import LogUtil as log
logger = log.getLogger(__name__)
class ThreadPool(object):
    def __init__(self):
        self.task_queue = queue.Queue()
        self.threads = []
        self.__init_thread_pool(THREAD_NUMBER)

    def __init_thread_pool(self, thread_num):
        """
        the number of workers means the number of parallel running threads
        """
        for i in range(thread_num):
            worker = Worker(self.task_queue)
            worker.setDaemon(True)  # comment this line to avoid the main thread ending before its subthreads
            worker.start()
            self.threads.append(worker)
        # logger.debug('constructed a thread pool with %d workers', len(self.threads))

    def add_task(self, func, *args):
        """
        add a task to the task queue
        """
        self.task_queue.put((func, args))

    def wait_all_complete(self):
        """
        this will block the calling thread until the task queue is empty
        """
        self.task_queue.join()
        self._terminate_workers()

    def force_complete(self):
        self.clear_tasks()
        self._terminate_workers()

    def clear_tasks(self):
        # logger.info('there are %d tasks in the queue that will be removed' % self.task_queue.qsize())
        while not self.task_queue.empty():
            self.task_queue.get_nowait()
            self.task_queue.task_done()
            # logger.debug('removed a task and %d remains' % self.task_queue.qsize())
        # logger.info('task queue was cleared and the size=%d' % self.task_queue.qsize())

    def _terminate_workers(self):
        # logger.debug('will terminate %d workers in thread pool', len(self.threads))
        for worker in self.threads:
            worker.terminate()


class Worker(threading.Thread):
    def __init__(self, task_queue):
        super(Worker, self).__init__()
        self.task_queue = task_queue
        self.stop = False

    def run(self):
        max_len = 64
        while not self.stop:
            try:
                do, args = self.task_queue.get(timeout=1)
                args_desc = str(args)
                if len(args_desc) > max_len:
                    args_desc = '%s...' % args_desc[0:max_len]
                # logger.debug('get a task(function=%s, params=%s) and there are %d in queue' %
                #              (do, args_desc, self.task_queue.qsize()))
                try:
                    do(*args)
                except Exception:
                    logger.warning(traceback.format_exc())
                # logger.debug('finish a task(function=%s, params=%s) and there are %d in queue' %
                #              (do, args_desc, self.task_queue.qsize()))
                if self.stop:
                    # logger.info('the worker in thread pool was terminated')
                    pass
                self.task_queue.task_done()
            except queue.Empty:
                # no task arrived within the timeout; loop again and re-check self.stop
                pass

    def terminate(self):
        self.stop = True
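# A minimal usage sketch (illustrative; THREAD_NUMBER still comes from
# data.config, and `work` below is a made-up task function):
#
#   def work(n):
#       print('processed', n)
#
#   pool = ThreadPool()
#   for i in range(10):
#       pool.add_task(work, i)
#   pool.wait_all_complete()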
| 32.773196 | 113 | 0.557093 | 385 | 3,179 | 4.418182 | 0.314286 | 0.10582 | 0.122281 | 0.05291 | 0.104644 | 0.079953 | 0.079953 | 0.079953 | 0.079953 | 0.079953 | 0 | 0.002389 | 0.341617 | 3,179 | 96 | 114 | 33.114583 | 0.81032 | 0.334382 | 0 | 0.178571 | 0 | 0 | 0.002459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0.035714 | 0.089286 | 0 | 0.303571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9794d489e11c9d85b61fcfda17b5fcf122b391c0 | 4,431 | py | Python | whatsappy/group.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | whatsappy/group.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | whatsappy/group.py | YohananDiamond/whatsappy | 2474839baf32295fea568c4dd30c59edace11e58 | [
"MIT"
] | null | null | null | from time import sleep
from selenium.webdriver.common.keys import Keys
from os import path
from .tool import *
from .error import BadPathError
import traceback
def change_group_description(self, description: str):
    """Changes the group description

    Args:
        description (str): New group description
    """
    try:
        # Open the group info panel
        self.driver.find_element_by_xpath('//*[@id="main"]/header/div[2]').click()
        if not is_admin(self):
            print("You are not a group admin!")
            return
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[2]/div[2]/div/div/span[2]/div'
        ).click()  # Click the pencil icon that edits the description
        description_dom = self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[2]/div[2]/div/div[1]/div/div[2]'
        )  # Select the description field for editing
        description_dom.clear()  # Clear it
        if "\n" in description:  # Type the text, preserving line breaks
            for line in description.split("\n"):
                description_dom.send_keys(line)
                description_dom.send_keys(Keys.SHIFT + Keys.ENTER)
            description_dom.send_keys(Keys.ENTER)
        else:
            description_dom.send_keys(description)
    except Exception:
        error_log(traceback.format_exc())
    try:  # Close the group info panel
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/header/div/div[1]/button'
        ).click()
    except Exception:
        pass


def change_group_name(self, name: str):
    """Changes the group name

    Args:
        name (str): New group name
    """
    try:
        # Open the group info panel
        self.driver.find_element_by_xpath('//*[@id="main"]/header/div[2]').click()
        if not is_admin(self):
            print("You are not a group admin!")
            return
        # Click to edit the group name
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[1]/div[2]/div[1]/span[2]/div'
        ).click()
        group_name_dom = self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[1]/div[2]/div[1]/div/div[2]'
        )  # Select the group name text
        group_name_dom.clear()  # Clear it
        group_name_dom.send_keys(name + Keys.ENTER)  # Type the new name
    except Exception:
        error_log(traceback.format_exc())
    try:
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/header/div/div[1]/button'
        ).click()  # Close the group info panel
    except Exception:
        pass


def change_group_pfp(self, file_path: str):
    """Changes the group profile picture

    Args:
        file_path (str): Absolute path to the new picture
    """
    try:
        if not path.isabs(file_path):
            raise BadPathError("The file path is not absolute")
        # Open the group info panel
        self.driver.find_element_by_xpath('//*[@id="main"]/header/div[2]').click()
        if not is_admin(self):
            print("You are not a group admin!")
            return
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[1]/div[1]/div/input'
        ).send_keys(file_path)  # Upload the photo
        sleep(1)
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/span[2]/div[1]/div/div/div/div/div/span/div[1]/div/div[2]/span/div'
        ).click()  # Confirm
    except Exception:
        error_log(traceback.format_exc())
    try:
        self.driver.find_element_by_xpath(
            '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/header/div/div[1]/button'
        ).click()  # Close the group info panel
    except Exception:
        pass


def leave_group(self):
    """Leaves the group you are in"""
    # Open the group info panel
    self.driver.find_element_by_xpath('//*[@id="main"]/header/div[2]').click()
    self.driver.find_element_by_xpath(
        '//*[@id="app"]/div[1]/div[1]/div[2]/div[3]/span/div[1]/span/div[1]/div/section/div[6]/div'
    ).click()
    self.driver.find_element_by_xpath(
        '//*[@id="app"]/div[1]/span[2]/div[1]/div/div/div/div/div/div[2]/div[2]'
    ).click()
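# A minimal usage sketch (hypothetical; these functions expect to be called
# with a client object that exposes a Selenium `driver` attribute and has the
# target group chat open; `client` below stands in for that object):
#
#   change_group_name(client, "New group name")
#   change_group_description(client, "Line one\nLine two")
#   change_group_pfp(client, "/home/user/picture.png")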
| 31.204225 | 128 | 0.594223 | 666 | 4,431 | 3.83033 | 0.154655 | 0.079969 | 0.090553 | 0.123481 | 0.670325 | 0.624069 | 0.611133 | 0.597413 | 0.597413 | 0.597413 | 0 | 0.026447 | 0.232002 | 4,431 | 141 | 129 | 31.425532 | 0.723185 | 0.129091 | 0 | 0.586207 | 0 | 0.126437 | 0.340963 | 0.31176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0.034483 | 0.068966 | 0 | 0.149425 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97959af172f3896c43a9153dc3a145cbbaa7178b | 365 | py | Python | Books/GodOfPython/P16_Networking/SimplehttpServer.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Books/GodOfPython/P16_Networking/SimplehttpServer.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Books/GodOfPython/P16_Networking/SimplehttpServer.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | from http.server import HTTPServer, SimpleHTTPRequestHandler
import sys
ip = '127.0.0.1'
port = 8000
addr = (ip, port)
httpd = HTTPServer(addr, SimpleHTTPRequestHandler)
Servip, Servport = httpd.socket.getsockname()
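# Serve until interrupted; visiting http://127.0.0.1:8000/ lists the current
# working directory (SimpleHTTPRequestHandler's default behavior).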
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    print('Keyboard interrupt received, exiting.')
    httpd.server_close()
sys.exit(0) | 20.277778 | 60 | 0.750685 | 44 | 365 | 6.181818 | 0.704545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035032 | 0.139726 | 365 | 18 | 61 | 20.277778 | 0.83121 | 0 | 0 | 0 | 0 | 0 | 0.125683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
979851dc526feb93a60dbe69c32893824847ba79 | 1,217 | py | Python | dataclasses_serialization/serializer_base/dataclasses.py | blfoster/python-dataclasses-serialization | 1a2d1fc15ca1800c2b4953fe5cb2557f37d1475d | [
"MIT"
] | 19 | 2019-04-15T15:57:20.000Z | 2021-07-09T07:01:12.000Z | dataclasses_serialization/serializer_base/dataclasses.py | blfoster/python-dataclasses-serialization | 1a2d1fc15ca1800c2b4953fe5cb2557f37d1475d | [
"MIT"
] | 14 | 2019-08-01T13:03:53.000Z | 2021-04-20T13:26:54.000Z | dataclasses_serialization/serializer_base/dataclasses.py | blfoster/python-dataclasses-serialization | 1a2d1fc15ca1800c2b4953fe5cb2557f37d1475d | [
"MIT"
] | 11 | 2019-06-13T21:38:55.000Z | 2022-02-28T08:53:20.000Z | from toolz import curry
from dataclasses_serialization.serializer_base.errors import DeserializationError
from dataclasses_serialization.serializer_base.noop import noop_deserialization
from dataclasses_serialization.serializer_base.typing import (
    dataclass_field_types,
    isinstance,
)
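# Note: `isinstance` above deliberately shadows the builtin with the library's
# typing-aware variant from serializer_base.typing.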
__all__ = ["dict_to_dataclass"]
@curry
def dict_to_dataclass(cls, dct, deserialization_func=noop_deserialization):
    if not isinstance(dct, dict):
        raise DeserializationError(
            "Cannot deserialize {} {!r} using {}".format(
                type(dct), dct, dict_to_dataclass
            )
        )

    try:
        fld_types = dataclass_field_types(cls, require_bound=True)
    except TypeError:
        raise DeserializationError("Cannot deserialize unbound generic {}".format(cls))

    try:
        return cls(
            **{
                fld.name: deserialization_func(fld_type, dct[fld.name])
                for fld, fld_type in fld_types
                if fld.name in dct
            }
        )
    except TypeError:
        raise DeserializationError(
            "Missing one or more required fields to deserialize {!r} as {}".format(
                dct, cls
            )
        )
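# A minimal usage sketch (illustrative; uses the default noop_deserialization,
# which simply passes these primitive field values through):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class Point:
#       x: int
#       y: int
#
#   dict_to_dataclass(Point, {"x": 1, "y": 2})   # -> Point(x=1, y=2)
#   dict_to_dataclass(Point, {"x": 1})           # -> raises DeserializationError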
| 29.682927 | 87 | 0.642564 | 126 | 1,217 | 5.97619 | 0.420635 | 0.059761 | 0.111554 | 0.151394 | 0.167331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282662 | 1,217 | 40 | 88 | 30.425 | 0.862543 | 0 | 0 | 0.176471 | 0 | 0 | 0.123254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
979b57b2f520e53382dbdd26bd190261e6e49b86 | 9,359 | py | Python | DQN_Qlearning/agent.py | WoShiDongZhiWu/Reinforcement-learning-Algorithm | 59fdf29e7feb73048b9ddf3b4755b55f0459efcb | [
"Apache-2.0"
] | 1 | 2019-12-23T02:59:13.000Z | 2019-12-23T02:59:13.000Z | DQN_Qlearning/agent.py | WoShiDongZhiWu/reinforcement-learning-algorithm | 59fdf29e7feb73048b9ddf3b4755b55f0459efcb | [
"Apache-2.0"
] | null | null | null | DQN_Qlearning/agent.py | WoShiDongZhiWu/reinforcement-learning-algorithm | 59fdf29e7feb73048b9ddf3b4755b55f0459efcb | [
"Apache-2.0"
] | null | null | null | '''
#################################################################################################
# author: wudong
# date: 2019-08-12
# Purpose:
#   Implements several reinforcement-learning algorithms: Sarsa(0), Sarsa(lambda), Q-learning, and DQN
#################################################################################################
'''
from random import random, choice
from gym import Env, spaces
import gym
import numpy as np
from core import Transition, Experience, Agent
from utils import str_key, set_dict, get_dict
from utils import epsilon_greedy_pi, epsilon_greedy_policy
from utils import greedy_policy, learning_curve
from approximator import NetApproximator
class SarsaAgent(Agent):
    def __init__(self, env: Env, capacity: int = 20000):
        super(SarsaAgent, self).__init__(env, capacity)
        self.Q = {}  # dictionary holding the action values Q(s, a)

    def policy(self, A, s, Q, epsilon):  # overrides the base-class policy
        '''
        Epsilon-greedy policy.
        return
            a: the action selected by the policy
        '''
        return epsilon_greedy_policy(A, s, Q, epsilon)

    def learning_method(self, gamma = 0.9, alpha = 0.1, epsilon = 1e-5, display = False, lambda_ = None):
        '''Overrides the base-class method; runs through one episode.
        '''
        self.state = self.env.reset()  # the agent's current state
        s0 = self.state
        if display:
            self.env.render()
        a0 = self.perform_policy(s0, epsilon)  # query the policy for an action

        time_in_episode, total_reward = 0, 0
        is_done = False
        while not is_done:
            # add code here
            s1, r1, is_done, info, total_reward = self.act(a0)
            if display:
                self.env.render()
            a1 = self.perform_policy(s1, epsilon)
            old_q = get_dict(self.Q, s0, a0)
            q_prime = get_dict(self.Q, s1, a1)
            td_target = r1 + gamma * q_prime
            new_q = old_q + alpha * (td_target - old_q)
            set_dict(self.Q, new_q, s0, a0)
            s0, a0 = s1, a1
            time_in_episode += 1
        if display:
            print(self.experience.last_episode)
        return time_in_episode, total_reward


class SarsaLambdaAgent(Agent):
    def __init__(self, env: Env, capacity: int = 20000):
        super(SarsaLambdaAgent, self).__init__(env, capacity)
        self.Q = {}

    def policy(self, A, s, Q, epsilon):
        return epsilon_greedy_policy(A, s, Q, epsilon)

    def learning_method(self, lambda_ = 0.9, gamma = 0.9, alpha = 0.1, epsilon = 1e-5, display = False):
        self.state = self.env.reset()
        s0 = self.state
        if display:
            self.env.render()
        a0 = self.perform_policy(s0, epsilon)
        # print(self.action_t.name)
        time_in_episode, total_reward = 0, 0
        is_done = False
        E = {}  # eligibility traces
        while not is_done:
            # add code here
            s1, r1, is_done, info, total_reward = self.act(a0)
            if display:
                self.env.render()
            a1 = self.perform_policy(s1, epsilon)
            q = get_dict(self.Q, s0, a0)
            q_prime = get_dict(self.Q, s1, a1)
            delta = r1 + gamma * q_prime - q
            e = get_dict(E, s0, a0)
            e += 1
            set_dict(E, e, s0, a0)
            for s in self.S:
                for a in self.A:
                    e_value = get_dict(E, s, a)
                    old_q = get_dict(self.Q, s, a)
                    new_q = old_q + alpha * delta * e_value
                    new_e = gamma * lambda_ * e_value
                    set_dict(self.Q, new_q, s, a)
                    set_dict(E, new_e, s, a)
            s0, a0 = s1, a1
            time_in_episode += 1
        if display:
            print(self.experience.last_episode)
        return time_in_episode, total_reward


class QAgent(Agent):
    def __init__(self, env: Env, capacity: int = 20000):
        super(QAgent, self).__init__(env, capacity)
        self.Q = {}

    def policy(self, A, s, Q, epsilon):
        return epsilon_greedy_policy(A, s, Q, epsilon)

    def learning_method(self, gamma = 0.9, alpha = 0.1, epsilon = 1e-5, display = False, lambda_ = None):
        self.state = self.env.reset()
        s0 = self.state
        if display:
            self.env.render()
        time_in_episode, total_reward = 0, 0
        is_done = False
        while not is_done:
            # add code here
            a0 = self.perform_policy(s0, epsilon)  # behavior policy: epsilon-greedy
            s1, r1, is_done, info, total_reward = self.act(a0)
            if display:
                self.env.render()
            # off-policy target: bootstrap from the fully greedy action in s1
            a1 = greedy_policy(self.A, s1, self.Q)
            old_q = get_dict(self.Q, s0, a0)
            q_prime = get_dict(self.Q, s1, a1)
            td_target = r1 + gamma * q_prime
            new_q = old_q + alpha * (td_target - old_q)
            set_dict(self.Q, new_q, s0, a0)
            s0 = s1
            time_in_episode += 1
        if display:
            print(self.experience.last_episode)
        return time_in_episode, total_reward


class DQNAgent(Agent):
    '''DQN built on an approximate value function (a fully connected network).
    '''
    def __init__(self, env: Env = None,
                 capacity = 20000,
                 hidden_dim: int = 32,
                 batch_size = 128,
                 epochs = 2):
        if env is None:
            raise ValueError("agent should have an environment")
        super(DQNAgent, self).__init__(env, capacity)
        self.input_dim = env.observation_space.shape[0]  # continuous states: input dimension
        self.output_dim = env.action_space.n  # discrete actions: output dimension
        self.hidden_dim = hidden_dim  # dimension of the hidden layer
        # Behavior network: produces actions and their Q values; updated every step.
        self.behavior_Q = NetApproximator(input_dim = self.input_dim,
                                          output_dim = self.output_dim,
                                          hidden_dim = self.hidden_dim)
        self.target_Q = self.behavior_Q.clone()  # target network for value targets; updated periodically
        self.batch_size = batch_size  # number of transitions per learning batch
        self.epochs = epochs  # number of passes over each batch
        return

    def _update_target_Q(self):
        '''Copy the behavior Q network (with its parameters) to the network
        that produces the target Q values.
        '''
        self.target_Q = self.behavior_Q.clone()  # update the target Q network

    def policy(self, A, s, Q = None, epsilon = None):
        '''Produce an action from the behavior value network, following a
        greedy or epsilon-greedy rule.
        '''
        Q_s = self.behavior_Q(s)  # predicted Q values from the behavior network
        rand_value = random()
        if epsilon is not None and rand_value < epsilon:
            return self.env.action_space.sample()
        else:
            return int(np.argmax(Q_s))  # the action with the largest Q value

    def _learn_from_memory(self, gamma, learning_rate):
        '''Learn from replay memory (experience -> episode -> transition).
        '''
        trans_pieces = self.sample(self.batch_size)  # draw batch_size random transitions from memory
        states_0 = np.vstack([x.s0 for x in trans_pieces])
        actions_0 = np.array([x.a0 for x in trans_pieces])
        reward_1 = np.array([x.reward for x in trans_pieces])
        is_done = np.array([x.is_done for x in trans_pieces])
        states_1 = np.vstack([x.s1 for x in trans_pieces])

        X_batch = states_0
        y_batch = self.target_Q(states_0)  # numpy predictions used as regression targets

        Q_target = reward_1 + gamma * np.max(self.target_Q(states_1), axis=1)*\
            (~ is_done)  # if is_done, then Q_target == reward_1

        # Switching this block on turns DQN into Double DQN (DDQN):
        # the action a' comes from the behavior value network
        #a_prime = np.argmax(self.behavior_Q(states_1), axis=1).reshape(-1)
        # the value of (s', a') comes from the target value network
        #Q_states_1 = self.target_Q(states_1)
        #temp_Q = Q_states_1[np.arange(len(Q_states_1)), a_prime]
        # the target value of (s, a) follows the Bellman equation
        #Q_target = reward_1 + gamma * temp_Q * (~ is_done)  # if is_done, then Q_target == reward_1
        ## end of DDQN part

        y_batch[np.arange(len(X_batch)), actions_0] = Q_target  # supervised targets for the taken actions

        # Train the behavior value network and update its parameters: predict
        # on X_batch and compute the loss against y_batch.
        loss = self.behavior_Q.fit(x = X_batch,
                                   y = y_batch,
                                   learning_rate = learning_rate,
                                   epochs = self.epochs)
        mean_loss = loss.sum() / self.batch_size
        # The target-network update frequency can be tuned as needed.
        self._update_target_Q()
        return mean_loss

    def learning_method(self, gamma = 0.9, alpha = 0.1, epsilon = 1e-5,
                        display = False, lambda_ = None):
        '''Run through a single episode.
        '''
        self.state = self.env.reset()
        s0 = self.state
        if display:
            self.env.render()
        time_in_episode, total_reward = 0, 0
        is_done = False
        loss = 0
        while not is_done:
            # add code here
            s0 = self.state
            a0 = self.perform_policy(s0, epsilon)  # the behavior value network produces action a0
            s1, r1, is_done, info, total_reward = self.act(a0)  # take the action and observe
            if display:
                self.env.render()
            # Start experience-based learning once enough transitions are stored.
            if self.total_trans > self.batch_size:
                loss += self._learn_from_memory(gamma, alpha)
            # s0 = s1
            time_in_episode += 1
        loss /= time_in_episode
        if display:
            print("epsilon:{:3.2f},loss:{:3.2f},{}".format(epsilon, loss, self.experience.last_episode))
        return time_in_episode, total_reward
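# A minimal training sketch (illustrative; assumes a Gym-style environment and
# the episode loop provided by the Agent base class in core.py; the exact
# keyword names of `learning(...)` below are assumptions):
#
#   import gym
#   env = gym.make("CartPole-v0")
#   agent = DQNAgent(env, capacity=10000, hidden_dim=16)
#   agent.learning(gamma=0.99, epsilon=0.1, max_episode_num=100, display=False)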
| 35.316981 | 105 | 0.543862 | 1,172 | 9,359 | 4.116894 | 0.196246 | 0.024663 | 0.035026 | 0.026529 | 0.516269 | 0.459689 | 0.429016 | 0.395855 | 0.395855 | 0.395855 | 0 | 0.027433 | 0.337857 | 9,359 | 265 | 106 | 35.316981 | 0.75117 | 0.147452 | 0 | 0.517045 | 0 | 0 | 0.008036 | 0.003954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079545 | false | 0 | 0.051136 | 0.011364 | 0.215909 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
979c4c7ab54f0e47ec7248d0082811b92e99c917 | 3,649 | py | Python | DMProject/15.package/15.7.MNIST.py | gongjunhuang/Spider | c683137dafac9c7f4afd359baf9d0717d1a127e2 | [
"Apache-2.0"
] | 1 | 2018-02-26T15:45:17.000Z | 2018-02-26T15:45:17.000Z | DMProject/15.package/15.7.MNIST.py | gongjunhuang/Spider | c683137dafac9c7f4afd359baf9d0717d1a127e2 | [
"Apache-2.0"
] | null | null | null | DMProject/15.package/15.7.MNIST.py | gongjunhuang/Spider | c683137dafac9c7f4afd359baf9d0717d1a127e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from sklearn import svm
import matplotlib.colors
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import accuracy_score
import pandas as pd
import os
import csv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from time import time
from pprint import pprint
def save_image(im, i):
    im = 255 - im.values.reshape(28, 28)
    a = im.astype(np.uint8)
    output_path = '.\\HandWritten'
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    Image.fromarray(a).save(output_path + ('\\%d.png' % i))


def save_result(model):
    data_test_pred = model.predict(data_test)
    data_test['Label'] = data_test_pred
    data_test.to_csv('Prediction.csv', header=True, index=True, columns=['Label'])


if __name__ == "__main__":
    classifier_type = 'RF'

    print('Loading training data...')
    t = time()
    data = pd.read_csv('MNIST.train.csv', header=0, dtype=int)
    print('Done loading; took %f seconds' % (time() - t))
    x, y = data.iloc[:, 1:], data['label']
    x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, random_state=1)
    print(x.shape, x_valid.shape)
    print('Number of images: %d, pixels per image: %d' % x.shape)

    print('Loading test data...')
    t = time()
    data_test = pd.read_csv('MNIST.test.csv', header=0, dtype=int)
    print('Done loading; took %f seconds' % (time() - t))

    matplotlib.rcParams['font.sans-serif'] = ['SimHei']
    matplotlib.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(15, 9), facecolor='w')
    for index in range(16):
        image = x.iloc[index, :]
        plt.subplot(4, 8, index + 1)
        plt.imshow(image.values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Training image: %i' % y[index])
    for index in range(16):
        image = data_test.iloc[index, :]
        plt.subplot(4, 8, index + 17)
        plt.imshow(image.values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
        save_image(image.copy(), index)
        plt.title('Test image')
    plt.tight_layout(2)
    plt.show()

    if classifier_type == 'SVM':
        model = svm.SVC(C=1000, kernel='rbf', gamma=1e-10)
        print('SVM training started...')
    else:
        model = RandomForestClassifier(100, criterion='gini', min_samples_split=2, min_impurity_decrease=1e-10)
        print('Random forest training started...')
    t = time()
    model.fit(x_train, y_train)
    t = time() - t
    print('%s training finished; took %d min %.3f s' % (classifier_type, int(t/60), t - 60*int(t/60)))
    t = time()
    y_train_pred = model.predict(x_train)
    t = time() - t
    print('%s training-set accuracy: %.3f%%; took %d min %.3f s' % (classifier_type, accuracy_score(y_train, y_train_pred)*100, int(t/60), t - 60*int(t/60)))
    t = time()
    y_valid_pred = model.predict(x_valid)
    t = time() - t
    print('%s validation-set accuracy: %.3f%%; took %d min %.3f s' % (classifier_type, accuracy_score(y_valid, y_valid_pred)*100, int(t/60), t - 60*int(t/60)))
    save_result(model)

    err = (y_valid != y_valid_pred)
    err_images = x_valid[err]
    err_y_hat = y_valid_pred[err]
    err_y = y_valid[err]
    print(err_y_hat)
    print(err_y)
    plt.figure(figsize=(10, 8), facecolor='w')
    for index in range(12):
        image = err_images.iloc[index, :]
        plt.subplot(3, 4, index + 1)
        plt.imshow(image.values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Misclassified as: %i, true label: %i' % (err_y_hat[index], err_y.values[index]), fontsize=12)
    plt.suptitle('Handwritten digit recognition: classifier %s' % classifier_type, fontsize=15)
    plt.tight_layout(rect=(0, 0, 1, 0.95))
    plt.show()
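# GridSearchCV is imported above but never used; a minimal sketch of how it
# could tune the SVM branch (the parameter grid here is made up):
#
#   params = {'C': [100, 1000], 'gamma': [1e-10, 1e-8]}
#   search = GridSearchCV(svm.SVC(kernel='rbf'), params, cv=3)
#   search.fit(x_train, y_train)
#   print(search.best_params_)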
| 35.086538 | 132 | 0.64456 | 562 | 3,649 | 4.021352 | 0.307829 | 0.011947 | 0.015929 | 0.015487 | 0.316372 | 0.253097 | 0.218142 | 0.197345 | 0.197345 | 0.163717 | 0 | 0.03488 | 0.190737 | 3,649 | 103 | 133 | 35.427184 | 0.730444 | 0.01014 | 0 | 0.191011 | 0 | 0 | 0.096953 | 0.014958 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0 | 0.157303 | 0 | 0.179775 | 0.157303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
979c80762641e4c8d009fdd05535ea43eb570cd2 | 4,122 | py | Python | examples/collision-avoid/pyplot_staticplan.py | yinanl/rocs | bf2483903e39f4c0ea254a9ef56720a1259955ad | [
"BSD-3-Clause"
] | null | null | null | examples/collision-avoid/pyplot_staticplan.py | yinanl/rocs | bf2483903e39f4c0ea254a9ef56720a1259955ad | [
"BSD-3-Clause"
] | null | null | null | examples/collision-avoid/pyplot_staticplan.py | yinanl/rocs | bf2483903e39f4c0ea254a9ef56720a1259955ad | [
"BSD-3-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
import matplotlib.colors as mcolors
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp
import os
import re
import numpy as np
import h5py
import sys
from os.path import dirname, realpath
pypath = dirname(dirname(dirname(realpath(__file__)))) + '/python/'
sys.path.insert(1, pypath)
import utils
from odes import car2
dirpath = dirname(realpath(__file__))
transfile = '/abstfull_0.2-0.2-0.2.h5'
labelfile = '/labels_dba1_abstfull_0.2-0.2-0.2.h5'
ctlrfile = '/controller_dba1_0.2-0.2-0.2.h5'
specfile = '/dba1.txt'
# # Simulation of static motion planning
tau, X, eta, _, winids, controller = \
utils.read_controller_abst_from_h5(dirpath+ctlrfile)
# # Compute the percentage of winning set on the state space
winset = controller.xgrid[winids, :]
print("\nWinning set coverage:")
winper = "{:.2%}".format(winids.size/controller.xgrid.shape[0])
print(winper)
# # Load specification
dba = utils.read_spec_from_txt(dirpath+specfile)
# # Simulation
Tsim = 50
num_acc = 3
x0 = np.array([1.0, 1.0, np.pi/3.])
i0 = utils.index_in_grid(x0, controller.xgrid)
if not np.any(winids == i0):
    sys.exit("The initial condition is not in the winning set.\n")
xsim, usim, qsim, tsim = utils.simulate_abstbased_dba_control(
tau, Tsim, num_acc, x0, car2, dba, controller)
# # Display workspace
fig = plt.figure()
ax = plt.axes()
ax.set_xlim(0,10)
ax.set_ylim(0,10)
xgrid = np.array([])
eta = np.array([])
labels = np.array([])
obs = np.array([])
with h5py.File(dirpath+transfile, 'r') as ft,\
        h5py.File(dirpath+labelfile, 'r') as fl:
    eta = ft['eta'][...]
    xgrid = ft['xgrid'][...]
    obs = ft['obs'][...]
    labels = fl['labels'][...]
oset = xgrid[obs, :]
ax.add_collection(
utils.polycoll_grid_array(oset, eta, True, 'gray', 0.7)
)
gset = xgrid[np.where(labels>0),:].squeeze()
ax.add_collection(
utils.polycoll_grid_array(gset, eta, True, 'palegreen', 0.7)
)
obstacles = [[0.0, 0.5, 5.0, 6.0],
[2.4, 2.6, 0.0, 3.2],
[3.9, 4.1, 9.0, 10.0], #[3.9, 4.1, 8.0, 10.0]
[5.9, 6.1, 0.0, 0.6],
[5.9, 6.1, 3.8, 5.1], # [5.9, 6.1, 3.8, 6.1],
[6.1, 10.0, 4.9, 5.1]] # [6.1, 10.0, 5.9, 6.1]
rects_obs = [patches.Rectangle((obstacles[i][0], obstacles[i][2]),
obstacles[i][1]-obstacles[i][0],
obstacles[i][3]-obstacles[i][2],
linewidth=1,edgecolor='k',facecolor='k')
for i in range(len(obstacles))]
goals = [[0.5, 2.0, 7.5, 9.5], # a
[7.5, 9.5, 0.8, 3.0]] # d
rects_gs = [patches.Rectangle((goals[i][0], goals[i][2]),
goals[i][1]-goals[i][0],
goals[i][3]-goals[i][2],
linewidth=1,edgecolor='y',fill=False)
for i in range(len(goals))]
circ_gs = patches.Circle([8.0, 8.0], radius=0.8, linewidth=1,
edgecolor='y', fill=False)
for rect in rects_obs:
    ax.add_patch(rect)
for rect in rects_gs:
    ax.add_patch(rect)
ax.add_patch(circ_gs)
ax.text((goals[0][0]+goals[0][1])/2.0-0.5, (goals[0][2]+goals[0][3])/2.0, "pickup")
ax.text((goals[1][0]+goals[1][1])/2.0-0.5, (goals[1][2]+goals[1][3])/2.0, "drop")
ax.text(7.8, 7.8, "count")
# # # Plot winning set in 2D plane
# ax.add_collection(
# utils.polycoll_grid_array(winset, eta, True, 'palegoldenrod', 0.7)
# )
# # Plot closed-loop trajectoryies
colors = list(mcolors.TABLEAU_COLORS.keys())
qdiff = np.diff(np.insert(qsim, 0, dba.q0))
qchks = np.argwhere(qdiff != 0).squeeze()
qchks = np.insert(qchks, 0, 0)
qchks = np.append(qchks, qsim.size)
q = dba.q0
for k in range(qchks.size-1):
    q = q + qdiff[qchks[k]]
    xs = xsim[qchks[k]:qchks[k+1]+1, :]
    ax.plot(xs[:, 0], xs[:, 1], color=colors[q])
ax.plot(xsim[0, 0], xsim[0, 1], marker='^',
markerfacecolor='r', markeredgecolor='r')
ax.plot(xsim[-1, 0], xsim[-1, 1], marker='v',
markerfacecolor='g', markeredgecolor='g')
plt.show()
| 30.761194 | 83 | 0.607472 | 679 | 4,122 | 3.612666 | 0.273932 | 0.008969 | 0.007338 | 0.009784 | 0.157358 | 0.1064 | 0.087648 | 0.013045 | 0 | 0 | 0 | 0.068285 | 0.200631 | 4,122 | 133 | 84 | 30.992481 | 0.676176 | 0.08952 | 0 | 0.039604 | 0 | 0 | 0.065451 | 0.02441 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.138614 | 0 | 0.138614 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97a1ff60f08e5d9b17054c1f2b8239c0c712ac7b | 17,655 | py | Python | vol3/vol3-python-examples/lib/aifh/som.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 777 | 2015-01-17T22:48:26.000Z | 2022-03-31T01:10:07.000Z | vol3/vol3-python-examples/lib/aifh/som.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 17 | 2015-01-02T14:41:24.000Z | 2017-09-02T02:57:09.000Z | vol3/vol3-python-examples/lib/aifh/som.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 445 | 2015-01-26T17:01:49.000Z | 2022-03-24T07:16:58.000Z | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import scipy.spatial
import numpy as np
import scipy as sp
import sys
class SelfOrganizingMap:
    """
    The weights of the output neurons are based on the input from the input
    neurons.
    """
    def __init__(self, input_count, output_count):
        """
        The constructor.
        :param input_count: Number of input neurons
        :param output_count: Number of output neurons
        """
        self.input_count = input_count
        self.output_count = output_count
        self.weights = np.zeros([self.output_count, self.input_count])
        self.distance = sp.spatial.distance.euclidean

    def calculate_error(self, data):
        bmu = BestMatchingUnit(self)

        bmu.reset()

        # Determine the BMU for each training element.
        for input in data:
            bmu.calculate_bmu(input)

        # update the error
        return bmu.worst_distance / 100.0

    def classify(self, input):
        if len(input) > self.input_count:
            raise Exception("Can't classify SOM with input size of {} "
                            "with input data of count {}".format(self.input_count, len(input)))

        min_dist = float("inf")
        result = -1

        for i in range(self.output_count):
            # self.distance is scipy's euclidean() function, so call it directly
            dist = self.distance(input, self.weights[i])
            if dist < min_dist:
                min_dist = dist
                result = i

        return result

    def reset(self):
        self.weights = (np.random.rand(self.weights.shape[0], self.weights.shape[1]) * 2.0) - 1
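# A minimal usage sketch (illustrative; random data, assumed 3-feature inputs):
#
#   som = SelfOrganizingMap(input_count=3, output_count=4)
#   som.reset()                               # random weights in [-1, 1]
#   data = np.random.rand(10, 3) * 2.0 - 1.0  # ten 3-dimensional samples
#   print(som.classify(data[0]))              # index of the closest output neuron
#   print(som.calculate_error(data))          # worst BMU distance / 100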
class BestMatchingUnit:
    """
    The "Best Matching Unit" or BMU is a very important concept in the training
    for a SOM. The BMU is the output neuron that has weight connections to the
    input neurons that most closely match the current input vector. This neuron
    (and its "neighborhood") are the neurons that will receive training.

    This class also tracks the worst distance (of all BMU's). This gives some
    indication of how well the network is trained, and thus becomes the "error"
    of the entire network.
    """
    def __init__(self, som):
        """
        Construct a BestMatchingUnit class. The training class must be provided.
        :param som: The SOM to evaluate.
        """
        # The owner of this class.
        self.som = som

        # What is the worst BMU distance so far, this becomes the error for the
        # entire SOM.
        self.worst_distance = 0

    def reset(self):
        # Reset the worst distance for a new evaluation pass.
        self.worst_distance = 0

    def calculate_bmu(self, input):
        """
        Calculate the best matching unit (BMU). This is the output neuron that
        has the lowest Euclidean distance to the input vector.
        :param input: The input vector.
        :return: The output neuron number that is the BMU.
        """
        result = 0

        if len(input) > self.som.input_count:
            raise Exception(
                "Can't train SOM with input size of {} with input data of count {}.".format(
                    self.som.input_count, len(input)))

        # Track the lowest distance so far.
        lowest_distance = float("inf")

        for i in range(self.som.output_count):
            distance = self.calculate_euclidean_distance(self.som.weights, input, i)

            # Track the lowest distance, this is the BMU.
            if distance < lowest_distance:
                lowest_distance = distance
                result = i

        # Track the worst distance, this is the error for the entire network.
        if lowest_distance > self.worst_distance:
            self.worst_distance = lowest_distance

        return result

    def calculate_euclidean_distance(self, matrix, input, output_neuron):
        """
        Calculate the Euclidean distance for the specified output neuron and the
        input vector. This is the square root of the sum of the squares of the
        differences between the weight and input vectors.
        :param matrix: The matrix to get the weights from.
        :param input: The input vector.
        :param output_neuron: The neuron we are calculating the distance for.
        :return: The Euclidean distance.
        """
        # Loop over all input data.
        diff = input - matrix[output_neuron]
        return np.sqrt(sum(diff * diff))
class BasicTrainSOM:
"""
This class implements competitive training, which would be used in a
winner-take-all neural network, such as the self organizing map (SOM). This
is an unsupervised training method, no ideal data is needed on the training
set. If ideal data is provided, it will be ignored.
Training is done by looping over all of the training elements and calculating
a "best matching unit" (BMU). This BMU output neuron is then adjusted to
better "learn" this pattern. Additionally, this training may be applied to
other "nearby" output neurons. The degree to which nearby neurons are update
is defined by the neighborhood function.
A neighborhood function is required to determine the degree to which
neighboring neurons (to the winning neuron) are updated by each training
iteration.
Because this is unsupervised training, calculating an error to measure
progress by is difficult. The error is defined to be the "worst", or longest,
Euclidean distance of any of the BMU's. This value should be minimized, as
learning progresses.
Because only the BMU neuron and its close neighbors are updated, you can end
up with some output neurons that learn nothing. By default these neurons are
not forced to win patterns that are not represented well. This spreads out
the workload among all output neurons. This feature is not used by default,
but can be enabled by setting the "forceWinner" property.
"""
def __init__(self, network, learning_rate, training, neighborhood):
# The neighborhood function to use to determine to what degree a neuron
# should be "trained".
self.neighborhood = neighborhood
# The learning rate. To what degree should changes be applied.
self.learning_rate = learning_rate
# The network being trained.
self.network = network
# How many neurons in the input layer.
self.input_neuron_count = network.input_count
# How many neurons in the output layer.
self.output_neuron_count = network.output_count
# Utility class used to determine the BMU.
self.bmu_util = BestMatchingUnit(network)
# Correction matrix.
self.correction_matrix = np.zeros([network.output_count, network.input_count])
# True is a winner is to be forced, see class description, or forceWinners
# method. By default, this is true.
self.force_winner = False
# When used with autodecay, this is the starting learning rate.
self.start_rate = 0
# When used with autodecay, this is the ending learning rate.
self.end_rate = 0
# When used with autodecay, this is the starting radius.
self.start_radius = 0
# When used with autodecay, this is the ending radius.
self.end_radius = 0
# This is the current autodecay learning rate.
self.auto_decay_rate = 0
# This is the current autodecay radius.
self.auto_decay_radius = 0
# The current radius.
self.radius = 0
# Training data.
self.training = training
def _apply_correction(self):
"""
Loop over the synapses to be trained and apply any corrections that were
determined by this training iteration.
"""
np.copyto(self.network.weights, self.correction_matrix)
def auto_decay(self):
"""
Should be called each iteration if autodecay is desired.
"""
if self.radius > self.end_radius:
self.radius += self.auto_decay_radius
if self.learning_rate > self.end_rate:
self.learning_rate += self.auto_decay_rate
self.neighborhood.radius = self.radius
def copy_input_pattern(self, matrix, output_neuron, input):
"""
Copy the specified input pattern to the weight matrix. This causes an
output neuron to learn this pattern "exactly". This is useful when a
winner is to be forced.
:param matrix: The matrix that is the target of the copy.
:param output_neuron: The output neuron to set.
:param input: The input pattern to copy.
"""
matrix[output_neuron, :] = input
def decay(self, decay_rate, decay_radius):
"""
Decay the learning rate and radius by the specified amount.
:param decay_rate: The percent to decay the learning rate by.
:param decay_radius: The percent to decay the radius by.
"""
self.radius *= (1.0 - decay_radius)
self.learning_rate *= (1.0 - decay_rate)
self.neighborhood.radius = self.radius
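        # Example (illustrative): decay(0.01, 0.01) multiplies both the
        # learning rate and the radius by 0.99, so calling it once per
        # iteration yields an exponential decay schedule.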
    def _determine_new_weight(self, weight, input, current_neuron, bmu):
        """
        Determine the weight adjustment for a single neuron during a training
        iteration.
        :param weight: The starting weight.
        :param input: The input to this neuron.
        :param current_neuron: The neuron whose weight is being updated.
        :param bmu: The neuron that "won", the best matching unit.
        :return: The new weight value.
        """
        return weight \
            + (self.neighborhood.fn(current_neuron, bmu)
               * self.learning_rate * (input - weight))
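    # Worked example (illustrative): with a neighborhood value of 1.0 (the
    # BMU itself), learning_rate 0.5, weight 0.2 and input 1.0, the update
    # above gives 0.2 + 1.0 * 0.5 * (1.0 - 0.2) = 0.6, i.e. the weight moves
    # halfway toward the input.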
    def _force_winners(self, matrix, won, least_represented):
        """
        Force any neurons that did not win to off-load patterns from overworked
        neurons.
        :param matrix: The weight matrix to modify.
        :param won: An array that specifies how many times each output neuron has "won".
        :param least_represented: The training pattern that is least represented by this neural network.
        :return: True if a winner was forced.
        """
        max_activation = float("-inf")
        max_activation_neuron = -1
        output = self.compute(self.network, least_represented)
# Loop over all of the output neurons. Consider any neurons that were
# not the BMU (winner) for any pattern. Track which of these
# non-winning neurons had the highest activation.
for output_neuron in range(len(won)):
# Only consider neurons that did not "win".
if won[output_neuron] == 0:
if (max_activation_neuron == -1) \
or (output[output_neuron] > max_activation):
max_activation = output[output_neuron]
max_activation_neuron = output_neuron
        # If a neuron was found that did not activate for any patterns, then
        # force it to "win" the least represented pattern.
if max_activation_neuron != -1:
self.copy_input_pattern(matrix, max_activation_neuron, least_represented)
return True
else:
return False
def iteration(self):
"""
Perform one training iteration.
"""
# Reset the BMU and begin this iteration.
self.bmu_util.reset()
won = [0] * self.output_neuron_count
least_represented_activation = float("inf")
least_represented = None
        # Reset the correction matrix for this iteration.
        self.correction_matrix.fill(0)
# Determine the BMU for each training element.
for input in self.training:
bmu = self.bmu_util.calculate_bmu(input)
won[bmu] += 1
# If we are to force a winner each time, then track how many
# times each output neuron becomes the BMU (winner).
if self.force_winner:
# Get the "output" from the network for this pattern. This
# gets the activation level of the BMU.
output = self.compute(self.network, input)
# Track which training entry produces the least BMU. This
# pattern is the least represented by the network.
                if output[bmu] < least_represented_activation:
                    least_represented_activation = output[bmu]
                    least_represented = input
            self._train(bmu, self.network.weights, input)
if self.force_winner:
# force any non-winning neurons to share the burden somewhat
            if not self._force_winners(self.network.weights, won, least_represented):
                self._apply_correction()
        else:
            self._apply_correction()
def set_auto_decay(self, planned_iterations, start_rate, end_rate, start_radius, end_radius):
"""
Setup autodecay. This will decrease the radius and learning rate from the
start values to the end values.
:param planned_iterations: The number of iterations that are planned. This allows the
decay rate to be determined.
:param start_rate: The starting learning rate.
:param end_rate: The ending learning rate.
:param start_radius: The starting radius.
:param end_radius: The ending radius.
"""
self.start_rate = start_rate
self.end_rate = end_rate
self.start_radius = start_radius
self.end_radius = end_radius
self.auto_decay_radius = (end_radius - start_radius) / planned_iterations
self.auto_decay_rate = (end_rate - start_rate) / planned_iterations
self.set_params(self.start_rate, self.start_radius)
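        # Example (illustrative): set_auto_decay(100, 0.8, 0.1, 10, 1) yields
        # auto_decay_rate = (0.1 - 0.8) / 100 = -0.007 and
        # auto_decay_radius = (1 - 10) / 100 = -0.09, so each auto_decay()
        # call steps the rate and radius down toward their end values.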
def set_params(self, rate, radius):
"""
Set the learning rate and radius.
:param rate: The new learning rate.
        :param radius: The new radius.
"""
self.radius = radius
self.learning_rate = rate
self.neighborhood.radius = radius
def get_status(self):
"""
:return: A string display of the status.
"""
result = "Rate="
result += str(self.learning_rate)
result += ", Radius="
result += str(self.radius)
return result
def _train(self, bmu, matrix, input):
"""
Train for the specified synapse and BMU.
:param bmu: The best matching unit for this input.
:param matrix: The synapse to train.
:param input: The input to train for.
:return:
"""
# adjust the weight for the BMU and its neighborhood
for output_neuron in range(self.output_neuron_count):
self._train_pattern(matrix, input, output_neuron, bmu)
def _train_pattern(self, matrix, input, current, bmu):
"""
Train for the specified pattern.
:param matrix: The synapse to train.
:param input: The input pattern to train for.
:param current: The current output neuron being trained.
:param bmu: The best matching unit, or winning output neuron.
"""
for input_neuron in range(self.input_neuron_count):
current_weight = matrix[current][input_neuron]
input_value = input[input_neuron]
new_weight = self._determine_new_weight(current_weight,
input_value, current, bmu)
self.correction_matrix[current][input_neuron] = new_weight
def train_single_pattern(self, pattern):
"""
Train the specified pattern. Find a winning neuron and adjust all neurons
according to the neighborhood function.
:param pattern: The pattern to train.
"""
bmu = self.bmu_util.calculate_bmu(pattern)
self._train(bmu, self.network.weights, pattern)
self._apply_correction()
    def compute(self, som, input):
        """
        Calculate the output of the SOM, for each output neuron. Typically,
        you will use the classify method instead of calling this method.
        :param som: The SOM network to compute the output for.
        :param input: The input pattern.
        :return: The output activation of each output neuron.
        """
        result = np.zeros(som.output_count)
        for i in range(som.output_count):
            # The activation of each output neuron is the dot product of its
            # weight row with the input vector.
            result[i] = np.dot(np.asarray(input), som.weights[i])
return result
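# Minimal usage sketch (illustrative; `network`, `training_data` and
# `neighborhood` are assumed objects matching the interfaces used above):
#
#   train = BasicTrainSOM(network, 0.7, training_data, neighborhood)
#   train.set_auto_decay(100, 0.8, 0.1, 10, 1)
#   for _ in range(100):
#       train.iteration()
#       train.auto_decay()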
| 37.886266 | 114 | 0.63625 | 2,265 | 17,655 | 4.86181 | 0.170861 | 0.030512 | 0.008173 | 0.009807 | 0.162368 | 0.088086 | 0.048039 | 0.048039 | 0.03587 | 0.025608 | 0 | 0.003458 | 0.295667 | 17,655 | 465 | 115 | 37.967742 | 0.882107 | 0.470292 | 0 | 0.095238 | 0 | 0 | 0.019378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130952 | false | 0 | 0.02381 | 0 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97a57543f9dc26b5f9e4368297bf82071ccf16c7 | 1,855 | py | Python | src/GUI/custom_widgets.py | QWERTSKIHACK/peniot | 8b5c5c4dddb5adf53977c3e2e99e645a086f1f0b | [
"MIT"
] | 143 | 2019-12-31T08:12:36.000Z | 2022-03-31T15:59:51.000Z | src/GUI/custom_widgets.py | QWERTSKIHACK/peniot | 8b5c5c4dddb5adf53977c3e2e99e645a086f1f0b | [
"MIT"
] | 5 | 2020-01-28T15:47:23.000Z | 2022-02-23T11:18:55.000Z | src/GUI/custom_widgets.py | QWERTSKIHACK/peniot | 8b5c5c4dddb5adf53977c3e2e99e645a086f1f0b | [
"MIT"
] | 39 | 2019-12-30T22:19:38.000Z | 2022-03-17T10:24:37.000Z | from Tkinter import *
from hard_coded_texts import get_project_name
class Header(Frame):
"""
Generic header template class which is used for construction of different screens
"""
def __init__(self, parent_window):
Frame.__init__(self, parent_window)
# Configure the window
self.configure(background="white")
# Create the header
header = Label(self, text=get_project_name(), width=55, font=("Arial", 20), height=5)
header.grid()
header.configure(background="white")
class CustomButton(Button):
"""
Generic button template to use them throughout the screens
"""
def __init__(self, parent_window, text, _function, row, columnspan=None, sticky=None, column=None, height=2,
foreground="black"):
Button.__init__(self, parent_window, command=_function, text=text, font=("Arial", 15), borderwidth=0,
height=height, highlightthickness=0, background="white", foreground=foreground)
self.grid(row=row, columnspan=columnspan, sticky=sticky, column=column)
class CustomLabel(Label):
"""
Generic label template to use them throughout the screens
"""
def __init__(self, parent_window, text, row, column, rowspan=None, columnspan=None, sticky=None):
Label.__init__(self, parent_window, text=text, font=("Arial", 15))
self.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, sticky=sticky)
class CustomRadiobutton(Radiobutton):
"""
Generic radio button template to use them throughout the screens
"""
def __init__(self, parent_window, text, row, column, sticky, variable, value):
Radiobutton.__init__(self, parent_window, text=text, font=("Arial", 13), variable=variable, value=value)
self.grid(row=row, column=column, sticky=sticky)
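# Minimal usage sketch (illustrative, not part of the original module):
#
#   root = Tk()
#   Header(root).grid(row=0)
#   CustomButton(root, "Start", lambda: None, row=1)
#   root.mainloop()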
| 37.1 | 112 | 0.686253 | 222 | 1,855 | 5.518018 | 0.315315 | 0.052245 | 0.091429 | 0.130612 | 0.325714 | 0.308571 | 0.241633 | 0.241633 | 0.181224 | 0.181224 | 0 | 0.009447 | 0.201078 | 1,855 | 49 | 113 | 37.857143 | 0.817139 | 0.163342 | 0 | 0 | 0 | 0 | 0.026846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97a757d0e58b59160fb7713ada622222e925c521 | 4,479 | py | Python | Code/search.py | Keyology/cs-1.3-2020 | 7b6f02c76dc16f1abafc613ebe6088d51b36b3be | [
"MIT"
] | null | null | null | Code/search.py | Keyology/cs-1.3-2020 | 7b6f02c76dc16f1abafc613ebe6088d51b36b3be | [
"MIT"
] | 4 | 2020-02-17T23:27:06.000Z | 2020-03-10T20:21:22.000Z | Code/search.py | Keyology/cs-1.3-2020 | 7b6f02c76dc16f1abafc613ebe6088d51b36b3be | [
"MIT"
] | null | null | null | #!python
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
# implement linear_search_iterative and linear_search_recursive below, then
# change this to call your implementation to verify it passes all tests
# return linear_search_iterative(array, item)
return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
# loop over all array values until item is found
for index, value in enumerate(array):
if item == value:
return index # found
return None # not found
def linear_search_recursive(array, item, index=0):
# TODO: implement linear search recursively here
''' Time complexity is O(n) '''
# check if item is in array
if array[index] == item:
return index
    # if item is not in the array, return None
    if index == len(array) - 1 and array[index] != item:
        return None
index += 1
return linear_search_recursive(array, item, index)
# save the index to a variable
# create a condition to check if array[index] == item
# if yes return index
#otherwise call the function again and update the index
# create another condition to check if the index is at the last item in the list
# once implemented, change linear_search to call linear_search_recursive
# to verify that your recursive implementation passes all tests
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
    # implement binary_search_iterative and binary_search_recursive below, then
    # change this to call your implementation to verify it passes all tests
    # return binary_search_iterative(array, item)
return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
# TODO: implement binary search iteratively here
''' Time complexity is O(log n) '''
# Sort the array
array = sorted(array)
# set the left value to the first index of the list which is zero
left = 0
# set the right to the last index in the array
right = len(array) - 1
while left <= right:
# get the middle index of the array
middle_value = (left + right) // 2
        # if the middle value is less than the item, move the left index
        # just past the middle
        if array[middle_value] < item:
            left = middle_value + 1
        # if the middle value is greater than the item, move the right index
        # just before the middle
        if array[middle_value] > item:
            right = middle_value - 1
# if the middle value == the target value return the middle value index
if array[middle_value] == item:
return middle_value
# if the item is not in the array return NONE
return None
# reasign array to sorted array
# only sort once
# create a variable named median and set it to int(len(array) / 2)
# once implemented, change binary_search to call binary_search_iterative
# to verify that your iterative implementation passes all tests
def binary_search_recursive(array, item, left=None, right=None):
# TODO: implement binary search recursively here
''' Time complexity is O(log n) '''
# Sort the array
array = sorted(array)
# get the middle index of the array
if left == None and right == None:
left = 0
right = len(array) -1
middle_value = (left + right) // 2
print('MIDDLE VALUE', middle_value)
if left > right:
return None
# if the middle value == the target value return the middle value index
if array[middle_value] == item:
print('---MIDDLE Value ---',middle_value)
return middle_value
    # if the middle value is less than the item, search the upper half
    if array[middle_value] < item:
        left = middle_value + 1
        print('---LEFT----', left)
        return binary_search_recursive(array, item, left, right)
    # if the middle value is greater than the item, search the lower half
    if array[middle_value] > item:
        right = middle_value - 1
        print('----RIGHT 1----', right)
        return binary_search_recursive(array, item, left, right)
return binary_search_recursive(array, item, left, right)
# once implemented, change binary_search to call binary_search_recursive
# to verify that your recursive implementation passes all tests
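# Minimal sanity check (illustrative, not part of the original assignment).
# Note that binary_search sorts its input internally, so returned indices
# refer to positions in sorted(array).
if __name__ == "__main__":
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    assert linear_search(data, 5) == 4
    assert linear_search(data, 7) is None
    # sorted(data) == [1, 1, 2, 3, 4, 5, 6, 9], so 4 sits at index 4
    assert binary_search(data, 4) == 4
    assert binary_search(data, 8) is None
    print("all search checks passed")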
| 31.542254 | 95 | 0.663764 | 636 | 4,479 | 4.584906 | 0.150943 | 0.090535 | 0.048011 | 0.057613 | 0.644033 | 0.540123 | 0.481824 | 0.424554 | 0.39369 | 0.336077 | 0 | 0.00459 | 0.270373 | 4,479 | 141 | 96 | 31.765957 | 0.887699 | 0.523778 | 0 | 0.571429 | 0 | 0 | 0.027683 | 0 | 0 | 0 | 0 | 0.007092 | 0 | 1 | 0.122449 | false | 0 | 0 | 0 | 0.387755 | 0.081633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97affcf5430ea542d75da78b8065ecc199a3cc76 | 1,898 | py | Python | PyMess/FIPS/ANN/ConvertData.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | PyMess/FIPS/ANN/ConvertData.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | PyMess/FIPS/ANN/ConvertData.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | [
"MIT"
] | null | null | null | import numpy as np
from ... import Globals
from .ReadData import ReadData
import os
import RecarrayTools as RT
def _DateStrToDateUT(s):
'''
convert date on the format YYYY-MM-DDThh:mm:ss.sss to an integer date
and a floating point time.
'''
Y = np.array([np.int32(x[0:4]) for x in s])
M = np.array([np.int32(x[5:7]) for x in s])
D = np.array([np.int32(x[8:10]) for x in s])
h = np.array([np.float32(x[11:13]) for x in s])
m = np.array([np.float32(x[14:16]) for x in s])
s = np.array([np.float32(x[17:]) for x in s])
Date = (Y*10000 + M*100 + D).astype('int32')
ut = (h + m/60.0 + s/3600.0).astype('float32')
return Date,ut
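# Example (illustrative): _DateStrToDateUT(['2011-03-29T12:30:00.000'])
# returns Date == [20110329] and ut == [12.5] (12 h 30 min as decimal hours).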
def ConvertData():
'''
Convert the James et al 2020 data to binaries
'''
#create the output dtype
dtype = [ ('Date','int32'),
('ut','float32'),
('nk','float32'),
('tk','float32'),
('K','float32'),
('SplitProb','float32',(8,)),
('Prob','float32'),
('SplitClass','int8',(8,)),
('Class','int8')]
#read in the data file
data = ReadData()
#create a recarray
out = np.recarray(data.size,dtype=dtype)
#convert dates and times
out.Date,out.ut = _DateStrToDateUT(data.UT)
#copy the other fields across
out.nk = data.Density
out.tk = data.Temperature
out.K = data.Kappa
for i in range(0,8):
out.SplitProb[:,i] = data['P{:d}'.format(i)].astype('float32')
out.SplitClass[:,i] = data['Class{:d}'.format(i)].astype('int8')
out.Prob = data.P
out.Class = data.Class
#find the unique dates
ud = np.unique(out.Date)
#create the output directory
outdir = Globals.MessPath + 'FIPS/ANN/bin/'
if not os.path.isdir(outdir):
os.system('mkdir -pv '+outdir)
#file name format
fnfmt = outdir + '{:08d}.bin'
#loop through dates, saving a recarray file for each one
for i in range(ud.size):
use = np.where(out.Date == ud[i])[0]
fname = fnfmt.format(ud[i])
RT.SaveRecarray(out[use],fname)
| 24.025316 | 70 | 0.630664 | 318 | 1,898 | 3.757862 | 0.383648 | 0.035146 | 0.045188 | 0.035146 | 0.093724 | 0.028452 | 0.028452 | 0.028452 | 0 | 0 | 0 | 0.050837 | 0.181243 | 1,898 | 78 | 71 | 24.333333 | 0.718147 | 0.19705 | 0 | 0 | 0 | 0 | 0.109772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.111111 | 0 | 0.177778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97b281cd2e09060653c37e8623382835d9e1206e | 3,891 | py | Python | 2_data_files/plotter.py | Abhipanda4/RQs_in_Regex_Graphs | 80b86b5b3f92ef28102ac0f5049bb495b5cc07f9 | [
"Apache-2.0"
] | 2 | 2018-10-09T09:59:45.000Z | 2021-11-21T17:01:47.000Z | 2_data_files/plotter.py | Abhipanda4/RQs_in_Regex_Graphs | 80b86b5b3f92ef28102ac0f5049bb495b5cc07f9 | [
"Apache-2.0"
] | null | null | null | 2_data_files/plotter.py | Abhipanda4/RQs_in_Regex_Graphs | 80b86b5b3f92ef28102ac0f5049bb495b5cc07f9 | [
"Apache-2.0"
] | null | null | null | import csv
import matplotlib.pyplot as plt
import numpy as np
def index_sizes():
fp = open("./index_size.csv")
x = csv.reader(fp, delimiter='\t')
sizes = []
for line in x:
size = float(line[0].strip()[:-1])
sizes.append(size)
temp = sorted(sizes[:-1])
nodes = [i for i in range(100, 5001, 100)]
f = plt.figure()
plt.xlabel("Number of node-color pairs stored")
plt.ylabel("Size of partial index(in MBs)")
plt.plot(nodes, temp)
plt.show()
f.savefig("../figures/index_sizes.pdf", bbox_inches='tight')
def MC_size_dependence():
fp = open("./MC_analysis.csv", 'r')
x = csv.reader(fp)
times = [int(i[1]) for i in x]
p = []
for i in range(0, len(times), 100):
p.append(np.mean(times[i:i + 100]))
parts = [i for i in range(10, 501, 10)]
print(p)
plt.plot(parts, p)
plt.xlabel("Number of pairs stored")
plt.ylabel("Average time in ms")
plt.title("Variation of query times with size of index stored")
plt.show()
def pre_process_time():
fp1 = open("./TC_pre_pro.txt", 'r')
fp2 = open("./MC_pre_pro.txt", 'r')
nodes = [i for i in range(10, 21, 2)]
f1 = csv.reader(fp1)
f2 = csv.reader(fp2)
    p2 = [[] for _ in range(5)]  # independent sublists, avoids aliasing
p1 = [int(i[0]) for i in f1]
p2_temp = [int(i[0]) for i in f2]
print(p2_temp)
for i in range(5):
p2[i] = [p2_temp[j] / 20 for j in range(i, len(p2_temp), 5)]
f = plt.figure()
plt.plot(nodes, p1, label="Complete transitive closure")
for i in range(5):
plt.plot(nodes, p2[i], label=str((i + 1) * 10) + "% pairs computed")
plt.xlabel("|V|(in thousands)")
plt.ylabel("Time taken to build table(in ms)")
plt.title("Comparision of pre-processing times for Algorithm 1 & 3")
plt.legend()
plt.show()
f.savefig("../figures/pre_pro_10iter_MC.pdf", bbox_inches='tight')
def hits_vs_miss():
fp = csv.reader(open("./MC_size_dependence.csv", 'r'))
tmp = list(fp)
lines = [i for i in tmp if i[0].find("Time") == -1]
T = []
H = []
M = []
for i in range(0, len(lines), 5):
times = [int(lines[i + j][1]) for j in range(5)]
T.append(sum(times) / 5)
hits = [int(lines[i + j][2]) for j in range(5)]
H.append(sum(hits) / 5)
misses = [int(lines[i + j][3]) for j in range(5)]
M.append(sum(misses) / 5)
# print(T)
# print(H)
# print(M)
X = [6000, 12000, 18000, 24000, 30000]
plt.subplot(1, 2, 1)
plt.plot(X, H, label="Hits")
plt.plot(X, M, label="Misses")
plt.xlabel("Number of node-color pairs stored")
plt.ylabel("Number of hits/misses")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(X, T, label="Query Times")
plt.xlabel("Number of node-color pairs stored")
plt.ylabel("Time in ms")
plt.title("Query times")
plt.show()
def f(r):
timings = []
x = []
count = 0
for line in r:
x.append(int(line[1]))
count += 1
if count == 20:
timings.append(sum(x) / 20)
x = []
count = 0
return timings
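# Example (illustrative): f() collapses every 20 consecutive readings into
# their mean, so 100 timing rows become 5 averaged data points.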
def edge_size():
f1 = open("./data_algo1.csv", 'r')
f2 = open("./data_algo2.csv", 'r')
f3 = open("./data_algo3.csv", 'r')
r1 = csv.reader(f1)
r2 = csv.reader(f2)
r3 = csv.reader(f3)
x1 = f(r1)
x2 = f(r2)
x3 = f(r3)
x1[3] = x1[3] / 100
print(x1)
print(x2)
print(x3)
x = [2, 3, 4, 5, 6]
fig = plt.figure()
plt.plot(x, x1, label="Full Transitive Closure")
plt.plot(x, x2, label="Partial Transitive Closure")
plt.plot(x[:3], x3[:3], label="BFS")
plt.legend()
plt.xlabel("Edge-Node Ratio")
plt.ylabel("Average Time taken for query in milliseconds")
plt.show()
fig.savefig("../figures/edge_variation.pdf")
# pre_process_time()
# hits_vs_miss()
# MC_size_dependence()
# edge_size()
index_sizes()
| 25.431373 | 76 | 0.561809 | 627 | 3,891 | 3.425837 | 0.23445 | 0.020484 | 0.030726 | 0.035847 | 0.217877 | 0.111732 | 0.064246 | 0.064246 | 0.064246 | 0.064246 | 0 | 0.053472 | 0.25983 | 3,891 | 152 | 77 | 25.598684 | 0.692361 | 0.023901 | 0 | 0.161017 | 0 | 0 | 0.207388 | 0.029288 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.025424 | 0 | 0.084746 | 0.042373 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97b47597bcd7e262415d73a5d1a8d5d991bcfe66 | 3,022 | py | Python | generic_editor.py | jcooper-korg/talon_user | ef086f9890448f7d633a4f02b36a18de853581a8 | [
"0BSD"
] | 1 | 2018-09-22T22:34:35.000Z | 2018-09-22T22:34:35.000Z | generic_editor.py | jcooper-korg/talon_user | ef086f9890448f7d633a4f02b36a18de853581a8 | [
"0BSD"
] | null | null | null | generic_editor.py | jcooper-korg/talon_user | ef086f9890448f7d633a4f02b36a18de853581a8 | [
"0BSD"
] | null | null | null | # https://github.com/JonathanNickerson/talon_voice_user_scripts
# jsc added indent/outdent and simplified jolt
from talon.voice import Key, press, Str, Context
ctx = Context('generic_editor') # , bundle='com.microsoft.VSCode')
numeral_map = dict((str(n), n) for n in range(0, 20))
for n in [20, 30, 40, 50, 60, 70, 80, 90]:
numeral_map[str(n)] = n
numeral_map["oh"] = 0 # synonym for zero
numerals = ' (' + ' | '.join(sorted(numeral_map.keys())) + ')+'
optional_numerals = ' (' + ' | '.join(sorted(numeral_map.keys())) + ')*'
def text_to_number(words):
tmp = [str(s).lower() for s in words]
words = [parse_word(word) for word in tmp]
result = 0
factor = 1
for word in reversed(words):
if word not in numerals:
raise Exception('not a number')
result = result + factor * int(numeral_map[word])
factor = 10 * factor
return result
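# Example (illustrative): text_to_number(['four', 'two']) == 42; digits are
# spoken one at a time and combined by powers of ten.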
def parse_word(word):
word = word.lstrip('\\').split('\\', 1)[0]
return word
def jump_to_bol(m):
line = text_to_number(m)
press('cmd-l')
Str(str(line))(None)
press('enter')
def jump_to_end_of_line():
press('cmd-right')
def jump_to_beginning_of_text():
press('cmd-left')
def jump_to_nearly_end_of_line():
press('left')
def jump_to_bol_and(then):
def fn(m):
if len(m._words) > 1:
jump_to_bol(m._words[1:])
else:
press('ctrl-a')
press('cmd-left')
then()
return fn
def jump_to_eol_and(then):
def fn(m):
if len(m._words) > 1:
jump_to_bol(m._words[1:])
press('cmd-right')
then()
return fn
def toggle_comments():
# works in VSCode with Finnish keyboard layout
# press('cmd-shift-7')
# does not work in VSCode, see https://github.com/talonvoice/talon/issues/3
press('cmd-/')
def snipline():
press('shift-cmd-right')
press('delete')
press('delete')
press('ctrl-a')
press('cmd-left')
keymap = {
'sprinkle' + optional_numerals: jump_to_bol,
'spring' + optional_numerals: jump_to_eol_and(jump_to_beginning_of_text),
'dear' + optional_numerals: jump_to_eol_and(lambda: None),
'smear' + optional_numerals: jump_to_eol_and(jump_to_nearly_end_of_line),
'trundle' + optional_numerals: jump_to_bol_and(toggle_comments),
'jolt': Key('ctrl-a cmd-left shift-down cmd-c down cmd-v' ), # jsc simplified
'snipline' + optional_numerals: jump_to_bol_and(snipline),
# NB these do not work properly if there is a selection
'snipple': Key('shift-cmd-left delete'),
'snipper': Key('shift-cmd-right delete'),
'shackle': Key('cmd-right shift-cmd-left'),
'bracken': [Key('cmd-shift-ctrl-right')],
'shockey': Key('ctrl-a cmd-left enter up'),
'shockoon': Key('cmd-right enter'),
'sprinkoon' + numerals: jump_to_eol_and(lambda: press('enter')),
'olly': Key('cmd-a'),
# jsc added
'(indent | shabble)': Key('cmd-['),
'(outdent | shabber)': Key('cmd-]'),
}
ctx.keymap(keymap)
| 26.743363 | 82 | 0.625745 | 438 | 3,022 | 4.127854 | 0.319635 | 0.056416 | 0.034845 | 0.073009 | 0.277102 | 0.22677 | 0.082965 | 0.082965 | 0.045354 | 0.045354 | 0 | 0.013491 | 0.215089 | 3,022 | 112 | 83 | 26.982143 | 0.748735 | 0.12409 | 0 | 0.246753 | 0 | 0 | 0.1783 | 0 | 0.025974 | 0 | 0 | 0 | 0 | 1 | 0.155844 | false | 0 | 0.012987 | 0 | 0.220779 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97b62ed540d6ffc6ab71d19b389a5534151aeb3d | 4,260 | py | Python | setup.py | blschatz/pyHDT | 9553fd49e1e89a89d248e5d75b3a49ad3b3e124f | [
"MIT"
] | null | null | null | setup.py | blschatz/pyHDT | 9553fd49e1e89a89d248e5d75b3a49ad3b3e124f | [
"MIT"
] | null | null | null | setup.py | blschatz/pyHDT | 9553fd49e1e89a89d248e5d75b3a49ad3b3e124f | [
"MIT"
] | null | null | null | # setup.py
# Author: Thomas MINIER - MIT License 2017-2018
from setuptools import setup, Extension
from os import listdir
import pybind11
import distutils
import platform
__pyhdt_version__ = "1.2.1"
with open('README.rst') as file:
long_description = file.read()
def list_files(path, extension=".cpp", exclude="S.cpp"):
"""List paths to all files that ends with a given extension"""
return ["%s/%s" % (path, f) for f in listdir(path) if f.endswith(extension) and (not f.endswith(exclude))]
# pyHDT source files
sources = [
"src/hdt.cpp",
"src/hdt_document.cpp",
"src/triple_iterator.cpp",
"src/tripleid_iterator.cpp",
"src/join_iterator.cpp"
]
# HDT source files
sources += list_files("serd-0.30.0/src/", extension=".c")
sources += list_files("hdt-cpp-1.3.2/libcds/src/static/bitsequence")
sources += list_files("hdt-cpp-1.3.2/libcds/src/static/coders")
sources += list_files("hdt-cpp-1.3.2/libcds/src/static/mapper")
sources += list_files("hdt-cpp-1.3.2/libcds/src/static/sequence")
sources += list_files("hdt-cpp-1.3.2/libcds/src/static/permutation")
sources += list_files("hdt-cpp-1.3.2/libcds/src/utils")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/bitsequence")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/dictionary")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/hdt")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/header")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/huffman")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/libdcs")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/libdcs/fmindex")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/rdf")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/sequence")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/triples")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/util")
sources += list_files("hdt-cpp-1.3.2/libhdt/src/sparql")
# pybind11 + pyHDT + libcds + HDT-lib headers
include_dirs = [
pybind11.get_include(),
pybind11.get_include(True),
"include/",
"hdt-cpp-1.3.2/libhdt/include/",
"hdt-cpp-1.3.2/libhdt/src/dictionary/",
"hdt-cpp-1.3.2/libhdt/src/sparql/",
"hdt-cpp-1.3.2/libcds/include/",
"hdt-cpp-1.3.2/libcds/src/static/bitsequence",
"hdt-cpp-1.3.2/libcds/src/static/coders",
"hdt-cpp-1.3.2/libcds/src/static/mapper",
"hdt-cpp-1.3.2/libcds/src/static/permutation",
"hdt-cpp-1.3.2/libcds/src/static/sequence",
"hdt-cpp-1.3.2/libcds/src/utils",
"serd-0.30.0"
]
# Need to build in c++11 minimum
# TODO add a check to use c++14 or c++17 if available
extra_compile_args_macos = ["-std=c++11", "-DHAVE_SERD", "-DHAVE_POSIX_MEMALIGN"]
extra_compile_args_win = ["-DHAVE_SERD", "-DWIN32", "-D_AMD64_", "-DUNICODE"]
plaf = platform.system()
if plaf == "Windows":
extra_compile_args = extra_compile_args_win
elif plaf == "Darwin":
extra_compile_args = extra_compile_args_macos
else:
extra_compile_args = ["-std=c++11", "-DHAVE_SERD", "-DHAVE_POSIX_MEMALIGN"]
# build HDT extension
hdt_extension = Extension("hdt", sources=sources, include_dirs=include_dirs,
extra_compile_args=extra_compile_args, language='c++')
# monkey patch the distutils compiler to enable compilation of the Serd parser source
# it is C, and the C++11 compile argument is incompatible
c = distutils.ccompiler.new_compiler
def wrapped_new_compiler_fn(*args, **kwargs):
compiler = c(*args, **kwargs)
c_c = compiler._compile
def wrapped_compiler_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == ".c":
return c_c(obj, src, ext, cc_args, [ "-DHAVE_SERD", "-std=c99" ], pp_opts)
else:
return c_c(obj, src, ext, cc_args, extra_postargs, pp_opts)
compiler._compile = wrapped_compiler_compile
return compiler
distutils.ccompiler.new_compiler = wrapped_new_compiler_fn
setup(
name="hdt",
version=__pyhdt_version__,
author="Thomas Minier",
author_email="thomas.minier@univ-nantes.fr",
url="https://github.com/Callidon/pyHDT",
description="Read and query HDT document with ease in Python",
long_description=long_description,
keywords=["hdt", "rdf", "semantic web", "search"],
license="MIT",
install_requires=['pybind11==2.2.4'],
ext_modules=[hdt_extension]
)
| 37.368421 | 110 | 0.698122 | 675 | 4,260 | 4.251852 | 0.248889 | 0.060627 | 0.068293 | 0.078049 | 0.447735 | 0.447735 | 0.40662 | 0.396516 | 0.329268 | 0.227875 | 0 | 0.036374 | 0.135211 | 4,260 | 113 | 111 | 37.699115 | 0.742671 | 0.102347 | 0 | 0.022727 | 0 | 0 | 0.38514 | 0.292465 | 0 | 0 | 0 | 0.00885 | 0 | 1 | 0.034091 | false | 0 | 0.056818 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97ba1bc69fa28bb340ca97364cb86adfcaf60e62 | 2,880 | py | Python | src/rest_api/mir_coords_to_csv.py | jonathanleinola/radiohead-master | f0854441c07aba0ccf51bf9ec8904b860eefd683 | [
"MIT"
] | null | null | null | src/rest_api/mir_coords_to_csv.py | jonathanleinola/radiohead-master | f0854441c07aba0ccf51bf9ec8904b860eefd683 | [
"MIT"
] | null | null | null | src/rest_api/mir_coords_to_csv.py | jonathanleinola/radiohead-master | f0854441c07aba0ccf51bf9ec8904b860eefd683 | [
"MIT"
] | null | null | null | import time
import sys
import urllib3
from time import sleep
import json
import csv
import datetime
import requests
from datetime import datetime
import subprocess # just for changing file ownership at the end of script
http = urllib3.PoolManager()
###############################################################################
DURATION = 2000 # How many timestamps you want? At 10 per second, 100 takes 10 s
TIMES = 10 # How many times per sec you want the timestamp
###############################################################################
### define filename to save timestamps (coords3.csv)
with open('coords.csv', mode='w') as csvfile: # open the csv file
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["X", "Y", "orientation","timestamp"])
    print("Coord queries running, wait {} s".format(DURATION / TIMES))
def main():
#######################################################################
    ### change the url below to match the actual address for REST API calls
#######################################################################
url = 'http://192.168.12.20/api/v2.0.0/status' # url where to call the rest api
error=0
response = http.request('GET', url) # response values from REST API
### get the values from response jason object x,y,orientation ###
try:
x = json.loads(response.data)['position']['x']
y = json.loads(response.data)['position']['y']
orientation = json.loads(response.data)['position']['orientation']
except KeyError as error:
error=1
### get the timestamp %f')[:-3] gives second with 3 digits ###
timestamp = datetime.now().strftime('%Y/%m/%d %H:%M:%S.%f')[:-3]
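    ### e.g. '2020/03/15 14:02:37.123'; %f yields microseconds, [:-3] trims them to milliseconds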
### write the REST API values into csv file
if error != 1:
writer.writerow([x,y,orientation,timestamp])
else:
error=0
if __name__ == '__main__':
time_start = time.time()
i = 1
while True:
time_current = time.time()
if time_current > time_start + i / float(TIMES):
# print('{}: {}'.format(i, time_current))
main() # execute main function after every 100ms
i += 1
if i > DURATION: # break the prog when duration reached
break
print ("Coord queries done, have a nice day!")
################################################################################
### If issues with ownership of the file u can use subprocess.call function to
### execute shell commands such as:
### subprocess.call(['chown', '[user]:root','/home/user/Documents/coords3.csv'])
### change [user] to match username and the file path to correct folder
################################################################################
| 39.452055 | 87 | 0.518056 | 323 | 2,880 | 4.575851 | 0.473684 | 0.005413 | 0.026387 | 0.042625 | 0.107578 | 0.048714 | 0 | 0 | 0 | 0 | 0 | 0.017849 | 0.221875 | 2,880 | 72 | 88 | 40 | 0.641678 | 0.301042 | 0 | 0.044444 | 0 | 0 | 0.135099 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.222222 | 0 | 0.244444 | 0.088889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97be40091dbb9d0bd5f45ca454971f59e1fb204d | 3,511 | py | Python | timeeval_experiments/algorithms/mscred.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | timeeval_experiments/algorithms/mscred.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | timeeval_experiments/algorithms/mscred.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
import numpy as np
from timeeval.utils.window import ReverseWindowing
# post-processing for MSCRED
def post_mscred(scores: np.ndarray, args: dict) -> np.ndarray:
ds_length = args.get("dataset_details").length # type: ignore
gap_time = args.get("hyper_params", {}).get("gap_time", 10)
window_size = args.get("hyper_params", {}).get("window_size", 5)
max_window_size = max(args.get("hyper_params", {}).get("windows", [10, 30, 60]))
offset = (ds_length - (max_window_size - 1)) % gap_time
image_scores = ReverseWindowing(window_size=window_size).fit_transform(scores)
return np.concatenate([np.repeat(image_scores[:-offset], gap_time), image_scores[-offset:]])
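# Worked example (illustrative): with the default hyper-parameters
# (gap_time=10, window_size=5, max window 60) and a series of length 1000,
# offset = (1000 - 59) % 10 = 1, so every image score but the last is
# repeated gap_time times and the tail is padded with the final score.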
_mscred_parameters: Dict[str, Dict[str, Any]] = {
"batch_size": {
"defaultValue": 32,
"description": "Number of instances trained at the same time",
"name": "batch_size",
"type": "int"
},
"early_stopping_delta": {
"defaultValue": 0.05,
"description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop",
"name": "early_stopping_delta",
"type": "float"
},
"early_stopping_patience": {
"defaultValue": 10,
"description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop",
"name": "early_stopping_patience",
"type": "int"
},
"epochs": {
"defaultValue": 1,
"description": "Number of training iterations over entire dataset",
"name": "epochs",
"type": "int"
},
"gap_time": {
"defaultValue": 10,
"description": "Number of points to skip over between the generation of signature matrices",
"name": "gap_time",
"type": "int"
},
"learning_rate": {
"defaultValue": 0.001,
"description": "Learning rate for Adam optimizer",
"name": "learning_rate",
"type": "float"
},
"random_state": {
"defaultValue": 42,
"description": "Seed for the random number generator",
"name": "random_state",
"type": "int"
},
"split": {
"defaultValue": 0.8,
"description": "Train-validation split for early stopping",
"name": "split",
"type": "float"
},
"test_batch_size": {
"defaultValue": 256,
"description": "Number of instances used for validation and testing at the same time",
"name": "test_batch_size",
"type": "int"
},
"window_size": {
"defaultValue": 5,
"description": "Size of the sliding windows",
"name": "window_size",
"type": "int"
},
"windows": {
"defaultValue": [
10,
30,
60
],
"description": "Number and size of different signature matrices (correlation matrices) to compute as a preprocessing step",
"name": "windows",
"type": "List[int]"
}
}
def mscred(params: ParameterConfig = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
return Algorithm(
name="MSCRED",
main=DockerAdapter(
image_name="registry.gitlab.hpi.de/akita/i/mscred",
skip_pull=skip_pull,
timeout=timeout,
group_privileges="akita",
),
preprocess=None,
postprocess=post_mscred,
param_schema=_mscred_parameters,
param_config=params or ParameterConfig.defaults(),
data_as_file=True,
training_type=TrainingType.SEMI_SUPERVISED,
input_dimensionality=InputDimensionality("multivariate")
)
| 30.530435 | 125 | 0.672173 | 419 | 3,511 | 5.479714 | 0.362769 | 0.034843 | 0.033101 | 0.023519 | 0.111063 | 0.068815 | 0.068815 | 0.068815 | 0.068815 | 0.068815 | 0 | 0.013923 | 0.181715 | 3,511 | 114 | 126 | 30.798246 | 0.785242 | 0.011108 | 0 | 0.152381 | 0 | 0.019048 | 0.405592 | 0.023926 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019048 | false | 0 | 0.07619 | 0.009524 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97bf1504b82eb929f132872535cb5630bd14f3ad | 7,470 | py | Python | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_selectors.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_selectors.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_selectors.py | vishalbelsare/FEDOT | 3a6f06b29cf2f173008d119f7cb5dc705a45f695 | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional
import numpy as np
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from fedot.core.data.data import OutputData
from fedot.core.operations.evaluation.operation_implementations.implementation_interfaces import \
DataOperationImplementation
class FeatureSelectionImplementation(DataOperationImplementation):
""" Class for applying feature selection operations on tabular data """
def __init__(self, **params: Optional[dict]):
super().__init__()
self.inner_model = None
self.operation = None
self.is_not_fitted = None
# Number of columns in features table
self.features_columns_number = None
# Bool mask where True - remain column and False - drop it
self.remain_features_mask = None
def fit(self, input_data):
""" Method for fit feature selection
:param input_data: data with features, target and ids to process
:return operation: trained operation (optional output)
"""
features = input_data.features
target = input_data.target
# Define number of columns in the features table
if len(features.shape) == 1:
self.features_columns_number = 1
else:
self.features_columns_number = features.shape[1]
if self.features_columns_number > 1:
if self._is_input_data_one_dimensional(features):
self.is_not_fitted = True
return self.operation
try:
self.operation.fit(features, target)
except ValueError:
# For time series forecasting not available multi-targets
self.operation.fit(features, target[:, 0])
else:
self.is_not_fitted = True
return self.operation
def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):
""" Method for making prediction
:param input_data: data with features, target and ids to process
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
:return output_data: filtered input data by columns
"""
if self.is_not_fitted:
return self._convert_to_output(input_data, input_data.features)
features = input_data.features
source_features_shape = features.shape
transformed_features = self._make_new_table(features)
# Update features
output_data = self._convert_to_output(input_data,
transformed_features)
self._update_column_types(source_features_shape, output_data)
return output_data
def get_params(self):
return self.operation.get_params()
def _update_column_types(self, source_features_shape, output_data: OutputData):
""" Update column types after applying feature selection operations """
if len(source_features_shape) < 2:
return output_data
else:
if self.features_columns_number > 1:
cols_number_removed = source_features_shape[1] - output_data.predict.shape[1]
if cols_number_removed > 0:
# There are several columns, which were dropped
col_types = output_data.supplementary_data.column_types['features']
                    # Keep only the column types of the remaining features
                    remained_column_types = np.array(col_types)[self.remain_features_mask]
output_data.supplementary_data.column_types['features'] = list(remained_column_types)
def _make_new_table(self, features):
"""
The method creates a table based on transformed data and source boolean
features
:param features: tabular data for processing
:return transformed_features: transformed features table
"""
# Bool vector - mask for columns
self.remain_features_mask = self.operation.support_
transformed_features = features[:, self.remain_features_mask]
return transformed_features
@staticmethod
def _is_input_data_one_dimensional(features_to_process: np.array):
""" Check if features table contain only one column """
return features_to_process.shape[1] == 1
class LinearRegFSImplementation(FeatureSelectionImplementation):
"""
Class for feature selection based on Recursive Feature Elimination (RFE) and
LinearRegression as core model
Task type - regression
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
self.inner_model = LinearRegression(normalize=True)
if not params:
# Default parameters
self.operation = RFE(estimator=self.inner_model)
else:
# Checking the appropriate params are using or not
rfe_params = {k: params[k] for k in
['n_features_to_select', 'step']}
self.operation = RFE(estimator=self.inner_model, **rfe_params)
self.params = params
class NonLinearRegFSImplementation(FeatureSelectionImplementation):
"""
Class for feature selection based on Recursive Feature Elimination (RFE) and
DecisionTreeRegressor as core model
Task type - regression
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
self.inner_model = DecisionTreeRegressor()
if not params:
# Default parameters
self.operation = RFE(estimator=self.inner_model)
else:
# Checking the appropriate params are using or not
rfe_params = {k: params[k] for k in
['n_features_to_select', 'step']}
self.operation = RFE(estimator=self.inner_model, **rfe_params)
self.params = params
class LinearClassFSImplementation(FeatureSelectionImplementation):
"""
Class for feature selection based on Recursive Feature Elimination (RFE) and
LogisticRegression as core model
Task type - classification
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
self.inner_model = LogisticRegression()
if not params:
# Default parameters
self.operation = RFE(estimator=self.inner_model)
else:
# Checking the appropriate params are using or not
rfe_params = {k: params[k] for k in
['n_features_to_select', 'step']}
self.operation = RFE(estimator=self.inner_model, **rfe_params)
self.params = params
class NonLinearClassFSImplementation(FeatureSelectionImplementation):
"""
Class for feature selection based on Recursive Feature Elimination (RFE) and
DecisionTreeClassifier as core model
Task type - classification
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
self.inner_model = DecisionTreeClassifier()
if not params:
# Default parameters
self.operation = RFE(estimator=self.inner_model)
else:
# Checking the appropriate params are using or not
rfe_params = {k: params[k] for k in
['n_features_to_select', 'step']}
self.operation = RFE(estimator=self.inner_model, **rfe_params)
self.params = params
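# Minimal usage sketch (illustrative; `input_data` stands for a FEDOT
# InputData-like object exposing .features and .target arrays):
#
#   selector = LinearRegFSImplementation(n_features_to_select=3, step=1)
#   selector.fit(input_data)
#   output = selector.transform(input_data, is_fit_pipeline_stage=False)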
| 37.35 | 105 | 0.654485 | 817 | 7,470 | 5.749082 | 0.194614 | 0.041516 | 0.038748 | 0.04258 | 0.47839 | 0.447733 | 0.409836 | 0.390249 | 0.374069 | 0.374069 | 0 | 0.002215 | 0.274833 | 7,470 | 199 | 106 | 37.537688 | 0.86487 | 0.247925 | 0 | 0.476636 | 0 | 0 | 0.020837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102804 | false | 0 | 0.065421 | 0.009346 | 0.28972 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c09d6d0c463e1a6cbc1d5065aab627ff77af00 | 18,595 | py | Python | budou/budou.py | aodag/budou | 97be13eb87745d5ac78e9c42eda97ac923226259 | [
"Apache-2.0"
] | null | null | null | budou/budou.py | aodag/budou | 97be13eb87745d5ac78e9c42eda97ac923226259 | [
"Apache-2.0"
] | null | null | null | budou/budou.py | aodag/budou | 97be13eb87745d5ac78e9c42eda97ac923226259 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Budou, an automatic CJK line break organizer."""
from __future__ import print_function
from .cachefactory import load_cache
import collections
from xml.etree import ElementTree as ET
import html5lib
import re
import six
import unicodedata
cache = load_cache()
class Chunk(object):
"""Chunk object. This represents a unit for word segmentation.
Attributes:
word: Surface word of the chunk. (str)
pos: Part of speech. (str)
label: Label information. (str)
dependency: Dependency to neighbor words. None for no dependency, True for
dependency to the following word, and False for the dependency to the
previous word. (bool or None)
"""
SPACE_POS = 'SPACE'
BREAK_POS = 'BREAK'
DEPENDENT_LABEL = (
'P', 'SNUM', 'PRT', 'AUX', 'SUFF', 'AUXPASS', 'RDROP', 'NUMBER', 'NUM',
'PREF')
def __init__(self, word, pos=None, label=None, dependency=None):
self.word = word
self.pos = pos
self.label = label
self.dependency = dependency
self._add_dependency_if_punct()
def __repr__(self):
return 'Chunk(%s, %s, %s, %s)' % (
repr(self.word), self.pos, self.label, self.dependency)
@classmethod
def space(cls):
"""Creates space Chunk."""
chunk = cls(u' ', cls.SPACE_POS)
return chunk
@classmethod
def breakline(cls):
"""Creates breakline Chunk."""
chunk = cls(u'\n', cls.BREAK_POS)
return chunk
def is_space(self):
"""Checks if this is space Chunk."""
return self.pos == self.SPACE_POS
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters
Using range from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
"""
for char in self.word:
if any([start <= ord(char) <= end for start, end in
[(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500),
(131072, 196607)]
]):
return True
return False
def update_word(self, word):
"""Updates the word of the chunk."""
self.word = word
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
}
def maybe_add_dependency(self, default_dependency_direction):
"""Adds dependency if any dependency is not assigned yet."""
if self.dependency is None and self.label in self.DEPENDENT_LABEL:
self.dependency = default_dependency_direction
def _add_dependency_if_punct(self):
"""Adds dependency if the chunk is punctuation."""
if self.pos == 'PUNCT':
try:
# Getting unicode category to determine the direction.
# Concatenates to the following if it belongs to Ps or Pi category.
# Ps: Punctuation, open (e.g. opening bracket characters)
# Pi: Punctuation, initial quote (e.g. opening quotation mark)
# Otherwise, concatenates to the previous word.
# See also https://en.wikipedia.org/wiki/Unicode_character_property
category = unicodedata.category(self.word)
self.dependency = category in ('Ps', 'Pi')
except:
pass
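  # Example (illustrative): unicodedata.category(u'(') == 'Ps' and
  # unicodedata.category(u'「') == 'Ps', so opening punctuation depends on
  # the following word, while u'。' is category 'Po' and attaches backwards.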
class ChunkList(list):
"""Chunk list. """
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset: Begin offset of the range. (int)
length: Length of the range. (int)
Returns:
Overlapped chunks. (list of Chunk)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = []
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
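  # Example (illustrative): for chunks spelling u'今日は晴れ' split as
  # [u'今日', u'は', u'晴れ'], get_overlaps(2, 2) covers offsets 2-3 and
  # returns the chunks for u'は' and u'晴れ'.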
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks: List of consecutive Chunks to be removed. (list of Chunk)
new_chunk: A Chunk to be inserted. (Chunk)
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
class Budou(object):
"""A parser for CJK line break organizer.
Attributes:
service: A Resource object with methods for interacting with the service.
(googleapiclient.discovery.Resource)
"""
DEFAULT_CLASS_NAME = 'ww'
def __init__(self, service):
self.service = service
@classmethod
def authenticate(cls, json_path=None):
"""Authenticates for Cloud Natural Language API and returns a parser.
If a service account private key file is not given, it tries to authenticate
with default credentials.
Args:
json_path: A file path to a service account's JSON private keyfile.
(str, optional)
Returns:
Budou parser. (Budou)
"""
import google_auth_httplib2
from googleapiclient import discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if json_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
json_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
print('''Failed to load google.oauth2.service_account module.
If you are running this script in Google App Engine environment,
please call `authenticate` method with empty argument to
authenticate with default credentials.''')
else:
import google.auth
scoped_credentials, project = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = discovery.build('language', 'v1beta2', http=authed_http)
return cls(service)
def parse(self, source, attributes=None, use_cache=True, language=None,
max_length=None, use_entity=False, classname=None):
"""Parses input HTML code into word chunks and organized code.
Args:
source: Text to be processed. (str)
attributes: A key-value mapping for attributes of output elements.
(dictionary, optional)
**This argument used to accept a string or a list of strings to
specify class names of the output chunks, but this designation method
is now deprecated. Please use a dictionary to designate attributes.**
use_cache: Whether to use caching. (bool, optional)
language: A language used to parse text. (str, optional)
max_length: Maximum length of span enclosed chunk. (int, optional)
use_entity: Whether to use entities Entity Analysis results. Note that it
makes additional request to API, which may incur additional cost.
(bool, optional)
classname: A class name of output elements. (str, optional)
**This argument is deprecated. Please use attributes argument
instead.**
Returns:
A dictionary with the list of word chunks and organized HTML code.
For example:
{
'chunks': [
{'dependency': None, 'label': 'NSUBJ', 'pos': 'NOUN', 'word': '今日も'},
{'dependency': None, 'label': 'ROOT', 'pos': 'VERB', 'word': '食べる'}
],
'html_code': '<span class="ww">今日も</span><span class="ww">食べる</span>'
}
"""
if use_cache:
result_value = cache.get(source, language)
if result_value: return result_value
input_text = self._preprocess(source)
if language == 'ko':
# Korean has spaces between words, so this simply parses words by space
# and wrap them as chunks.
chunks = self._get_chunks_per_space(input_text)
else:
chunks, tokens, language = self._get_chunks_with_api(
input_text, language, use_entity)
attributes = self._get_attribute_dict(attributes, classname)
html_code = self._html_serialize(chunks, attributes, max_length)
result_value = {
'chunks': [chunk.serialize() for chunk in chunks],
'html_code': html_code,
'language': language,
'tokens': tokens,
}
if use_cache:
cache.set(source, language, result_value)
return result_value
def _get_chunks_per_space(self, input_text):
"""Returns a chunk list by separating words by spaces.
Args:
input_text: String to parse. (str)
Returns:
A chunk list. (ChunkList)
"""
chunks = ChunkList()
words = input_text.split()
for i, word in enumerate(words):
chunks.append(Chunk(word))
if i < len(words) - 1: # Add no space after the last word.
chunks.append(Chunk.space())
return chunks
def _get_chunks_with_api(self, input_text, language=None, use_entity=False):
"""Returns a chunk list by using Google Cloud Natural Language API.
Args:
input_text: String to parse. (str)
language: A language code. 'ja' and 'ko' are supported. (str, optional)
use_entity: Whether to use entities in Natural Language API response.
(bool, optional)
Returns:
A chunk list. (ChunkList)
"""
chunks, tokens, language = self._get_source_chunks(input_text, language)
if use_entity:
entities = self._get_entities(input_text, language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks = self._resolve_dependency(chunks)
chunks = self._insert_breakline(chunks)
return chunks, tokens, language
def _get_attribute_dict(self, attributes, classname=None):
"""Returns a dictionary of HTML element attributes.
Args:
attributes: If a dictionary, it should be a map of name-value pairs for
attributes of output elements. If a string, it should be a class name of
output elements. (dict or str)
classname: Optional class name. (str, optional)
Returns:
An attribute dictionary. (dict of (str, str))
"""
if attributes and isinstance(attributes, six.string_types):
return {
'class': attributes
}
if not attributes:
attributes = {}
if not classname:
classname = self.DEFAULT_CLASS_NAME
attributes.setdefault('class', classname)
return attributes
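  # Examples (illustrative):
  #   _get_attribute_dict('wordwrap')      -> {'class': 'wordwrap'}
  #   _get_attribute_dict(None)            -> {'class': 'ww'}
  #   _get_attribute_dict({'data-x': '1'}) -> {'class': 'ww', 'data-x': '1'}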
def _preprocess(self, source):
"""Removes unnecessary break lines and white spaces.
Args:
source: HTML code to be processed. (str)
Returns:
Preprocessed HTML code. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text: Text to annotate. (str)
language: Language of the text. 'ja' and 'ko' are supported.
(str, optional)
Returns:
A chunk list. (ChunkList)
"""
chunks = ChunkList()
sentence_length = 0
tokens, language = self._get_annotations(input_text, language)
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > sentence_length:
chunks.append(Chunk.space())
sentence_length = begin_offset
chunk = Chunk(word, pos, label)
# Determine the default concatenation direction based on the syntax dependency.
chunk.maybe_add_dependency(
i < token['dependencyEdge']['headTokenIndex'])
chunks.append(chunk)
sentence_length += len(word)
return chunks, tokens, language
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks: The list of chunks to be processed. (ChunkList)
entities: List of entities. (list of dict)
Returns:
A chunk list. (ChunkList)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat: continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
def _html_serialize(self, chunks, attributes, max_length):
"""Returns concatenated HTML code with SPAN tag.
Args:
chunks: The list of chunks to be processed. (ChunkList)
attributes: If a dictionary, it should be a map of name-value pairs for
attributes of output SPAN tags. If a string, it should be a class name
of output SPAN tags. If an array, it should be a list of class names
of output SPAN tags. (str or dict or list of str)
max_length: Maximum length of span enclosed chunk. (int, optional)
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in chunks:
if chunk.is_space():
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = ' '
else:
doc.getchildren()[-1].tail += ' '
else:
if doc.text is not None:
# We want to preserve space in cases like "Hello 你好"
# But the space in " 你好" can be discarded.
doc.text += ' '
else:
if chunk.has_cjk() and not (max_length and len(chunk.word) > max_length):
ele = ET.Element('span')
ele.text = chunk.word
for k, v in attributes.items():
ele.attrib[k] = v
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values="always")
return result
def _resolve_dependency(self, chunks):
"""Resolves chunk dependency by concatenating them.
Args:
chunks: A chunk list. (ChunkList)
Returns:
A chunk list. (ChunkList)
"""
chunks = self._concatenate_inner(chunks, True)
chunks = self._concatenate_inner(chunks, False)
return chunks
def _concatenate_inner(self, chunks, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
chunks: The chunk list to be processed. (ChunkList)
direction: Direction of the concatenation process. True for forward. (bool)
Returns:
A chunk list. (ChunkList)
"""
tmp_bucket = []
source_chunks = chunks if direction else chunks[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(not direction and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction: tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
chunk.update_word(new_word)
target_chunks.append(chunk)
tmp_bucket = []
if tmp_bucket: target_chunks += tmp_bucket
return target_chunks if direction else target_chunks[::-1]
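# --- Added illustration (hedged): with direction=True, chunks whose
# dependency points forward are buffered and merged into the next
# independent chunk; the direction=False pass then repeats the process
# backwards (spaces always attach to the preceding chunk). Word values
# below are hypothetical:
#   [今日(fwd), も(fwd), 食べる] -> [今日も食べる]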
def _insert_breakline(self, chunks):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
Args:
chunks: a chunk list. (ChunkList)
Returns:
A chunk list. (ChunkList)
"""
target_chunks = ChunkList()
for chunk in chunks:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk_to_add = Chunk(
chunk.word[:-1], chunk.pos, chunk.label, chunk.dependency)
target_chunks.append(chunk_to_add)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
return target_chunks
def _get_annotations(self, text, language='', encoding='UTF32'):
"""Returns the list of annotations from the given text."""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': encoding,
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return tokens, language
def _get_entities(self, text, language='', encoding='UTF32'):
"""Returns the list of annotations from the given text."""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': encoding,
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions: continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
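# --- Hedged usage sketch (added; the parser handle and its construction are
# assumptions, since the authentication code sits outside this excerpt):
# result = parser.parse(u'今日も食べる', attributes={'class': 'ww'},
#                       language='ja', use_cache=False)
# print(result['html_code'])
# # -> '<span class="ww">今日も</span><span class="ww">食べる</span>'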
| 33.625678 | 81 | 0.645012 | 2,348 | 18,595 | 5.002129 | 0.202726 | 0.013027 | 0.009366 | 0.014474 | 0.192337 | 0.134951 | 0.115028 | 0.079183 | 0.079183 | 0.079183 | 0 | 0.009344 | 0.251788 | 18,595 | 552 | 82 | 33.686594 | 0.834831 | 0.370046 | 0 | 0.227119 | 0 | 0 | 0.076359 | 0.002624 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091525 | false | 0.00678 | 0.044068 | 0.00339 | 0.237288 | 0.00678 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c0f0316a610972e2430f4b0813c029c750b789 | 1,456 | py | Python | lpd/callbacks/collect_outputs.py | RoySadaka/lpd | 921454d9730d8228f4b0ca5349b0558ebd123c65 | [
"MIT"
] | 4 | 2020-10-02T10:04:19.000Z | 2022-01-19T12:45:02.000Z | lpd/callbacks/collect_outputs.py | RoySadaka/lpd | 921454d9730d8228f4b0ca5349b0558ebd123c65 | [
"MIT"
] | 1 | 2020-10-06T17:43:57.000Z | 2020-10-06T17:47:43.000Z | lpd/callbacks/collect_outputs.py | RoySadaka/lpd | 921454d9730d8228f4b0ca5349b0558ebd123c65 | [
"MIT"
] | 1 | 2020-10-03T17:21:32.000Z | 2020-10-03T17:21:32.000Z | from lpd.enums import Phase, State
from lpd.callbacks.callback_base import CallbackBase
from lpd.callbacks.callback_context import CallbackContext
from typing import Union, List
class CollectOutputs(CallbackBase):
"""
This callback collects outputs per state (it is currently used in the trainer.predict() method).
It collects the numpy outputs for the defined states into a dictionary (state -> outputs).
Methods:
get_outputs_for_state - for a given state, returns the collected outputs
Args:
apply_on_phase - see in CallbackBase
apply_on_states - see in CallbackBase
"""
def __init__(self,
apply_on_phase: Phase,
apply_on_states: Union[State, List[State]]):
super(CollectOutputs, self).__init__(apply_on_phase, apply_on_states)
self.state_to_outputs = {}
def get_outputs_for_state(self, state: State):
return [data.cpu().numpy() for data in self.state_to_outputs[state]]
def __call__(self, callback_context: CallbackContext):
c = callback_context  # short alias for readability down the road
state = c.trainer_state
if self.should_apply_on_state(c):
if state not in self.state_to_outputs:
self.state_to_outputs[state] = []
last_outputs = c.trainer._last_data[state].outputs.data
self.state_to_outputs[state].append(last_outputs)
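# --- Hedged usage sketch (added; kept as comments because the concrete
# Phase/State member names are not shown in this file):
# collector = CollectOutputs(apply_on_phase=<some Phase>,
#                            apply_on_states=<some State>)
# # after trainer.predict(...) has run with this callback attached:
# outputs = collector.get_outputs_for_state(<some State>)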
| 37.333333 | 110 | 0.677885 | 187 | 1,456 | 5 | 0.347594 | 0.052406 | 0.058824 | 0.096257 | 0.097326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249313 | 1,456 | 38 | 111 | 38.315789 | 0.855444 | 0.270604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.2 | 0.05 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c42e24a090f424ea4838812d93847accbf8363 | 15,022 | py | Python | snakebot.py | paulolima18/Snake_Python | f872f374c573963b4347333e4a2099a8956c9de4 | [
"MIT"
] | null | null | null | snakebot.py | paulolima18/Snake_Python | f872f374c573963b4347333e4a2099a8956c9de4 | [
"MIT"
] | null | null | null | snakebot.py | paulolima18/Snake_Python | f872f374c573963b4347333e4a2099a8956c9de4 | [
"MIT"
] | null | null | null | 'Game1'
'''
The possible values of x.type are the following
(all uppercase):
QUIT
ACTIVEEVENT
KEYDOWN
KEYUP
MOUSEMOTION
MOUSEBUTTONUP
MOUSEBUTTONDOWN
VIDEORESIZE
'''
#40pygame
import pygame_textinput
import pygame
import random
width=800
height=600
bsize=20
thick=20
fps=60
direction = 270
wall = 10
pygame.init()
gdisplay=pygame.display.set_mode((width,height))
pygame.display.update()
pygame.display.set_caption('SNAKE PL')
colors={'red':(128,0,0),'black':(0,0,0),'white':(255,255,255),'green':(0,155,0),'blue':(0,0,155),'yellow':(255,170,0),'darkblue':(51,102,204),'darkgreen':(51,102,0),
'violet':(102,0,102),'brown':(77,38,0),'pink':(255,204,255)}
# music = pygame.mixer.Sound('')
def music1():
pygame.mixer.music.stop()
pygame.mixer.music.load('assets/music/reloaded.ogg')
pygame.mixer.music.play(-1)
def music2():
pygame.mixer.music.stop()
pygame.mixer.music.load('assets/music/lionel.ogg')
pygame.mixer.music.play(-1)
def music3():
pygame.mixer.music.stop()
pygame.mixer.music.load('assets/music/lana.ogg')
pygame.mixer.music.play()
def music4():
pygame.mixer.music.stop()
pygame.mixer.music.load('assets/music/george.ogg')
pygame.mixer.music.play()
icon = pygame.image.load('assets/img/s32.png')
snakepng = pygame.image.load('assets/img/snakehead20.png')
applepng = pygame.image.load('assets/img/apple20.png')
pygame.display.set_icon(icon)
textinput = pygame_textinput.TextInput()
clock=pygame.time.Clock()
smallfont = pygame.font.SysFont('impact', 30)
medfont = pygame.font.SysFont('impact', 60)
largefont = pygame.font.SysFont('impact', 90)
introfont = pygame.font.SysFont('Impact', 150)
def snake(bsize,snakelist,x):
head = pygame.transform.rotate(snakepng,direction)
gdisplay.blit(head, (snakelist[-1][0], snakelist[-1][1]))
for xy in snakelist[:-1]:
pygame.draw.rect(gdisplay,colors['green'],[xy[0],xy[1],bsize,bsize])
def text_objetcts(text,color,size):
textsurface = size.render(text, True,color)
return textsurface, textsurface.get_rect()
def msm(msg,color,change=0,size=medfont):
text1,text2 = text_objetcts(msg,color,size)
text2.center = (width/2),((height/2)+change)
gdisplay.blit(text1,text2)
def score(score):
text = smallfont.render('Score: ' + str(score), True, colors['black'])
gdisplay.blit(text,(bsize,bsize))
def pause():
pause = True
pygame.mixer.music.pause()
gdisplay.fill(colors['darkblue'])
msm('PAUSE',colors['black'],-100,largefont)
msm('(C) to Continue (Q) to Quit',colors['black'],0,smallfont)
msm('New music press from (1 to 4)',colors['black'],50,smallfont)
pygame.display.update()
while pause:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
quit()
if event.key == pygame.K_c:
pygame.mixer.music.unpause()
pause = False
if event.key == pygame.K_1:
music1()
if event.key == pygame.K_2:
music2()
if event.key == pygame.K_3:
music3()
if event.key == pygame.K_4:
music4()
clock.tick(5)
def introg():
music1()
pygame.display.update()
intro = True
while intro:
gdisplay.fill(colors['darkgreen'])
msm("SNAKE PL",colors['violet'],-100,introfont)
msm('Simple Snake Game', colors['black'],0,smallfont)
msm('Press (S) to Start (P) to Pause (Q) to Quit',colors['blue'],height/2-100,smallfont)
pygame.display.update()
clock.tick(fps)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
intro = False
gameloop()
if event.key == pygame.QUIT:
pygame.quit()
quit()
if event.key == pygame.K_q:
pygame.quit()
quit()
def inputed():
inpute = True
while inpute:
gdisplay.fill(colors['darkblue'])
msm('|Place Player Name|',colors['yellow'],0,largefont)
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return textinput.get_text()
inpute = False
# Feed it with events every frame
textinput.update(events)
# Blit its surface onto the screen
gdisplay.blit(textinput.get_surface(), (10, 10))
pygame.display.update()
clock.tick(30)
def topscore(score,name):
eva = True
pre = ["?","meh"]
pos = ["crazy","veryStack"]
listz = []
while eva:
with open('db/topscore.txt','r') as fd:
lines = [x.split() for x in fd.readlines()]
check = False
for line in lines:
if (score > int(line[1]) ) and (not check):
#Save current value
pre[0] = line[0]
pre[1] = line[1]
#Insert New score
line[1] = score
line[0] = name
listz.append(line)
check = True
elif check:
#Position Value
pos[0] = line[0]
pos[1] = line[1]
#Player that got downgraded
line[1] = pre[1]
line[0] = pre[0]
#Player that will get downgraded
pre[0] = pos[0]
pre[1] = pos[1]
listz.append(line)
else:
listz.append(line)
x = [x for x in listz]
with open('db/topscore.txt','w') as fd:
for i in range(len(x)):
if i == 0:
fd.write('%s %s\n'%(x[i][0],x[i][1]))
elif 0<i<9:
fd.write('%s %s\n'%(x[i][0],x[i][1]))
elif i == 9:
fd.write('%s %s'%(x[i][0],x[i][1]))
eva = False
'''fd = open('top10.txt',a)
'Nome de Jogador | score '
fd.close()
'''
def final():
fin = True
while fin:
with open('db/topscore.txt','r') as fd:
lines = [line.split() for line in fd.readlines()]
gdisplay.fill(colors['darkblue'])
msm('TOP SCORES',colors['yellow'],-250,medfont)
msm(str('%s:%s'%(lines[0][0],lines[0][1])),colors['black'],-200,smallfont)
msm(str('%s:%s'%(lines[1][0],lines[1][1])),colors['black'],-150,smallfont)
msm(str('%s:%s'%(lines[2][0],lines[2][1])),colors['black'],-100,smallfont)
msm(str('%s:%s'%(lines[3][0],lines[3][1])),colors['black'],-50,smallfont)
msm(str('%s:%s'%(lines[4][0],lines[4][1])),colors['black'],0,smallfont)
msm(str('%s:%s'%(lines[5][0],lines[5][1])),colors['black'],50,smallfont)
msm(str('%s:%s'%(lines[6][0],lines[6][1])),colors['black'],100,smallfont)
msm(str('%s:%s'%(lines[7][0],lines[7][1])),colors['black'],150,smallfont)
msm(str('%s:%s'%(lines[8][0],lines[8][1])),colors['black'],200,smallfont)
msm(str('%s:%s'%(lines[9][0],lines[9][1])),colors['black'],250,smallfont)
text = smallfont.render('Q (Quit) B (Start Screen)', True, colors['black'])
gdisplay.blit(text,(width-300,height-50))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
quit()
fin = False
if event.key == pygame.K_b:
introg()
def walls():
left = pygame.draw.rect(gdisplay,colors['pink'],[0,0,wall,height])
right = pygame.draw.rect(gdisplay,colors['pink'],[width-wall,0,wall,height])
up = pygame.draw.rect(gdisplay,colors['pink'],[0,0,width,wall])
down = pygame.draw.rect(gdisplay,colors['pink'],[0,height-wall,width,wall])
def butt(snakelist,snakehead,direction):
for i in snakelist[:-1]:
if direction == 270:
if [snakehead[0]+10.0,snakehead[1]] == i:
return True
if direction == 90:
if [snakehead[0]-10.0,snakehead[1]] == i:
return True
if direction == 0:
if [snakehead[0],snakehead[1]-10.0] == i:
return True
if direction == 180:
if [snakehead[0],snakehead[1]+10.0] == i:
return True
return False
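# --- Added sanity check (hedged): butt() is a one-step self-collision
# lookahead used by the autopilot in gameloop(); direction codes are
# 270=right, 90=left, 0=up, 180=down. The coordinates are illustrative.
assert butt([[20.0, 0.0], [10.0, 0.0]], [10.0, 0.0], 270) is True
assert butt([[20.0, 0.0], [10.0, 0.0]], [10.0, 0.0], 90) is False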
def gameloop():
global direction
exitg = False
overg = False
x_lead=width/2 # start position
y_lead=height/2
xclead,yclead=bsize,0
snakehead = []
snakelist = []
snakelen = 1
xapple = int(random.randrange(bsize,width-thick,bsize))
yapple = int(random.randrange(bsize,height-thick,bsize))
while not exitg:
while overg == True:
gdisplay.fill(colors['darkblue'])
msm('GAME OVER',colors['red'],-100,largefont)
msm('Press C (Repeat) Q (Quit) B (Music Selection) T (Scores)',colors['yellow'],height/2-100,smallfont)
msm('Your score is %d'%(snakelen-1), colors['black'], 50)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
exitg = True
overg = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
gameloop()
if event.key == pygame.K_q:
exitg = True
overg = False
if event.key == pygame.K_b:
introg()
if event.key == pygame.K_t:
playername = inputed()
topscore(snakelen-1,playername)
final()
'''if event.key == pygame.K_t:
topscore(snakelen-1,playername)
'''
if (x_lead > xapple):
if direction == 270 and (not butt(snakelist,snakehead,270)):#(right)
if (y_lead < yapple) and (not butt(snakelist,snakehead,180)):
direction = 180
yclead = bsize
xclead = 0
#faker
if (y_lead < yapple) and (not butt(snakelist,snakehead,0)):
direction = 0
yclead = -bsize
xclead = 0
if (y_lead > yapple) and (not butt(snakelist,snakehead,0)):
direction = 0
yclead = -bsize
xclead = 0
#faker
if (y_lead > yapple)and (not butt(snakelist,snakehead,180)):
direction = 180
yclead = bsize
xclead = 0
elif (not butt(snakelist,snakehead,90)):
direction = 90
xclead = -bsize
yclead = 0
print(not butt(snakelist,snakehead,90))
if (x_lead < xapple):
if direction == 90 and (not butt(snakelist,snakehead,90)):#(left)
if (y_lead < yapple) and (not butt(snakelist,snakehead,180)):
direction = 180
yclead = bsize
xclead = 0
#faker
if (y_lead < yapple) and (not butt(snakelist,snakehead,0)):
direction = 0
yclead = -bsize
xclead = 0
if (y_lead > yapple)and (not butt(snakelist,snakehead,0)):
direction = 0
yclead = -bsize
xclead = 0
#faker
if (y_lead > yapple)and (not butt(snakelist,snakehead,180)):
direction = 180
yclead = bsize
xclead = 0
elif (not butt(snakelist,snakehead,270)):
direction = 270
xclead = bsize
yclead = 0
if (x_lead == xapple):
if (not butt(snakelist,snakehead,180)) and (y_lead < yapple):#(left)
direction = 180
yclead = bsize
xclead = 0
elif (not butt(snakelist,snakehead,0)) and (y_lead > yapple):
direction = 0
yclead = -bsize
xclead = 0
elif (not butt(snakelist,snakehead,90)):
direction = 90
yclead = 0
xclead = -bsize
elif (not butt(snakelist,snakehead,270)):
direction = 270
yclead = 0
xclead = bsize
if x_lead > width-wall-bsize or x_lead < wall or y_lead < wall or y_lead > height-wall-bsize:
overg = True
'''if event.type == pygame.KEYUP: # only moves while the button is pressed
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
xclead = 0
elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
yclead = 0
'''
dt = clock.tick(fps)
x_lead+=xclead /2
y_lead+=yclead /2
if x_lead+bsize > xapple and x_lead < xapple+thick:
if y_lead+bsize > yapple and y_lead < yapple+thick:
xapple = int(random.randrange(bsize,width-bsize,bsize))
yapple = int(random.randrange(bsize,height-bsize,bsize))
snakelen+=1
'''for i in colors:
gdisplay.fill(colors[i])
pygame.display.update()
'''
gdisplay.fill(colors['brown'])
gdisplay.blit(applepng,(xapple,yapple))
snakehead = []
snakehead.append(x_lead)
snakehead.append(y_lead)
snakelist.append(snakehead)
if len(snakelist) > snakelen:
del snakelist[0]
for i in snakelist[:-1]:
if snakehead == i:
overg = True
snake(bsize,snakelist,direction)
walls()
#gdisplay.fill(colors[''], rect=[x,y,w,h])
score(snakelen-1)
pygame.display.update()
#pygame.draw.rect(local,cor,[x,y,w,h])
clock.tick(fps)
pygame.quit()
quit()
introg()
gameloop()
| 34.853828 | 165 | 0.503462 | 1,756 | 15,022 | 4.274487 | 0.159453 | 0.026112 | 0.039169 | 0.039968 | 0.478684 | 0.384226 | 0.329869 | 0.306288 | 0.277511 | 0.264988 | 0 | 0.042361 | 0.357276 | 15,022 | 430 | 166 | 34.934884 | 0.73506 | 0.02037 | 0 | 0.419825 | 0 | 0 | 0.062491 | 0.010033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046647 | false | 0 | 0.008746 | 0 | 0.075802 | 0.002915 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c76c081c460323a55942e3974a52a93f0623d4 | 804 | py | Python | run_tests.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 11 | 2018-10-10T03:14:33.000Z | 2022-01-05T14:18:15.000Z | run_tests.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 196 | 2018-03-21T19:44:56.000Z | 2021-12-21T21:56:24.000Z | run_tests.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 5 | 2019-12-09T21:54:45.000Z | 2022-03-20T04:22:14.000Z | #!/usr/bin/env python
import os
import sys
import cdat_info
class VCSTestRunner(cdat_info.TestRunnerBase):
def _prep_nose_options(self):
opt = super(VCSTestRunner, self)._prep_nose_options()
if self.args.no_vtk_ui:
opt += ["-A", "not vtk_ui"]
if self.args.vtk is not None:
cdat_info.run_command(
"conda install -f -y -c {} vtk-cdat".format(self.args.vtk))
return opt
test_suite_name = 'vcs'
workdir = os.getcwd()
runner = VCSTestRunner(test_suite_name, options=["--no-vtk-ui", "--vtk"],
options_files=["tests/vcs_runtests.json"],
get_sample_data=True,
test_data_files_info="share/test_data_files.txt")
ret_code = runner.run(workdir)
sys.exit(ret_code)
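# --- Hedged note (added): the options registered above surface as
# self.args.no_vtk_ui and self.args.vtk inside _prep_nose_options();
# passing "--vtk <channel>" triggers the conda install of vtk-cdat from
# that channel before the tests run.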
| 28.714286 | 75 | 0.618159 | 108 | 804 | 4.342593 | 0.527778 | 0.051173 | 0.063966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258706 | 804 | 27 | 76 | 29.777778 | 0.786913 | 0.024876 | 0 | 0 | 0 | 0 | 0.144317 | 0.061303 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c77c550cf0c53433815e5c8467ef4ace730897 | 1,007 | py | Python | pyrl/cli/util.py | jponf/pyrl | 1353d59deee2731c509991a6cca90a7b991779bc | [
"Apache-2.0"
] | 2 | 2021-01-25T15:04:45.000Z | 2021-11-05T06:15:40.000Z | pyrl/cli/util.py | jponf/pyrl | 1353d59deee2731c509991a6cca90a7b991779bc | [
"Apache-2.0"
] | null | null | null | pyrl/cli/util.py | jponf/pyrl | 1353d59deee2731c509991a6cca90a7b991779bc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import random
import six
# SciPy Stack
import numpy as np
# Torch
import torch
###############################################################################
def initialize_seed(seed):
"""Initializes the seed of different PRNGs.
:param seed: Value to initialize the PRNGs.
"""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
def evaluate(agent, env, max_steps, render):
"""Evaluates the given agent on an environment.
:return: A numpy array with the reward of each step taken by the agent.
"""
rewards = []
infos = []
done = False
state = env.reset()
for _ in six.moves.range(max_steps):
action = agent.compute_action(state)
next_state, reward, done, info = env.step(action)
if render:
env.render()
state = next_state
rewards.append(reward)
infos.append(info)
if done:
break
return np.array(rewards), infos, done
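# --- Added demo (hedged): seeding is deterministic, so two identical seeds
# must reproduce the same draw. This only exercises initialize_seed() above.
if __name__ == '__main__':
    initialize_seed(123)
    first = np.random.rand(3)
    initialize_seed(123)
    assert np.allclose(first, np.random.rand(3))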
| 20.55102 | 79 | 0.569017 | 121 | 1,007 | 4.669421 | 0.504132 | 0.056637 | 0.049558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00134 | 0.259186 | 1,007 | 48 | 80 | 20.979167 | 0.756032 | 0.242304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97c97448c31c9699860a83ac252dd71c1be4c6a6 | 1,522 | py | Python | code/src/main/python/mos/blocks/contest_meta_block.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | null | null | null | code/src/main/python/mos/blocks/contest_meta_block.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | 1 | 2021-11-03T08:28:31.000Z | 2021-11-03T08:28:31.000Z | code/src/main/python/mos/blocks/contest_meta_block.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | 1 | 2022-03-22T14:24:13.000Z | 2022-03-22T14:24:13.000Z |
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from utils.lib import O
class ContestMeta(O):
def __init__(self, **kwargs):
O.__init__(self, **kwargs)
self.submission_id = None
self.contest_type = None
self.contest_id = None
self.problem_id = None
self.exec_time = None
self.code_size = None
def to_bson(self):
bson = {
"submissionId": self.submission_id
}
if self.contest_type is not None:
bson["contestType"] = self.contest_type
if self.contest_id is not None:
bson["contestId"] = self.contest_id
if self.problem_id is not None:
bson["problemId"] = self.problem_id
if self.code_size is not None:
bson["codeSize"] = self.code_size
if self.exec_time is not None:
bson["execTime"] = self.exec_time
return bson
@staticmethod
def from_bson(bson):
block = ContestMeta()
block.submission_id = bson["submissionId"]
if "contestType" in bson:
block.contest_type = bson["contestType"]
if "contestId" in bson:
block.contest_id = bson["contestId"]
else:
block.contest_id = 0
if "problemId" in bson:
block.problem_id = bson["problemId"]
else:
block.problem_id = 0
if "codeSize" in bson:
block.code_size = bson["codeSize"]
if "execTime" in bson:
block.exec_time = bson["execTime"]
return block
def make_key(self):
return "C:%d-P:%d" % (self.contest_id, self.problem_id)
| 24.548387 | 59 | 0.653745 | 212 | 1,522 | 4.485849 | 0.254717 | 0.080967 | 0.047319 | 0.068349 | 0.031546 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001711 | 0.231932 | 1,522 | 61 | 60 | 24.95082 | 0.811805 | 0 | 0 | 0.039216 | 0 | 0 | 0.114398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.058824 | 0.019608 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97cc531294afb11301fe771674b2ba6517514180 | 562 | py | Python | data/coco_korean/coco_load_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | null | null | null | data/coco_korean/coco_load_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 1 | 2020-02-10T08:11:23.000Z | 2020-02-10T08:11:23.000Z | data/coco_korean/coco_load_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 3 | 2020-02-09T11:14:33.000Z | 2020-04-11T16:10:17.000Z | from PIL import Image
import json
import numpy as np
from tqdm import tqdm
with open('../../../coco/MSCOCO_train_val_Korean.json', 'r', encoding='utf-8') as f:
info = json.load(f)
# print(info[0]['file_path'])
img_path = '../../../coco/'
img_size = 64
images = np.empty((len(info), img_size, img_size, 3), dtype=np.uint8)
for i in tqdm(range(len(info))):
img = Image.open(img_path + info[i]['file_path']).convert('RGB')
img = img.resize((img_size, img_size))
img_arr = np.array(img)
images[i] = img_arr
np.save('coco_images.npy', images)
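# --- Hedged note (added): np.load('coco_images.npy') restores the full
# (N, 64, 64, 3) uint8 array later without re-decoding the source images.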
| 26.761905 | 84 | 0.658363 | 96 | 562 | 3.697917 | 0.489583 | 0.098592 | 0.084507 | 0.078873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012526 | 0.147687 | 562 | 20 | 85 | 28.1 | 0.728601 | 0.048043 | 0 | 0 | 0 | 0 | 0.166979 | 0.078799 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97ccf40733199c207a29b866cea4353f6edc523b | 630 | py | Python | programming/udemy/SLLCycle.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
] | 1 | 2021-01-03T06:42:58.000Z | 2021-01-03T06:42:58.000Z | programming/udemy/SLLCycle.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
] | null | null | null | programming/udemy/SLLCycle.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
] | null | null | null | class Node:
def __init__(self, value):
self.value = value
self.nextnode = None
def cycle_check(node):
if not node:
return False
head = node
node = node.nextnode
while node:
if node == head:
return True
node = node.nextnode
return False
if __name__ == '__main__':
# CREATE CYCLE LIST
a = Node(1)
b = Node(2)
c = Node(3)
a.nextnode = b
b.nextnode = c
c.nextnode = a # Cycle Here!
# CREATE NON CYCLE LIST
x = Node(1)
y = Node(2)
z = Node(3)
x.nextnode = y
y.nextnode = z
print(cycle_check(a))
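    # Added check (hedged): the acyclic list should report False. Note that
    # cycle_check only detects cycles that loop back to the given head node.
    print(cycle_check(x))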
| 17.027027 | 33 | 0.539683 | 87 | 630 | 3.747126 | 0.37931 | 0.07362 | 0.09816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014888 | 0.360317 | 630 | 36 | 34 | 17.5 | 0.794045 | 0.080952 | 0 | 0.148148 | 0 | 0 | 0.013913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0 | 0 | 0.222222 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97cea0095c84b4a1f87650614e47111614016fd2 | 3,619 | py | Python | awesome/plugins/other_xsh/__init__.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | 3 | 2020-03-31T10:36:31.000Z | 2020-04-23T12:01:10.000Z | awesome/plugins/other_xsh/__init__.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | 1 | 2020-07-16T14:51:26.000Z | 2020-07-30T12:46:55.000Z | awesome/plugins/other_xsh/__init__.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | null | null | null | from nonebot import on_command, CommandSession
from nonebot import on_natural_language, NLPSession, IntentCommand
from jieba import posseg
@on_command("sushe", aliases=("宿舍", "寝室"), only_to_me=False)
async def sushe(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""一般都是6人间,上下铺,桌子一侧排,有空调,另外租(租比较贵,就这两年,跟舍友商量好要不要租)。
男生一般都是十里铺,也就是校外宿舍,当然还有三里屯之类的,就认准在十里铺就好)""")
@on_command("sushe_img", aliases=("宿舍照片", "寝室照片"), only_to_me=False)
async def suzheimg(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""[CQ:image,file=https://s1.ax1x.com/2020/07/27/aPVaff.jpg]""")
@on_natural_language(keywords={"宿舍", "寝室"}, only_to_me=False)
async def _(session: NLPSession):
stripped_msg = session.msg_text.strip()
words = posseg.lcut(stripped_msg)
for word in words:
if word.word == "照片":
return IntentCommand(60.0, 'sushe_img')
if word.word == "洗澡":
return IntentCommand(60.0, 'xizao')
return IntentCommand(61.0, 'sushe')
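# --- Hedged note (added): nonebot dispatches to the IntentCommand with the
# highest confidence, so returning 61.0 here lets 'sushe' win over the 60.0
# keyword fallbacks elsewhere when no more specific word matched.
# (Behavioral assumption about nonebot's NLP routing.)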
@on_command('xizao', aliases="洗澡", only_to_me=False)
async def xizao(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""男生宿舍有洗澡间,女生在校内澡堂,不过只有2楼可以洗热水澡,需要刷单独洗澡卡,可以好几个人同时洗有问题请联系:[CQ:at,qq=331456218]""")
@on_natural_language(keywords="洗澡", only_to_me=False)
async def _(session: NLPSession):
return IntentCommand(60.0, 'xizao')
@on_command("kaixue", only_to_me=False, aliases="开学")
async def kaixue(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""具体开学时候还未确定,一般9月份,咱学校有病例,估计9月份开不了学,会延期或不开学。
有问题请联系:[CQ:at,qq=331456218]""")
@on_natural_language(keywords={"开学"}, only_to_me=False)
async def _(session: NLPSession):
return IntentCommand(60.0, 'kaixue')
@on_command("zuidifen", aliases="最低分", only_to_me=False)
async def zuidifen(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""按最低分是不能考虑能不能上的,应该以你的专业招生的院校排名和人数拉一个单子,依次累加,根据你的排名来算,你可以问我如何算排名。
有问题请联系:[CQ:at,qq=331456218]""")
@on_natural_language(keywords={"最低分", "多少分"}, only_to_me=False)
async def _(session: NLPSession):
return IntentCommand(60.0, "zuidifen")
@on_command("whoisgood", aliases="哪个好", only_to_me=False)
async def whoisgood(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("""河南省近几年大量扩招专升本的学生,说明对专升本学生更加重视,也说明确实对省内本科教育越来越看中,同时也为河南学子争取本科权益。
当然,河南省内本科都差不多,如果考虑普通高考中的普通二本来对比专升本哪个学校好,其实省内院校都一样的,无需纠结,因为都是省内,又不是郑大那种特别好的学校,出门人家看的是学历
有问题请联系:[CQ:at,qq=331456218]""")
@on_natural_language(keywords={"哪个好"}, only_to_me=False)
async def _(session: NLPSession):
return IntentCommand(60.0, "whoisgood")
@on_command("yonon", aliases="能不能上", only_to_me=False)
async def yonon(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("[CQ:image,file=https://s1.ax1x.com/2020/07/27/aPnGgf.jpg]")
@on_natural_language(keywords={"能不能", "能"}, only_to_me=False)
async def _(session: NLPSession):
stripped_msg = session.msg_text.strip()
words = posseg.lcut(stripped_msg)
for word in words:
if word.word == "上":
return IntentCommand(60.0, "yonon")
@on_command("spm", aliases="算排名", only_to_me=False)
async def spm(session: CommandSession):
if session.event.group_id == 818278353:
await session.send("[CQ:image,file=https://s1.ax1x.com/2020/07/27/aPQAde.jpg]")
@on_natural_language(keywords={"算排名", "排名"}, only_to_me=False)
async def _(session: NLPSession):
return IntentCommand(60.0, "spm") | 35.480392 | 109 | 0.721194 | 482 | 3,619 | 5.253112 | 0.255187 | 0.035545 | 0.047393 | 0.077014 | 0.64376 | 0.597946 | 0.564771 | 0.544234 | 0.516983 | 0.46248 | 0 | 0.053583 | 0.128489 | 3,619 | 102 | 110 | 35.480392 | 0.749207 | 0 | 0 | 0.361111 | 0 | 0.041667 | 0.231492 | 0.137293 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
97cf4d5a480c01da483d0f38460e002acb1f26fe | 3,387 | py | Python | fabric_cf/broker/core/broker_kernel.py | fabric-testbed/ActorBase | 3c7dd040ee79fef0759e66996c93eeec57c790b2 | [
"MIT"
] | null | null | null | fabric_cf/broker/core/broker_kernel.py | fabric-testbed/ActorBase | 3c7dd040ee79fef0759e66996c93eeec57c790b2 | [
"MIT"
] | null | null | null | fabric_cf/broker/core/broker_kernel.py | fabric-testbed/ActorBase | 3c7dd040ee79fef0759e66996c93eeec57c790b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
from datetime import datetime, timezone
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.common.exceptions import BrokerException
from fabric_cf.actor.core.kernel.broker_query_model_publisher import BrokerQueryModelPublisher
from fabric_cf.actor.core.manage.management_utils import ManagementUtils
class BrokerKernel:
"""
Class responsible for the broker's periodic processing; also holds the Management Actor
"""
def __init__(self):
from fabric_cf.actor.core.container.globals import GlobalsSingleton
self.logger = GlobalsSingleton.get().get_logger()
self.broker = GlobalsSingleton.get().get_container().get_actor()
self.producer = GlobalsSingleton.get().get_simple_kafka_producer()
self.kafka_topic = GlobalsSingleton.get().get_config().get_global_config().get_bqm_config().get(
Constants.KAFKA_TOPIC, None)
self.publish_interval = GlobalsSingleton.get().get_config().get_global_config().get_bqm_config().get(
Constants.PUBLISH_INTERVAL, None)
self.last_query_time = None
def do_periodic(self):
"""
Periodically publish BQM to a Kafka Topic to be consumed by Portal
"""
if self.kafka_topic is not None and self.publish_interval is not None and self.producer is not None:
current_time = datetime.now(timezone.utc)
if self.last_query_time is None or (current_time - self.last_query_time).seconds > self.publish_interval:
bqm = BrokerQueryModelPublisher(broker=self.broker, logger=self.logger,
kafka_topic=self.kafka_topic, producer=self.producer)
bqm.execute()
self.last_query_time = datetime.now(timezone.utc)
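# --- Hedged usage sketch (added; left as comments because construction
# requires a configured fabric_cf container):
# kernel = BrokerKernelSingleton.get()
# kernel.do_periodic()  # publishes a BQM at most once per publish_interval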
class BrokerKernelSingleton:
__instance = None
def __init__(self):
if self.__instance is not None:
raise BrokerException(msg="Singleton can't be created twice!")
def get(self):
"""
Actually create an instance
"""
if self.__instance is None:
self.__instance = BrokerKernel()
return self.__instance
get = classmethod(get) | 43.423077 | 117 | 0.71804 | 441 | 3,387 | 5.371882 | 0.399093 | 0.037146 | 0.025327 | 0.03588 | 0.14141 | 0.079358 | 0.056564 | 0.056564 | 0.056564 | 0.056564 | 0 | 0.002609 | 0.207854 | 3,387 | 78 | 118 | 43.423077 | 0.880358 | 0.384411 | 0 | 0.058824 | 0 | 0 | 0.017 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.441176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ad8e73f0765a04eca466c875d8845aef87a9bad | 372 | py | Python | tests/lib/bes/hardware/test_Ftdi.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | tests/lib/bes/hardware/test_Ftdi.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | tests/lib/bes/hardware/test_Ftdi.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import unittest
from bes.hardware.Ftdi import Ftdi
class TestFtdi(unittest.TestCase):
def test_find_devices(self):
devices = Ftdi.find_devices()
for device in devices:
print('DEVICE: ', device)
if __name__ == "__main__":
unittest.main()
| 23.25 | 90 | 0.693548 | 53 | 372 | 4.660377 | 0.716981 | 0.089069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009585 | 0.158602 | 372 | 15 | 91 | 24.8 | 0.779553 | 0.293011 | 0 | 0 | 0 | 0 | 0.061303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8adba14d1116a00200adf306c8aff70161525c2c | 6,504 | py | Python | pyhdx/fitting_torch.py | sebaztiano/PyHDX | 12fc2b5f67200885706226823bd8e1f46e3b5db1 | [
"MIT"
] | null | null | null | pyhdx/fitting_torch.py | sebaztiano/PyHDX | 12fc2b5f67200885706226823bd8e1f46e3b5db1 | [
"MIT"
] | null | null | null | pyhdx/fitting_torch.py | sebaztiano/PyHDX | 12fc2b5f67200885706226823bd8e1f46e3b5db1 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD
import torch as t
from scipy import constants
import numpy as np
import pandas as pd
from pyhdx.models import Protein
class DeltaGFit(nn.Module):
def __init__(self, deltaG):
super(DeltaGFit, self).__init__()
self.deltaG = deltaG
def forward(self, temperature, X, k_int, timepoints):
"""
# inputs, list of:
temperatures: scalar (1,)
X (N_peptides, N_residues)
k_int: (N_peptides, 1)
"""
pfact = t.exp(self.deltaG / (constants.R * temperature))
uptake = 1 - t.exp(-t.matmul((k_int / (1 + pfact)), timepoints))
return t.matmul(X, uptake)
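# --- Added shape check (hedged): exercises DeltaGFit.forward with random
# float64 tensors; the sizes below are illustrative only.
if __name__ == '__main__':
    Np, Nr, Nt = 4, 10, 5
    model = DeltaGFit(t.zeros(Nr, 1, dtype=t.float64))
    out = model(t.tensor([300.0], dtype=t.float64),
                t.rand(Np, Nr, dtype=t.float64),
                t.rand(Nr, 1, dtype=t.float64),
                t.rand(1, Nt, dtype=t.float64))
    assert out.shape == (Np, Nt)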
def estimate_errors(series, deltaG): #todo refactor to data_obj
# boolean array to select residues which are exchanging (ie no nterminal resiudes, no prolines, no regions without coverage)
bools = series.coverage['exchanges'].to_numpy()
r_number = series.coverage.r_number[bools] # Residue number which exchange
deltaG = t.tensor(deltaG[bools], dtype=t.float64)
tensors = series.get_tensors(exchanges=True)
def calc_loss(deltaG_input):
criterion = t.nn.MSELoss(reduction='sum')
pfact = t.exp(deltaG_input.unsqueeze(-1) / (constants.R * tensors['temperature']))
uptake = 1 - t.exp(-t.matmul((tensors['k_int'] / (1 + pfact)), tensors['timepoints']))
output = t.matmul(tensors['X'], uptake)
loss = criterion(output, tensors['uptake'])
return loss
hessian = t.autograd.functional.hessian(calc_loss, deltaG)
hessian_inverse = t.inverse(-hessian)
covariance = np.sqrt(np.abs(np.diagonal(hessian_inverse)))
#todo return pd series?
return Protein({'covariance': covariance, 'r_number': r_number}, index='r_number')
class TorchFitResult(object):
def __init__(self, fit_object, model, **metadata):
self.fit_object = fit_object
self.model = model
self.metadata = metadata
@property
def mse_loss(self):
"""obj:`float`: Losses from mean squared error part of Lagrangian"""
mse_loss = self.metadata['mse_loss'][-1]
return mse_loss
@property
def total_loss(self):
"""obj:`float`: Total loss value of the Lagrangian"""
total_loss = self.metadata['total_loss'][-1]
return total_loss
@property
def reg_loss(self):
"""obj:`float`: Losses from regularization part of Lagrangian"""
return self.total_loss - self.mse_loss
@property
def regularization_percentage(self):
"""obj:`float`: Percentage part of the total loss that is regularization loss"""
return (self.reg_loss / self.total_loss) * 100
@property
def deltaG(self):
return self.model.deltaG.detach().numpy().squeeze()
class TorchSingleFitResult(TorchFitResult):
#todo perhaps pass KineticsFitting object (which includes temperature) (yes do then it can also have methods which return inputs)
def __init__(self, *args, **kwargs):
super(TorchSingleFitResult, self).__init__(*args, **kwargs)
#todo refactor series
@property
def series(self):
return self.fit_object
@property
def temperature(self):
return self.series.temperature
@property
def output(self):
out_dict = {}
out_dict['r_number'] = self.series.coverage.r_number
out_dict['sequence'] = self.series.coverage['sequence'].to_numpy()
out_dict['_deltaG'] = self.deltaG
out_dict['deltaG'] = out_dict['_deltaG'].copy()
out_dict['deltaG'][~self.series.coverage['exchanges']] = np.nan
if self.temperature is not None:
pfact = np.exp(out_dict['deltaG'] / (constants.R * self.temperature))
out_dict['pfact'] = pfact
#todo add possibility to add append series to protein?
#todo update order of columns
protein = Protein(out_dict, index='r_number')
protein_cov = estimate_errors(self.fit_object, self.deltaG)
protein = protein.join(protein_cov)
return protein
def __call__(self, timepoints):
"""output: Np x Nt array"""
#todo fix and tests
with t.no_grad():
#tensors = self.series.get_tensors()
temperature = t.Tensor([self.temperature])
X = t.Tensor(self.series.coverage.X) # Np x Nr
k_int = t.Tensor(self.series.coverage['k_int'].to_numpy()).unsqueeze(-1) # Nr x 1
timepoints = t.Tensor(timepoints).unsqueeze(0) # 1 x Nt
inputs = [temperature, X, k_int, timepoints]
output = self.model(*inputs)
return output.detach().numpy()
class TorchBatchFitResult(TorchFitResult):
def __init__(self, *args, **kwargs):
super(TorchBatchFitResult, self).__init__(*args, **kwargs)
@property
def output(self):
#todo directly create dataframe
quantities = ['_deltaG', 'deltaG', 'covariance', 'pfact']
names = [data_obj.name or data_obj.state for data_obj in self.fit_object.data_objs]
iterables = [names, quantities]
col_index = pd.MultiIndex.from_product(iterables, names=['State', 'Quantity'])
output_data = np.zeros((self.fit_object.Nr, self.fit_object.Ns * len(quantities)))
g_values = self.deltaG
g_values_nan = g_values.copy()
g_values_nan[~self.fit_object.exchanges] = np.nan
pfact = np.exp(g_values / (constants.R * self.fit_object.temperature[:, np.newaxis]))
output_data[:, 0::len(quantities)] = g_values.T
output_data[:, 1::len(quantities)] = g_values_nan.T
for i, data_obj in enumerate(self.fit_object.data_objs):
#todo this could use some pandas
i0 = data_obj.coverage.interval[0] - self.fit_object.interval[0]
i1 = data_obj.coverage.interval[1] - self.fit_object.interval[0]
cov = estimate_errors(data_obj, g_values[i, i0:i1]) # returns a protein? should be series
pd_series = cov['covariance']
pd_series = pd_series.reindex(self.fit_object.r_number)
output_data[:, 2+i*len(quantities)] = pd_series.to_numpy()
output_data[:, 3::len(quantities)] = pfact.T
df = pd.DataFrame(output_data, index=self.fit_object.r_number, columns=col_index)
return Protein(df)
# use multi index df: https://stackoverflow.com/questions/24290495/constructing-3d-pandas-dataframe | 36.745763 | 133 | 0.64837 | 835 | 6,504 | 4.875449 | 0.252695 | 0.033161 | 0.044706 | 0.011791 | 0.0958 | 0.039794 | 0.014247 | 0 | 0 | 0 | 0 | 0.0078 | 0.231242 | 6,504 | 177 | 134 | 36.745763 | 0.8064 | 0.164975 | 0 | 0.115044 | 0 | 0 | 0.041745 | 0 | 0 | 0 | 0 | 0.00565 | 0 | 1 | 0.150442 | false | 0 | 0.070796 | 0.026549 | 0.371681 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ae1bf6090dd889c0f197f2b8a758940dc94c4c9 | 992 | py | Python | migrations/versions/035e7209663c_tags_and_base_with_unique.py | microservice-experiment-flask-0hsn/pocket-ws-flask | e7582a6ebe4b554070f183e43042c87762633085 | [
"MIT"
] | null | null | null | migrations/versions/035e7209663c_tags_and_base_with_unique.py | microservice-experiment-flask-0hsn/pocket-ws-flask | e7582a6ebe4b554070f183e43042c87762633085 | [
"MIT"
] | null | null | null | migrations/versions/035e7209663c_tags_and_base_with_unique.py | microservice-experiment-flask-0hsn/pocket-ws-flask | e7582a6ebe4b554070f183e43042c87762633085 | [
"MIT"
] | null | null | null | """tags and base with unique
Revision ID: 035e7209663c
Revises:
Create Date: 2022-04-16 11:22:34.040818
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '035e7209663c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tags',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('user_rel', sa.String(length=64), nullable=True),
sa.Column('title', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('title', 'user_rel', name='uniq_title_user_rel')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tags')
# ### end Alembic commands ###
| 26.810811 | 72 | 0.680444 | 129 | 992 | 5.139535 | 0.503876 | 0.060332 | 0.063348 | 0.090498 | 0.229261 | 0.229261 | 0.229261 | 0.13273 | 0 | 0 | 0 | 0.054414 | 0.166331 | 992 | 36 | 73 | 27.555556 | 0.747279 | 0.297379 | 0 | 0 | 0 | 0 | 0.134848 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ae264be6fcb91ac2eb22ef29be6e415fafa0087 | 6,079 | py | Python | recline/commands/man_utils.py | NetApp/recline | 065d9d90b6f5b63b535a091f14552e4790c26ecc | [
"BSD-3-Clause"
] | 4 | 2020-05-29T22:54:41.000Z | 2021-10-03T07:59:07.000Z | recline/commands/man_utils.py | NetApp/recline | 065d9d90b6f5b63b535a091f14552e4790c26ecc | [
"BSD-3-Clause"
] | 2 | 2020-08-28T07:39:43.000Z | 2021-04-05T12:45:39.000Z | recline/commands/man_utils.py | NetApp/recline | 065d9d90b6f5b63b535a091f14552e4790c26ecc | [
"BSD-3-Clause"
] | null | null | null | """
This module holds some utility functions used as part of the man command to format
text from CLI commands into consistent man pages that respond to the terminal
width.
"""
import curses
from recline.arg_types.positional import Positional
from recline.arg_types.remainder import Remainder
from recline.commands.cli_command import get_annotation_type
def wrapped_string(text, screen_width, prefix=0):
"""This function will take a string and make sure it can fit within the
given screen_width.
If the string is too long to fit, it will be broken on word boundaries
(specifically the ' ' character) if it can or the word will be split with
a '-' character and the second half moved to the next line.
If a prefix is given, the line(s) will be prefixed with that many ' '
characters, including any wrapped lines.
If the given string includes embeded newline characters, then each line
will be evaluated according to the rules above including breaking on word
boundaries and injecting a prefix.
"""
if not text:
return ''
new_text = ''
# if we have multiple paragraphs, then wrap each one as if it were a single line
lines = text.split('\n')
if len(lines) > 1:
for index, line in enumerate(lines):
if index > 0:
new_text += ' ' * prefix
new_text += wrapped_string(line, screen_width, prefix=prefix) + '\n'
return new_text.rstrip()
if len(text) + prefix < screen_width:
return text
words = text.split(' ')
current_line = ''
for word in words:
if prefix + len(current_line) + len(word) + 1 < screen_width:
# if word fits on line, just add it
current_line += word + ' '
else:
space_left = screen_width - (prefix + len(current_line))
if space_left < 3 or len(word) - space_left < 3:
# if not much room, move whole word to the next line
new_text += '%s\n' % current_line.rstrip()
current_line = '%s%s ' % (' ' * prefix, word)
else:
# split the word across lines with a hyphen
current_line += word[:space_left - 1] + '-'
new_text += current_line.rstrip() + "\n"
current_line = ' ' * prefix + word[space_left - 1:] + ' '
new_text += current_line.rstrip()
return new_text
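# --- Added demo (hedged): a quick width-limited wrap with a hanging indent;
# the text and sizes are illustrative only.
if __name__ == '__main__':
    print(wrapped_string('a quick check of width limited word wrapping',
                         screen_width=24, prefix=4))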
def generate_help_text(screen_width, command_class):
"""Generates lines of help text which are formatted using the curses library.
The final document resembles a typical Linux-style manpage. See here:
https://www.tldp.org/HOWTO/Man-Page/q3.html
"""
# generate styled man page, one section at a time
help_text = []
indent = ' '
# command name and short description
help_text.append(('NAME\n', curses.A_BOLD))
help_text.append((indent,))
help_text.append((command_class.name, curses.A_BOLD))
help_text.append((' -- ',))
description = wrapped_string(
command_class.docstring.short_description, screen_width,
prefix=(len(command_class.name) + len(indent) + 4),
)
for line in description.split('\n'):
help_text.append(('%s\n' % line,))
help_text.append(('\n',))
# command usage details
help_text.append(('SYNOPSIS\n', curses.A_BOLD))
help_text.append((indent,))
description = wrapped_string(
command_class.get_command_usage(), screen_width, prefix=len(indent),
)
for line in description.split('\n'):
help_text.append(('%s\n' % line,))
help_text.append(('\n',))
# command detailed description
if command_class.docstring.long_description:
help_text.append(('DESCRIPTION\n', curses.A_BOLD))
help_text.append((indent,))
description = wrapped_string(
command_class.docstring.long_description, screen_width,
prefix=len(indent),
)
for line in description.split('\n'):
help_text.append(('%s\n' % line,))
help_text.append(('\n',))
# each command parameter with description, constraints, and defaults
if command_class.docstring.params:
def print_arg(arg):
meta = command_class.get_arg_metavar(arg)
description = command_class.get_arg_description(arg, indent=None)
annotation_type = get_annotation_type(arg)
positional = '' if issubclass(annotation_type, (Remainder, Positional)) else '-'
arg_name = '' if issubclass(annotation_type, Positional) else '%s ' % arg.name
prefix = '%s %s%s%s ' % (indent, positional, arg_name, meta)
help_text.append((prefix,))
description = wrapped_string(
description, screen_width, prefix=len(prefix)
)
for line in description.split('\n'):
help_text.append(('%s\n' % line,))
help_text.append(('\n',))
help_text.append(('OPTIONS\n', curses.A_BOLD))
if command_class.required_args:
help_text.append((indent,))
help_text.append(('Required:\n', curses.A_UNDERLINE))
for arg in command_class.required_args:
print_arg(arg)
if command_class.optional_args:
help_text.append((indent,))
help_text.append(('Optional:\n', curses.A_UNDERLINE))
for arg in command_class.optional_args:
print_arg(arg)
# each command example
if command_class.docstring.examples:
help_text.append(('EXAMPLES\n', curses.A_BOLD))
for example in command_class.docstring.examples:
prefix = indent + ' '
help_text.append((indent,))
help_text.append(('%s:' % (example.name), curses.A_UNDERLINE))
help_text.append(('\n%s' % prefix,))
description = wrapped_string(
example.description, screen_width, prefix=len(prefix),
)
for line in description.split('\n'):
help_text.append(('%s\n' % line,))
return help_text
| 39.474026 | 92 | 0.623787 | 775 | 6,079 | 4.730323 | 0.236129 | 0.067649 | 0.10311 | 0.032733 | 0.281779 | 0.259411 | 0.237861 | 0.224768 | 0.195308 | 0.154392 | 0 | 0.002248 | 0.268136 | 6,079 | 153 | 93 | 39.732026 | 0.821758 | 0.226024 | 0 | 0.269231 | 0 | 0 | 0.035251 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028846 | false | 0 | 0.038462 | 0 | 0.115385 | 0.028846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ae39804dc5f4ef4b01469e35dccf93770837275 | 1,759 | py | Python | bigml/tests/compare_forecasts_steps.py | pertinkoira/python | c486060f7f7c79ef9f48ced567f118ac7aae3f84 | [
"Apache-2.0"
] | null | null | null | bigml/tests/compare_forecasts_steps.py | pertinkoira/python | c486060f7f7c79ef9f48ced567f118ac7aae3f84 | [
"Apache-2.0"
] | 3 | 2022-03-29T17:54:19.000Z | 2022-03-29T17:54:42.000Z | bigml/tests/compare_forecasts_steps.py | pertinkoira/python | c486060f7f7c79ef9f48ced567f118ac7aae3f84 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
from nose.tools import eq_, assert_almost_equal
from .world import world, res_filename
#@step(r'I create a local forecast for "(.*)"')
def i_create_a_local_forecast(step, input_data):
input_data = json.loads(input_data)
world.local_forecast = world.local_time_series.forecast(input_data)
#@step(r'the local forecast is "(.*)"')
def the_local_forecast_is(step, local_forecasts):
local_forecasts = json.loads(local_forecasts)
attrs = ["point_forecast", "model"]
for field_id in local_forecasts:
forecast = world.local_forecast[field_id]
local_forecast = local_forecasts[field_id]
eq_(len(forecast), len(local_forecast), "forecast: %s" % forecast)
for index in range(len(forecast)):
for attr in attrs:
if isinstance(forecast[index][attr], list):
for pos, item in enumerate(forecast[index][attr]):
assert_almost_equal(local_forecast[index][attr][pos],
item, places=5)
else:
eq_(forecast[index][attr], local_forecast[index][attr])
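# --- Hedged usage sketch (added): these steps back Gherkin lines shaped
# roughly like the decorators commented above, e.g.:
#   When I create a local forecast for "<json input>"
#   Then the local forecast is "<json forecasts>"
# (Exact feature-file phrasing is an assumption.)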
| 37.425532 | 77 | 0.675952 | 239 | 1,759 | 4.824268 | 0.464435 | 0.112749 | 0.073721 | 0.027754 | 0.036427 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010287 | 0.226265 | 1,759 | 46 | 78 | 38.23913 | 0.836885 | 0.371234 | 0 | 0 | 0 | 0 | 0.028493 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ae58acf089690caafb8cdb5422fc701ad32f66a | 2,749 | py | Python | application.py | roupenminassian/UTS-DSI-x-Disability-Research-Network | e08378594f09560a477521f22f62a47622e07cdd | [
"MIT"
] | null | null | null | application.py | roupenminassian/UTS-DSI-x-Disability-Research-Network | e08378594f09560a477521f22f62a47622e07cdd | [
"MIT"
] | null | null | null | application.py | roupenminassian/UTS-DSI-x-Disability-Research-Network | e08378594f09560a477521f22f62a47622e07cdd | [
"MIT"
] | null | null | null | import pandas as pd
import streamlit as st
import openai
import os
import jsonlines
import pickle
from rank_bm25 import BM25Okapi
openai.organization = "org-eiJyreiRZUtpiu8pm6LIIA8B"
openai.api_key = st.secrets['API_KEY']
"""
# Data Science Institute x Disability Research Network: A UTS HASS-DSI Research Project
The project involves preprocessing textual data from the Royal Commission into "Aged Care Quality and Safety", and "Violence, Abuse, Neglect and Exploitation of People with Disability", and utilising natural language processing (NLP) techniques to improve document search functionality. Initial attempts were made to create a document-fetching algorithm designed to minimise the amount of time a user spends searching for relevant information.
Please upload a file in the correct data format below; otherwise you may use an existing, preprocessed file by selecting the below box.
"""
#Load documents
uploaded_file = st.file_uploader('')  # renamed from "input" to avoid shadowing the builtin
if uploaded_file is None:
st.write("Or use sample dataset to try the application")
sample = st.checkbox("Download sample data from GitHub")
try:
if sample:
st.markdown("""[download_link](https://gist.github.com/roupenminassian/0a17d0bf8a6410dbb1b9d3f42462c063)""")
except:
pass
else:
with open("test_final.txt","rb") as fp:# Unpickling
contents = pickle.load(fp)
#Preparing model
tokenized_corpus = [doc.split(" ") for doc in contents]
bm25 = BM25Okapi(tokenized_corpus)
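#A small illustration (not in the original app) of what the BM25 model built above
#provides: get_scores returns one relevance score per document, and get_top_n the
#best-matching documents for a tokenized query.
#   bm25.get_scores("aged care".split(" "))
#   bm25.get_top_n("aged care".split(" "), contents, n=3)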
user_input = st.text_input('Please Enter a Query:')
corpus_selected = st.slider("Select the number of relevant documents to present:", min_value=0, max_value=5, step=1)
temperature_selected = st.slider("Set the temperature (controls how much randomness is in the output):", min_value=0.0, max_value=1.0, step=0.05)
if not user_input:  # st.text_input returns an empty string when blank, never None
st.write('Please enter a query above.')
else:
tokenized_query = user_input.split(" ")
doc_scores = bm25.get_scores(tokenized_query)
if st.button('Generate Text'):
generated_text = bm25.get_top_n(tokenized_query, contents, n=corpus_selected)
for i in range(corpus_selected):
st.write(generated_text[i])
GPT_text = openai.Answer.create(
search_model="davinci",
model="davinci",
question=user_input,
documents=["test.jsonl"],
#file = "file-nYWFf5V4zKtZMv82WyakRZme",
examples_context="In 2017, U.S. life expectancy was 78.6 years.",
examples=[["What is human life expectancy in the United States?","78 years."]],
max_tokens=50,
temperature = temperature_selected,
stop=["\n", "<|endoftext|>"],
)
st.write('GPT-3 Answer: ' + GPT_text['answers'][0])
| 36.171053 | 441 | 0.706075 | 370 | 2,749 | 5.148649 | 0.52973 | 0.014698 | 0.011549 | 0.013648 | 0.018898 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028118 | 0.19789 | 2,749 | 75 | 442 | 36.653333 | 0.835828 | 0.028738 | 0 | 0.042553 | 0 | 0 | 0.282915 | 0.01407 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.021277 | 0.148936 | 0 | 0.148936 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ae5d110cb00dff54458d2c48b5bb3d0525b7694 | 2,468 | py | Python | examples/tour_examples/bootstrap_xkcd_tour.py | chau11ece/GitStudy | d2f1130d529ec99e3a08878dba7af41f2a08e27d | [
"MIT"
] | null | null | null | examples/tour_examples/bootstrap_xkcd_tour.py | chau11ece/GitStudy | d2f1130d529ec99e3a08878dba7af41f2a08e27d | [
"MIT"
] | null | null | null | examples/tour_examples/bootstrap_xkcd_tour.py | chau11ece/GitStudy | d2f1130d529ec99e3a08878dba7af41f2a08e27d | [
"MIT"
] | null | null | null | import pytest
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_bootstrap_tour(self):
self.open("https://xkcd.com/1117/")
self.assert_element('img[alt="My Sky"]')
self.create_bootstrap_tour()
self.add_tour_step("Welcome to XKCD!")
self.add_tour_step("This is the XKCD logo.", "#masthead img")
self.add_tour_step("Here's the daily webcomic.", "#comic img")
self.add_tour_step("This is the title.", "#ctitle", alignment="top")
self.add_tour_step("Click here for the next comic.", 'a[rel="next"]')
self.add_tour_step("Click here for the previous one.", 'a[rel="prev"]')
self.add_tour_step("Learn about the author here.", 'a[rel="author"]')
self.add_tour_step("Click for a random comic.", 'a[href*="/random/"]')
self.add_tour_step("Thanks for taking this tour!")
self.export_tour(filename="bootstrap_xkcd_tour.js") # Exports the tour
self.play_tour() # Plays the tour
@pytest.fixture
def default_context():
return {"extra_context": {}}
@pytest.fixture(
params=[
{"author": "alice"},
{"project_slug": "helloworld"},
{"author": "bob", "project_slug": "foobar"},
]
)
def extra_context(request):
return {"extra_context": request.param}
@pytest.fixture(params=["default", "extra"])
def context(request):
if request.param == "default":
return request.getfuncargvalue("default_context")
else:
return request.getfuncargvalue("extra_context")
def test_generate_project(cookies, context):
"""Call the cookiecutter API to generate a new project from a
template.
"""
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
assert result.exception is None
assert result.project.isdir()
@pytest.mark.parametrize(
"test_input,expected",
[
("3+5", 8),
pytest.param("1+7", 8, marks=pytest.mark.basic),
pytest.param("2+4", 6, marks=pytest.mark.basic, id="basic_2+4"),
pytest.param(
"6*9", 42, marks=[pytest.mark.basic, pytest.mark.xfail], id="basic_6*9"
),
],
)
def test_eval(test_input, expected):
assert eval(test_input) == expected
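# Hypothetical invocations (not in the original file): `pytest -m basic` selects
# the cases marked above, and `pytest -k "basic_2+4"` runs a single id.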
# studying git branching & merging
# chautran: git checkout v1.0.24
| 36.294118 | 87 | 0.603728 | 306 | 2,468 | 4.722222 | 0.398693 | 0.043599 | 0.068512 | 0.093426 | 0.1391 | 0.07474 | 0.07474 | 0.041522 | 0 | 0 | 0 | 0.014154 | 0.255673 | 2,468 | 67 | 88 | 36.835821 | 0.772455 | 0.066856 | 0 | 0 | 0 | 0 | 0.246377 | 0.009662 | 0 | 0 | 0 | 0 | 0.09434 | 1 | 0.113208 | false | 0 | 0.018868 | 0.037736 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8aed0fd9be816c87487500edc230255c399c0ca8 | 8,283 | py | Python | PyReQTL/annotate.py | nalomran/PyReQTL | 020535e69dfd7ab3c074a3e28cda6cca465672e8 | [
"MIT"
] | 14 | 2020-09-23T18:51:41.000Z | 2020-10-10T11:22:58.000Z | PyReQTL/annotate.py | nalomran/PyReQTL | 020535e69dfd7ab3c074a3e28cda6cca465672e8 | [
"MIT"
] | null | null | null | PyReQTL/annotate.py | nalomran/PyReQTL | 020535e69dfd7ab3c074a3e28cda6cca465672e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Annotate the output of ReQTL as cis or trans
Created on Aug 29, 2020
@author: Nawaf Alomran
This module annotates the output of ReQTL as cis or trans based on whether the
SNV resides within its paired gene.
Input + Options
----------------
+ -r: the path to the ReQTL analysis result file
+ -ga: the path to the gene location annotations file
+ -o: the prefix for the output annotated result
Output
------
+ a file with the ReQTLs annotated as cis or trans
How to Run
----------
python -m PyReQTL.annotate \
-r output/ReQTL_test_all_ReQTLs.txt \
-ga data/gene_locations_hg38.txt \
-o ReQTL_test \
-c True
* Python runtime via the time command: 8.19s user 0.61s system 112% cpu 7.838 total
* R runtime via the time command: 3.15s user 0.22s system 99% cpu 3.383 total
* Note that, measured after the importr statements, Python is faster than R
"""
import argparse
import sys
from datetime import datetime
import numpy as np # type: ignore
import pandas as pd # type: ignore
import rpy2.robjects.packages as rpackages # type: ignore
from rpy2.robjects import pandas2ri # type: ignore
from rpy2.robjects.packages import importr # type: ignore
try:
from common import (create_output_dir, output_filename_generator,
bool_conv_args)
except ModuleNotFoundError:
from PyReQTL.common import (create_output_dir, output_filename_generator,
bool_conv_args)
# install the R package GenomicFeatures from within Python
if not rpackages.isinstalled('GenomicFeatures'):
print("installing GenomicFeatures package ...")
bioc_manager = rpackages.importr('BiocManager')
bioc_manager.install('GenomicFeatures')
print("Done installing the package.")
# importing the following required R packages to be used within Python
print("Kindly wait for the required R packages to be imported into Python...")
g_ranges = importr('GenomicRanges')
print("GenomicRanges package is imported.")
g_alignments = importr('GenomicAlignments')
print("GenomicAlignments package is imported.")
iranges = importr('IRanges')
print("IRanges package is imported.")
print("Done importing.")
# This needs to be activated in order to perform pandas conversion
pandas2ri.activate()
def cis_trans_annotator(rqt_rst: str,
gene_ann: str,
out_prefx: str,
                        cli: bool = False):
"""Annotate the output of ReQTL as cis or trans based on whether the
    SNV resides within its paired gene
    Parameters
    ----------
rqt_rst: the path to the ReQTL analysis result file
gene_ann: the path to the file gene location annotation
out_prefx: the prefix for the output annotated result
    cli: whether the function is being executed from the command line.
         Default is False.
Return
------
reqtl_reslt_arranged: dataframe ReQTLs annotated as cis or trans
Output
------
- file with the ReQTLs annotated as cis or trans
"""
start_time = datetime.now()
# reading the ReQTL result file from run_matrix_ReQTL
reqtl_result = pd.read_table(rqt_rst, sep="\t")
# ------------------------------------------------------------------------#
# ------------------------------------------------------------------------#
# -----------------annotate which gene harbors the snp--------------------#
# classify ReQTLs in which the two members of the pair are in the same----#
# gene as cis and classify all others as trans----------------------------#
# ------------------------------------------------------------------------#
# ------------------------------------------------------------------------#
reqtl_reslt_arranged = reqtl_result.assign(new_SNP=reqtl_result.SNP)
# split them into four columns based on the pattern "[:_>]"
reqtl_reslt_arranged = reqtl_reslt_arranged.new_SNP.str.split('[:_>]',
expand=True)
reqtl_reslt_arranged.columns = ['chrom', 'start', 'ref', 'alt']
# concatenating the re-arranged dataframe with the original dataframe
reqtl_reslt_arranged = pd.concat([reqtl_result, reqtl_reslt_arranged],
axis=1)
# making the new end column the same as the start column
reqtl_reslt_arranged = reqtl_reslt_arranged.assign(
end=reqtl_reslt_arranged.start)
# convert Python Pandas DataFrame to R-dataframe
reqtl_result_df_r = pandas2ri.py2rpy(reqtl_reslt_arranged)
# read gene location file and then convert to R dataframe
gene_locs_py_df = pd.read_table(gene_ann, sep="\t")
gene_locs_df_r = pandas2ri.py2rpy(gene_locs_py_df)
# storing the location of genomic features for both R dataframes
reqtl_reslt_granges_r = g_ranges.GRanges(reqtl_result_df_r)
gene_loc_granges_r = g_ranges.GRanges(gene_locs_df_r)
# finding the overlap between the ranges
overlaps = iranges.findOverlaps(reqtl_reslt_granges_r,
gene_loc_granges_r,
select="last",
type="within")
# ignore the Pycharm warning later
overlaps = np.where(overlaps == -2147483648, None, overlaps)
overlaps = overlaps.tolist()
# reindex the gene_locs dataframe by the overlaps
genes_snp = gene_locs_py_df.ensembl_gene.reindex(overlaps)
reqtl_reslt_arranged['genes_snp'] = pd.Series(genes_snp.values.tolist())
# if genes_snp == gene in reqtl_reslt_arranged dataframe then it cis
# otherwise it will be trans
reqtl_reslt_arranged['class'] = np.where(
reqtl_reslt_arranged.genes_snp == reqtl_reslt_arranged.gene,
'cis',
'trans')
reqtl_reslt_arranged.loc[reqtl_reslt_arranged['genes_snp'].isna(),
'class'] = reqtl_reslt_arranged['genes_snp']
# drop the unneeded columns
reqtl_reslt_arranged.drop(
['chrom',
'end',
'ref',
'alt',
'start'], axis=1, inplace=True)
out_dir = create_output_dir("output")
annotated_file = output_filename_generator(out_dir,
out_prefx,
"_ReQTLs_cistrans_ann.txt")
reqtl_reslt_arranged.to_csv(annotated_file, sep="\t", index=False,
na_rep='NULL')
print(f"\nCis/trans annotated ReQTLs saved in {annotated_file}\n")
if cli:
print(f"Analysis took after importing the required packages "
f"{(datetime.now() - start_time).total_seconds()} sec")
else:
return reqtl_reslt_arranged
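# Programmatic usage sketch (paths taken from the "How to Run" example above):
#   df = cis_trans_annotator("output/ReQTL_test_all_ReQTLs.txt",
#                            "data/gene_locations_hg38.txt", "ReQTL_test")
#   df["class"].value_counts()  # tally of cis vs. trans ReQTLs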
def main() -> None:
"""Parses the command line arguments entered by the user
Parameters
---------
None
Return
-------
None
"""
USAGE = """Annotate the output of ReQTL as cis or trans based on whether
the SNV resides within its paired gene"""
parser = argparse.ArgumentParser(description=USAGE)
parser.add_argument('-r',
dest="rqt_rst",
required=True,
help="the path to the ReQTL analysis result file")
parser.add_argument('-ga',
dest='gene_ann',
required=True,
help="the path to the file gene location annotations")
parser.add_argument('-o',
dest="out_prefx",
required=True,
help="the prefix for the output annotated result")
parser.add_argument("-c",
dest="cli",
default=False,
type=bool_conv_args,
help="""Whether the function is been executed with the
command line. Default is False!""")
args = parser.parse_args()
rqt_rst = args.rqt_rst
gene_ann = args.gene_ann
out_prefx = args.out_prefx
cli = args.cli
try:
cis_trans_annotator(rqt_rst, gene_ann, out_prefx, cli)
except KeyboardInterrupt:
sys.exit('\nthe user ends the program')
if __name__ == '__main__':
main()
| 33 | 79 | 0.608234 | 1,000 | 8,283 | 4.859 | 0.289 | 0.049393 | 0.081498 | 0.017288 | 0.28051 | 0.210949 | 0.190574 | 0.155999 | 0.118337 | 0.095287 | 0 | 0.009085 | 0.269105 | 8,283 | 250 | 80 | 33.132 | 0.793525 | 0.354219 | 0 | 0.0625 | 0 | 0 | 0.198422 | 0.010008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.178571 | 0 | 0.205357 | 0.080357 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8aef1e6b611dba02218b5cc706c486dafbea639a | 940 | py | Python | fx_bmark_extract.py | kiike/scripts | c58386288ff889dd14c91db4487734b294ba3a81 | [
"ISC"
] | null | null | null | fx_bmark_extract.py | kiike/scripts | c58386288ff889dd14c91db4487734b294ba3a81 | [
"ISC"
] | null | null | null | fx_bmark_extract.py | kiike/scripts | c58386288ff889dd14c91db4487734b294ba3a81 | [
"ISC"
] | null | null | null | #!/usr/bin/env python
"""
Import a Firefox bookmarks file into a single json list
"""
import json
import pprint
def walk(struct, depth=0):
children = struct.get('children')
if children:
for child in children:
if child.get('type') == 'text/x-moz-place':
title = child.get('title')
uri = child.get('uri')
tags = child.get('tags')
if tags:
                    tag_l = tags.split(',')
else:
tag_l = []
out_dict = {'title': title,
'uri': uri,
'tags': tag_l
}
if out_dict not in my_marks:
my_marks.append(out_dict)
walk(child)
with open("bmarks") as f:
my_marks = []
j = json.load(f)
walk(j)
print(json.dumps(my_marks, indent=2))
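# Each collected entry has this shape (illustrative values, assuming a standard
# Firefox bookmarks JSON backup saved as "bmarks" in the working directory):
#   {"title": "Example", "uri": "https://example.org", "tags": ["news", "tech"]}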
| 24.736842 | 60 | 0.453191 | 109 | 940 | 3.816514 | 0.495413 | 0.076923 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003731 | 0.429787 | 940 | 37 | 61 | 25.405405 | 0.772388 | 0.080851 | 0 | 0 | 0 | 0 | 0.068925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.115385 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8af9408c776206126f843ee9b5587d4d0acab636 | 5,153 | py | Python | xcltk/utils/pileup_regions.py | Rongtingting/xcltk | 2e86207c45a1caa7f905a89e1c121c3c203eab2d | [
"Apache-2.0"
] | null | null | null | xcltk/utils/pileup_regions.py | Rongtingting/xcltk | 2e86207c45a1caa7f905a89e1c121c3c203eab2d | [
"Apache-2.0"
] | null | null | null | xcltk/utils/pileup_regions.py | Rongtingting/xcltk | 2e86207c45a1caa7f905a89e1c121c3c203eab2d | [
"Apache-2.0"
] | 2 | 2021-01-26T02:07:32.000Z | 2021-02-03T03:56:55.000Z | # Copied from cellSNP, https://github.com/single-cell-genetics/cellSNP/blob/purePython/cellSNP/utils/pileup_regions.py
# Utilility functions for pileup SNPs across regions
# originally in from pileup_utils.py
# Author: Yuanhua Huang
# Date: 21/05/2018
# Modified by: Xianjie Huang
from .pileup import *
from .sam import get_query_bases, get_query_qualities
## Earlier, the high error rate in whole-genome pileup might have come from
## using _read.query_sequence, which covers only the partially aligned read:
## pileupread.query_position is based on the full length of the read,
## while _read.qqual is based on the aligned read segment.
# def pileup_bases(pileupColumn):
# """ pileup all reads mapped to the genome position.
# """
# base_list, read_list, qual_list = [], [], []
# for pileupread in pileupColumn.pileups:
# # query position is None if is_del or is_refskip is set.
# if pileupread.is_del or pileupread.is_refskip:
# continue
# #query_POS = pileupread.query_position
# query_POS = pileupread.query_position
# _read = pileupread.alignment
# try:
# _base = _read.query_sequence[query_POS - 1].upper()
# _qual = _read.qqual[query_POS - 1]
# except:
# print("warnings: a read fails to give _base or _qual.",
# query_POS, len(_read.qqual), len(_read.qual), len(_read.query_sequence))
# print(_read.qqual)
# continue
# #_qual = "J"
# read_list.append(_read)
# base_list.append(_base)
# qual_list.append(_qual)
# return base_list, qual_list, read_list
def pileup_bases(pileupColumn, real_POS, cell_tag, UMI_tag, min_MAPQ,
max_FLAG, min_LEN):
"""
Pileup all reads mapped to the genome position.
Filtering is also applied, including cell and UMI tags and read mapping
quality.
"""
base_list, qual_list, UMIs_list, cell_list = [], [], [], []
for pileupread in pileupColumn.pileups:
# query position is None if is_del or is_refskip is set.
if pileupread.is_del or pileupread.is_refskip:
continue
_read = pileupread.alignment
if real_POS is not None:
try:
idx = _read.positions.index(real_POS-1)
except:
continue
_qual = get_query_qualities(_read)[idx]
_base = get_query_bases(_read)[idx].upper()
else:
query_POS = pileupread.query_position
_qual = _read.query_qualities[query_POS - 1]
_base = _read.query_sequence[query_POS - 1].upper()
## filtering reads
if (_read.mapq < min_MAPQ or _read.flag > max_FLAG or
len(_read.positions) < min_LEN):
continue
if cell_tag is not None and _read.has_tag(cell_tag) == False:
continue
if UMI_tag is not None and _read.has_tag(UMI_tag) == False:
continue
if UMI_tag is not None:
UMIs_list.append(fmt_umi_tag(_read, cell_tag, UMI_tag))
if cell_tag is not None:
cell_list.append(_read.get_tag(cell_tag))
base_list.append(_base)
qual_list.append(_qual)
return base_list, qual_list, UMIs_list, cell_list
def pileup_regions(samFile, barcodes, out_file=None, chrom=None, cell_tag="CR",
UMI_tag="UR", min_COUNT=20, min_MAF=0.1, min_MAPQ=20,
max_FLAG=255, min_LEN=30, doublet_GL=False, verbose=True):
"""Pileup allelic specific expression for a whole chromosome in sam file.
TODO: 1) multiple sam files, e.g., bulk samples; 2) optional cell barcode
"""
samFile, chrom = check_pysam_chrom(samFile, chrom)
if out_file is not None:
fid = open(out_file, "w")
fid.writelines(VCF_HEADER + CONTIG)
if barcodes is not None:
fid.writelines("\t".join(VCF_COLUMN + barcodes) + "\n")
else:
fid.writelines("\t".join(VCF_COLUMN + ["sample0"]) + "\n")
POS_CNT = 0
vcf_lines_all = []
for pileupcolumn in samFile.pileup(contig=chrom):
POS_CNT += 1
if verbose and POS_CNT % 1000000 == 0:
print("%s: %dM positions processed." %(chrom, POS_CNT/1000000))
if pileupcolumn.n < min_COUNT:
continue
base_list, qual_list, UMIs_list, cell_list = pileup_bases(pileupcolumn,
pileupcolumn.pos + 1, cell_tag, UMI_tag, min_MAPQ, max_FLAG, min_LEN)
if len(base_list) < min_COUNT:
continue
base_merge, base_cells, qual_cells = map_barcodes(base_list, qual_list,
cell_list, UMIs_list, barcodes)
vcf_line = get_vcf_line(base_merge, base_cells, qual_cells,
pileupcolumn.reference_name, pileupcolumn.pos + 1, min_COUNT, min_MAF,
doublet_GL = doublet_GL)
if vcf_line is not None:
if out_file is None:
vcf_lines_all.append(vcf_line)
else:
fid.writelines(vcf_line)
if out_file is not None:
fid.close()
return vcf_lines_all
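# Usage sketch (assumptions, not from the original module: a coordinate-sorted,
# indexed BAM named "possorted.bam" and 10x-style CB/UB tags):
#   import pysam
#   sam = pysam.AlignmentFile("possorted.bam", "rb")
#   vcf_lines = pileup_regions(sam, barcodes=["AAACCTGAGAAACCAT-1"], chrom="1",
#                              cell_tag="CB", UMI_tag="UB")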
| 38.744361 | 118 | 0.622938 | 682 | 5,153 | 4.438416 | 0.259531 | 0.023786 | 0.026759 | 0.026429 | 0.339941 | 0.309217 | 0.26561 | 0.251734 | 0.16518 | 0.143376 | 0 | 0.01252 | 0.287017 | 5,153 | 132 | 119 | 39.037879 | 0.811377 | 0.352222 | 0 | 0.171429 | 0 | 0 | 0.014747 | 0 | 0 | 0 | 0 | 0.007576 | 0 | 1 | 0.028571 | false | 0 | 0.028571 | 0 | 0.085714 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8af96d0df5555eb37bb2040db58c0c8a963553d0 | 374 | py | Python | scrapy_framework/demotest.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | scrapy_framework/demotest.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | scrapy_framework/demotest.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | import importlib
# from scrapy_framework.config.settings import SPIDERS
#
#
# for data in SPIDERS:
# print(data)
# path=data.rsplit(".",1)[0]
# cls_name=data.rsplit(".",1)[1]
# module=importlib.import_module(path)
# cls=getattr(module, cls_name)
# print(cls)
d = {'a':1,'b':4,'c':2}
c = sorted(d.items(), key=lambda x: x[1], reverse=False)
print(c) | 20.777778 | 54 | 0.63369 | 58 | 374 | 4.017241 | 0.568966 | 0.085837 | 0.094421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025723 | 0.168449 | 374 | 18 | 55 | 20.777778 | 0.723473 | 0.65508 | 0 | 0 | 0 | 0 | 0.02521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8afad3555501f5f26b1ee0d4bb4ad784ace7da70 | 812 | py | Python | imagersite/imager_images/urls.py | katcosgrove/django-imager | 409081e6fa2933c7247fd8a9de49ec1cb053b778 | [
"MIT"
] | null | null | null | imagersite/imager_images/urls.py | katcosgrove/django-imager | 409081e6fa2933c7247fd8a9de49ec1cb053b778 | [
"MIT"
] | 2 | 2018-05-10T21:53:27.000Z | 2018-05-15T17:37:20.000Z | imagersite/imager_images/urls.py | katcosgrove/django-imager | 409081e6fa2933c7247fd8a9de49ec1cb053b778 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import LibraryView, PhotosView, AlbumsView, PhotoView, AlbumView
from .views import PhotoCreateView, AlbumCreateView, PhotoEditView, AlbumEditView
urlpatterns = [
path('library/', LibraryView.as_view(), name='library'),
path('photos/', PhotosView.as_view(), name='photos'),
path('photos/<int:pk>', PhotoView.as_view(), name='photo'),
path('photos/<int:pk>/edit', PhotoEditView.as_view(), name='photo_edit'),
path('albums/', AlbumsView.as_view(), name='albums'),
path('albums/<int:pk>', AlbumView.as_view(), name='album'),
path('albums/<int:pk>/edit', AlbumEditView.as_view(), name='album_edit'),
path('photos/add', PhotoCreateView.as_view(), name='photo_create'),
path('albums/add', AlbumCreateView.as_view(), name='album_create')
]
| 47.764706 | 81 | 0.704433 | 100 | 812 | 5.59 | 0.29 | 0.096601 | 0.161002 | 0.080501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108374 | 812 | 16 | 82 | 50.75 | 0.772099 | 0 | 0 | 0 | 0 | 0 | 0.227833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8afb51114e53c381340def1b8d3f0d6726b17916 | 310 | py | Python | Python/049group_anagrams.py | Apocrypse/LeetCode | 3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39 | [
"MIT"
] | 4 | 2020-03-17T03:08:51.000Z | 2022-03-14T17:33:28.000Z | Python/049group_anagrams.py | Apocrypse/LeetCode | 3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39 | [
"MIT"
] | null | null | null | Python/049group_anagrams.py | Apocrypse/LeetCode | 3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39 | [
"MIT"
] | 3 | 2021-04-29T16:51:02.000Z | 2022-03-19T17:37:56.000Z | import collections
class Solution:
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
result = collections.defaultdict(list)
for s in strs:
key = "".join(sorted(s))
result[key].append(s)
return result.values()
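# Example (illustrative): Solution().groupAnagrams(["eat", "tea", "tan"]) returns
# dict_values([['eat', 'tea'], ['tan']]). Sorting each word gives anagrams a shared
# key, so the pass costs O(n * k log k) for n words of maximum length k.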
| 25.833333 | 46 | 0.509677 | 33 | 310 | 4.787879 | 0.666667 | 0.088608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.354839 | 310 | 11 | 47 | 28.181818 | 0.79 | 0.145161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8afb97d50c56a425188ed738b021e57471f05003 | 758 | py | Python | app/urls.py | jhabarsingh/polling_app | 8e9d6f8489576170cacb47be76e5bc4ec6378d06 | [
"MIT"
] | 1 | 2021-05-03T14:55:20.000Z | 2021-05-03T14:55:20.000Z | app/urls.py | jhabarsingh/polling_app | 8e9d6f8489576170cacb47be76e5bc4ec6378d06 | [
"MIT"
] | 2 | 2021-03-01T16:37:30.000Z | 2021-05-03T20:37:56.000Z | app/urls.py | jhabarsingh/polling_app | 8e9d6f8489576170cacb47be76e5bc4ec6378d06 | [
"MIT"
] | null | null | null | from django.urls import path
from app import views
app_name = 'poll'
urlpatterns = [
path('', views.home, name='home'),
path('register', views.register, name='register'),
path('login', views.admin_login, name='login'),
path('create_poll/', views.create_poll, name='create_poll'),
path('show_polls/', views.show_poll, name='show_polls'),
path('show_polls/<slug:username>/', views.show_polls, name='show_polls'),
path('save_poll/', views.save_poll, name='save_poll'),
path('polling/<uuid:id>/', views.polling, name='polling'),
path('logout/', views.admin_logout, name='logout'),
path('poll_result/<uuid:id>/', views.poll_result, name='poll_result'),
path('get_data/<uuid:id>/', views.get_data, name='get_data'),
]
| 39.894737 | 77 | 0.675462 | 106 | 758 | 4.632075 | 0.254717 | 0.09165 | 0.06721 | 0.069246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.124011 | 758 | 18 | 78 | 42.111111 | 0.739458 | 0 | 0 | 0 | 0 | 0 | 0.306069 | 0.064644 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8afe34ceebaf6f7db1a23f0d092b72c7df8780de | 501 | py | Python | 1200-minimum-absolute-difference/1200-minimum-absolute-difference.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | 1200-minimum-absolute-difference/1200-minimum-absolute-difference.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | 1200-minimum-absolute-difference/1200-minimum-absolute-difference.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
results = []
mini = 1000000000
arr.sort()
        for a, b in zip(arr, arr[1:]):
            diff = abs(a - b)
            if diff == mini:
                results.append([a, b])
            elif diff < mini:
                mini = diff
                results = [[a, b]]  # reset with the new smallest gap (lists, matching the return type)
return results
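# Example (illustrative): [4, 2, 1, 3] sorts to [1, 2, 3, 4]; every adjacent gap is
# the minimum (1), so all consecutive pairs are returned: [[1, 2], [2, 3], [3, 4]].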
| 29.470588 | 70 | 0.423154 | 51 | 501 | 4.156863 | 0.529412 | 0.037736 | 0.066038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04 | 0.451098 | 501 | 17 | 71 | 29.470588 | 0.730909 | 0.07984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8aff4d08b156fe05c3178ca97948dcab05b52a18 | 1,588 | py | Python | famous_block/SENet.py | dongqifong/learning | a36453e82802f92c6fb4b03cd8e09938a763bac7 | [
"MIT"
] | null | null | null | famous_block/SENet.py | dongqifong/learning | a36453e82802f92c6fb4b03cd8e09938a763bac7 | [
"MIT"
] | null | null | null | famous_block/SENet.py | dongqifong/learning | a36453e82802f92c6fb4b03cd8e09938a763bac7 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class SEBlock(nn.Module):
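    # Squeeze-and-Excitation block for 1-D features: squeeze (global average pool
    # over the length L), excite (two FC layers with a C/r bottleneck and a sigmoid
    # gate), then rescale the conv output channel-wise. The outer residual addition
    # (x_in + x) is this implementation's variant, not part of the SENet paper.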
def __init__(self, in_c, kernel_size, r, dummy_x) -> None:
super().__init__()
self.in_c = in_c
self.conv1 = nn.Conv1d(in_channels=self.in_c,
out_channels=self.in_c, kernel_size=kernel_size, padding=int(kernel_size//2))
self.fc1 = nn.Linear(in_features=self.in_c,
out_features=int(self.in_c/r))
self.fc2 = nn.Linear(in_features=int(
self.in_c/r), out_features=self.in_c)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
        dummy_y = self.conv1(dummy_x)  # dummy forward pass to record the output length
        self.L = dummy_y.shape[-1]
def forward(self, x_in):
x_res = self.conv1(x_in) # (-1,C,L)
x = self.global_avg_pooling(x_res) # (-1,C,1)
x = x.view(-1, self.in_c) # (-1,C)
x = self.fc1(x) # (-1,C/r)
x = self.relu(x) # (-1,C/r)
x = self.fc2(x) # (-1,C)
x = self.sigmoid(x) # (-1,C)
x = x.view(-1, self.in_c, 1) # (-1,C,1)
x = self.scale(x_res, x) # (-1,C,L)
x = x_in + x # (-1,C,L)
return x # (-1,C,L)
def global_avg_pooling(self, x):
net = nn.AvgPool1d(kernel_size=self.L)
return net(x) # (-1,c,1)
def scale(self, x_res, x):
return torch.mul(x_res, x)
if __name__ == "__main__":
in_c = 5
r = 4
kernel_size = 7
dummy_x = torch.randn((2, in_c, 128))
senet = SEBlock(in_c=in_c, r=r, kernel_size=kernel_size, dummy_x=dummy_x)
out = senet(dummy_x)
print(out.shape)
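    # Expected output for the dummy input above (illustrative check):
    # torch.Size([2, 5, 128]) -- kernel 7 with padding 3 preserves the length, and
    # the block keeps the channel count, so (batch, channels, length) is unchanged.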
| 30.538462 | 108 | 0.537783 | 268 | 1,588 | 2.94403 | 0.216418 | 0.057034 | 0.08872 | 0.015209 | 0.152091 | 0.108999 | 0.038023 | 0.038023 | 0 | 0 | 0 | 0.032403 | 0.300378 | 1,588 | 51 | 109 | 31.137255 | 0.677768 | 0.063602 | 0 | 0 | 0 | 0 | 0.005427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.04878 | 0.02439 | 0.243902 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1019da9fdbebdeb2177f41e024ec8a2375bfc50 | 355 | py | Python | corehq/motech/migrations/0002_requestlog_payload_id.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/motech/migrations/0002_requestlog_payload_id.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/motech/migrations/0002_requestlog_payload_id.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='requestlog',
name='payload_id',
field=models.CharField(blank=True, max_length=126, null=True),
),
]
| 20.882353 | 74 | 0.588732 | 33 | 355 | 6.212121 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027888 | 0.292958 | 355 | 16 | 75 | 22.1875 | 0.788845 | 0 | 0 | 0 | 0 | 0 | 0.107042 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c10256e1a56feb6756445ccac8a450d8e5c12102 | 754 | py | Python | revibe/_errors/data.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | 2 | 2022-01-24T23:30:18.000Z | 2022-01-26T00:21:22.000Z | revibe/_errors/data.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | revibe/_errors/data.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | from . import base
from revibe._helpers import const, status
# -----------------------------------------------------------------------------
class ParameterMissingError(base.ExpectationFailedError):
default_detail = "missing paramter, please check the docs for request requirements"
class SerializerValidationError(base.ExpectationFailedError):
default_detail = "misc. serializer error, please try again"
class TooManyObjectsReturnedError(base.ProgramError):
default_detail = "Too many objects found, please try again"
class ObjectAlreadyExists(base.AlreadyReportedError):
default_detail = "The request object already exists"
class NoKeysError(base.ServiceUnavailableError):
default_detail = "Could not find any valid keys"
| 31.416667 | 87 | 0.71618 | 72 | 754 | 7.416667 | 0.638889 | 0.121723 | 0.123596 | 0.146067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.124668 | 754 | 23 | 88 | 32.782609 | 0.809091 | 0.102122 | 0 | 0 | 0 | 0 | 0.305185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1099cb2a2632c598917595fb2f7f6e6745f4161 | 3,885 | py | Python | reddit_IG_FB.py | Wanatux/Reddit-to-FB-Page | 9f70b4cff72aecc34bad047078d77b534a1abfb4 | [
"MIT"
] | null | null | null | reddit_IG_FB.py | Wanatux/Reddit-to-FB-Page | 9f70b4cff72aecc34bad047078d77b534a1abfb4 | [
"MIT"
] | null | null | null | reddit_IG_FB.py | Wanatux/Reddit-to-FB-Page | 9f70b4cff72aecc34bad047078d77b534a1abfb4 | [
"MIT"
] | null | null | null | #Clean Code for Picture uploader for social media platform
import praw
import random
import pandas as pd
import config
from openpyxl import load_workbook
import requests
import json
#Reddit Creds
r = praw.Reddit(
client_id= "Enter Info Here",
client_secret= "Enter Info Here",
user_agent= "Enter Info Here",
username= "Enter Info Here",
password= "Enter Info Here",
)
#SubReddit List
reddit_list = config.subreddit_list
#Praw Scrape
pics = []
Reddit_Scrapper = True
while Reddit_Scrapper:
subreddit = r.subreddit(random.choice(reddit_list))
for submission in subreddit.hot(limit=10):
try:
if 'jpg' not in submission.url:
continue
if submission.stickied:
continue
pic_tittle = submission.title
url = submission.url
un_id = submission.id
pics.append((pic_tittle, url, un_id))
except:
pass
    #Check if the post was already posted
    #If it isn't in the Excel history, keep it
df = pd.read_excel(r'subrreddit_history.xlsx')
    checker = True
    while checker:
        if len(pics) == 0:
            break  # nothing left to check; scrape another subreddit
        if pics[0][2] in df.values:
            pics.pop(0)  # already posted, drop it
        else:
            checker = False  # front of the list is a fresh post
    if len(pics) >= 1:  # one fresh post is enough
        Reddit_Scrapper = False
#The post is new: append a row to the history file
df = pd.DataFrame({'un_id': [pics[0][2]],
'pic_tittle': [pics[0][0]]})
writer = pd.ExcelWriter('subrreddit_history.xlsx', engine='openpyxl')
# try to open an existing workbook
writer.book = load_workbook('subrreddit_history.xlsx')
# copy existing sheets
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
# read existing file
reader = pd.read_excel(r'subrreddit_history.xlsx')
# write out the new sheet
df.to_excel(writer,index=False,header=False,startrow=len(reader)+1)
writer.close()
#This builds the tag string: take the post title, split it into words, and prefix each word with '#'.
def fb_descript():
tags = []
s = ' '
for x in pics[0][0].split():
tags.append('#' + x)
s = s.join(tags)
return ''' {a}
{c}
{c}
{c}
{c}
{c}
{c}
{c}
{c}
{c}
{c}
{b}'''.format(a=pics[0][0], b=s, c='.' *5)
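#Illustrative result (made-up title): for the title "Cute cat" this returns the
#title, ten spacer lines of ".....", then the hashtag line "#Cute #cat".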
#FB creds and photo post
#Instagram post function
def postInstagramPic():
#Post the Image
image_location_1 = pics[0][1]
post_url = 'https://graph.facebook.com/v10.0/{}/media'.format(config.inst_id)
payload = {
'image_url': image_location_1,
'caption': fb_descript(),
'access_token': config.inst_acc_token,
}
r = requests.post(post_url, data=payload)
print(r.text)
    #Instagram requires a second call: the returned creation ID must be published via a media_publish request.
result = json.loads(r.text)
if 'id' in result:
creation_id = result['id']
second_url = 'https://graph.facebook.com/v10.0/{}/media_publish'.format(config.inst_id)
second_payload = {
'creation_id': creation_id,
'access_token': config.inst_acc_token,
}
r = requests.post(second_url, data=second_payload)
print('--------Just posted to instagram--------')
print(r.text)
else:
pass
postInstagramPic()
#Facebook photo post function
def post_FBpage():
image_url = 'https://graph.facebook.com/{}/photos'.format(config.fb_page_id)
image_location = pics[0][1]
msg = fb_descript()
img_payload = {
'message': msg,
'url': image_location,
'access_token': config.fb_acc_token
}
#Send the POST request
r = requests.post(image_url, data=img_payload)
print('--------Just posted to Facebook--------')
print(r.text)
post_FBpage() | 28.566176 | 97 | 0.605405 | 515 | 3,885 | 4.44466 | 0.337864 | 0.019659 | 0.010485 | 0.012232 | 0.14941 | 0.117955 | 0.098733 | 0.0699 | 0.041066 | 0 | 0 | 0.010623 | 0.273102 | 3,885 | 136 | 98 | 28.566176 | 0.799929 | 0.149807 | 0 | 0.242991 | 0 | 0 | 0.175038 | 0.028006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028037 | false | 0.028037 | 0.065421 | 0 | 0.102804 | 0.046729 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c10de940ca61195f3abd45d17f60bcb7de5621f4 | 3,834 | py | Python | uil/core/templatetags/transformat.py | UiL-OTS-labs/django-shared-core | 702ca346f1be861108ec70ceed2ed3b99623f0a3 | [
"Apache-2.0"
] | null | null | null | uil/core/templatetags/transformat.py | UiL-OTS-labs/django-shared-core | 702ca346f1be861108ec70ceed2ed3b99623f0a3 | [
"Apache-2.0"
] | 13 | 2019-06-25T13:23:30.000Z | 2022-02-10T07:00:39.000Z | uil/core/templatetags/transformat.py | UiL-OTS-labs/django-shared-core | 702ca346f1be861108ec70ceed2ed3b99623f0a3 | [
"Apache-2.0"
] | null | null | null | from django.template import Library, Node, TemplateSyntaxError, Variable, VariableDoesNotExist
from django.template.base import render_value_in_context
from django.utils.safestring import SafeData, mark_safe
register = Library()
class FormattedTranslateNode(Node):
def __init__(self, filter_expression, noop, formatvalues, asvar=None,
message_context=None):
self.noop = noop
self.formatvalues = formatvalues
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, str):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
# Restore percent signs. Percent signs in template text are doubled
# so they are not interpreted as string format flags.
is_safe = isinstance(value, SafeData)
value = value.replace('%%', '%')
formatvalues = []
for formatvalue in self.formatvalues:
try:
variable = Variable(formatvalue)
formatvalues.append(variable.resolve(context))
except VariableDoesNotExist:
formatvalues.append(formatvalue)
value = value.format(*formatvalues)
value = mark_safe(value) if is_safe else value
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
@register.tag('transformat')
def do_translate_format(parser, token):
"""
This tag is a modified version of the trans tag. In addition to doing all the things the trans tag does, it also
performs a str.format() on the translation. The values for the format call can be added to the tag as additional
parameters.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
formatvalues = []
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
raise TemplateSyntaxError(
"No argument provided to the '%s' tag for the context option." % bits[0]
)
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
raise TemplateSyntaxError(
"No argument provided to the '%s' tag for the as option." % bits[0]
)
asvar = value
else:
formatvalues.append(option)
seen.add(option)
return FormattedTranslateNode(message_string, noop, formatvalues, asvar, message_context)
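# Template usage sketch (message and variables are illustrative):
#   {% transformat "Hello {0}, you have {1} new messages" user.name msg_count %}
#   {% transformat "Hello {0}" user.name as greeting %}
# The string is translated first, then str.format() is applied with the extra
# arguments; "as" stores the result in a context variable instead of rendering it.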
| 36.865385 | 116 | 0.603286 | 408 | 3,834 | 5.568627 | 0.308824 | 0.06338 | 0.070423 | 0.050616 | 0.172535 | 0.104313 | 0.104313 | 0.104313 | 0.104313 | 0.082746 | 0 | 0.003811 | 0.315597 | 3,834 | 103 | 117 | 37.223301 | 0.862043 | 0.092853 | 0 | 0.180723 | 0 | 0 | 0.086302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036145 | false | 0 | 0.036145 | 0 | 0.120482 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c11bc2c07ea4c7d0324366918fff40bddd73b06c | 1,585 | py | Python | crawler/scrapy_ffxiv/spiders/gathering_spider.py | shengzhc/sc-ff14-scrapy | 2d5b74980e47ec140a4b8d506079fcc94dde54a2 | [
"MIT"
] | null | null | null | crawler/scrapy_ffxiv/spiders/gathering_spider.py | shengzhc/sc-ff14-scrapy | 2d5b74980e47ec140a4b8d506079fcc94dde54a2 | [
"MIT"
] | null | null | null | crawler/scrapy_ffxiv/spiders/gathering_spider.py | shengzhc/sc-ff14-scrapy | 2d5b74980e47ec140a4b8d506079fcc94dde54a2 | [
"MIT"
] | null | null | null | import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_ffxiv.items import FfxivGatheringNode
"""
Spider to gather info from `www.ffxiv-gathering.com`
"""
class GatheringSpider(CrawlSpider):
name = 'ff14-gathering'
allowed_domains = [
'ffxiv-gathering.com',
]
start_urls = [
'https://www.ffxiv-gathering.com/all.php',
]
rules = (
        Rule(LinkExtractor(allow=('ff14fish.carbuncleplushy.com/index.html')), callback='parse_fishing_node'),
)
def parse_start_url(self, response):
nodes = response.selector.xpath("//table[contains(@id, 'myTable')]/tbody/tr")
for node in nodes:
yield FfxivGatheringNode(name=node.xpath(".//td[1]/text()").get(),
location=node.xpath(".//td[4]/text()").get(),
time=node.xpath(".//td[6]/text()").get(),
gclass=node.xpath(".//td[7]/text()").get())
def parse_fishing_node(self, response):
nodes = response.selector.xpath("//table[@id='fishes/tbody/tr[contains(@class, 'fish-entry')]")
for node in nodes:
yield {
'item': node.xpath(".//td//a[@class='fish-name']/text()").get(),
'level': '1',
'location': node.xpath(".//td//a[@class='location-name']/text()").get() + ' - ' + node.xpath(".//td//span[@class='zone-name']/text()").get(),
'time': 'Anytime',
'class': 'Fishing',
}
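    # Illustrative item shape from parse_start_url (field values are made up):
    #   FfxivGatheringNode(name="Iron Ore", location="Western Thanalan",
    #                      time="Anytime", gclass="Miner")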
| 35.222222 | 157 | 0.55142 | 170 | 1,585 | 5.088235 | 0.435294 | 0.072832 | 0.089017 | 0.046243 | 0.182659 | 0.099422 | 0.099422 | 0 | 0 | 0 | 0 | 0.007712 | 0.263722 | 1,585 | 44 | 158 | 36.022727 | 0.733505 | 0 | 0 | 0.0625 | 0 | 0 | 0.294156 | 0.142482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c11c4e38953d87d8740e1177783eb8eb9e19cef3 | 927 | py | Python | horizon/test_horizon-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | horizon/test_horizon-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | horizon/test_horizon-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.parametrize("name", [
("openstack-dashboard"),
("httpd"),
("memcached"),
])
def test_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
@pytest.mark.parametrize("name,port", [
("httpd","80"),
("httpd-ssl","443"),
("memcached","11211"),
])
def test_listening_interfaces(host, name, port):
sckt = host.socket("tcp://0.0.0.0:" + port)
assert sckt.is_listening
@pytest.mark.parametrize("process,enabled", [
("httpd", True),
("memcached", True),
])
def test_services(host, process, enabled):
svc = host.service(process)
assert svc.is_running
if enabled:
assert svc.is_enabled
@pytest.mark.parametrize("service,conf_file", [
("openstack-dashboard", "local_settings"),
])
def test_main_services_files(host, service, conf_file):
_file = host.file("/etc/" + service + "/" + conf_file)
assert _file.exists
| 25.054054 | 58 | 0.651564 | 113 | 927 | 5.19469 | 0.39823 | 0.068143 | 0.143101 | 0.085179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018182 | 0.169364 | 927 | 36 | 59 | 25.75 | 0.744156 | 0 | 0 | 0.125 | 0 | 0 | 0.192017 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 1 | 0.125 | false | 0 | 0.03125 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c11f7822c5bd888ca47b611e77c261bcc7260743 | 15,956 | py | Python | tests/annotator/test_structured_incident_annotator.py | langstok/EpiTator | 721fdc444382a0493702ee5976c987954753f47a | [
"Apache-2.0"
] | 40 | 2017-05-27T03:53:22.000Z | 2021-08-07T16:33:58.000Z | tests/annotator/test_structured_incident_annotator.py | langstok/EpiTator | 721fdc444382a0493702ee5976c987954753f47a | [
"Apache-2.0"
] | 25 | 2017-07-17T14:33:24.000Z | 2021-04-09T10:27:56.000Z | tests/annotator/test_structured_incident_annotator.py | langstok/EpiTator | 721fdc444382a0493702ee5976c987954753f47a | [
"Apache-2.0"
] | 9 | 2017-11-15T05:13:53.000Z | 2021-08-07T16:33:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from epitator.annotator import AnnoDoc
from epitator.structured_incident_annotator import StructuredIncidentAnnotator
import datetime
# import logging
# from .test_utils import with_log_level
def remove_empty_props(d):
return {
k: v
for k, v in d.items()
if v is not None
}
class TestStructuredIncidentAnnotator(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.annotator = StructuredIncidentAnnotator()
# @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
def test_count_table(self):
doc = AnnoDoc('''
Type / New / Confirmed / Probable / Suspect / Total
Cases / 3 / 293 / / 32 / 413
Deaths / 5 / 193 / 82 / 28 / 303
''')
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas, [{
# Date/country??
# Need to include because association rules are different for tables.
'type': 'caseCount',
'value': 3,
'attributes': []
}, {
'type': 'cumulativeCaseCount',
'value': 293,
'attributes': ['confirmed']
}, {
'type': 'cumulativeCaseCount',
'value': 32,
'attributes': ['suspected']
}, {
'type': 'cumulativeCaseCount',
'value': 413,
'attributes': []
}, {
'type': 'deathCount',
'value': 5,
'attributes': []
}, {
'type': 'cumulativeDeathCount',
'value': 193,
'attributes': ['confirmed']
}, {
'type': 'cumulativeDeathCount',
'value': 82,
'attributes': []
}, {
'type': 'cumulativeDeathCount',
'value': 28,
'attributes': ['suspected']
}, {
'type': 'cumulativeDeathCount',
'value': 303,
'attributes': []
}])
# @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
def test_location_count_table(self):
doc = AnnoDoc("""
Distribution of reported x fever cases from 1 Jul 2017-17 Apr 2018
Federal units / Reported / Discarded / Under investigation / Confirmed / Deaths
Acre (AC) / 1 / 1 / - / - / -
Amapá (AP) / 8 / 2 / 6 / - / -
Pará (PA) / 7 / 5 / 2 / - / -
Amazonas (AM) / 42 / 31 / 11 / - / -
Rondônia (RO) / 9 / 8 / 1 / - / -
Roraima (RR) / 3 / 3 / - / - / -
Tocantins (TO) / 17 / 15 / 2 / - / -
Bahia (BA) / 62 / 35 / 27 / - / -
Ceará (CE) / 4 / 3 / 1 / - / -
Maranhão (MA) / 7 / 5 / 2 / - / -
Paraíba (PB) / 5 / - / 5 / - / -
Pernambuco (PE) / 6 / 4 / 2 / - / -
Piauí (PI) / 9 / 6 / 3 / - / -
Rio Grande do Norte (RN) / 3 / 2 / 1 / - / -
Sergipe (SE) / 2 / 2 / - / - / -
Distrito Federal (DF) / 74 / 43 / 30 / 1 / 1
Goiás (GO) / 66 / 37 / 29 / - / -
Mato Grosso (MT) / 10 / 8 / 2 / - / -
Mato Grosso do Sul (MS) / 13 / 10 / 3 / - / -
Espírito Santo (ES) / 119 / 88 / 25 / 6 / 1
Minas Gerais (MG) / 1444 / 656 / 294 / 494 / 156
Rio de Janeiro (RJ) / 453 / 172 / 84 / 197 / 64
São Paulo (SP) / 2558 / 1655 / 444 / 459 / 120
Paraná (PR) / 110 / 102 / 8 / - / -
Rio Grande do Sul (RS) / 49 / 34 / 15 / - / -
Santa Catarina (SC) / 45 / 22 / 23 / - / -
Total / 5131 / 2951 / 1023 / 1157 / 342
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
incident = metadatas[0]
self.assertEqual(incident['value'], 1)
self.assertEqual(incident['type'], 'caseCount')
self.assertEqual(incident['location']['geonameid'], '3665474')
self.assertEqual(
incident['dateRange'],
[datetime.datetime(2017, 7, 1),
datetime.datetime(2018, 4, 18)])
def test_date_count_table(self):
doc = AnnoDoc("""
Cumulative case data
Report date / Cases / Deaths / New cases per week
26 Jun 2017 / 190 / 10 /
8 Sep 2017 / 300 / 12 /
9 Sep 2017 / 309 / 13 /
15 Sep 2017 / 319 / 14 /
6 Oct 2017 / 376 / 14 /
13 Oct 2017 /
20 Oct 2017 / 431 / 17 / 34
27 Oct 2017 / 457 / 18 / 26
3 Nov 2017 / 486 / 19 / 29""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas[-1], {
'value': 29,
'type': 'caseCount',
'attributes': [],
'dateRange': [
datetime.datetime(2017, 10, 28),
datetime.datetime(2017, 11, 4)]
})
self.assertEqual(metadatas[-2], {
'value': 19,
'type': 'cumulativeDeathCount',
'attributes': [],
'dateRange': [
datetime.datetime(2017, 11, 3),
datetime.datetime(2017, 11, 4)]
})
def test_date_count_table_2(self):
doc = AnnoDoc("""
| Report date | Cases |
| 6 Oct 2017 | 26 |
| 13 Oct 2017 | 29 |
| 20 Oct 2017 | 34 |""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas, [{
'value': 26,
'type': 'caseCount',
'attributes': [],
'dateRange': [
datetime.datetime(2017, 9, 30),
datetime.datetime(2017, 10, 7)]
}, {
'value': 29,
'type': 'caseCount',
'attributes': [],
'dateRange': [
datetime.datetime(2017, 10, 7),
datetime.datetime(2017, 10, 14)]
}, {
'value': 34,
'type': 'caseCount',
'attributes': [],
'dateRange': [
datetime.datetime(2017, 10, 14),
datetime.datetime(2017, 10, 21)]
}])
def test_non_incident_counts_and_species(self):
doc = AnnoDoc("""
Species / Morbidity / Mortality / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Orange Spotted Snakehead (_Channa aurantimaculata_) / 100% / 1% / 32 / 30 / 1 / 28 / 3
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas, [{
'attributes': [],
'type': 'caseCount',
'value': 30,
'species': {
'id': 'tsn:642745',
'label': 'Channa aurantimaculata'}
}, {
'attributes': [],
'type': 'deathCount',
'value': 1,
'species': {
'id': 'tsn:642745',
'label': 'Channa aurantimaculata'}
}])
def test_unknown_species_and_space_delimited_counts(self):
doc = AnnoDoc("""
The epidemiological statistics accumulated since the start of the event are included in the following "outbreak summary":
Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Birds / 6 368 632 / 1 303 173 / 1 297 617 / 3 850 608 / 0
Black-crowned night-heron / not available / 1 / 1 / 0 / 0
Passeridae (unidentified) / not available / 2 / 2 / 0 / 0
Pale thrush / not available / 1 / 1 / 0 / 0
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas[0], {
'attributes': [],
'type': 'caseCount',
'value': 1303173,
'species': {'id': 'tsn:174371', 'label': 'Aves'}
})
self.assertEqual(metadatas[-1], {
'attributes': [],
'type': 'deathCount',
'value': 1,
'species': "Cannot parse"
})
# @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
def test_multi_section_table(self):
doc = AnnoDoc("""
Disease update
--------------
Confirmed, probable, and suspect cases and deaths from Ebola virus disease in Guinea, Liberia, and Sierra Leone, as of 30 Jun 2014
Type / New* / Confirmed / Probable / Suspect / Totals by country
Guinea
Cases / 3 / 293 / 88 / 32 / 413
Deaths / 5 / 193 / 82 / 28 / 303
Liberia
Cases / 8 / 52 / 21 / 34 / 107
Deaths / 7 / 33 / 17 / 15 / 65
Sierra Leone
Cases / 11 / 199 / 31 / 9 / 239
Deaths / 2 / 65 / 29 / 5 / 99
Totals
Cases / 22 / 544 / 140 / 75 / 759
Deaths / 14 / 291 / 128 / 48 / 467
*New cases were reported between 25-29 Jun 2014
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas[4]['type'], 'cumulativeCaseCount')
self.assertEqual(metadatas[4]['dateRange'], [
datetime.datetime(2014, 6, 30, 0, 0),
datetime.datetime(2014, 7, 1, 0, 0)])
self.assertEqual(metadatas[4]['value'], 413)
self.assertEqual(metadatas[4]['location']['geonameid'], '2420477')
def test_number_in_header(self):
doc = AnnoDoc("""
Health Jurisdiction / Cases (percentage) / Incidence rate per 100 000 Person-Years
Salt Lake county / 162 (68.9) / 14.4
Utah county / 45 (19.1) / 7.6
Bear River / 5 (2.1) / 2.8
Southeast Utah / 2 (0.9) / 5.0
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas[0]['type'], 'caseCount')
self.assertEqual(metadatas[0]['value'], 162)
self.assertEqual(metadatas[0]['location']['geonameid'], '5781004')
# @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
def test_unusual_format(self):
doc = AnnoDoc("""
For subscribers' convenience, we hereby reproduce Israel's annual rabies statistics since 2014:
Year // badger / cat / fox / jackal / wolf / dog / cattle / sheep / horse // total
2014 // 3 / 0 / 2 / 2 / 4 / 2 / 1 / 0 / 0 // 14
2015 // 12 / 1 / 1 / 3 / 0 / 1 / 7 / 0 / 1 // 20
2016 // 12 / 0 / 7 / 5 / 0 / 0 / 5 / 0 / 1 // 30
2017 // 10 / 2 / 0 / 47 / 0 / 0 / 14 / 1 / 0 // 74
2018 // 4 / 0 / 0 / 35 / 0 / 1 / 7 / 1 / 1 // 51
""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
# A value from row one is not used because 2014 is missed by the date
# parser although other years are caught.
# The index refers to the badgers in 2015. It is an unintuitive index
# because some species are not being parsed so their values are skipped.
self.assertEqual(metadatas[2]['type'], 'caseCount')
self.assertEqual(metadatas[2]['value'], 12)
self.assertEqual(metadatas[2]['species']['label'], 'Taxidea taxus')
self.assertEqual(metadatas[2]['dateRange'], [
datetime.datetime(2015, 1, 1, 0, 0),
datetime.datetime(2016, 1, 1, 0, 0)])
def test_date_association(self):
doc = AnnoDoc("""
The outbreak strains of salmonella have infected a reported 961 people in 48 states [only Alaska and Delaware have not reported cases - Mod.LL] and the District of Columbia. Illnesses started on dates ranging from 4 January 2017 to 31 July 2017.
State / Number of Cases
Alabama / 25
Arizona / 6
Arkansas / 9
California / 54
Virginia / 56
Washington / 22
West Virginia / 17
Wisconsin / 24
Wyoming / 10""")
doc.add_tier(self.annotator)
metadatas = [
remove_empty_props(span.metadata)
for span in doc.tiers['structured_incidents']
]
self.assertEqual(metadatas[0]['dateRange'], [
datetime.datetime(2017, 1, 4, 0, 0),
datetime.datetime(2017, 8, 1, 0, 0)])
def test_fp_table_merging(self):
doc = AnnoDoc("""
Non-Latin Caribbean
Bahamas / week 30 [ending 25 Jul 2014] / 0 / 0 / 6 / 0
Dominica / week 28 [ending 11 Jul 2014] / 3559 / 141 / 0 / 0
Jamaica / week 29 [ending 18 Jul 2014] / 0 / 0 / 1 / 0
Turks & Caicos Islands / week 28 [ending 11 Jul 2014] / 0 / 10 / 7 / 0
US Virgin Islands / week 29 [ending 18 Jul 2014] / 0 / 2 / 7 / 0
Andean area:
Bolivia / 9 / 0 / 0 / 3 / 0
Colombia / 30 / 0 / 0 / 1 / 0
Peru / 28 / 0 / 0 / 3 / 0
""")
doc.add_tier(self.annotator)
def test_unparsable_date_bug(self):
doc = AnnoDoc("""
Cases by Country / Week updated / Probable / Conf. / Virus type / DHF severe / Deaths
Hispanic Caribbean
Dominican Republic / 17 [week ending 28 Apr 2017] / 315 / 0 / D? / 15 / 0
Puerto Rico / 19 [week ending 12 May 2017] / 9 / 0 / D2 / 0 / 0
English, French, Dutch Caribbean
American Virgin Islands / 19 [week ending 12 May 2017] / 1 / 1 / D? / 0 / 0
Andean
Bolivia / 17 / [week ending 28 Apr 2017] / 4260 / 0 / D? / 34 / 0
Colombia / 20 [week ending 19 May 2017] / 12 552 / 8357 / D? / 131 / 36
Ecuador / 17 [week ending 28 Apr 2017] / 6075 / 6075 / D? / 6 / 3
Peru / 20 [week ending 19 May 2017] / 44 971 / 12 717 / D 2,3 / 137 / 54
Venezuela / 17 [week ending 28 Apr 2017] / 2722 / 309 / D? / 7 / 0
""")
doc.add_tier(self.annotator)
def test_non_integer_value(self):
doc = AnnoDoc("""
******
[6] India, Pune, Marharastra, fatal human case
Date: Mon 4 Jul 2016, 12.57 AM IST
Source: The Times of India [edited]
""")
doc.add_tier(self.annotator)
self.assertEqual(len(doc.tiers['structured_incidents']), 0)
def test_multiline_title(self):
doc = AnnoDoc("""
Arizona, 3 May 2018.
More text
Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Birds / 3000/ 1500 / 1500 / 0 / 0
Affected population: Commercial layers
""")
doc.add_tier(self.annotator)
# TODO: 1500 in the Deaths column is parsed as a year. To resolve this
# the annotator needs to use a heuristic based on the column
# name when determining column types. Simply giving integer interpretations
# priority in all cases doesn't work on docs like the one in test_unusual_format.
self.assertEqual(doc.tiers['structured_incidents'][0].metadata['location']['name'], 'Arizona')
# @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
def test_missing_count_bug(self):
doc = AnnoDoc("""
State / Number of Cases
Alabama / 25
Arizona / 6
Arkansas / 9
California / 54
Colorado / 18
N Dakota / 1
S Dakota / 1
Connecticut / 9
""")
doc.add_tier(self.annotator)
locations = [span.metadata['location']
for span in doc.tiers['structured_incidents']]
geonameids = [
location['geonameid'] if isinstance(location, dict) else location
for location in locations]
self.assertEqual(geonameids, [
'4829764', '5551752', '4099753',
'5332921', '5417618', '5690763',
'5769223', '4831725'])
def test_case_synonyms(self):
doc = AnnoDoc("""
As of 7 Jun 2019, a total of 279 people infected with the outbreak strains of _Salmonella_ have been reported from 41 states.
A list of the states and the number of cases in each is on the map of reported cases page.
State / Ill people
------------------
Alabama / 7
Arkansas / 8
Arizona / 1
California / 9
Colorado / 4
Connecticut / 3
""")
doc.add_tier(self.annotator)
self.assertEqual(len(doc.tiers['structured_incidents']), 6)
| 32.169355 | 245 | 0.572324 | 1,947 | 15,956 | 4.619928 | 0.289676 | 0.045025 | 0.050695 | 0.024903 | 0.37532 | 0.319066 | 0.277043 | 0.252585 | 0.229016 | 0.229016 | 0 | 0.107783 | 0.297004 | 15,956 | 495 | 246 | 32.234343 | 0.694125 | 0.072888 | 0 | 0.406484 | 0 | 0.049875 | 0.492893 | 0 | 0 | 0 | 0 | 0.00202 | 0.067332 | 1 | 0.044888 | false | 0.002494 | 0.012469 | 0.002494 | 0.062344 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1211c97bd0c2cd978848796f6323f97d81c815a | 3,492 | py | Python | fastseq/optimizer/fairseq/__init__.py | nttcs-ds/fastseq | f1338f1125612df318c9d1f030a8457397ed05a6 | [
"MIT"
] | 346 | 2020-11-28T14:25:21.000Z | 2022-03-25T14:50:22.000Z | fastseq/optimizer/fairseq/__init__.py | nttcs-ds/fastseq | f1338f1125612df318c9d1f030a8457397ed05a6 | [
"MIT"
] | 22 | 2020-12-03T18:52:04.000Z | 2022-02-26T05:19:14.000Z | fastseq/optimizer/fairseq/__init__.py | nttcs-ds/fastseq | f1338f1125612df318c9d1f030a8457397ed05a6 | [
"MIT"
] | 35 | 2020-11-30T21:37:45.000Z | 2022-03-23T01:54:51.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Automatically apply the optimizations if the supported versions of FairSeq
are detected.
"""
import logging
import sys
from packaging import version
from fastseq.config import FASTSEQ_VERSION, MAX_FAIRSEQ_VERSION, MIN_FAIRSEQ_VERSION
from fastseq.logging import get_logger
from fastseq.utils.api_decorator import OPTIMIZED_CLASSES
from fastseq import config
logger = get_logger(__name__, logging.INFO)
LATEST_VERSION = 'latest'
def is_supported_fairseq():
"""Check if the installed fairseq is supported.
Returns:
a bool value: True indicates the installed fairseq is supported.
"""
v = version.parse(fairseq.__version__)
return version.parse(
MIN_FAIRSEQ_VERSION) <= v <= version.parse(MAX_FAIRSEQ_VERSION)
def apply_fairseq_optimization():
"""Automaticall apply the optimization to the installed fairseq.
The optimized classes and functions are replaced in runtime.
"""
if not is_supported_fairseq():
logger.warning(
f"fairseq(v{fairseq.__version__}) is not supported by fastseq(v"
f"{FASTSEQ_VERSION}) yet, please change fairseq to "
f"v{MIN_FAIRSEQ_VERSION} ~ v{MAX_FAIRSEQ_VERSION}, or check other "
"versions of fastseq. Currently, no optimization in fastseq has "
"been applied. Please ignore this warning if you are not using "
"fairseq")
return
import fastseq.optimizer.fairseq.beam_search_optimizer # pylint: disable=import-outside-toplevel
if config.USE_EL_ATTN:
import fastseq.optimizer.fairseq.el_attention_optimizer # pylint: disable=import-outside-toplevel
import fastseq.optimizer.fairseq.generate # pylint: disable=import-outside-toplevel
_update_fairseq_model_registration()
logger.info(f"fairseq(v{fairseq.__version__}) has been optimized by "
f"fastseq(v{FASTSEQ_VERSION}).")
def _update_fairseq_model_registration():
"""Use the optimized classes to update the registered fairseq models and
arches.
"""
for model_name, model_class in MODEL_REGISTRY.items():
if model_class in OPTIMIZED_CLASSES:
MODEL_REGISTRY[model_name] = OPTIMIZED_CLASSES[model_class]
logger.debug(
"Update the register model {} from {} to {}".format(
model_name, model_class, OPTIMIZED_CLASSES[model_class]))
for arch_name, model_class in ARCH_MODEL_REGISTRY.items():
if model_class in OPTIMIZED_CLASSES:
ARCH_MODEL_REGISTRY[arch_name] = OPTIMIZED_CLASSES[model_class]
logger.debug(
"Update the register model arch {} from {} to {}".format(
arch_name, model_class, OPTIMIZED_CLASSES[model_class]))
is_fairseq_installed = True
try:
import fairseq # pylint: disable=ungrouped-imports
from fairseq.models import ARCH_MODEL_REGISTRY, MODEL_REGISTRY # pylint: disable=ungrouped-imports
from fairseq.sequence_generator import SequenceGenerator # pylint: disable=ungrouped-imports
except ImportError as error:
is_fairseq_installed = False
    logger.warning('fairseq cannot be imported. Please ignore this warning if '
'you are not using fairseq: {}'.format(error))
if is_fairseq_installed:
try:
apply_fairseq_optimization()
except:
logger.error("Unexpected error: {}".format(sys.exc_info()[0]))
raise
| 37.148936 | 106 | 0.71134 | 427 | 3,492 | 5.590164 | 0.285714 | 0.041894 | 0.043988 | 0.043569 | 0.293255 | 0.234604 | 0.165061 | 0.131546 | 0.131546 | 0.091328 | 0 | 0.000363 | 0.21134 | 3,492 | 93 | 107 | 37.548387 | 0.866376 | 0.202176 | 0 | 0.103448 | 0 | 0 | 0.216881 | 0.049541 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.258621 | 0 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c123c8a1452dd7217130353820cbbb49ad40ee13 | 1,340 | py | Python | __main__.py | vmenezio/clippr | 78d2d8e14090fcde3c43da1656afae25d7b1629e | [
"MIT"
] | 1 | 2015-12-20T13:32:51.000Z | 2015-12-20T13:32:51.000Z | __main__.py | vmenezio/clippr | 78d2d8e14090fcde3c43da1656afae25d7b1629e | [
"MIT"
] | null | null | null | __main__.py | vmenezio/clippr | 78d2d8e14090fcde3c43da1656afae25d7b1629e | [
"MIT"
] | null | null | null | #! python3
# -*- coding: utf-8 -*-
# [ clipper ] #
# #
# Hey, welcome to clipper! This is a small tool I #
# have been building for personal use as a means #
# to take, analyze and upload screenshots quickly. #
# #
# I'm not sure how common this specific task is for #
# anyone else, but since, personally, it'd be a #
# huge time saver to have the process automated     #
# and bound to a shortcut, I'm making the source #
# available to whomever else happens to find this #
# useful as well. Enjoy! #
# #
# - Vinícius Menézio #
from .img.clipImage import ClipImage
from requests.exceptions import ConnectionError
from imgurpython.helpers.error import ImgurClientError
def main():
clippy = ClipImage()
print( "dimensions:", clippy.width, "x", clippy.height, "px | colors:", len(clippy.palette) )
print("filesize: LOCAL", clippy.size/1000, "KB, ONLINE", clippy.onlineSize/1000,"KB\n") # BREAKS IF IT CAN'T UPLOAD / RETRIEVE FILESIZE
print("url:",clippy.url,"\n")
print(clippy.getColorTable())
if __name__ == "__main__":
main()
| 37.222222 | 139 | 0.54403 | 148 | 1,340 | 4.871622 | 0.682432 | 0.005548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011614 | 0.357463 | 1,340 | 35 | 140 | 38.285714 | 0.825784 | 0.590299 | 0 | 0 | 0 | 0 | 0.129344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.363636 | 0.363636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c124253bfbcd49e0f1812986a73b8ad635b8c1fb | 936 | py | Python | core/setup.py | DiegoGH117/cellare | c0c68f6f53ee8f31999c3538c327ddca34ba6e94 | [
"MIT"
] | null | null | null | core/setup.py | DiegoGH117/cellare | c0c68f6f53ee8f31999c3538c327ddca34ba6e94 | [
"MIT"
] | null | null | null | core/setup.py | DiegoGH117/cellare | c0c68f6f53ee8f31999c3538c327ddca34ba6e94 | [
"MIT"
] | null | null | null | from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name = 'CellARE',
version = '0.0.2',
description = 'A cellular automaton based implementation to run SIR simulations',
py_modules = ['cellare'],
package_dir = {'':'src'},
classifiers = [
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
],
long_description = long_description,
long_description_content_type = 'text/markdown',
install_requires=[
"numpy",
"matplotlib"
],
url = 'https://github.com/DiegoGH117/cellare',
project_urls = {
'Documentation': 'https://cellare.readthedocs.io/en/latest/',
},
) | 33.428571 | 87 | 0.569444 | 91 | 936 | 5.747253 | 0.747253 | 0.114723 | 0.143403 | 0.14914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016692 | 0.29594 | 936 | 28 | 88 | 33.428571 | 0.776935 | 0 | 0 | 0.074074 | 0 | 0 | 0.422625 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.037037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1248143e6872a13760d0d34115b96fb5c387e21 | 12,008 | py | Python | deploy-tools/auction-deploy/tests/test_cli.py | d3centr0z/trustlines-blockchain | b90cba6e4ca7a5194eadc35793cc0fad63d9c761 | [
"MIT"
] | 9 | 2019-02-28T06:24:08.000Z | 2021-05-29T04:43:56.000Z | deploy-tools/auction-deploy/tests/test_cli.py | d3centr0z/trustlines-blockchain | b90cba6e4ca7a5194eadc35793cc0fad63d9c761 | [
"MIT"
] | 425 | 2019-04-02T08:07:27.000Z | 2021-07-01T18:29:02.000Z | deploy-tools/auction-deploy/tests/test_cli.py | d3centr0z/trustlines-blockchain | b90cba6e4ca7a5194eadc35793cc0fad63d9c761 | [
"MIT"
] | 10 | 2019-02-25T08:40:24.000Z | 2022-03-08T10:22:57.000Z | import csv
import re
import pytest
from click.testing import CliRunner
from deploy_tools.cli import test_json_rpc, test_provider
from eth_tester.exceptions import TransactionFailed
from eth_utils import to_checksum_address
import auction_deploy.core
from auction_deploy.cli import AuctionState, main
from auction_deploy.core import (
DeployedAuctionContracts,
deploy_auction_contracts,
get_deployed_auction_contracts,
)
@pytest.fixture
def runner():
return CliRunner()
def extract_auction_address(output):
"""extract the auction address from 'deploy' output"""
match = re.search("^Auction address: (0x[0-9a-fA-F]{40})$", output, re.M)
if match:
return match[1]
raise ValueError(f"Could not find auction address in output: {repr(output)}")
@pytest.fixture()
def deployed_auction_address(auction_options, runner, use_token, token_contract):
"""Deploys an auction and return its address"""
argument = (
f"deploy --release-timestamp 2000000000 --max-participants "
f"{auction_options.maximal_number_of_participants} "
f"--min-participants {auction_options.minimal_number_of_participants}"
f" --start-price {auction_options.start_price} --jsonrpc test"
)
if use_token:
argument += f" --use-token --token-address {auction_options.token_address}"
deploy_result = runner.invoke(main, args=argument)
if deploy_result.exception is not None:
raise RuntimeError(
"Error while trying to run auction-deploy"
) from deploy_result.exception
return extract_auction_address(deploy_result.output)
@pytest.fixture()
def whitelisted_auction_address(runner, deployed_auction_address, whitelist_file):
"""Whitelists all addresses in the whitelist on the deployed auction and returns its address"""
runner.invoke(
main,
args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} "
+ "--batch-size 100 --jsonrpc test",
)
return deployed_auction_address
@pytest.fixture()
def whitelist_file(tmp_path, whitelist):
folder = tmp_path / "subfolder"
folder.mkdir()
file_path = folder / "whitelist.csv"
with file_path.open("w") as f:
writer = csv.writer(f)
writer.writerows([[to_checksum_address(address)] for address in whitelist])
return file_path
@pytest.fixture
def contracts(deployed_auction_address) -> DeployedAuctionContracts:
"""return the core.DeployedAuctionContracts object for the currently active auction"""
return get_deployed_auction_contracts(test_json_rpc, deployed_auction_address)
@pytest.fixture
def contracts_not_initialized(auction_options) -> DeployedAuctionContracts:
"""return the three auction related contracts where locker and slasher are not initialized"""
contracts = deploy_auction_contracts(
web3=test_json_rpc, auction_options=auction_options
)
return contracts
@pytest.fixture
def ensure_auction_state(contracts):
"""return a function that can be used to check the current auction state"""
def ensure_state(expected_state):
current_state = AuctionState(contracts.auction.functions.auctionState().call())
assert current_state == expected_state
return ensure_state
def bid(auction_contract, token_contract, sender, bid_value, use_token):
if use_token:
token_contract.functions.approve(auction_contract.address, bid_value).transact(
{"from": sender}
)
auction_contract.functions.bid().transact({"from": sender})
else:
auction_contract.functions.bid().transact({"from": sender, "value": bid_value})
@pytest.fixture
def deposit_pending_auction(
runner,
deployed_auction_address,
contracts,
token_contract,
auction_options,
use_token,
ether_owning_whitelist,
ensure_auction_state,
):
"""return the auction contract with enough bids so that the state is `DepositPending`"""
contracts.auction.functions.addToWhitelist(ether_owning_whitelist).transact()
contracts.auction.functions.startAuction().transact()
bid_value = contracts.auction.functions.currentPrice().call()
bid(
contracts.auction,
token_contract,
ether_owning_whitelist[0],
bid_value,
use_token,
)
bid(
contracts.auction,
token_contract,
ether_owning_whitelist[1],
bid_value,
use_token,
)
ensure_auction_state(AuctionState.DepositPending)
return contracts.auction
def test_cli_release_date_option(runner):
deploy_result = runner.invoke(
main, args="deploy --release-date '2033-05-18 03:33:21' --jsonrpc test"
)
assert deploy_result.exception is None
assert deploy_result.exit_code == 0
auction_address = extract_auction_address(deploy_result.output)
contracts = get_deployed_auction_contracts(test_json_rpc, auction_address)
release_timestamp = contracts.locker.functions.releaseTimestamp().call()
# 2033-05-18 03:33:21 is timestamp 2000000001
assert release_timestamp == 2_000_000_001
def test_cli_contract_parameters_set(runner):
result = runner.invoke(
main,
args="deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 "
"--release-timestamp 2000000000 --jsonrpc test",
)
assert result.exit_code == 0
def test_cli_deploy_token_auction(runner):
arbitrary_token_address = "0x" + "1234" * 10
result = runner.invoke(
main,
args=f"deploy --use-token --token-address {arbitrary_token_address} --release-timestamp 2000000000 --jsonrpc test",
)
assert result.exit_code == 0
def test_cli_resume_deployment(runner, contracts_not_initialized):
result = runner.invoke(
main,
args=f"deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 "
f"--release-timestamp 2000000000 --jsonrpc test --auction {contracts_not_initialized.auction.address}"
f" --locker {contracts_not_initialized.locker.address}",
)
assert result.exit_code == 0
assert (
extract_auction_address(result.output)
== contracts_not_initialized.auction.address
)
def test_cli_transaction_parameters_set(runner):
result = runner.invoke(
main,
args="deploy --nonce 0 --gas-price 123456789 --gas 7000000 --release-timestamp 2000000000 --jsonrpc test",
)
assert result.exit_code == 0
def test_cli_private_key(runner, keystore_file_path, key_password):
result = runner.invoke(
main,
args="deploy --jsonrpc test --release-timestamp 2000000000 --keystore "
+ str(keystore_file_path),
input=key_password,
)
assert result.exit_code == 0
def test_cli_start_auction(runner, deployed_auction_address):
result = runner.invoke(
main, args="start --jsonrpc test --address " + deployed_auction_address
)
assert result.exit_code == 0
def test_cli_close_auction(
runner, deployed_auction_address, ensure_auction_state, contracts
):
result = runner.invoke(
main, args=f"start --jsonrpc test --address {deployed_auction_address}"
)
assert result.exit_code == 0
auction_duration = (
contracts.auction.functions.auctionDurationInDays().call() * 24 * 3600
)
# auction is started, time travel forward
test_provider.ethereum_tester.time_travel(
test_json_rpc.eth.getBlock("latest").timestamp + auction_duration
)
test_provider.ethereum_tester.mine_block()
result = runner.invoke(
main, args=f"close --jsonrpc test --address {deployed_auction_address}"
)
assert result.exit_code == 0
ensure_auction_state(AuctionState.Failed)
def test_cli_start_auction_with_auto_nonce(
runner, deployed_auction_address, keystores, key_password
):
"""test the auto-nonce option. we only do this for the start-auction"""
result = runner.invoke(
main,
args=f"start --auto-nonce --jsonrpc test --keystore {keystores[0]}"
+ f" --address {deployed_auction_address}",
input=key_password,
)
assert result.exit_code == 0
def test_cli_start_auction_key_not_owner(
runner, deployed_auction_address, keystore_file_path, key_password
):
"""Test that when you attempt to start the auction with a private_key not corresponding to the
owner of the auction, the command fails
    This shows that the command actually uses the provided key."""
result = runner.invoke(
main,
args="start --jsonrpc test --address "
+ deployed_auction_address
+ " --keystore "
+ str(keystore_file_path),
input=key_password,
)
assert result.exit_code == 1
def test_cli_deposit_bids(runner, deposit_pending_auction, ensure_auction_state):
result = runner.invoke(
main,
args=f"deposit-bids --jsonrpc test --address {deposit_pending_auction.address}",
)
assert result.exit_code == 0
ensure_auction_state(AuctionState.Ended)
@pytest.fixture()
def replace_bad_function_call_output():
# TransactionFailed is raised by eth_tester
# when BadFunctionCallOutput would be raised by web3 in `get_bid_token_address`
bad_function_call = auction_deploy.core.BadFunctionCallOutput
auction_deploy.core.BadFunctionCallOutput = TransactionFailed
yield
auction_deploy.core.BadFunctionCallOutput = bad_function_call
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_auction_status(runner, deployed_auction_address):
result = runner.invoke(
main, args="status --jsonrpc test --address " + deployed_auction_address
)
assert result.exit_code == 0
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_auction_status_locker_not_init(runner, contracts_not_initialized):
result = runner.invoke(
main,
args="status --jsonrpc test --address "
+ contracts_not_initialized.auction.address,
)
assert result.exit_code == 0
def test_cli_whitelist(runner, deployed_auction_address, whitelist_file, whitelist):
result = runner.invoke(
main,
args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} "
+ "--batch-size 10 --jsonrpc test",
)
assert result.exit_code == 0
assert result.output == f"Number of whitelisted addresses: {len(whitelist)}\n"
def test_cli_check_whitelist_not_whitelisted(
runner, deployed_auction_address, whitelist_file, whitelist
):
result = runner.invoke(
main,
args=f"check-whitelist --file {whitelist_file} --address {deployed_auction_address} "
+ "--jsonrpc test",
)
assert result.exit_code == 0
assert (
result.output
== f"{len(whitelist)} of {len(whitelist)} addresses have not been whitelisted yet\n"
)
def test_cli_check_whitelist_all_whitelisted(
runner, whitelisted_auction_address, whitelist_file, whitelist
):
result = runner.invoke(
main,
args=f"check-whitelist --file {whitelist_file} --address {whitelisted_auction_address} "
+ "--jsonrpc test",
)
assert result.exit_code == 0
assert result.output == f"All {len(whitelist)} addresses have been whitelisted\n"
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_not_checksummed_address(runner, deployed_auction_address):
address = deployed_auction_address.lower()
result = runner.invoke(main, args=f"status --jsonrpc test --address {address}")
assert result.exit_code == 0
def test_cli_incorrect_address_parameter_fails(runner):
not_an_address = "not_an_address"
result = runner.invoke(
main, args=f"status --jsonrpc test --address {not_an_address}"
)
assert (
f"The address parameter is not recognized to be an address: {not_an_address}"
in result.output
)
assert result.exit_code == 2
| 30.4 | 123 | 0.713358 | 1,451 | 12,008 | 5.650586 | 0.168849 | 0.068301 | 0.064398 | 0.051226 | 0.442127 | 0.386145 | 0.328821 | 0.297231 | 0.283571 | 0.240151 | 0 | 0.019922 | 0.193205 | 12,008 | 394 | 124 | 30.477157 | 0.826383 | 0.079947 | 0 | 0.32491 | 0 | 0 | 0.221181 | 0.05277 | 0.00361 | 0 | 0 | 0 | 0.097473 | 1 | 0.108303 | false | 0.021661 | 0.036101 | 0.00361 | 0.176895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1257c741492e036061014a924bb9f56f773f5b1 | 10,555 | py | Python | core/plugins/rabbitmq.py | dnegreira/hotsos | c88375d8700bf53faed4e5de55c34bd0bdc66187 | [
"Apache-2.0"
] | null | null | null | core/plugins/rabbitmq.py | dnegreira/hotsos | c88375d8700bf53faed4e5de55c34bd0bdc66187 | [
"Apache-2.0"
] | null | null | null | core/plugins/rabbitmq.py | dnegreira/hotsos | c88375d8700bf53faed4e5de55c34bd0bdc66187 | [
"Apache-2.0"
] | null | null | null | import os
from core.log import log
from core.cli_helpers import CLIHelper
from core.utils import mktemp_dump, sorted_dict
from core.ycheck.events import YEventCheckerBase
from core.searchtools import (
SearchDef,
SequenceSearchDef,
FileSearcher,
)
from core import (
checks,
plugintools,
)
RMQ_SERVICES_EXPRS = [
r"beam.smp",
r"epmd",
r"rabbitmq-server",
]
RMQ_PACKAGES = [
r"rabbitmq-server",
]
def cached_property(f):
@property
def _inner(inst):
if f.__name__ in inst._property_cache:
# log.debug("using cached value for %s", f.__name__)
return inst._property_cache[f.__name__]
# log.debug("using uncached value for %s", f.__name__)
ret = f(inst)
inst._property_cache[f.__name__] = ret
return ret
return _inner
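# Note: unlike functools.cached_property, the decorator above keeps results in
# the instance's _property_cache dict (presumably so the owning class can
# inspect or reset its cache) rather than in the instance __dict__.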
class RabbitMQReport(object):
"""
Class providing easy access to the contents of a rabbitmqctl report.
    First registers search definitions to execute against the rabbitmqctl
    report, then runs the search to fetch the information, which is then
    exposed through properties.
NOTE: the rabbitmqctl report output differs between versions 3.6.x and
3.8.x and we try to account for either by providing optional
regex expressions to match either.
"""
def __init__(self):
self._property_cache = {}
# save to file so we can search it later
self._f_report = mktemp_dump(''.join(CLIHelper().rabbitmqctl_report()))
searcher = FileSearcher()
searcher.add_search_term(self.connections_searchdef, self._f_report)
searcher.add_search_term(self.memory_searchdef, self._f_report)
searcher.add_search_term(self.cluster_partition_handling_searchdef,
self._f_report)
searcher.add_search_term(self.queues_searchdef, self._f_report)
self.results = searcher.search()
def __del__(self):
if os.path.exists(self._f_report):
os.unlink(self._f_report)
@cached_property
def queues_searchdef(self):
start = SearchDef([r"^Queues on ([^:]+):",
(r"^Listing queues for vhost ([^:]+) "
r"...")])
# NOTE: we don't use a list for the body here because
# we need to know which expression matched so that we
# can know in which order to retrieve the columns since
# their order is inverted between 3.6.x and 3.8.x
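        # Hypothetical line shapes (values invented for illustration), one per
        # report format:
        #   "<rabbit@host.3.1234.0> my_queue ..."          -> groups (1, 2)
        #   "my_queue a b c d <rabbit@host.3.1234.0> ..."  -> groups (3, 4)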
body = SearchDef(r"^(?:<([^.\s]+)[.0-9]+>\s+(\S+)|"
r"(\S+)\s+(?:\S+\s+){4}<([^.\s]+)[.0-9]"
r"+>)\s+.+")
end = SearchDef(r"^$")
return SequenceSearchDef(start=start, body=body, end=end,
tag='queues')
@cached_property
def skewed_nodes(self):
vhosts = self.vhosts
_skewed_nodes = {}
skewed_queue_nodes = {}
global_total_queues = sum([vhost.total_queues for vhost in vhosts])
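        # A node counts as "skewed" for a vhost when the vhost holds at least
        # 1% of all queues and that node hosts more than 75% of the vhost's
        # queues; the node that is skewed for the most vhosts is reported.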
for vhost in self.vhosts:
if not vhost.total_queues:
continue
total_pcent = (float(100) / global_total_queues *
vhost.total_queues)
for node, vhost_dist in vhost.node_queue_distributions.items():
if total_pcent >= 1 and vhost_dist['pcent'] > 75:
if node not in skewed_queue_nodes:
skewed_queue_nodes[node] = 0
skewed_queue_nodes[node] += 1
# Report the node with the greatest skew of queues/vhost
if skewed_queue_nodes:
max_node = None
for node_name in skewed_queue_nodes:
if max_node is None:
max_node = node_name
elif (skewed_queue_nodes[node_name] >=
skewed_queue_nodes[max_node]):
max_node = node_name
if (skewed_queue_nodes[max_node] >
_skewed_nodes.get(max_node, 0)):
_skewed_nodes[max_node] = skewed_queue_nodes[max_node]
return _skewed_nodes
@cached_property
def vhosts(self):
seq_def = self.queues_searchdef
vhosts = []
for section in self.results.find_sequence_sections(seq_def).values():
vhost = None
# ensure we get vhost before the rest
for result in section:
if result.tag == seq_def.start_tag:
# check both report formats
vhost = RabbitMQVhost(result.get(1))
break
for result in section:
if result.tag == seq_def.body_tag:
node_name = result.get(1) or result.get(4)
# if we matched the section header, skip
if node_name == "pid":
continue
queue = result.get(2) or result.get(3)
# if we matched the section header, skip
if queue == "name":
continue
vhost.node_inc_queue_count(node_name)
log.debug(vhost.name)
vhosts.append(vhost)
return vhosts
@cached_property
def connections_searchdef(self):
start = SearchDef([r"^Connections:$",
r"^Listing connections ...$"])
# Again, the user and protocol columns are inverted
# between 3.6.x and 3.8.x so we have to catch both and
# decide.
body = SearchDef(r"^<(rabbit[^>.]*)(?:[.][0-9]+)+>.+(?:[A-Z]+\s+{[\d,]+}\s+(\S+)|\d+\s+{[\d,]+}\s+\S+\s+(\S+)).+{\"connection_name\",\"([^:]+):\d+:.+$") # noqa
end = SearchDef(r"^$")
return SequenceSearchDef(start=start, body=body, end=end,
tag='connections')
@cached_property
def memory_searchdef(self):
start = SearchDef([r"^Status of node '([^']*)'$",
r"^Status of node ([^']*) ...$"])
body = SearchDef(r"^\s+\[{total,([0-9]+)}.+")
end = SearchDef(r"^$")
return SequenceSearchDef(start=start, body=body, end=end,
tag='memory')
@cached_property
def cluster_partition_handling_searchdef(self):
return SearchDef(r"^\s*{cluster_partition_handling,([^}]*)}",
tag='cluster_partition_handling')
@cached_property
def connections(self):
_connections = {'host': {}, 'client': {}}
sd = self.connections_searchdef
for results in self.results.find_sequence_sections(sd).values():
for result in results:
if result.tag == sd.body_tag:
host = result.get(1)
if host not in _connections['host']:
_connections['host'][host] = 1
else:
_connections['host'][host] += 1
# detect 3.6.x or 3.8.x format
user = result.get(2)
if user is None:
user = result.get(3)
client_name = result.get(4)
if user not in _connections['client']:
_connections['client'][user] = {}
if client_name not in _connections['client'][user]:
_connections['client'][user][client_name] = 1
else:
_connections['client'][user][client_name] += 1
if _connections['host']:
for client, users in _connections['client'].items():
sorted_users = sorted_dict(users, key=lambda e: e[1],
reverse=True)
_connections['client'][client] = sorted_users
return _connections
@cached_property
def memory_used(self):
sd = self.memory_searchdef
_memory_used = {}
for results in self.results.find_sequence_sections(sd).values():
for result in results:
if result.tag == sd.start_tag:
# check both report formats
node_name = result.get(1)
elif result.tag == sd.body_tag:
total = result.get(1)
mib_used = int(total) / 1024. / 1024.
_memory_used[node_name] = "{:.3f}".format(mib_used)
return _memory_used
@cached_property
def partition_handling(self):
results = self.results.find_by_tag("cluster_partition_handling")
if not results:
return
return results[0].get(1)
class RabbitMQVhost(object):
def __init__(self, name):
self.name = name
self._node_queues = {}
def node_inc_queue_count(self, node):
if node not in self._node_queues:
self._node_queues[node] = 0
self._node_queues[node] += 1
@property
def total_queues(self):
return sum(self.node_queues.values())
@property
def node_queues(self):
return self._node_queues
def node_queues_vhost_pcent(self, node):
return float(100) / self.total_queues * self.node_queues[node]
@property
def node_queue_distributions(self):
dists = {}
for node, queues in self.node_queues.items():
if queues:
vhost_pcent = self.node_queues_vhost_pcent(node)
dists[node] = {'queues': queues, 'pcent': vhost_pcent}
else:
dists[node] = {'queues': 0, 'pcent': 0}
return dists
class RabbitMQBase(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.report = RabbitMQReport()
class RabbitMQChecksBase(RabbitMQBase, plugintools.PluginPartBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.apt_check = checks.APTPackageChecksBase(core_pkgs=RMQ_PACKAGES)
@property
def plugin_runnable(self):
if self.apt_check.core:
return True
return False
class RabbitMQServiceChecksBase(RabbitMQChecksBase, checks.ServiceChecksBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, service_exprs=RMQ_SERVICES_EXPRS, **kwargs)
class RabbitMQEventChecksBase(RabbitMQChecksBase, YEventCheckerBase):
@property
def summary(self):
# mainline all results into summary root
return self.run_checks()
| 34.158576 | 168 | 0.559545 | 1,187 | 10,555 | 4.74305 | 0.20893 | 0.029307 | 0.028419 | 0.01492 | 0.277975 | 0.179396 | 0.142629 | 0.14103 | 0.123268 | 0.07833 | 0 | 0.009674 | 0.33406 | 10,555 | 308 | 169 | 34.269481 | 0.791293 | 0.110185 | 0 | 0.191781 | 0 | 0.004566 | 0.066102 | 0.029891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.031963 | 0.022831 | 0.255708 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c127d6465fa7c0671438fe8816025b96ec521c2a | 31,776 | py | Python | src/mercs/core/Mercs.py | MattiasDC/mercs | 466962e254c4f56f4a16a31b1a3d7bd893c8e23e | [
"MIT"
] | 11 | 2020-01-28T16:15:53.000Z | 2021-05-20T08:05:42.000Z | src/mercs/core/Mercs.py | MattiasDC/mercs | 466962e254c4f56f4a16a31b1a3d7bd893c8e23e | [
"MIT"
] | null | null | null | src/mercs/core/Mercs.py | MattiasDC/mercs | 466962e254c4f56f4a16a31b1a3d7bd893c8e23e | [
"MIT"
] | 4 | 2020-02-06T09:02:28.000Z | 2022-02-14T09:42:04.000Z | import itertools
import warnings
from inspect import signature
from timeit import default_timer
from sklearn.preprocessing import normalize
import dask
import numpy as np
try:
import shap
except ImportError:
msg = "SHAP not found, therefore using SHAP-values for feature importance not available."
warnings.warn(msg)
shap = None
from dask import delayed
from networkx import NetworkXUnfeasible, find_cycle, topological_sort
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..algo import (
evaluation,
imputation,
inference,
inference_v3,
new_inference,
new_prediction,
selection,
vector_prediction,
)
from ..algo.induction import base_induction_algorithm, expand_induction_algorithm
from ..composition import CompositeModel, NewCompositeModel, o, x
from ..graph import build_diagram, compose_all, get_targ, model_to_graph
from ..utils import (
DESC_ENCODING,
MISS_ENCODING,
TARG_ENCODING,
DecoratedDecisionTreeClassifier,
DecoratedDecisionTreeRegressor,
DecoratedRandomForestClassifier,
DecoratedRandomForestRegressor,
code_to_query,
get_i_o,
query_to_code,
)
from ..visuals import save_diagram, show_diagram
try:
from xgboost import XGBClassifier as XGBC
from xgboost import XGBRegressor as XGBR
except ImportError:
XGBC, XGBR = None, None
try:
from lightgbm import LGBMClassifier as LGBMC
from lightgbm import LGBMRegressor as LGBMR
except ImportError:
LGBMC, LGBMR = None, None
try:
from catboost import CatBoostClassifier as CBC
from catboost import CatBoostRegressor as CBR
except ImportError:
CBC, CBR = None, None
try:
from wekalearn import RandomForestClassifier as WLC
from wekalearn import RandomForestRegressor as WLR
except ImportError:
WLC, WLR = None, None
class Mercs(object):
delimiter = "_"
selection_algorithms = dict(
default=selection.base_selection_algorithm,
base=selection.base_selection_algorithm,
random=selection.random_selection_algorithm,
)
induction_algorithms = dict(
base=base_induction_algorithm,
default=base_induction_algorithm,
expand=expand_induction_algorithm,
)
classifier_algorithms = dict(
DT=DecisionTreeClassifier,
DDT=DecoratedDecisionTreeClassifier,
RF=RandomForestClassifier,
DRF=DecoratedRandomForestClassifier,
XGB=XGBC,
xgb=XGBC,
weka=WLC,
LGBM=LGBMC,
lgbm=LGBMC,
CB=CBC,
extra=ExtraTreesClassifier,
)
regressor_algorithms = dict(
DT=DecisionTreeRegressor,
DDT=DecoratedDecisionTreeRegressor,
RF=RandomForestRegressor,
DRF=DecoratedDecisionTreeRegressor,
XGB=XGBR,
xgb=XGBR,
weka=WLR,
LGBM=LGBMR,
lgbm=LGBMR,
CB=CBR,
extra=ExtraTreesRegressor,
)
prediction_algorithms = dict(
mi=vector_prediction.mi,
mrai=vector_prediction.mrai,
it=vector_prediction.it,
rw=vector_prediction.rw,
)
inference_algorithms = dict(
base=inference.base_inference_algorithm,
dask=inference_v3.inference_algorithm,
own=inference_v3.inference_algorithm,
)
imputer_algorithms = dict(
nan=imputation.nan_imputation,
NAN=imputation.nan_imputation,
NaN=imputation.nan_imputation,
null=imputation.nan_imputation,
NULL=imputation.nan_imputation,
skl=imputation.skl_imputation,
base=imputation.skl_imputation,
default=imputation.skl_imputation,
)
evaluation_algorithms = dict(
base=evaluation.base_evaluation,
default=evaluation.base_evaluation,
dummy=evaluation.dummy_evaluation,
)
# Used in parse kwargs to identify parameters. If this identification goes wrong, you are sending settings
# somewhere you do not want them to be. So, this is a tricky part, and moreover hardcoded. In other words:
# this is risky terrain, and should probably be done differently in the future.
configuration_prefixes = dict(
imputation={"imputation", "imp"},
induction={"induction", "ind"},
selection={"selection", "sel"},
prediction={"prediction", "pred", "prd"},
inference={"inference", "infr", "inf"},
classification={"classification", "classifier", "clf"},
regression={"regression", "regressor", "rgr"},
metadata={"metadata", "meta", "mtd"},
evaluation={"evaluation", "evl"},
)
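    # Illustrative routing (hypothetical kwarg): with delimiter "_", a
    # constructor kwarg such as sel_nb_targets=2 is matched on the "sel_"
    # prefix by _parse_kwargs and lands in the selection configuration as
    # self.configuration["selection"]["nb_targets"] = 2.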
def __init__(
self,
selection_algorithm="base",
induction_algorithm="base",
classifier_algorithm="DT",
regressor_algorithm="DT",
prediction_algorithm="mi",
inference_algorithm="own",
imputer_algorithm="default",
evaluation_algorithm="default",
random_state=42,
**kwargs
):
self.params = dict(
selection_algorithm=selection_algorithm,
induction_algorithm=induction_algorithm,
classifier_algorithm=classifier_algorithm,
regressor_algorithm=regressor_algorithm,
prediction_algorithm=prediction_algorithm,
inference_algorithm=inference_algorithm,
imputer_algorithm=imputer_algorithm,
evaluation_algorithm=evaluation_algorithm,
random_state=random_state,
)
self.params = {**self.params, **kwargs}
self.random_state = random_state
self.selection_algorithm = self.selection_algorithms[selection_algorithm]
# N.b.: First try to look up the key. If the key is not found, we assume the algorithm itself was passed.
self.classifier_algorithm = self.classifier_algorithms.get(
classifier_algorithm, classifier_algorithm
)
self.regressor_algorithm = self.regressor_algorithms.get(
regressor_algorithm, regressor_algorithm
)
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.induction_algorithm = self.induction_algorithms[
induction_algorithm
] # For now, we only have one.
self.imputer_algorithm = self.imputer_algorithms[imputer_algorithm]
self.evaluation_algorithm = self.evaluation_algorithms[evaluation_algorithm]
# Data-structures
self.m_codes = np.array([])
self.m_list = []
self.c_list = []
self.g_list = []
self.i_list = []
self.m_fimps = np.array([])
self.m_score = np.array([])
self.FI = np.array([])
self.targ_ids = np.array([])
# Query-related things
self.q_code = None
self.q_desc_ids = None
self.q_targ_ids = None
self.q_diagram = None
self.q_compose = None
self.q_methods = []
# Configurations
self.imp_cfg = self._default_config(self.imputer_algorithm)
self.ind_cfg = self._default_config(self.induction_algorithm)
self.sel_cfg = self._default_config(self.selection_algorithm)
self.clf_cfg = self._default_config(self.classifier_algorithm)
self.rgr_cfg = self._default_config(self.regressor_algorithm)
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.inf_cfg = self._default_config(self.inference_algorithm)
self.evl_cfg = self._default_config(self.evaluation_algorithm)
self.configuration = dict(
imputation=self.imp_cfg,
induction=self.ind_cfg,
selection=self.sel_cfg,
classification=self.clf_cfg,
regression=self.rgr_cfg,
prediction=self.prd_cfg,
inference=self.inf_cfg,
) # Collect all configs in one
self._update_config(random_state=random_state, **kwargs)
self.metadata = dict()
self.model_data = dict()
self._extra_checks_on_config()
return
def fit(self, X, y=None, m_codes=None, **kwargs):
assert isinstance(X, np.ndarray)
if y is not None:
assert isinstance(y, np.ndarray)
X = np.c_[X, y]
tic = default_timer()
self.metadata = self._default_metadata(X)
self._update_metadata(**kwargs)
self.i_list = self.imputer_algorithm(X, self.metadata.get("nominal_attributes"))
# N.b.: `random state` parameter is in `self.sel_cfg`
if m_codes is None:
self.m_codes = self.selection_algorithm(self.metadata, **self.sel_cfg)
else:
self.m_codes = m_codes
self.m_list = self.induction_algorithm(
X,
self.m_codes,
self.metadata,
self.classifier_algorithm,
self.regressor_algorithm,
self.clf_cfg,
self.rgr_cfg,
**self.ind_cfg
)
self._filter_m_list_m_codes()
self._consistent_datastructures()
if self.imputer_algorithm == self.imputer_algorithms.get("nan"):
# If you do no have imputers, you cannot use them as a baseline evaluation
self.evl_cfg["consider_imputations"] = False
self.m_score = self.evaluation_algorithm(
X, self.m_codes, self.m_list, self.i_list, **self.evl_cfg
)
toc = default_timer()
self.model_data["ind_time"] = toc - tic
self.metadata["n_component_models"] = len(self.m_codes)
return
def predict(
self,
X,
q_code=None,
inference_algorithm=None,
prediction_algorithm=None,
**kwargs
):
# Update configuration if necessary
if q_code is None:
q_code = self._default_q_code()
if inference_algorithm is not None:
self._reconfig_inference(inference_algorithm=inference_algorithm)
if prediction_algorithm is not None:
self._reconfig_prediction(
prediction_algorithm=prediction_algorithm, **kwargs
)
# Adjust data
self.q_code = q_code
self.q_desc_ids, self.q_targ_ids, _ = code_to_query(
self.q_code, return_list=True
)
# Make query-diagram
tic_prediction = default_timer()
self.m_sel = self.prediction_algorithm(
self.m_codes, self.m_fimps, self.m_score, q_code=self.q_code, **self.prd_cfg
)
toc_prediction = default_timer()
tic_diagram = default_timer()
self.q_diagram = self._build_q_diagram(self.m_list, self.m_sel)
toc_diagram = default_timer()
tic_infalgo = default_timer()
if isinstance(self.q_diagram, tuple):
self.q_diagrams = self.q_diagram
self.c_list = [self._build_q_model(X, d) for d in self.q_diagrams]
self.c_sel = list(range(len(self.c_list)))
self.c_diagram = self._build_q_diagram(
self.c_list, self.c_sel, composition=True
)
self.q_model = self._build_q_model(X, self.c_diagram)
else:
self.q_model = self._build_q_model(X, self.q_diagram)
toc_infalgo = default_timer()
tic_dask = default_timer()
X = X[:, self.q_model.desc_ids]
result = self.q_model.predict(X)
toc_dask = default_timer()
self.model_data["prd_time"] = toc_prediction - tic_prediction
self.model_data["dia_time"] = toc_diagram - tic_diagram
self.model_data["infalgo_time"] = toc_infalgo - tic_infalgo
self.model_data["dsk_time"] = toc_dask - tic_dask
self.model_data["inf_time"] = toc_dask - tic_prediction
return result
def get_params(self, deep=False):
return self.params
# Diagrams
def _build_q_diagram(self, m_list, m_sel, composition=False):
if isinstance(m_sel, tuple):
diagrams = [
build_diagram(
m_list,
m_sel_instance,
self.q_code,
prune=True,
composition=composition,
)
for m_sel_instance in m_sel
]
return tuple(diagrams)
else:
return build_diagram(
m_list, m_sel, self.q_code, prune=True, composition=composition
)
def show_q_diagram(self, kind="svg", fi=False, ortho=False, index=None, **kwargs):
if isinstance(self.q_diagram, tuple) and index is None:
return show_diagram(self.c_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
elif isinstance(self.q_diagram, tuple):
return show_diagram(
self.q_diagram[index], kind=kind, fi=fi, ortho=ortho, **kwargs
)
else:
return show_diagram(self.q_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
def save_diagram(self, fname=None, kind="svg", fi=False, ortho=False):
return save_diagram(self.q_diagram, fname, kind=kind, fi=fi, ortho=ortho)
# Inference
def _build_q_model(self, X, diagram):
try:
self.inference_algorithm(
diagram,
self.m_list,
self.i_list,
self.c_list,
X,
self.metadata.get("nominal_attributes"),
)
except NetworkXUnfeasible:
cycle = find_cycle(self.q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
n_component_models = self.metadata["n_component_models"]
q_model = NewCompositeModel(
diagram,
nominal_attributes=self.metadata["nominal_attributes"],
n_component_models=n_component_models,
)
return q_model
def _merge_q_models(self, q_models):
q_diagram = build_diagram(self.c_list, self.c_sel, self.q_code, prune=True)
return q_diagram
def merge_models(self, q_models):
types = self._get_types(self.metadata)
walks = [
model_to_graph(m, types, idx=idx, composition=True)
for idx, m in enumerate(q_models)
]
q_diagram = compose_all(walks)
filtered_nodes = self.filter_nodes(q_diagram)
try:
self.inference_algorithm(q_diagram, sorted_nodes=filtered_nodes)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_diagram, q_model
def _get_q_model(self, q_diagram, X):
self._add_imputer_function(q_diagram)
try:
self.inference_algorithm(q_diagram, X=X)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_model
# Filter
def _filter_m_list_m_codes(self):
"""Filtering out the failed models.
This happens when TODO: EXPLAIN
"""
fail_m_idxs = [i for i, m in enumerate(self.m_list) if m is None]
self.m_codes = np.delete(self.m_codes, fail_m_idxs, axis=0)
self.m_list = [m for m in self.m_list if m is not None]
return
# Graphs
def _consistent_datastructures(self, binary_scores=False):
self._update_m_codes()
self._update_m_fimps()
return
def _expand_m_list(self):
self.m_list = list(itertools.chain.from_iterable(self.m_list))
return
def _add_model(self, model, binary_scores=False):
self.m_list.append(model)
self._consistent_datastructures(binary_scores=binary_scores)
return
def _update_m_codes(self):
self.m_codes = np.array(
[
query_to_code(
list(model.desc_ids),
list(model.targ_ids),
attributes=self.metadata["attributes"],
)
for model in self.m_list
]
)
return
def _update_m_fimps(self):
init = np.zeros(self.m_codes.shape)
for m_idx, mod in enumerate(self.m_list):
init[m_idx, list(mod.desc_ids)] = mod.feature_importances_
self.m_fimps = init
return
def _update_m_score(self, binary_scores=False):
if binary_scores:
self.m_score = (self.m_codes == TARG_ENCODING).astype(float)
return
# Imputer
def _add_imputer_function(self, g):
for n in g.nodes:
if g.nodes[n]["kind"] == "imputation":
idx = g.nodes[n]["idx"]
f_1 = self._dummy_array # Artificial input
f_2 = self.i_list[idx].transform # Actual imputation
f_3 = np.ravel # Return a vector, not array
g.nodes[n]["function"] = o(f_3, o(f_2, f_1))
return
# Add ids
@staticmethod
def _add_ids(g, desc_ids, targ_ids):
g.graph["desc_ids"] = set(desc_ids)
g.graph["targ_ids"] = set(targ_ids)
return g
# Metadata
def _default_metadata(self, X):
if X.ndim != 2:
X = X.reshape(-1, 1)
n_rows, n_cols = X.shape
types = [X[0, 0].dtype for _ in range(n_cols)]
nominal_attributes = set(
[att for att, typ in enumerate(types) if self._is_nominal(typ)]
)
numeric_attributes = set(
[att for att, typ in enumerate(types) if self._is_numeric(typ)]
)
metadata = dict(
attributes=set(range(n_cols)),
n_attributes=n_cols,
types=types,
nominal_attributes=nominal_attributes,
numeric_attributes=numeric_attributes,
)
return metadata
def _update_metadata(self, **kwargs):
self._update_dictionary(self.metadata, kind="metadata", **kwargs)
        # Ensure every attribute is typed: any attribute not declared nominal
        # is treated as numeric (the default).
numeric = self.metadata["numeric_attributes"]
nominal = self.metadata["nominal_attributes"]
att_ids = self.metadata["attributes"]
        # All attributes should be accounted for and none should be counted twice.
if (len(nominal) + len(numeric) - len(att_ids)) != 0:
numeric = att_ids - nominal
self._update_dictionary(
self.metadata, kind="metadata", numeric_attributes=numeric
)
return
# Configuration
def _reconfig_prediction(self, prediction_algorithm="mi", **kwargs):
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.configuration["prediction"] = self.prd_cfg
self._update_config(**kwargs)
return
def _reconfig_inference(self, inference_algorithm="own", **kwargs):
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.inf_cfg = self._default_config(self.inference_algorithm)
self.configuration["inference"] = self.inf_cfg
self._update_config(**kwargs)
return
@staticmethod
def _default_config(method):
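        # Collect the callable's keyword defaults, e.g. for def f(a, b=2)
        # this returns {"b": 2}; parameters without defaults are skipped.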
config = {}
sgn = signature(method)
for key, parameter in sgn.parameters.items():
if parameter.default is not parameter.empty:
config[key] = parameter.default
return config
def _update_config(self, **kwargs):
for kind in self.configuration:
self._update_dictionary(self.configuration[kind], kind=kind, **kwargs)
return
def _extra_checks_on_config(self):
self._check_xgb_single_target()
return
def _check_xgb_single_target(self):
nb_targets = self.configuration["selection"]["nb_targets"]
if nb_targets == 1:
return None
else:
if (
self.classifier_algorithm is self.classifier_algorithms["XGB"]
or self.regressor_algorithm is self.regressor_algorithms["XGB"]
):
xgb = True
else:
xgb = False
if xgb:
msg = """
XGBoost cannot deal with multi-target outputs.
Hence, the `nb_targets` parameter is automatically adapted to 1,
so only single-target trees will be learned.
Please take this into account.
"""
warnings.warn(msg)
self.configuration["selection"]["nb_targets"] = 1
return
def _parse_kwargs(self, kind="selection", **kwargs):
prefixes = [e + self.delimiter for e in self.configuration_prefixes[kind]]
parameter_map = {
x.split(prefix)[1]: x
for x in kwargs
for prefix in prefixes
if x.startswith(prefix)
}
return parameter_map
def _update_dictionary(self, dictionary, kind=None, **kwargs):
# Immediate matches
overlap = set(dictionary).intersection(set(kwargs))
for k in overlap:
dictionary[k] = kwargs[k]
if kind is not None:
# Parsed matches
parameter_map = self._parse_kwargs(kind=kind, **kwargs)
overlap = set(dictionary).intersection(set(parameter_map))
for k in overlap:
dictionary[k] = kwargs[parameter_map[k]]
return
# Helpers
def _filter_X(self, X):
# Filter relevant input attributes
if X.shape[1] != len(self.q_compose.desc_ids):
indices = self._overlapping_indices(
self.q_desc_ids, self.q_compose.desc_ids
)
return X[:, indices]
@staticmethod
def _dummy_array(X):
"""
Return an array of np.nan, with the same number of rows as the input array.
Parameters
----------
X: np.ndarray(), n_rows, n_cols = X.shape,
We use the shape of X to deduce shape of our output.
Returns
-------
a: np.ndarray(), shape= (n_rows, 1)
n_rows is the same as the number of rows as X.
"""
n_rows, _ = X.shape
a = np.empty((n_rows, 1))
a.fill(np.nan)
return a
def _default_q_code(self):
q_code = np.zeros(self.metadata["n_attributes"])
q_code[-1] = TARG_ENCODING
return q_code
@staticmethod
def _is_nominal(t):
condition_01 = t == np.dtype(int)
return condition_01
@staticmethod
def _is_numeric(t):
condition_01 = t == np.dtype(float)
return condition_01
@staticmethod
def _get_types(metadata):
nominal = {i: "nominal" for i in metadata["nominal_attributes"]}
numeric = {i: "numeric" for i in metadata["numeric_attributes"]}
return {**nominal, **numeric}
@staticmethod
def _overlapping_indices(a, b):
"""
        Given arrays a and b, return the indices (in a) of elements that occur in both a and b.
Parameters
----------
a
b
Returns
-------
Examples
--------
a = [4,5,6]
b = [4,6,7]
overlapping_indices(a, b) = [0,2]
"""
return np.nonzero(np.in1d(a, b))[0]
@staticmethod
def filter_nodes(g):
# This is not as safe as it should be
sorted_nodes = list(topological_sort(g))
filtered_nodes = []
for n in reversed(sorted_nodes):
if g.nodes[n]["kind"] == "model":
break
filtered_nodes.append(n)
filtered_nodes = list(reversed(filtered_nodes))
return filtered_nodes
# SYNTH
def autocomplete(self, X, **kwargs):
return
# Legacy (delete when I am sure they can go)
def predict_old(
self, X, q_code=None, prediction_algorithm=None, beta=False, **kwargs
):
# Update configuration if necessary
if q_code is None:
q_code = self._default_q_code()
if prediction_algorithm is not None:
reuse = False
self._reconfig_prediction(
prediction_algorithm=prediction_algorithm, **kwargs
)
# Adjust data
tic_prediction = default_timer()
self.q_code = q_code
self.q_desc_ids, self.q_targ_ids, _ = code_to_query(
self.q_code, return_list=True
)
# Make query-diagram
self.q_diagram = self.prediction_algorithm(
self.g_list, q_code, self.fi, self.t_codes, **self.prd_cfg
)
toc_prediction = default_timer()
tic_dask = default_timer()
toc_dask = default_timer()
tic_compute = default_timer()
res = self.q_model.predict.compute()
toc_compute = default_timer()
# Diagnostics
self.model_data["prd_time"] = toc_prediction - tic_prediction
self.model_data["dsk_time"] = toc_dask - tic_dask
self.model_data["cmp_time"] = toc_compute - tic_compute
self.model_data["inf_time"] = toc_compute - tic_prediction
self.model_data["ratios"] = (
self.model_data["prd_time"] / self.model_data["inf_time"],
self.model_data["dsk_time"] / self.model_data["inf_time"],
self.model_data["cmp_time"] / self.model_data["inf_time"],
)
return res
def _update_g_list(self):
types = self._get_types(self.metadata)
self.g_list = [
model_to_graph(m, types=types, idx=idx) for idx, m in enumerate(self.m_list)
]
return
def _update_t_codes(self):
self.t_codes = (self.m_codes == TARG_ENCODING).astype(int)
return
# AVATAR-TOOLS
def avatar(
self,
explainer_data,
background_data=None,
check_additivity=True,
keep_abs_shaps=False,
**explainer_kwargs
):
assert shap is not None, "SHAP not found, so cannot do anything here."
self._init_avatar()
for m_idx in range(len(self.m_list)):
# Extract tree and m_code
tree = self.m_list[m_idx].model
m_code = self.m_codes[m_idx]
# Filter data
attribute_filter = m_code == DESC_ENCODING
X = explainer_data[:, attribute_filter]
if background_data is not None:
B = background_data[:, attribute_filter]
else:
B = background_data
# Shap Calculation
explainer = shap.TreeExplainer(tree, data=B, **explainer_kwargs)
raw_shaps = explainer.shap_values(X, check_additivity=check_additivity)
# Process Shap values
abs_shaps = self._raw_to_abs_shaps(raw_shaps)
nrm_shaps = self._abs_to_nrm_shaps(abs_shaps)
if keep_abs_shaps:
self.abs_shaps.append(abs_shaps)
self.nrm_shaps.append(nrm_shaps)
self._format_abs_shaps()
self._format_nrm_shaps()
return
@staticmethod
def _raw_to_abs_shaps(raw_shaps):
# Process Shap values
tsr_shaps = np.array(raw_shaps) # tensor
abs_shaps = np.abs(tsr_shaps) # absolute
if len(abs_shaps.shape) == 3:
# In case of nominal target, sum shap values across target classes
abs_shaps = np.sum(abs_shaps, axis=0)
return abs_shaps
@staticmethod
def _abs_to_nrm_shaps(abs_shaps):
avg_shaps = np.mean(
abs_shaps, axis=0
) # Avg over instances (of explainer data!)
nrm_shaps = np.squeeze(
normalize(avg_shaps.reshape(1, -1), norm="l1")
) # Normalize (between 0 and 1)
return nrm_shaps
def avatar_q_model(
self,
X_train,
X_test,
l1_reg="num_features(10)",
check_additivity=False,
n_samples=20,
silent=True,
):
assert shap is not None, "SHAP not found, so cannot do anything here."
# Extract function to explain
m = self.q_model
f = self._extract_function_to_explain(self.q_model)
# Data
assert (
X_train.shape[1] == X_test.shape[1]
), "Inconsistent attribute count. Your carelessness is disappointing."
if X_train.shape[1] != len(m.desc_ids):
attribute_filter = m.desc_ids
X_train = X_train[:, attribute_filter]
X_test = X_test[:, attribute_filter]
explainer = shap.KernelExplainer(f, shap.sample(X_train, n_samples))
raw_shaps = explainer.shap_values(
X_test, l1_reg=l1_reg, check_additivity=check_additivity, silent=silent
)
# Process Shap values
abs_shaps = self._raw_to_abs_shaps(raw_shaps)
nrm_shaps = self._abs_to_nrm_shaps(abs_shaps)
return nrm_shaps
@staticmethod
def _extract_function_to_explain(m):
assert m.n_outputs_ == 1
# Extract function
if m.out_kind in {"nominal"}:
f = lambda x: m.predict_proba(x)[0]
elif m.out_kind in {"numerc"}:
f = m.predict
else:
raise ValueError("I don't know this kind of q_model.out_kind")
return f
def _init_avatar(self):
"""Initialize avatar-datastructures that are used there.
"""
self.abs_shaps = []
self.nrm_shaps = []
return
def _format_nrm_shaps(self):
if isinstance(self.nrm_shaps, list) and len(self.nrm_shaps) > 0:
init = np.zeros(self.m_codes.shape)
for m_idx, (mod, nrm_shap) in enumerate(zip(self.m_list, self.nrm_shaps)):
init[m_idx, list(mod.desc_ids)] = nrm_shap
self.nrm_shaps = init
else:
return
def _format_abs_shaps(self):
if isinstance(self.abs_shaps, list) and len(self.abs_shaps) > 0:
n_models, n_attributes = self.m_codes.shape
n_instances = self.abs_shaps[0].shape[0]
init = np.zeros((n_models, n_instances, n_attributes))
for m_idx, (mod, abs_shap) in enumerate(zip(self.m_list, self.abs_shaps)):
init_abs = np.zeros((n_instances, n_attributes))
init_abs[:, list(mod.desc_ids)] = abs_shap
init[m_idx, :, :] = init_abs
self.abs_shaps = init
else:
return
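
# Minimal end-to-end sketch (illustrative only; synthetic data, default
# settings -- the q_code encoding assumes desc=0 / targ=1, matching what
# Mercs._default_q_code produces):
#   import numpy as np
#   model = Mercs(classifier_algorithm="DT", regressor_algorithm="DT")
#   X = np.random.rand(200, 4)
#   model.fit(X)                 # last attribute is the default target
#   q = np.zeros(4); q[-1] = 1   # predict attribute 3 from attributes 0-2
#   y_hat = model.predict(X, q_code=q)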
| 30.97076 | 113 | 0.606905 | 3,734 | 31,776 | 4.909213 | 0.132833 | 0.012547 | 0.009328 | 0.01091 | 0.318641 | 0.252578 | 0.175277 | 0.150946 | 0.135126 | 0.114505 | 0 | 0.003078 | 0.304758 | 31,776 | 1,025 | 114 | 31.000976 | 0.826679 | 0.072382 | 0 | 0.21745 | 0 | 0 | 0.067073 | 0 | 0 | 0 | 0 | 0.000976 | 0.008054 | 1 | 0.067114 | false | 0 | 0.038926 | 0.004027 | 0.193289 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c12c2ac656b7260dfdb953a62cfeab4d5b386d09 | 6,377 | py | Python | src/ydata_quality/duplicates/engine.py | poga/ydata-quality | 0cdda2774b05101c5f4f773b5e946f2a6544da09 | [
"MIT"
] | 242 | 2021-09-22T17:16:49.000Z | 2022-03-30T10:26:25.000Z | src/ydata_quality/duplicates/engine.py | poga/ydata-quality | 0cdda2774b05101c5f4f773b5e946f2a6544da09 | [
"MIT"
] | 13 | 2021-09-23T00:15:10.000Z | 2022-02-04T16:33:42.000Z | src/ydata_quality/duplicates/engine.py | poga/ydata-quality | 0cdda2774b05101c5f4f773b5e946f2a6544da09 | [
"MIT"
] | 21 | 2021-09-24T09:59:30.000Z | 2022-03-16T02:48:11.000Z | """
Implementation of DuplicateChecker engine class to run duplicate records analysis.
"""
from typing import List, Optional, Union
from pandas import DataFrame
from src.ydata_quality.core.warnings import Priority
from ..core import QualityEngine, QualityWarning
from ..utils.auxiliary import find_duplicate_columns
class DuplicateChecker(QualityEngine):
"Engine for running analyis on duplicate records."
def __init__(self,
df: DataFrame,
entities: List[Union[str, List[str]]] = None,
is_close: bool = False,
severity: Optional[str] = None):
"""
Arguments:
df (DataFrame): reference DataFrame used to run the DataQuality analysis.
entities (List[Union[str, List[str]]]): entities relevant for duplicate analysis.
Passing lists allows composed entities of multiple columns.
is_close (bool): Pass True to use numpy.isclose instead of pandas.equals in column comparison.
severity (str): Sets the logger warning threshold.
Valid levels are [DEBUG, INFO, WARNING, ERROR, CRITICAL]."""
super().__init__(df=df, severity=severity)
self._entities = [] if entities is None else entities
self._tests = ["exact_duplicates", "entity_duplicates", "duplicate_columns"]
self._is_close = is_close
@property
def entities(self):
"Property that returns the entities relevant for duplicates analysis."
return self._entities
@entities.setter
def entities(self, entities: List[Union[str, List[str]]]):
if not isinstance(entities, list):
raise ValueError("Property 'entities' should be a list.")
entities = self.__unique_entities(entities)
assert all(entity in self.df.columns if isinstance(entity, str) else
all(c in self.df.columns for c in entity) for entity in entities), "Given entities should exist as \
DataFrame's columns."
self._entities = entities
@staticmethod
def __unique_entities(entities: List[Union[str, List[str]]]):
"""Returns entities list with only unique entities"""
entities = set(entity if isinstance(entity, str) else entity[0] if len(
entity) == 1 else tuple(entity) for entity in entities)
return [entity if isinstance(entity, str) else list(entity) for entity in entities]
@staticmethod
def __get_duplicates(df: DataFrame):
"Returns duplicate records."
return df[df.duplicated()]
@staticmethod
def __get_entity_duplicates(df: DataFrame, entity: Union[str, List[str]]):
"Returns the duplicate records aggregated by a given entity."
return df.groupby(entity).apply(DuplicateChecker.__get_duplicates).reset_index(drop=True)
def exact_duplicates(self):
"Returns a DataFrame filtered for exact duplicate records."
dups = self.__get_duplicates(self.df) # Filter for duplicate instances
if len(dups) > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.EXACT_DUPLICATES, category=QualityWarning.Category.DUPLICATES,
priority=Priority.P2, data=dups,
description=f"Found {len(dups)} instances with exact duplicate feature values."
))
else:
self._logger.info("No exact duplicates were found.")
dups = None
return dups
def __provided_entity_dups(self, entity: Optional[Union[str, List[str]]] = None) -> dict:
"Find duplicates for passed entity (simple or composed)."
found_dups = {}
dups = self.__get_entity_duplicates(self.df, entity)
if len(dups) > 0: # if we have any duplicates
self.store_warning(
QualityWarning(
test='Entity Duplicates', category='Duplicates', priority=Priority.P2, data=dups,
description=f"Found {len(dups)} duplicates after grouping by entities."
))
if isinstance(entity, str):
entity = [entity] # Makes logic the same for str or List[str] entities
set_vals = set(dups[entity].apply(tuple, axis=1))
if len(entity) > 1:
entity_key = tuple(entity) # Lists are not hashable, therefore cannot be dictionary keys
else:
# No need to store keys as tuples for single entities (single values)
set_vals = [val[0] for val in set_vals]
entity_key = entity[0]
for val in set_vals: # iterate on each entity with duplicates
found_dups.setdefault(entity_key, {})[val] = dups[(dups[entity].values == val).all(axis=1)]
return found_dups
def entity_duplicates(self, entity: Optional[Union[str, List[str]]] = None):
"""Returns a dict of {entity: {entity_value: duplicates}} of duplicate records after grouping by an entity.
If entity is not specified, compute for all entities defined in the init.
"""
ent_dups = {}
if entity is not None: # entity is specified
ent_dups.update(self.__provided_entity_dups(entity))
else: # if entity is not specified
if len(self.entities) == 0:
self._logger.warning("There are no entities defined to run the analysis. Skipping the test.")
return None
for col in self.entities:
ent_dups.update(self.entity_duplicates(col))
return ent_dups
def duplicate_columns(self):
"Returns a mapping dictionary of columns with fully duplicated feature values."
dups = find_duplicate_columns(self.df, self._is_close)
cols_with_dups = len(dups.keys())
if cols_with_dups > 0:
self.store_warning(
QualityWarning(
test=QualityWarning.Test.DUPLICATE_COLUMNS, category=QualityWarning.Category.DUPLICATES,
priority=Priority.P1, data=dups,
description=f"Found {cols_with_dups} columns with exactly the same feature values as other columns."
)
)
else:
self._logger.info("No duplicate columns were found.")
dups = None
return dups
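# Hedged usage sketch (not part of this module; `df` stands for any pandas
# DataFrame supplied by the caller):
#
#   checker = DuplicateChecker(df=df, entities=["city", ["state", "zip"]])
#   exact = checker.exact_duplicates()        # DataFrame of duplicate rows, or None
#   per_entity = checker.entity_duplicates()  # {entity: {value: duplicates}}
#   same_cols = checker.duplicate_columns()   # mapping of fully duplicated columns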
| 45.877698 | 120 | 0.631175 | 747 | 6,377 | 5.26506 | 0.243641 | 0.014238 | 0.021358 | 0.026697 | 0.237478 | 0.167302 | 0.08238 | 0.08238 | 0.063565 | 0.034579 | 0 | 0.003067 | 0.284303 | 6,377 | 138 | 121 | 46.210145 | 0.858677 | 0.233966 | 0 | 0.201923 | 0 | 0 | 0.162418 | 0 | 0 | 0 | 0 | 0 | 0.009615 | 1 | 0.096154 | false | 0.009615 | 0.048077 | 0 | 0.240385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c12f7689727d68b07585dc735616888c343cb5e6 | 3,575 | py | Python | dataio/python/pprint.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | dataio/python/pprint.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | dataio/python/pprint.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | import collections
import re
from icecube import icetray
from icecube import dataclasses
from icecube import dataio
def format_line( frame, key, maxwidth = None, ellipsis = '...' ):
'''Given an icecube frame and a key in that frame, return
exactly one line of text describing the I3FrameObject with that key.
Try to make the text as useful to a human reader as possible.
If accessing the object generates an exception, catch it and
return its description.
Clip to an optional maximum width with a trailing ellipsis'''
try:
obj = frame[key]
if (obj is None) and (key in frame):
return '(Unreadable)'
if hasattr(obj, "apply"):
obj = obj.apply(frame)
haslength = isinstance( obj, collections.Iterable )
except Exception as e:
obstr = '(Unreadable)'
else:
if( haslength ):
obstr = 'Iterable with {0} items'.format(len(obj))
else:
try:
# give the module and class name
obstr = '{0}.{1} object'.format(obj.__module__,obj.__class__.__name__)
except Exception as e:
# try basic repr
obstr = repr(obj).split('\n')[0]
if( maxwidth ):
if( len(obstr) > maxwidth ):
obstr = obstr[:maxwidth - len(ellipsis)] + ellipsis[0:maxwidth]
return obstr
def format_detail( frame, key ):
'''Given an icecube frame and a key in that frame, return
a human-readable string that describes the item in detail.'''
try:
obj = frame[key]
if hasattr(obj, "apply"):
obj = obj.apply(frame)
if isinstance(obj,dataclasses.I3String):
message = obj.value
if isinstance(obj,dataclasses.I3Double):
message = str(obj.value)
elif hasattr(obj, "items"):
message = '{\n'
for k in obj.keys():
message += str(k)+': '+str(obj[k])+'\n'
message += '}'
else:
message = str(obj)
except Exception as e:
message = '({0})'.format(e)
if re.match('<icecube\.[\S]*\.[\S]* object at [0-9xa-f]*>', message):
# Standard boring format. In some cases we might be able to do better.
if isinstance( obj, collections.Iterable):
message += ', contents:\n' + '\n'.join( [ str(x) for x in frame[key] ] )
return message
def format_xml( frame, key ):
'''Given an icecube frame and a key in that frame, return
the xml serialization of the item.'''
try:
if key in frame:
message = frame.as_xml(key)
else:
message = key+' not in frame'
except Exception as e:
message = '({0})'.format(e)
return message.expandtabs(4)
def format_size( frame, key):
'''Given an icecube frame and a key in that frame, return
the size of the stored item as a short string.
By default the byte count is converted to kilo-, mega- or
gigabytes (the 'K', 'M', 'G' units in the hardcoded list below).'''
cfactor = 1024.
sunit = False
unit = ['K', 'M', 'G']
if key in frame:
size = frame.size(key)
else:
return str()
while size > cfactor and bool(unit):
size /= cfactor
sunit = unit.pop(0)
if bool(sunit):
if size < 10:
return '{0:1.1f}{1:1s}'.format(size,sunit)
else:
return '{0:4.0f}{1:1s}'.format(size, sunit)
# Bytes are integer value, so show them like this
return '{0:4d} '.format(size)
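# Worked example of the conversion above (illustrative): an item whose
# serialized size is 5_300_000 bytes is divided by 1024 twice
# (-> ~5175.8 'K' -> ~5.05 'M'); since 5.05 < 10 the result is '5.1M'.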
| 30.818966 | 86 | 0.575385 | 466 | 3,575 | 4.377682 | 0.32618 | 0.027451 | 0.027451 | 0.037255 | 0.196078 | 0.163725 | 0.163725 | 0.163725 | 0.09902 | 0.09902 | 0 | 0.013029 | 0.313007 | 3,575 | 115 | 87 | 31.086957 | 0.81759 | 0.258741 | 0 | 0.32 | 0 | 0 | 0.080976 | 0.008524 | 0.013333 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0 | 0.066667 | 0 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c131857f7131f2f64a0c9cd301cbb4e69c3dcbec | 9,619 | py | Python | CryptoAttacks/Block/ecb.py | akbarszcz/CryptoAttacks | ae675d016b314414a3dc9b23c7d8a32da4c62457 | [
"MIT"
] | 54 | 2017-03-28T23:46:58.000Z | 2022-02-23T01:53:38.000Z | CryptoAttacks/Block/ecb.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | null | null | null | CryptoAttacks/Block/ecb.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | 13 | 2017-03-31T06:07:23.000Z | 2021-11-20T19:01:30.000Z | from __future__ import absolute_import, division, print_function
import string
from builtins import bytes, range
from CryptoAttacks.Math import factors
from CryptoAttacks.Utils import (add_padding, b2h, chunks, log, print_chunks,
random_bytes)
def encryption_oracle(payload):
"""Function implementing encryption oracle with ecb mode
Args:
payload(string): raw data to encrypt
Returns:
string
"""
raise NotImplementedError
def is_ecb(cipher, block_size=16):
"""Check if there are repeated blocks in ciphertext
Args:
cipher(string)
block_size(int)
Returns:
bool: True if there are repeated blocks (so it's probably ECB mode)
"""
cipher_blocks = chunks(cipher, block_size)
unique_blocks = set(cipher_blocks)
if len(unique_blocks) < len(cipher_blocks):
return True
return False
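# Quick illustration (standalone, toy bytes): a ciphertext repeating a
# 16-byte block leaks structure under ECB, so it is flagged:
#   is_ecb(b'A' * 16 + b'B' * 16 + b'A' * 16)  # -> True
#   is_ecb(bytes(range(48)))                   # -> False (all three blocks distinct)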
def find_block_size(encryption_oracle, constant=True):
"""Determine block size if ecb mode
Args:
encryption_oracle(callable)
constant(bool): True if prefix and suffix have constant length
Returns:
int
"""
if constant:
log.debug("constant == True")
payload = bytes(b'A')
size = len(encryption_oracle(payload))
while True:
payload += bytes(b'A')
new_size = len(encryption_oracle(payload))
if new_size > size:
log.info("block_size={}".format(new_size - size))
return new_size - size
else:
log.debug("constant == False")
payload = bytes(b'A')
max_size = len(encryption_oracle(payload))
possible_sizes = factors(max_size)
possible_sizes.add(max_size)
blocks_to_send = 5
for block_size in sorted(possible_sizes):
"""send payload of length x, so at least x-1 blocks should be identical"""
payload = random_bytes(1) * (blocks_to_send*block_size)
enc_chunks = chunks(encryption_oracle(payload), block_size)
for x in range(len(enc_chunks)-1):
if enc_chunks[x] == enc_chunks[x+1]:
log.debug("Found two identical blocks at {}: {}".format(x, print_chunks(enc_chunks)))
for y in range(2, blocks_to_send-1):
if enc_chunks[x] != enc_chunks[x+y]:
break
else:
log.info("block_size={}".format(block_size))
return block_size
def find_prefix_suffix_size(encryption_oracle, block_size=16):
"""Determine prefix and suffix sizes if ecb mode, sizes must be constant
Rarely may fail (if random data that are send unhappily matches prefix/suffix)
Args:
encryption_oracle(callable)
block_size(int)
Returns:
tuple(int,int): prefix_size, suffix_size
"""
blocks_to_send = 5
payload = random_bytes(1) * (blocks_to_send * block_size)
enc_chunks = chunks(encryption_oracle(payload), block_size)
log.debug("Encryption of length {}".format(blocks_to_send * block_size))
log.debug(print_chunks(enc_chunks))
for position_start in range(len(enc_chunks) - 1):
if enc_chunks[position_start] == enc_chunks[position_start + 1]:
for y in range(2, blocks_to_send - 1):
if enc_chunks[position_start] != enc_chunks[position_start + y]:
break
else:
log.success("Controlled payload start at chunk {}".format(position_start))
break
else:
log.critical_error("Position of controlled chunks not found")
log.info('Finding prefix')
changed_char = bytes([(payload[0] - 1)%256])
for aligned_bytes in range(block_size):
payload_new = payload[:aligned_bytes] + changed_char + payload[aligned_bytes+1:]
enc_chunks_new = chunks(encryption_oracle(payload_new), block_size)
log.debug(print_chunks(chunks(payload_new, block_size)))
log.debug(print_chunks(enc_chunks_new))
if enc_chunks_new[position_start] != enc_chunks[position_start]:
prefix_size = position_start*block_size - aligned_bytes
log.success("Prefix size: {}".format(prefix_size))
break
else:
log.critical_error("Size of prefix not found")
log.info('Finding suffix')
payload = random_bytes(1) * (block_size - (prefix_size % block_size)) # align to block_size
encrypted = encryption_oracle(payload)
suffix_size = len(encrypted) - len(payload) - prefix_size
while True:
payload += random_bytes(1)
suffix_size -= 1
if len(encryption_oracle(payload)) > len(encrypted):
log.success("Suffix size: {}".format(suffix_size))
break
else:
log.critical_error("Size of suffix not found")
return prefix_size, suffix_size
def decrypt(encryption_oracle, constant=True, block_size=16, prefix_size=None, secret_size=None,
alphabet=None):
"""Given encryption oracle which produce ecb(prefix || our_input || secret), find secret
Args:
encryption_oracle(callable)
constant(bool): True if prefix has constant length (the secret must also have constant length)
block_size(int/None)
prefix_size(int/None)
secret_size(int/None)
alphabet(string): plaintext space
Returns:
secret(string)
"""
log.debug("Start decrypt function")
if not alphabet:
alphabet = bytes(string.printable.encode())
if not block_size:
block_size = find_block_size(encryption_oracle, constant)
if constant:
log.debug("constant == True")
if not prefix_size or not secret_size:
prefix_size, secret_size = find_prefix_suffix_size(encryption_oracle, block_size)
"""Start decrypt"""
secret = bytes(b'')
aligned_bytes = random_bytes(1) * (block_size - (prefix_size % block_size))
if len(aligned_bytes) == block_size:
aligned_bytes = bytes(b'')
aligned_bytes_suffix = random_bytes(1) * (block_size - (secret_size % block_size))
if len(aligned_bytes_suffix) == block_size:
aligned_bytes_suffix = bytes(b'')
block_to_find_position = -1
controlled_block_position = (prefix_size+len(aligned_bytes)) // block_size
while len(secret) < secret_size:
if (len(secret)+1) % block_size == 0:
block_to_find_position -= 1
payload = aligned_bytes + aligned_bytes_suffix + random_bytes(1) + secret
enc_chunks = chunks(encryption_oracle(payload), block_size)
block_to_find = enc_chunks[block_to_find_position]
log.debug("To guess at position {}:".format(block_to_find_position))
log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
log.debug("Encry: " + print_chunks(enc_chunks)+"\n")
for guessed_char in range(256):
guessed_char = bytes([guessed_char])
payload = aligned_bytes + add_padding(guessed_char + secret, block_size)
enc_chunks = chunks(encryption_oracle(payload), block_size)
log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
log.debug("Encry: " + print_chunks(enc_chunks)+"\n")
if block_to_find == enc_chunks[controlled_block_position]:
secret = guessed_char + secret
log.debug("Found char, secret={}".format(repr(secret)))
break
else:
log.critical_error("Char not found, try change alphabet. Secret so far: {}".format(repr(secret)))
log.success("Secret(hex): {}".format(b2h(secret)))
return secret
else:
log.debug("constant == False")
def known_plaintexts(pairs, ciphertext, block_size=16):
"""Given enough pairs plaintext-ciphertext, we can assign ciphertexts blocks to plaintexts blocks,
then we can possibly decrypt ciphertext
Args:
pairs(list): list of dict, [{'cipher': 'aaa', 'plain': 'bbb'}, {'cipher': 'xxx', 'plain': 'pwa'}]
plaintexts have to be correctly padded (len(cipher) == len(plain))
ciphertext(string): ciphertext to decrypt
block_size(int)
Returns
tuple: ([decrypted_ciphertext_blocks], {'ciphertext_block': 'plaintext_block', ...})
decrypted_ciphertext_blocks may contain not-decrypted blocks from ciphertext
"""
result_mapping = {}
for pair in pairs:
ciphertext_blocks = chunks(pair['cipher'], block_size)
plaintext_blocks = chunks(pair['plain'], block_size)
if len(ciphertext_blocks) != len(plaintext_blocks):
print(pair)
print(ciphertext_blocks, plaintext_blocks)
print(len(ciphertext_blocks), len(plaintext_blocks))
assert 0
for cipher_block_no in range(len(ciphertext_blocks)):
result_mapping[ciphertext_blocks[cipher_block_no]] = plaintext_blocks[cipher_block_no]
target_ciphertext_blocks = chunks(ciphertext, block_size)
for cipher_block_no in range(len(target_ciphertext_blocks)):
if target_ciphertext_blocks[cipher_block_no] in list(result_mapping.keys()):
target_ciphertext_blocks[cipher_block_no] = result_mapping[target_ciphertext_blocks[cipher_block_no]]
return target_ciphertext_blocks, result_mapping
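# Minimal illustration of the mapping above (toy byte strings, not real
# ciphertexts): with one pair whose cipher blocks are b'X'*16, b'Y'*16 and
# plain blocks are P0, P1, decrypting b'Y'*16 + b'Z'*16 yields [P1, b'Z'*16];
# the unseen block b'Z'*16 is passed through undecrypted.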
| 39.584362 | 132 | 0.640607 | 1,179 | 9,619 | 4.977099 | 0.149279 | 0.07362 | 0.043115 | 0.020279 | 0.406271 | 0.299591 | 0.222222 | 0.205351 | 0.145535 | 0.105658 | 0 | 0.006043 | 0.260214 | 9,619 | 242 | 133 | 39.747934 | 0.818578 | 0.174031 | 0 | 0.256757 | 0 | 0 | 0.067881 | 0 | 0 | 0 | 0 | 0 | 0.006757 | 1 | 0.040541 | false | 0 | 0.033784 | 0 | 0.121622 | 0.094595 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c133b030e2d992d0cf7302a80fd9d38d5daf7e7c | 973 | py | Python | codes/convergence_elasticity_advection/bilinear.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/comparison/fem/bilinear.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/convergence_elasticity_advection/bilinear.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
def bilinear(x,u_n,u,EPn,Pn,E,Sigy,H):
"""1D elastoplastic update (return mapping) with linear isotropic hardening."""
#initialization
h = x[1:len(x)]-x[:(len(x)-1)]
eps_n = (u_n[1:len(u_n)]-u_n[:(len(u_n)-1)])/h
eps = (u[1:len(u)]-u[:(len(u)-1)])/h
S = np.zeros(len(eps))
EP = np.zeros(len(eps))
P = np.zeros(len(eps))
TM = np.zeros(len(eps))
#Loop on integration points
for i,DEFO in enumerate(eps):
#(i) Elastic prediction
Selas = E*(DEFO-EPn[i])
#(ii) Compute the criterion
f = np.abs(Selas) - (Sigy+H*Pn[i])
if (f<=0):
#elastic step
S[i] = Selas
EP[i] = EPn[i]
P[i] = Pn[i]
TM[i] = E
elif (f>0):
#elastoplastic step: the consistency condition gives dP in closed form for linear hardening
dP = f/(E+H)
P[i] = Pn[i]+dP
EP[i] = EPn[i]+(P[i]-Pn[i])*np.sign(Selas)
S[i] = E*(DEFO-EP[i])
TM[i] = (E*H)/(E+H)
return S,P,EP,TM
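# Hedged usage sketch (toy values; E, Sigy, H are Young's modulus, yield
# stress and hardening modulus, assumed in consistent units):
#
#   import numpy as np
#   x = np.array([0., 1.])       # two-node bar
#   u_n = np.zeros(2)            # previous displacements
#   u = np.array([0., 0.002])    # current displacements -> strain 0.002
#   S, P, EP, TM = bilinear(x, u_n, u, np.zeros(1), np.zeros(1),
#                           E=200e9, Sigy=250e6, H=20e9)
#   # Selas = 400 MPa > Sigy, so this takes the elastoplastic branch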
| 28.617647 | 66 | 0.464543 | 169 | 973 | 2.639053 | 0.337278 | 0.022422 | 0.089686 | 0.116592 | 0.053812 | 0.053812 | 0.053812 | 0.053812 | 0 | 0 | 0 | 0.012308 | 0.331963 | 973 | 33 | 67 | 29.484848 | 0.673846 | 0.174717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1365b2df1fdc2c37aa4c5a090e8a65cce8207d8 | 2,985 | py | Python | enigma.py | danhab99/EnigmaPY | b7526c26ac98675e911a8d0dcaf1acfe6d2659fb | [
"MIT"
] | null | null | null | enigma.py | danhab99/EnigmaPY | b7526c26ac98675e911a8d0dcaf1acfe6d2659fb | [
"MIT"
] | null | null | null | enigma.py | danhab99/EnigmaPY | b7526c26ac98675e911a8d0dcaf1acfe6d2659fb | [
"MIT"
] | null | null | null | import create
from lib import Machine
from lib import Transformer
import argparse
import pickle
from itertools import chain
from random import shuffle, sample
parser = argparse.ArgumentParser(description='A simulation of the enigma encryption algorithm', prog='enigma.py')
subparsers = parser.add_subparsers(help='Which command to run', dest='subroutine')
create_parser = subparsers.add_parser('create', help='A utility to create encryption codexes')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt a file with a codex')
parser.add_argument('--test',
type=argparse.FileType('r'),
help='Validate a cypher')
create_parser.add_argument('file',
metavar='<File>',
type=argparse.FileType('w'),
help='The file to output to')
create_parser.add_argument('-r', '--random',
action='store_true',
help='Generates a completely random codex')
encrypt_parser.add_argument('in_file',
metavar='<Input file>',
type=argparse.FileType('r'),
help='The file to be encrypted')
encrypt_parser.add_argument('out_file',
metavar='<Out file>',
type=argparse.FileType('w'),
help='The destination for the resuts')
encrypt_mutual = encrypt_parser.add_mutually_exclusive_group(required=True)
encrypt_mutual.add_argument('--codex',
type=argparse.FileType('r'),
help='The codex to use')
encrypt_mutual.add_argument('--random',
nargs=3,
help='Create a random codex using a preset alphabet [ABC, bytes, numbers, ASCII, UTF], a minimum number of transformers, and a maximum number of transformers')
args = parser.parse_args()
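# Example invocations implied by the parsers above (file names are
# placeholders):
#   python enigma.py create codex.pkl
#   python enigma.py encrypt plain.txt out.enc --codex codex.pkl
#   python enigma.py encrypt plain.txt out.enc --random ASCII 3 10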
if (args.test):
with open(args.test.name, mode='rb') as file:
cypher = pickle.load(file)
abc = cypher.getABC()
# print(cypher)
machine = Machine(cypher)
def gen(length):
c = [sample(abc, len(abc))] * length
return chain.from_iterable(c)
def transform(d):
return [machine.parse(value, counter) for counter, value in enumerate(d)]
testData = list(gen(5))
results = transform(transform(testData))
if (False not in [item[0] == item[1] for item in zip(testData, results)]):
print("This is a valid cypher")
else:
print("This is NOT a valid cypher")
if (args.subroutine == 'create'):
file = create.Create()
with open(args.file.name, mode='wb+') as output:
pickle.dump(file, output)
if (args.subroutine == 'encrypt'):
machine = None
if (args.codex):
with open(args.codex, 'rb') as file:
machine = Machine(pickle.load(file))
if (args.random):
CYPHER = create.random(create.genPreset(args.random[0]), args.random[1], args.random[2])
machine = Machine(abc=CYPHER[0].getABC())
with open(args.in_file.name, 'rb') as input, open(args.out_file.name, 'wb') as output:
clean = input.read()
crypt = [machine.parse(value, counter) for counter, value in enumerate(clean)]
output.write(bytes(crypt))  # assumes Machine.parse yields int byte values
| 33.166667 | 163 | 0.669012 | 402 | 2,985 | 4.890547 | 0.335821 | 0.032045 | 0.043235 | 0.032045 | 0.073754 | 0.061038 | 0.032553 | 0 | 0 | 0 | 0 | 0.003342 | 0.19799 | 2,985 | 89 | 164 | 33.539326 | 0.817878 | 0.004355 | 0 | 0.071429 | 0 | 0.014286 | 0.213131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.1 | 0.014286 | 0.157143 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c13af56264e5c19bd63a5cb098d1273308f7f27f | 5,962 | py | Python | tests/sdk/test_client_response.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 6 | 2019-10-16T02:35:06.000Z | 2021-02-03T13:39:43.000Z | tests/sdk/test_client_response.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 23 | 2020-03-17T13:16:02.000Z | 2021-04-23T15:09:51.000Z | tests/sdk/test_client_response.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | null | null | null | import requests
from parameterized import parameterized, parameterized_class
import aito.client.requests as aito_requests
import aito.schema as aito_schema
from aito.client import AitoClient
from tests.cases import CompareTestCase
from tests.sdk.contexts import grocery_demo_client
def get_requests_resp_and_aito_resp(aito_client: AitoClient, request_obj: aito_requests.AitoRequest):
"""returns the json content from requests lib response and aito response for comparison"""
raw_resp_obj = requests.request(
method=request_obj.method,
url=aito_client.instance_url + request_obj.endpoint,
headers=aito_client.headers,
json=request_obj.query
)
raw_resp_json = raw_resp_obj.json()
aito_resp = aito_client.request(request_obj=request_obj)
return raw_resp_json, aito_resp
class TestBaseHitsResponse(CompareTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = grocery_demo_client()
cls.request_obj = aito_requests.GenericQueryRequest(query={'from': 'users', 'limit': 3})
cls.raw_resp_json, cls.aito_resp = get_requests_resp_and_aito_resp(cls.client, cls.request_obj)
def test_attributes(self):
for attr in ['offset', 'total']:
self.assertEqual(getattr(self.aito_resp, attr), self.raw_resp_json[attr])
self.assertTrue(hasattr(self.aito_resp, 'hits'))
for idx, hit in enumerate(self.aito_resp.hits):
self.assertEqual(hit.json, self.raw_resp_json['hits'][idx])
self.assertTrue(hasattr(self.aito_resp, 'first_hit'))
self.assertEqual(self.aito_resp.first_hit.json, self.raw_resp_json['hits'][0])
def test_get_field(self):
self.assertIn('offset', self.aito_resp)
with self.assertRaises(KeyError):
_ = self.aito_resp['some_field']
def test_iter_fields(self):
aito_res_fields = [field for field in self.aito_resp]
json_res_fields = list(self.raw_resp_json.keys())
self.assertCountEqual(aito_res_fields, json_res_fields)
@parameterized_class(("test_name", "request_obj", "score_field"), [
("predict", aito_requests.PredictRequest({"from": "products", "predict": "tags", "limit": 3}), "$p"),
("recommend", aito_requests.RecommendRequest(
{"from": "impressions", "recommend": "product", "goal": {"session.user": "veronica"}, "limit": 3}
), "$p" ),
("match", aito_requests.MatchRequest(
{"from": "impressions", "where": {"session.user": "veronica"}, "match": "product", "limit": 3}
), "$p"),
("similarity", aito_requests.SimilarityRequest({"from": "products", "similarity": {"name": "rye bread"}}), "$score")
])
class TestScoredHitsResponse(CompareTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = grocery_demo_client()
def test_hit_class(self):
raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
self.assertTrue(hasattr(aito_resp, 'first_hit'))
self.assertEqual(aito_resp.first_hit.score, raw_resp_json['hits'][0][self.score_field])
with self.assertRaises(KeyError):
_ = aito_resp.first_hit.explanation
def test_hit_with_explanation(self):
self.request_obj.query = {**self.request_obj.query, 'select': ['$why']}
raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
self.assertEqual(aito_resp.first_hit.explanation, raw_resp_json['hits'][0]['$why'])
class TestRelateResponse(CompareTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = grocery_demo_client()
def test_relate_response(self):
raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(
self.client,
aito_requests.RelateRequest({"from": "products", "where": {"$exists": "name"}, "relate": "tags", "limit": 2})
)
self.assertEqual(aito_resp.relations[0].json, raw_resp_json['hits'][0])
self.assertEqual(aito_resp.relations[0].frequencies, raw_resp_json['hits'][0]['fs'])
self.assertEqual(aito_resp.relations[0].probabilities, raw_resp_json['hits'][0]['ps'])
class TestEvaluateResponse(CompareTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = grocery_demo_client()
def test_relate_response(self):
raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(
self.client,
aito_requests.EvaluateRequest({
"test": {"$index": {"$mod": [10, 0]}},
"evaluate": {
"from": "products",
"where": {"name": {"$get": "name"}},
"match": "tags"
}
})
)
self.assertEqual(aito_resp.accuracy, raw_resp_json['accuracy'])
self.assertEqual(aito_resp.test_sample_count, raw_resp_json['testSamples'])
self.assertEqual(aito_resp.train_sample_count, raw_resp_json['trainSamples'])
class TestGetSchemaResponse(CompareTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = grocery_demo_client()
@parameterized.expand([
('get_database_schema', aito_requests.GetDatabaseSchemaRequest(), aito_schema.AitoDatabaseSchema),
('get_table_schema', aito_requests.GetTableSchemaRequest(table_name='products'), aito_schema.AitoTableSchema),
(
'get_column_schema',
aito_requests.GetColumnSchemaRequest(table_name='products', column_name='name'),
aito_schema.AitoColumnTypeSchema
)
])
def test_get_schema_response(self, _, request_instance, schema_cls):
raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, request_instance)
self.assertEqual(aito_resp.schema, schema_cls.from_deserialized_object(raw_resp_json))
| 42.283688 | 121 | 0.679638 | 701 | 5,962 | 5.476462 | 0.198288 | 0.070852 | 0.060172 | 0.05392 | 0.361552 | 0.318052 | 0.233655 | 0.220109 | 0.211253 | 0.211253 | 0 | 0.003532 | 0.192721 | 5,962 | 140 | 122 | 42.585714 | 0.794099 | 0.014089 | 0 | 0.295652 | 0 | 0 | 0.096901 | 0 | 0 | 0 | 0 | 0 | 0.165217 | 1 | 0.121739 | false | 0 | 0.06087 | 0 | 0.234783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c13d31a85ce3ce0b7615b9c5e782008402d5a721 | 9,292 | py | Python | lib/worker.py | GuoxiaWang/InstanceLabelTool | ece37a0dfe1467ad24d6d3472adb50b20b6abd24 | [
"MIT"
] | 6 | 2018-10-28T07:43:34.000Z | 2021-04-11T15:15:14.000Z | lib/worker.py | GuoxiaWang/InstanceLabelTool | ece37a0dfe1467ad24d6d3472adb50b20b6abd24 | [
"MIT"
] | 2 | 2019-03-13T15:16:57.000Z | 2019-04-15T02:35:46.000Z | lib/worker.py | GuoxiaWang/InstanceLabelTool | ece37a0dfe1467ad24d6d3472adb50b20b6abd24 | [
"MIT"
] | 1 | 2020-01-16T10:23:36.000Z | 2020-01-16T10:23:36.000Z | """
Copyright (c) 2018- Guoxia Wang
mingzilaochongtu at gmail com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind.
"""
from PyQt4 import QtCore, QtGui
import numpy as np
import cv2
import os
import getpass
from edgelink import edgelink
from annotation import Point, Annotation, AnnBoundary
class ConvertToBoundariesWorker(QtCore.QObject):
"""
Make a new thread instance to convert to boundaries
from a segment map
"""
finishedSignal = QtCore.pyqtSignal(list)
def __init__(self, objects=None, height=0, width=0):
QtCore.QObject.__init__(self)
self.objects = objects
self.segmentMap = np.zeros((height, width), np.uint8)
def setObjects(self, objects):
self.objects = objects
def setSegmentMap(self, height, width):
self.segmentMap = np.zeros((height, width), np.uint8)
# Segment map convert to boundary list
def convertToBoundaries(self):
# First, we fill all labels to numpy ndarray
count = 1
for obj in self.objects:
for poly in obj.polygon:
pts = []
for pt in poly:
pts.append([pt.x, pt.y])
pts = np.around(pts).astype(np.int32)
cv2.fillPoly(self.segmentMap, [pts], count)
count += 1
# Second, we convert to boundary map from segment map
edgeMap = self.segmentationMapToBoundaryMap(self.segmentMap)
# Third, we get edge fragments
edgelist, edgeim, etype = edgelink(edgeMap)
polygon = []
for edge in edgelist:
if (len(edge) < 5):
continue
# Auto correct occlusion boundary direction
if (self.isNeedReverse(edge)):
edge.reverse()
# Convert to QPolygonF
poly = []
for pt in edge:
point = Point(pt[1], pt[0])
poly.append(point)
polygon.append(poly)
self.finishedSignal.emit(polygon)
return polygon
# Convert a label segmentation map to a boundary map
def segmentationMapToBoundaryMap(self, segment):
height, width = segment.shape
boundary = np.zeros((2*height+1, 2*width+1), np.uint8)
# Find vertical direction difference
edgelsV = (segment[0:-1, :] != segment[1:, :]).astype(np.uint8)
# Add a zero row
edgelsV = np.vstack([edgelsV, np.zeros((1, width), dtype=np.uint8)])
# Find horizontal direction difference
edgelsH = (segment[:,0:-1] != segment[:, 1:]).astype(np.uint8)
# Append a zero column
edgelsH = np.hstack([edgelsH, np.zeros((height, 1), dtype=np.uint8)])
# Assign to boundary
boundary[2::2, 1::2] = edgelsV
boundary[1::2, 2::2] = edgelsH
# Get boundary
boundary[2:-1:2, 2:-1:2] = np.maximum(
np.maximum(edgelsH[0:-1, 0:-1], edgelsH[1:, 0:-1]),
np.maximum(edgelsV[0:-1, 0:-1], edgelsV[0:-1, 1:]))
boundary[0, :] = boundary[1, :]
boundary[:, 0] = boundary[:, 1]
boundary[-1, :] = boundary[-2, :]
boundary[:, -1] = boundary[:, -2]
boundary = boundary[2::2, 2::2]
return boundary
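# Tiny worked example (illustrative): for segment = np.array([[1, 1],
# [1, 2]]) the method marks every pixel adjacent to the 1/2 label change,
# returning np.array([[1, 1], [1, 0]]); only the interior of label 1
# stays unmarked.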
# Check one edge's occlusion direction; return True if it needs reversing
def isNeedReverse(self, edge):
height, width = self.segmentMap.shape
step = 3
posDirCount = 0
totalCount = len(edge) // step
for i in range(totalCount):
idx = i * step
pt1 = QtCore.QPointF(edge[idx][1], edge[idx][0])
idx = (i + 1) * step
if (idx >= len(edge)):
idx = -1
pt2 = QtCore.QPointF(edge[idx][1], edge[idx][0])
line1 = QtCore.QLineF(pt1, pt2)
line1 = line1.normalVector()
pt3 = line1.p2()
pt3.setX(min(max(pt3.x(), 0), width-1))
pt3.setY(min(max(pt3.y(), 0), height-1))
pt4 = QtCore.QPointF(line1.x1() - line1.dx(), line1.y1() - line1.dy())
pt4.setX(min(max(pt4.x(), 0), width-1))
pt4.setY(min(max(pt4.y(), 0), height-1))
if (self.segmentMap[int(pt3.y()), int(pt3.x())] >=
self.segmentMap[int(pt4.y()), int(pt4.x())]):
posDirCount += 1
ratio = float(posDirCount) / np.ceil(float(totalCount))
# If ratio greater than the threshold, we dont need to reverse the edge
if (ratio > 0.3):
return False
else:
return True
class BatchConvertToBoundariesWorker(QtCore.QObject):
"""
Make a new thread instance to batch convert to occlusion boundary labels
from instance labels
"""
updateProgress = QtCore.pyqtSignal(int, str)
finished = QtCore.pyqtSignal()
information = QtCore.pyqtSignal(str, str)
# Flag indicating cancellation by the user
canceled = False
# User selected operation
userOperationResult = -1
# Mutex and waitcondition
mutex = QtCore.QMutex()
waitCondition = QtCore.QWaitCondition()
def __init__(self, imageList, imageDir, gtExt):
QtCore.QObject.__init__(self)
self.imageDir = imageDir
self.imageList = imageList
self.gtExt = gtExt
def stop(self):
self.canceled = True
def batchConvertToBoundaries(self):
overwriteAll = False
annotation = Annotation()
worker = ConvertToBoundariesWorker()
# Convert each image
for idx, filename in enumerate(self.imageList):
if (self.canceled):
break
# get label json file name
imageExt = os.path.splitext(filename)[1]
gtfilename = filename.replace(imageExt, self.gtExt)
filename = os.path.join(self.imageDir, gtfilename)
filename = os.path.normpath(filename)
# Update progress dialog
self.updateProgress.emit(idx + 1, "Converting {0}".format(gtfilename))
# Check if label json file exist
if (not os.path.isfile(filename)):
text = "{0} not exist. Continue?".format(filename)
self.mutex.lock()
self.information.emit("IOError", text)
self.waitCondition.wait(self.mutex)
self.mutex.unlock()
if (self.userOperationResult == QtGui.QMessageBox.Yes):
continue
else:
break
try:
annotation = Annotation()
annotation.fromJsonFile(filename)
except StandardError as e:
text = "Error parsing labels in {0}. \nContinue?".format(filename)
self.mutex.lock()
self.information.emit("IOError", text)
self.waitCondition.wait(self.mutex)
self.mutex.unlock()
if (self.userOperationResult == QtGui.QMessageBox.Yes):
continue
else:
break
# Skip images that have no instance labels
if (not annotation.objects):
continue
# Check if it has occlusion boundary label
if (not overwriteAll and annotation.boundaries):
text = "{0} already exists occlusion boundary labels. Do you want to overwrite?".format(filename)
self.mutex.lock()
self.information.emit("Overwrite", text)
self.waitCondition.wait(self.mutex)
self.mutex.unlock()
if (self.userOperationResult == QtGui.QMessageBox.No):
continue
elif (self.userOperationResult == QtGui.QMessageBox.YesToAll):
overwriteAll = True
height = annotation.imgHeight
width = annotation.imgWidth
worker.setObjects(annotation.objects)
worker.setSegmentMap(height, width)
polygon = worker.convertToBoundaries()
# Create a new boundary object
boundaries = AnnBoundary()
boundaries.polygon = polygon
boundaries.deleted = 0
boundaries.verified = 0
boundaries.user = getpass.getuser()
boundaries.updateDate()
annotation.boundaries = boundaries
try:
annotation.toJsonFile(filename)
except StandardError as e:
text = "Error writting labels to {0}. \nContinue?".format(filename)
self.mutex.lock()
self.information.emit("IOError", text)
self.waitCondition.wait(self.mutex)
self.mutex.unlock()
if (self.userOperationResult == QtGui.QMessageBox.Yes):
continue
else:
break
self.finished.emit()
| 36.439216 | 113 | 0.567908 | 1,001 | 9,292 | 5.255744 | 0.27972 | 0.020528 | 0.026611 | 0.037065 | 0.213648 | 0.197301 | 0.188747 | 0.173921 | 0.113857 | 0.113857 | 0 | 0.021365 | 0.330069 | 9,292 | 254 | 114 | 36.582677 | 0.823775 | 0.160245 | 0 | 0.251429 | 0 | 0 | 0.028453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051429 | false | 0.011429 | 0.04 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c142870cdc5b68b605e9ca3cb9dda2dd39df1fad | 674 | py | Python | fastquotes/index/csi.py | YangzhenZhao/fastquotes | 1faba9f7fc7801a11359001e08cefa9cfbc41d64 | [
"MIT"
] | 4 | 2020-11-18T11:25:00.000Z | 2021-04-08T01:02:49.000Z | fastquotes/index/csi.py | YangzhenZhao/fastquotes | 1faba9f7fc7801a11359001e08cefa9cfbc41d64 | [
"MIT"
] | null | null | null | fastquotes/index/csi.py | YangzhenZhao/fastquotes | 1faba9f7fc7801a11359001e08cefa9cfbc41d64 | [
"MIT"
] | 1 | 2020-11-18T11:25:01.000Z | 2020-11-18T11:25:01.000Z | import codecs
import json
import requests
from ..const import CUSTOM_HEADER
def latest_year_data(code: str, latest_year: int) -> list:
"""
latest_year: 1, 3 or 5 (years)
"""
url = (
f"http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}?"
f"earnings_performance={latest_year}%E5%B9%B4&data_type=json"
)
text = requests.get(url, headers=CUSTOM_HEADER).text
res_list = []
text = codecs.decode(text.encode(), "utf-8-sig")
for item in json.loads(text):
res_list.append(
{
"date": item["tradedate"][:10],
"close": item["tclose"],
}
)
return res_list
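# Hedged usage sketch ('000300' is assumed to be the CSI 300 index code;
# requires network access):
#   data = latest_year_data('000300', 1)
#   # -> [{'date': 'YYYY-MM-DD', 'close': ...}, ...]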
| 23.241379 | 71 | 0.578635 | 87 | 674 | 4.344828 | 0.655172 | 0.079365 | 0.058201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018405 | 0.274481 | 674 | 28 | 72 | 24.071429 | 0.754601 | 0.02819 | 0 | 0 | 0 | 0.05 | 0.236307 | 0.090767 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c144dd7ed4502a22ce0fcfc2f712cd5108d540e6 | 5,160 | py | Python | substrabac/substrapp/utils.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | substrabac/substrapp/utils.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | substrabac/substrapp/utils.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | import io
import hashlib
import logging
import os
import tempfile
from os import path
from os.path import isfile, isdir
import shutil
import requests
import tarfile
import zipfile
import uuid
from checksumdir import dirhash
from django.conf import settings
from rest_framework import status
class JsonException(Exception):
def __init__(self, msg):
self.msg = msg
super(JsonException, self).__init__()
def get_dir_hash(archive_object):
with tempfile.TemporaryDirectory() as temp_dir:
try:
content = archive_object.read()
archive_object.seek(0)
uncompress_content(content, temp_dir)
except Exception as e:
logging.error(e)
raise e
else:
return dirhash(temp_dir, 'sha256')
def store_datasamples_archive(archive_object):
try:
content = archive_object.read()
archive_object.seek(0)
except Exception as e:
logging.error(e)
raise e
# Temporary directory for uncompressing the archive
datasamples_uuid = uuid.uuid4().hex
tmp_datasamples_path = path.join(getattr(settings, 'MEDIA_ROOT'),
f'datasamples/{datasamples_uuid}')
try:
uncompress_content(content, tmp_datasamples_path)
except Exception as e:
shutil.rmtree(tmp_datasamples_path, ignore_errors=True)
logging.error(e)
raise e
else:
# return the directory hash of the uncompressed file and the path of
# the temporary directory. The removal should be handled externally.
return dirhash(tmp_datasamples_path, 'sha256'), tmp_datasamples_path
def get_hash(file, key=None):
if file is None:
return ''
else:
if isinstance(file, (str, bytes, os.PathLike)):
if isfile(file):
with open(file, 'rb') as f:
data = f.read()
elif isdir(file):
return dirhash(file, 'sha256')
else:
return ''
else:
openedfile = file.open()
data = openedfile.read()
openedfile.seek(0)
return compute_hash(data, key)
def get_owner():
ledger_settings = getattr(settings, 'LEDGER')
return ledger_settings['client']['msp_id']
def compute_hash(bytes, key=None):
sha256_hash = hashlib.sha256()
if isinstance(bytes, str):
bytes = bytes.encode()
if key is not None and isinstance(key, str):
bytes += key.encode()
sha256_hash.update(bytes)
return sha256_hash.hexdigest()
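# Illustrative behaviour (standalone): compute_hash returns a sha256 hex
# digest; str input is encoded first and an optional key is appended before
# hashing, so compute_hash('data') != compute_hash('data', key='salt').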
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
class ZipFile(zipfile.ZipFile):
"""Override Zipfile to ensure unix file permissions are preserved.
This is due to a python bug:
https://bugs.python.org/issue15795
Workaround from:
https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries
"""
def extract(self, member, path=None, pwd=None):
if not isinstance(member, zipfile.ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
ret_val = self._extract_member(member, path, pwd)
attr = member.external_attr >> 16
os.chmod(ret_val, attr)
return ret_val
def uncompress_path(archive_path, to_directory):
if zipfile.is_zipfile(archive_path):
with ZipFile(archive_path, 'r') as zf:
zf.extractall(to_directory)
elif tarfile.is_tarfile(archive_path):
with tarfile.open(archive_path, 'r:*') as tf:
tf.extractall(to_directory)
else:
raise Exception('Archive must be zip or tar.gz')
def uncompress_content(archive_content, to_directory):
if zipfile.is_zipfile(io.BytesIO(archive_content)):
with ZipFile(io.BytesIO(archive_content)) as zf:
zf.extractall(to_directory)
else:
try:
with tarfile.open(fileobj=io.BytesIO(archive_content)) as tf:
tf.extractall(to_directory)
except tarfile.TarError:
raise Exception('Archive must be zip or tar.*')
class NodeError(Exception):
pass
def get_remote_file(url, auth, **kwargs):
kwargs.update({
'headers': {'Accept': 'application/json;version=0.0'},
'auth': auth
})
if settings.DEBUG:
kwargs['verify'] = False
try:
response = requests.get(url, **kwargs)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
raise NodeError(f'Failed to fetch {url}') from e
return response
def get_remote_file_content(url, auth, content_hash, salt=None):
response = get_remote_file(url, auth)
if response.status_code != status.HTTP_200_OK:
logging.error(response.text)
raise NodeError(f'Url: {url} returned status code: {response.status_code}')
computed_hash = compute_hash(response.content, key=salt)
if computed_hash != content_hash:
raise NodeError(f"url {url}: hash doesn't match {content_hash} vs {computed_hash}")
return response.content
| 27.593583 | 105 | 0.64845 | 634 | 5,160 | 5.130915 | 0.293375 | 0.023978 | 0.027667 | 0.0166 | 0.183523 | 0.134952 | 0.083923 | 0.071934 | 0.050415 | 0 | 0 | 0.011743 | 0.257364 | 5,160 | 186 | 106 | 27.741935 | 0.837161 | 0.081008 | 0 | 0.236641 | 0 | 0 | 0.069807 | 0.016974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091603 | false | 0.007634 | 0.114504 | 0 | 0.312977 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1484f680c8d5268a7187ffd0cd5d37747e57a92 | 1,569 | py | Python | Algorithms/Assign1/v2.py | thebishaldeb/ClassAssignments | f44c51695266da0c98d1ab3516c473c6d1008933 | [
"MIT"
] | null | null | null | Algorithms/Assign1/v2.py | thebishaldeb/ClassAssignments | f44c51695266da0c98d1ab3516c473c6d1008933 | [
"MIT"
] | null | null | null | Algorithms/Assign1/v2.py | thebishaldeb/ClassAssignments | f44c51695266da0c98d1ab3516c473c6d1008933 | [
"MIT"
] | null | null | null | # FUNCTION
def med(arr1, arr2, length):
if length == 2:
return findMed( arr1, arr2)
mid = int((length-1)/2)
if (arr1[mid] < arr2[mid]):
return med( arr2[0:mid+1], arr1[-mid-1:length], len(arr2[0:mid+1]))
elif (arr1[mid] > arr2[mid]):
return med( arr1[0:mid+1], arr2[-mid-1:length], len(arr1[0:mid+1]))
def findMed(arr1, arr2):
return sorted(arr1 + arr2)[int(len(arr1 + arr2)/2 - 1)]
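# Worked example (both arrays must be sorted and of equal length):
# med([1, 2, 3, 4], [5, 6, 7, 8], 4) compares the middles 2 < 6, recurses
# on ([5, 6], [3, 4]) and returns 4 -- the lower median of the merged arrays.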
# Dictionaries to store databases from the text files
db1 = {}
db2 = {}
with open("db1.txt","r") as file:
for line in file:
x = line.split("- ")
db1[int(x[0])] = x[1][0:len(x[1])-1]
with open("db2.txt","r") as file:
for line in file:
x = line.split("- ")
db2[int(x[0])] = x[1][0:len(x[1])-1]
print(db1)
print(db2)
kth = int(input('\nEnter k to find the k-th smallest movie: '))
print('\nThe k-th smallest movie from the first database:', db1[sorted(db1)[kth-1]])
print('\nThe k-th smallest movie from the second database:', db2[sorted(db2)[kth-1]])
# The movie durations from each database, sorted into lists
arr1 = sorted(db1)
arr2 = sorted(db2)
length = len(arr1) # No. of movies in the database
median = med(arr1, arr2, length) #Function 'med' defined at the top.
for i in range(length):
if arr1[i] == median:
print("\nThe movie with median duration, i.e.",median, "is", db1[median])
break
elif arr2[i] == median:
print("\nThe movie with median duration, i.e.",median, "is", db2[median])
break
| 26.59322 | 82 | 0.586361 | 250 | 1,569 | 3.68 | 0.26 | 0.052174 | 0.021739 | 0.036957 | 0.323913 | 0.323913 | 0.208696 | 0.208696 | 0.208696 | 0.208696 | 0 | 0.058774 | 0.240918 | 1,569 | 59 | 83 | 26.59322 | 0.713686 | 0.123646 | 0 | 0.166667 | 0 | 0 | 0.170073 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0.027778 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1486548c7b778a225198dc3750c6bc512122e6c | 4,025 | py | Python | brain-imaging/run_tsne_brain.py | agramfort/spatio-temporal-alignements | 18594cf0372dc874decccecad69e310f84142c88 | [
"BSD-3-Clause"
] | 28 | 2019-10-18T07:29:52.000Z | 2022-01-27T15:12:45.000Z | brain-imaging/run_tsne_brain.py | agramfort/spatio-temporal-alignements | 18594cf0372dc874decccecad69e310f84142c88 | [
"BSD-3-Clause"
] | 2 | 2021-01-16T18:34:31.000Z | 2022-02-03T14:49:34.000Z | brain-imaging/run_tsne_brain.py | agramfort/spatio-temporal-alignements | 18594cf0372dc874decccecad69e310f84142c88 | [
"BSD-3-Clause"
] | 4 | 2021-01-16T17:22:23.000Z | 2022-01-11T03:24:24.000Z |
import mne
import pickle
import numpy as np
from sta import sta_matrix, sdtw_matrix
from sklearn.manifold import TSNE
# change this if you have GPUs
# in our platform, this experiment ran on 4 GPUs in around 20 minutes
n_gpu_devices = 0
def generate_samples(n_samples, n_times, time_point, space_points, M,
smoothing_time=1., smoothing_space=0.01,
seed=None):
"""Simulate brain signals at a time_point and in a random vertex among
`space_points`."""
rng = np.random.RandomState(seed)
n_features = len(M)
time_points = (np.ones(n_samples) * time_point).astype(int)
space_points = rng.choice(space_points, size=n_samples)
signals = np.zeros((n_samples, n_times, n_features)).astype(float)
values = rng.rand(n_samples) * 2 + 1
signals[np.arange(n_samples), time_points, space_points] = values
# create temporal and spatial gaussian filters to smooth the data
times = np.arange(n_times)
metric = (times[:, None] - times[None, :]) ** 2
kernel_time = np.exp(- metric / smoothing_time)
kernel_space = np.exp(- M / smoothing_space)
for i, signal in enumerate(signals):
signals[i] = kernel_space.dot(signal.T).T
signals[i] = kernel_time.dot(signal)
return signals
if __name__ == "__main__":
# load brain regions
mt = mne.read_label("data/lh.MT.label")
v1 = mne.read_label("data/lh.V1.label")
# load ground metric defined on the cortical triangulated mesh
M_ = np.load("data/ground_metric.npy") ** 2
M = M_ / np.median(M_)
vertices = [np.arange(642), []]
gamma = 1.
n_features = len(M)
epsilon = 10. / n_features
K = np.exp(- M / epsilon)
mt_vertices = mt.vertices[mt.vertices < 642]
v1_vertices = v1.vertices[v1.vertices < 642]
seed = 42
n_samples_per_task = 50
n_times = 20
time0, time1 = 5, 15
# Create the four categories of brain signals with different random seeds
meg_v1_0 = generate_samples(n_samples_per_task, n_times, time0,
v1_vertices, M=M, seed=seed)
meg_v1_1 = generate_samples(n_samples_per_task, n_times, time1,
v1_vertices, M=M, seed=seed + 1)
meg_mt_0 = generate_samples(n_samples_per_task, n_times, time0,
mt_vertices, M=M, seed=seed + 2)
meg_mt_1 = generate_samples(n_samples_per_task, n_times, time1,
mt_vertices, M=M, seed=seed + 3)
# to avoid numerical errors with Sinkhorn, add 1e-3
meg = np.concatenate((meg_v1_0, meg_v1_1, meg_mt_0, meg_mt_1)) + 1e-3
# create labels for categories
y_time = np.r_[2 * np.r_[n_samples_per_task * [0],
n_samples_per_task * [1]].tolist()]
y_space = np.r_[2 * n_samples_per_task * [0], 2 * n_samples_per_task * [1]]
betas = [0, 0.001, 0.01, 0.1, 0.5, 1., 2., 3., 5., 10.]
experiment = dict(meg=meg, y_time=y_time, y_space=y_space, betas=betas)
train_data = []
n_samples, n_times, dimension = meg.shape
params = dict(K=K, epsilon=epsilon, gamma=gamma, n_jobs=4,
n_gpu_devices=n_gpu_devices)
precomputed = sta_matrix(meg, betas, **params)
experiment["sta"] = dict()
for beta, train_ in zip(betas, precomputed):
train = train_.copy()
# shift the distance to avoid negative values with large betas
train -= train.min()
tsne_data = TSNE(metric="precomputed").fit_transform(train)
experiment["sta"][beta] = tsne_data
method = "soft"
experiment["soft"] = dict()
for beta in betas:
precomputed = sdtw_matrix(meg, beta, n_jobs=10)
train = precomputed.copy()
# shift the distance to avoid negative values with large betas
train -= train.min()
tsne_data = TSNE(metric="precomputed").fit_transform(train)
experiment[method][beta] = tsne_data
expe_file = open("data/tsne-brains.pkl", "wb")
pickle.dump(experiment, expe_file)
| 37.268519 | 79 | 0.640248 | 592 | 4,025 | 4.119932 | 0.282095 | 0.052481 | 0.04059 | 0.055351 | 0.245182 | 0.203362 | 0.170562 | 0.170562 | 0.170562 | 0.170562 | 0 | 0.030723 | 0.24795 | 4,025 | 107 | 80 | 37.616822 | 0.775025 | 0.148571 | 0 | 0.08 | 0 | 0 | 0.035253 | 0.006463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.066667 | 0 | 0.093333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c14f824a0678ada5998332bc22d1955b5b7acece | 19,547 | py | Python | src/muscle_synergies/vicon_data/user_data.py | elvis-sik/muscle_synergies | eff0d016f2032faa9b8fba5363249e6fdb150abf | [
"MIT"
] | 6 | 2021-02-05T21:53:08.000Z | 2022-01-20T16:50:39.000Z | src/muscle_synergies/vicon_data/user_data.py | elvis-sik/muscle_synergies | eff0d016f2032faa9b8fba5363249e6fdb150abf | [
"MIT"
] | 1 | 2021-02-06T14:14:52.000Z | 2021-03-01T03:44:23.000Z | src/muscle_synergies/vicon_data/user_data.py | elvis-sik/muscle_synergies | eff0d016f2032faa9b8fba5363249e6fdb150abf | [
"MIT"
] | null | null | null | """Types that help building the final representation of the data.
From the point of view of the internal API, the main type in this module is
:py:class:`Builder`, which uses the data stored in an
:py:class:`~muscle_synergies.vicon_data.aggregator.Aggregator` to build the
:py:class:`ViconNexusData`. That object, in turn, simply holds references to
:py:class:`DeviceData` instances corresponding to the different experimental
devices, organized by their type (see
:py:class:`~muscle_synergies.vicon_data.definitions.DeviceType`).
Refer to the documentation for the package
:py:mod:`muscle_synergies.vicon_data.__init__.py` for more on how
:py:class:`Builder` fits together with the other classes used for reading the
data from disk.
"""
import abc
from collections import defaultdict
from dataclasses import dataclass
from functools import lru_cache
from typing import Iterator, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from .aggregator import Aggregator, DeviceAggregator
from .definitions import DeviceType, SamplingFreq
@dataclass
class ViconNexusData:
"""The data contained in a Vicon Nexus CSV file.
The initialization arguments are stored as they are under the same names.
Args:
forcepl: a sequence of :py:class:`DeviceData` corresponding to the
different force plate devices.
emg: a single :py:class:`DeviceData` that includes all columns with EMG
measurements.
traj: a sequence of :py:class:`DeviceData` corresponding to the
different trajectory devices.
"""
forcepl: Sequence["DeviceData"]
emg: "DeviceData"
traj: Sequence["DeviceData"]
def __repr__(self):
return "ViconNexusData(forcepl=[...], emg=<DeviceData>, traj=[...])"
def describe(self) -> str:
"""Represent ViconNexusData object as a Markdown list.
This method is intended to help the user get a quick glance at what was
loaded. The returned value will be a multiline string similar to this:
ViconNexusData:
+ emg: 8 columns
+ forcepl (2 devices): DeviceData("Force Plate 1"), DeviceData("Force Plate 2")
+ traj (14 devices): DeviceData("Traj 1"), ..., DeviceData("Traj 14")
In the case of force plates and trajectory markers, if there are more
than 2 devices, they are occluded as in the last line of the example.
"""
emg_str = self._amount_str(self._num_cols(self.emg), "column")
forcepl_len_str = self._amount_str(len(self.forcepl), "device")
forcepl_members_str = self._stringify_list(self.forcepl)
traj_len_str = self._amount_str(len(self.traj), "device")
traj_members_str = self._stringify_list(self.traj)
return f"""ViconNexusData:
+ emg: {emg_str}
+ forcepl ({forcepl_len_str}): {forcepl_members_str}
+ traj ({traj_len_str}): {traj_members_str}"""
@staticmethod
def _num_cols(dev: "DeviceData") -> int:
"""Get number of columns contained in :py:class:`DeviceData` object."""
return len(dev.df.columns)
@staticmethod
def _amount_str(num: int, noun: str) -> str:
"""Add an "s" to a noun to make it plural if needed."""
if num == 1:
plural_s = ""
else:
plural_s = "s"
return f"{num} {noun}{plural_s}"
@staticmethod
def _stringify_list(seq: Sequence) -> str:
"""Represent list as string occluding elements to make it short."""
seq = list(seq)
if len(seq) > 2:
seq = [seq[0]] + ["..."] + [seq[-1]]
return ", ".join(map(str, seq))
class Builder:
"""Build a ViconNexusData using the data stored in an Aggregator."""
def __init__(self, aggregator: Optional[Aggregator] = None):
self.aggregator = aggregator
def build(self, aggregator: Optional[Aggregator] = None) -> ViconNexusData:
"""Build a ViconNexusData using the data stored in an Aggregator.
Args:
aggregator: if not provided, use the one given during
initialization.
Raises:
ValueError if the number of EMG devices is not exactly 1.
"""
if aggregator is None:
aggregator = self.aggregator
frame_tracker = self._build_frame_tracker(aggregator)
devices_by_type = defaultdict(list)
for device_agg in self._devices(aggregator):
device_data = self._build_device(device_agg, frame_tracker)
device_type = self._device_agg_type(device_agg)
devices_by_type[device_type].append(device_data)
        # TODO fix a typing mess:
        # 1. make _vicon_nexus_data take 3 parameters corresponding to the
        #    device-type lists instead of a dict
        # 2. make _simplify_emg take an emg_list and return an emg_dev,
        #    raising if the list has more than one entry
return self._vicon_nexus_data(self._simplify_emg(devices_by_type))
def _build_device(
self,
device_agg: DeviceAggregator,
frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"],
) -> "DeviceData":
"""Create new DeviceData from DeviceAggregator and frame trackers."""
params_dict = self._params_device_data(device_agg, frame_tracker)
return self._instantiate_device(**params_dict)
def _params_device_data(
self,
device_agg: DeviceAggregator,
frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"],
) -> Mapping[str, Union[str, DeviceType, "_SectionFrameTracker", pd.DataFrame]]:
"""Build a dict with the params to create a new DeviceData instance.
This method sets up a dict corresponding to the keyword arguments
        required by :py:meth:`~Builder._instantiate_device`.
"""
return {
"device_name": self._device_agg_name(device_agg),
"device_type": self._device_agg_type(device_agg),
"units": self._device_agg_units(device_agg),
"frame_tracker": self._choose_frame_tracker(device_agg, *frame_tracker),
"dataframe": self._extract_dataframe(device_agg),
}
def _build_frame_tracker(
self, aggregator: Aggregator
) -> Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"]:
"""Build frame trackers corresponding to Aggregator."""
sampling_freq = self._aggregator_sampling_freq(aggregator)
return (ForcesEMGFrameTracker(sampling_freq), TrajFrameTracker(sampling_freq))
@staticmethod
def _instantiate_device(
device_name: str,
device_type: DeviceType,
units: List[str],
frame_tracker: "_SectionFrameTracker",
dataframe: pd.DataFrame,
) -> "DeviceData":
"""Instantiate new DeviceData object."""
return DeviceData(
device_name=device_name,
device_type=device_type,
units=units,
frame_tracker=frame_tracker,
dataframe=dataframe,
)
@classmethod
def _extract_dataframe(cls, device_aggregator: DeviceAggregator) -> pd.DataFrame:
"""Create DataFrame with the data in the DeviceAggregator."""
data = cls._device_agg_data(device_aggregator)
header = cls._device_agg_coords(device_aggregator)
return pd.DataFrame(data, columns=header, dtype=float)
def _simplify_emg(
self, devices_by_type: Mapping[DeviceType, List["DeviceData"]]
) -> Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]]:
"""Replaces list of EMG devices with the single device in dict.
Args:
devices_by_type: a dict which lists all devices of each type.
Returns:
a copy of the dict with one change.
            `new_devices_by_type[DeviceType.EMG]` will not be a list of
devices but rather a single one as it is assumed that all EMG data
is represented as being different coordinates of a single
experimental device.
Raises:
ValueError if the number of EMG devices is not exactly 1.
"""
new_devices_dict = dict(devices_by_type)
emg_list = devices_by_type[DeviceType.EMG]
if len(emg_list) != 1:
raise ValueError(f"found {len(emg_list)} EMG devices - expected one")
emg_dev = emg_list[0]
new_devices_dict[DeviceType.EMG] = emg_dev
return new_devices_dict
@staticmethod
def _vicon_nexus_data(
devices_by_type: Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]],
) -> ViconNexusData:
"""Instantiate new ViconNexusData object."""
return ViconNexusData(
forcepl=devices_by_type[DeviceType.FORCE_PLATE],
emg=devices_by_type[DeviceType.EMG],
traj=devices_by_type[DeviceType.TRAJECTORY_MARKER],
)
@staticmethod
def _devices(aggregator: Aggregator) -> Iterator[DeviceAggregator]:
"""Yield all `DeviceAggregator`s stored in the Aggregator."""
yield from aggregator.get_devices()
def _choose_frame_tracker(
self,
device_agg: DeviceAggregator,
forces_emg_tracker: "ForcesEMGFrameTracker",
traj_tracker: "TrajFrameTracker",
) -> "_SectionFrameTracker":
"""Choose the correct frame tracker for device."""
forces_emg = {DeviceType.FORCE_PLATE, DeviceType.EMG}
if self._device_agg_type(device_agg) in forces_emg:
return forces_emg_tracker
return traj_tracker
@staticmethod
def _device_agg_name(device_aggregator: DeviceAggregator) -> str:
"""Get device name from DeviceAggregator."""
return device_aggregator.name
@staticmethod
def _device_agg_type(device_aggregator: DeviceAggregator) -> DeviceType:
"""Get device type from DeviceAggregator."""
return device_aggregator.device_type
@staticmethod
def _device_agg_units(device_aggregator: DeviceAggregator) -> List[str]:
"""Get device units from DeviceAggregator."""
return device_aggregator.units
@staticmethod
def _device_agg_coords(device_aggregator: DeviceAggregator) -> List[str]:
"""Get device coordinates from DeviceAggregator."""
return device_aggregator.coords
@staticmethod
def _device_agg_data(device_aggregator: DeviceAggregator) -> List[List[float]]:
"""Get the data rows stored in DeviceAggregator."""
return device_aggregator.data_rows
@staticmethod
def _aggregator_sampling_freq(aggregator: Aggregator) -> "SamplingFreq":
"""Get the sampling frequencies stored in Aggregator."""
return aggregator.get_sampling_freq()
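def _demo_simplify_emg() -> None:
    # Illustrative sketch only (not part of the original module): plain
    # strings stand in for DeviceData objects.  _simplify_emg unwraps the
    # one-element EMG list and raises ValueError for any other length.
    builder = Builder()
    devices = {DeviceType.EMG: ["emg"], DeviceType.FORCE_PLATE: ["fp1", "fp2"]}
    assert builder._simplify_emg(devices)[DeviceType.EMG] == "emg"
    try:
        builder._simplify_emg({DeviceType.EMG: []})
    except ValueError:
        pass  # expected: there must be exactly one EMG device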
class _SectionFrameTracker(abc.ABC):
"""Convert array indices to/from (frame, subframe) for a section.
This class is abstract, subclasses implement the conversions, which differ
between the 2 sections of the CSV file. The first data row will have index
    0 and correspond to frame 1 and subframe 0. The second data row will have
index 1 but its frame and subframe will differ depending on the relative
sampling rate of each section. See
:py:class:`~muscle_synergies.vicon_data.definitions.SamplingFreq`.
The 2 main methods of :py:class:`_SectionFrameTracker` are:
+ :py:meth:`~_SectionFrameTracker.index`: convert frame and subframe to the
corresponding array index.
+ :py:meth:`~_SectionFrameTracker.frame_tracker`: convert an array index to
the corresponding frame and subframe.
"""
    def __init__(self, sampling_freq: SamplingFreq):
self._sampling_freq = sampling_freq
@property
def num_frames(self) -> int:
"""Total number of frames."""
return self._sampling_freq.num_frames
@abc.abstractproperty
def sampling_frequency(self) -> int:
"""Sampling frequency in Hz with which the measurements were made."""
pass
@abc.abstractmethod
def index(self, frame: int, subframe: int) -> int:
"""Array index associated with frame and subframe.
Raises:
ValueError if the arguments are outside of the allowed range.
`frame` should be between 1 and
:py:attr:`~_SectionFrameTracker.num_frames`. `subframe` should
                be between 0 and
                :py:attr:`~_SectionFrameTracker.num_subframes` - 1.
"""
self._validate_frame_tracker_args(frame, subframe)
@abc.abstractmethod
def frame_tracker(self, index: int) -> Tuple[int, int]:
"""Frame and subframe associated with given array index.
Raises:
ValueError if the argument is outside of the allowed range (from 0
to :py:attr:`~_SectionFrameTracker.final_index`).
"""
self._validate_index_arg(index)
@abc.abstractproperty
def final_index(self) -> int:
"""The highest array index."""
pass
@property
def num_subframes(self) -> int:
"""The total number of subframes."""
return self._sampling_freq.num_subframes
@property
def _freq_forces_emg(self) -> int:
"""The sampling rate of the section with force plates and EMG."""
return self._sampling_freq.freq_forces_emg
@property
def _freq_traj(self) -> int:
"""The sampling rate of the section with trajectories."""
return self._sampling_freq.freq_traj
def _validate_index_arg(self, index: int):
"""Raise exception if index is outside of allowed range."""
if index not in range(self.final_index + 1):
raise ValueError(f"index {index} out of bounds (max is self.final_index)")
def _validate_frame_tracker_args(self, frame: int, subframe: int):
"""Raise exception if frame and subframe are not in allowed range."""
if frame not in range(1, self.num_frames + 1):
raise ValueError(f"frame {frame} is out of bounds")
if subframe not in range(self.num_subframes):
raise ValueError(f"subframe {subframe} out of range")
def time_seq(self) -> pd.Series:
"""Create Series with times in seconds of all measurements."""
return self._time_seq(self.sampling_frequency, self.final_index + 1)
@staticmethod
@lru_cache(maxsize=2)
def _time_seq(sampling_frequency: int, num_measurements: int) -> pd.Series:
"""Memoized version of time_seq."""
period = 1 / sampling_frequency
return pd.Series(period * np.arange(1, num_measurements + 1, 1))
class ForcesEMGFrameTracker(_SectionFrameTracker):
@property
def sampling_frequency(self) -> int:
return self._freq_forces_emg
def index(self, frame: int, subframe: int) -> int:
super().index(frame, subframe)
return (frame - 1) * self.num_subframes + subframe
def frame_tracker(self, index: int) -> Tuple[int, int]:
super().frame_tracker(index)
frame = (index // self.num_subframes) + 1
subframe = index % self.num_subframes
return frame, subframe
@property
def final_index(self) -> int:
return self.num_frames * self.num_subframes - 1
class TrajFrameTracker(_SectionFrameTracker):
@property
def sampling_frequency(self) -> int:
return self._freq_traj
def index(self, frame: int, subframe: int) -> int:
super().index(frame, subframe)
return frame - 1
def frame_tracker(self, index: int) -> Tuple[int, int]:
super().frame_tracker(index)
return index + 1, 0
@property
def final_index(self) -> int:
return self.num_frames - 1
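def _demo_frame_trackers() -> None:
    # Illustrative sketch only (not part of the original module): exercise the
    # index <-> (frame, subframe) round trip.  SimpleNamespace stands in for
    # the real SamplingFreq from muscle_synergies.vicon_data.definitions,
    # whose constructor may differ.
    from types import SimpleNamespace
    freq = SimpleNamespace(
        num_frames=3, num_subframes=5, freq_forces_emg=2000, freq_traj=100
    )
    forces = ForcesEMGFrameTracker(freq)
    assert forces.index(2, 1) == 6  # (2 - 1) * 5 + 1
    assert forces.frame_tracker(6) == (2, 1)  # and back again
    traj = TrajFrameTracker(freq)
    assert traj.index(3, 0) == 2  # trajectory data has one row per frame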
class DeviceData:
"""Data associated with a measurement device."""
    name: str
    """the name of the device, as it occurs in the CSV file."""
    dev_type: DeviceType
    """the type of the device (can be a force plate, trajectory marker or EMG
    device).
    """
    units: Tuple[str, ...]
    """physical units of each column in the :py:class:`~pandas.DataFrame`."""
    df: pd.DataFrame
    """the data associated with the device."""
def __init__(
self,
device_name: str,
device_type: DeviceType,
units: List[str],
frame_tracker: _SectionFrameTracker,
dataframe: pd.DataFrame,
):
self.name = device_name
self.dev_type = device_type
self.units = tuple(units)
self.df = dataframe
self._frame_tracker = frame_tracker
@property
def sampling_frequency(self) -> int:
"""Sampling rate with which measurements were made."""
return self._frame_tracker.sampling_frequency
def time_seq(self) -> pd.Series:
"""Compute the moment in seconds in which measurements were made.
Returns:
a :py:class:`pandas.Series` where each entry corresponds to
"""
return self._frame_tracker.time_seq()
def iloc(self, frame: int, subframe: int) -> pd.Series:
"""Index data rows by their frame and subframe.
This method works similarly to :py:attr:`pandas.DataFrame.iloc`: its
purpose is to help the user index the data referring to rows. Whereas
the :py:class:`~pandas.DataFrame` version is used by directly indexing
it (`datafr.iloc[0]` returns the first row), the :py:class:`DeviceData`
version is a method.
To get the i-th row of the :py:class:`~pandas.DataFrame`, use its own
:py:attr:`~pandas.DataFrame.iloc`. This method should be used only when
the goal is to get not the i-th row but the one corresponding to a
given frame and subframe.
Raises:
KeyError: if the frame and subframe are out of bounds.
"""
return self.df.iloc[self._convert_key(frame, subframe)]
    def frame_subfr(self, index: int) -> Tuple[int, int]:
"""Find (frame, subframe) pair corresponding to index."""
return self._frame_tracker.frame_tracker(index)
def _key_slice_frame_subframe(
self,
stop: Tuple[int, int],
start: Optional[Tuple[int, int]] = None,
step: Optional[int] = None,
) -> slice:
"""Create slice with indexes corresponding to (frame, subframe) range.
Raises:
KeyError: if the frame and subframe are out-of-bounds.
"""
stop_index = self._convert_key(*stop)
if start is None:
return slice(stop_index)
start_index = self._convert_key(*start)
if step is None:
return slice(start_index, stop_index)
return slice(start_index, stop_index, step)
def _convert_key(self, frame: int, subframe: int) -> int:
"""Get index corresponding to given frame and subframe.
Raises:
KeyError: if the frame and subframe are out-of-bounds.
"""
try:
return self._frame_tracker_index(frame, subframe)
except ValueError as err:
raise KeyError from err
def _frame_tracker_index(self, frame: int, subframe: int) -> int:
"""Call FrameTracker.index with arguments."""
return self._frame_tracker.index(frame, subframe)
def __eq__(self, other) -> bool:
return (
self.name == other.name
and self.dev_type == other.dev_type
and self.units == other.units
and self.df.equals(other.df)
)
def __str__(self):
return f'DeviceData("{self.name}")'
def __repr__(self):
return f"<{str(self)}>"
| 37.020833 | 91 | 0.65785 | 2,392 | 19,547 | 5.198579 | 0.15301 | 0.031846 | 0.012545 | 0.011259 | 0.268999 | 0.190109 | 0.154564 | 0.133735 | 0.117973 | 0.095456 | 0 | 0.003207 | 0.250269 | 19,547 | 527 | 92 | 37.091082 | 0.845309 | 0.339234 | 0 | 0.249097 | 0 | 0 | 0.068773 | 0.013434 | 0 | 0 | 0 | 0.001898 | 0 | 1 | 0.194946 | false | 0.00722 | 0.032491 | 0.028881 | 0.444043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1530d1f98179c78b07bae3b02ff2a685a89878e | 1,629 | py | Python | tests/modification-check.py | luisriverag/certbot | 52e207a404ab3600637fc7a24492e2c68512ce2d | [
"Apache-2.0"
] | 1 | 2017-05-14T17:09:38.000Z | 2017-05-14T17:09:38.000Z | tests/modification-check.py | luisriverag/certbot | 52e207a404ab3600637fc7a24492e2c68512ce2d | [
"Apache-2.0"
] | 5 | 2021-03-15T21:43:04.000Z | 2021-07-22T20:31:43.000Z | tests/modification-check.py | luisriverag/certbot | 52e207a404ab3600637fc7a24492e2c68512ce2d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Ensures there have been no changes to important certbot-auto files."""
import hashlib
import os
# Relative to the root of the Certbot repo, these files are expected to exist
# and have the SHA-256 hashes contained in this dictionary. These hashes were
# taken from our v1.14.0 tag which was the last release we intended to make
# changes to certbot-auto.
#
# Deleting letsencrypt-auto-source/letsencrypt-auto and
# letsencrypt-auto-source/letsencrypt-auto.sig can be done once we're
# comfortable breaking any certbot-auto scripts that haven't already updated to
# the last version. See
# https://opensource.eff.org/eff-open-source/pl/65geri7c4tr6iqunc1rpb3mpna for
# more info.
EXPECTED_FILES = {
os.path.join('letsencrypt-auto-source', 'letsencrypt-auto'):
'b997e3608526650a08e36e682fc3bf0c29903c06fa5ba4cc49308c43832450c2',
os.path.join('letsencrypt-auto-source', 'letsencrypt-auto.sig'):
'61c036aabf75da350b0633da1b2bef0260303921ecda993455ea5e6d3af3b2fe',
}
def find_repo_root():
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def sha256_hash(filename):
hash_object = hashlib.sha256()
with open(filename, 'rb') as f:
hash_object.update(f.read())
return hash_object.hexdigest()
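def _demo_sha256_hash():
    # Illustrative sketch only (not part of the original script): sha256_hash
    # of a file matches hashlib.sha256 over the same bytes.
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"certbot-auto")
        path = f.name
    try:
        assert sha256_hash(path) == hashlib.sha256(b"certbot-auto").hexdigest()
    finally:
        os.remove(path)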
def main():
repo_root = find_repo_root()
for filename, expected_hash in EXPECTED_FILES.items():
filepath = os.path.join(repo_root, filename)
assert sha256_hash(filepath) == expected_hash, f'unexpected changes to {filepath}'
print('All certbot-auto files have correct hashes.')
if __name__ == '__main__':
main()
| 33.9375 | 90 | 0.745242 | 217 | 1,629 | 5.470046 | 0.511521 | 0.101095 | 0.070767 | 0.107835 | 0.170177 | 0.11289 | 0.077506 | 0.077506 | 0 | 0 | 0 | 0.076032 | 0.152241 | 1,629 | 47 | 91 | 34.659574 | 0.78349 | 0.398404 | 0 | 0 | 0 | 0 | 0.306334 | 0.180685 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.130435 | false | 0 | 0.086957 | 0.043478 | 0.304348 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c153b504e55b04acb0b49c1e4ecd7223c00968b8 | 560 | py | Python | tests/test_load.py | michaelpeterswa/qsml | e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2 | [
"MIT"
] | 7 | 2020-06-28T16:28:54.000Z | 2020-09-18T13:18:55.000Z | tests/test_load.py | michaelpeterswa/qsml | e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2 | [
"MIT"
] | 1 | 2020-06-27T08:36:02.000Z | 2020-06-28T23:30:03.000Z | tests/test_load.py | michaelpeterswa/qsml | e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2 | [
"MIT"
] | 1 | 2020-07-30T05:03:38.000Z | 2020-07-30T05:03:38.000Z | import unittest
import qsml
class TestLoad(unittest.TestCase):
def test_load(self):
file = "tests/load.qsml"
returned_val = {
"myportfolio": {"GOOG": 10, "AAPL": 5, "BRK.B": 1},
"test": {"SNAP": 130, "MSFT": 5, "TSLA": 100},
}
self.assertEqual(qsml.load(file), returned_val, "Were not equal")
def test_load_comment_error(self):
file = "tests/load2.qsml"
with self.assertRaises(qsml.error.QSMLError):
qsml.load(file)
if __name__ == "__main__":
unittest.main()
| 25.454545 | 73 | 0.583929 | 67 | 560 | 4.671642 | 0.597015 | 0.044728 | 0.070288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029126 | 0.264286 | 560 | 21 | 74 | 26.666667 | 0.730583 | 0 | 0 | 0 | 0 | 0 | 0.166071 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c153e1c37eaaf1da2ce812283ce1bb7f91f0f0b1 | 6,012 | py | Python | votesim/utilities/decorators.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 8 | 2019-10-21T23:24:51.000Z | 2021-09-14T03:04:59.000Z | votesim/utilities/decorators.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 2 | 2021-02-09T23:52:47.000Z | 2021-02-10T04:08:35.000Z | votesim/utilities/decorators.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 1 | 2019-10-21T23:32:18.000Z | 2019-10-21T23:32:18.000Z | """
Collection of utilities such as memoization, automatic property storage, etc
"""
from __future__ import print_function, absolute_import, division
from functools import wraps, partial
import logging
from votesim.utilities import misc
logger = logging.getLogger(__name__)
class memoize:
"""
Decorator used to store past calls.
"""
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args, **kwargs):
key = (args, frozenset(kwargs.items()))
try:
return self.memoized[key]
except KeyError:
self.memoized[key] = self.function(*args, **kwargs)
return self.memoized[key]
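def _demo_memoize():
    # Illustrative sketch only (not part of the original module): repeated
    # calls with the same arguments are served from the memoized dict.
    calls = []
    @memoize
    def slow_add(a, b):
        calls.append((a, b))
        return a + b
    assert slow_add(1, 2) == 3
    assert slow_add(1, 2) == 3  # second call hits the cache
    assert calls == [(1, 2)]  # the wrapped function ran only once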
class method_memoize(object):
"""cache the return value of a method
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method:
class Obj(object):
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
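def _demo_method_memoize():
    # Illustrative sketch only (not part of the original module): results are
    # cached per instance, keyed on (method, positional args, kwargs items).
    class Adder:
        calls = 0
        @method_memoize
        def add(self, x):
            Adder.calls += 1
            return x + 1
    a = Adder()
    assert a.add(1) == 2 and a.add(1) == 2
    assert Adder.calls == 1  # the second call came from the per-instance cache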
#
#def lazyprop(fn):
# """
# Decorator used to cache property results
#
# From stack overflow. Author Mike Boers
# https://stackoverflow.com/questions/3012421/python-memoising-deferred-lookup-property-decorator
# """
#
# attr_name = '_lazy_' + fn.__name__
# @property
# def _lazyprop(self):
# if not hasattr(self, attr_name):
# setattr(self, attr_name, fn(self))
# return getattr(self, attr_name)
# return _lazyprop
#
### Lazy Property decorator
# Property name to hold all lazy data
_data_holder_attr = '_cache_properties'
def clean_lazy_properties(instance):
'''Clean all lazy properties'''
setattr(instance, _data_holder_attr, {})
def clean_some_lazy_properties(instance, names):
"""Clean properties in iterable names"""
try:
cache = getattr(instance, _data_holder_attr)
except AttributeError:
return
if isinstance(names, str):
names = [names]
for name in names:
try:
del cache[name]
except KeyError:
pass
setattr(instance, _data_holder_attr, cache)
return
def modify_lazy_property(instance, name, value, dictname=_data_holder_attr):
"""Modify a lazy property"""
cache = getattr(instance, dictname)
cache[name] = value
setattr(instance, _data_holder_attr, cache)
return
def lazy_property(fn):
"""
Version of lazy_property by John Huang.
Decorator used to cache property results into dictionary.
    The cache can be cleared using clean_lazy_properties.
"""
cache_name = _data_holder_attr
attr_name = fn.__name__
def get_cache(instance):
if not hasattr(instance, cache_name):
setattr(instance, cache_name, {})
return getattr(instance, cache_name)
@property
@wraps(fn)
def get_attr(self):
cache = get_cache(self)
if attr_name not in cache:
cache[attr_name] = fn(self)
return cache[attr_name]
return get_attr
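def _demo_lazy_property():
    # Illustrative sketch only (not part of the original module): the value is
    # computed once, cached under _data_holder_attr, and recomputed after
    # clean_lazy_properties clears the cache.
    class Counted:
        def __init__(self):
            self.calls = 0
        @lazy_property
        def value(self):
            self.calls += 1
            return 42
    c = Counted()
    assert c.value == 42 and c.value == 42
    assert c.calls == 1  # computed only once
    clean_lazy_properties(c)
    assert c.value == 42 and c.calls == 2  # recomputed after the cache reset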
def lazy_property2(name=_data_holder_attr):
"""
Version of lazy_property by John Huang.
Decorator used to cache property results into dictionary.
The cache can be cleared using clean_lazy_properties.
Decorator must be called as a function.
Parameters
----------
name : str
Name of cache dictionary
Example
---------
Set the lazy property
>>> class class1(object):
>>> @lazy_property2('my_cache')
>>> def property(self):
>>> x = 2.0
>>> return x
Delete the lazy property
>>> a = class1()
>>> del a.my_cache
"""
def decorator(fn):
cache_name = name
attr_name = fn.__name__
def get_cache(instance):
if not hasattr(instance, cache_name):
setattr(instance, cache_name, {})
return getattr(instance, cache_name)
@property
@wraps(fn)
def get_attr(self):
cache = get_cache(self)
if attr_name not in cache:
cache[attr_name] = fn(self)
return cache[attr_name]
return get_attr
return decorator
def reuse_doc(f):
"""Reuse the docstring from f on the decorated function
Parameters
----------
f : func or class
Desired func/class whose __doc__ you want to reuse
Returns
-------
out : decorator
Example
--------
Here we decorate class B with class A's docstring
>>> class A(object):
>>> '''I got A docstring'''
>>> def __init__(self):
>>> self.x = 10
>>> @reuse_doc(A)
>>> class B(A):
>>> pass
>>> B.__doc__ == 'I got A docstring'
"""
doc = f.__doc__
def decorator(fn):
fn.__doc__ = doc
return fn
return decorator
| 23.669291 | 100 | 0.58849 | 710 | 6,012 | 4.774648 | 0.243662 | 0.028319 | 0.033038 | 0.025959 | 0.254867 | 0.228614 | 0.218289 | 0.218289 | 0.19292 | 0.19292 | 0 | 0.005119 | 0.317698 | 6,012 | 253 | 101 | 23.762846 | 0.821307 | 0.397705 | 0 | 0.494737 | 0 | 0 | 0.00526 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178947 | false | 0.010526 | 0.042105 | 0 | 0.421053 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c15402f1ab58bd4a60c7b4bb3dddbb75ea0cbef9 | 10,304 | py | Python | portcran.py | yzgyyang/portcran | 04fa6ce8cd8585ed96aab19177d030b030ff79c9 | [
"BSD-2-Clause"
] | 1 | 2021-07-15T04:35:08.000Z | 2021-07-15T04:35:08.000Z | portcran.py | yzgyyang/portcran | 04fa6ce8cd8585ed96aab19177d030b030ff79c9 | [
"BSD-2-Clause"
] | null | null | null | portcran.py | yzgyyang/portcran | 04fa6ce8cd8585ed96aab19177d030b030ff79c9 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
from argparse import ArgumentParser, Namespace
from pathlib import Path
from re import search
from sys import argv
from typing import Callable, Iterable, List, Optional, TextIO, Tuple
from urllib.request import urlopen, urlretrieve
from ports import Platform, PortError, PortLicense, Ports
from ports.cran import Cran, CranPort
__author__ = "David Naylor <dbn@FreeBSD.org>"
__license__ = "BSD (FreeBSD)"
__summary__ = "Generates FreeBSD Ports from CRAN packages"
__version__ = "0.1.9"
ERR_GENERAL = 1
ERR_CATEGORY = 2
ERR_EXISTS = 3
class Command(object):
def __init__(self, description: str) -> None:
self._parser = ArgumentParser(description=description)
self._subparsers = self._parser.add_subparsers(title="available sub-commands", help="sub-command help")
def execute(self, args: List[str]) -> None:
parsed_args = self._parser.parse_args(args)
if hasattr(parsed_args, "action"):
parsed_args.action(parsed_args)
else:
self.usage()
def usage(self) -> None:
self._parser.print_usage()
def __call__(self, verb: str, description: str) -> Callable[[Callable[[Namespace], None]], ArgumentParser]:
def decorator(action: Callable[[Namespace], None]) -> ArgumentParser:
parser = self._subparsers.add_parser(verb, help=description)
parser.set_defaults(action=action)
return parser
return decorator
def make_cran_port(name: str, portdir: Optional[Path] = None, version: Optional[str] = None) -> CranPort:
if not version:
print("Checking for latest version...")
site_page = urlopen("http://cran.r-project.org/package=%s" % name).read().decode("utf-8")
version_match = search(r"<td>Version:</td>\s*<td>(.*?)</td>", str(site_page))
assert version_match is not None
version = version_match.group(1)
distfile = Ports.distdir / ("%s_%s.tar.gz" % (name, version))
if not distfile.exists(): # pylint: disable=no-member
print("Fetching package source (%s-%s)..." % (name, version))
urlretrieve("https://cran.r-project.org/src/contrib/%s" % distfile.name, distfile) # pylint: disable=no-member
return CranPort.create(name, distfile, portdir)
def diff(left: Iterable[str], right: Iterable[str]) -> Tuple[List[str], bool, List[str]]:
left = list(left)
right = list(right)
old = [i for i in left if i not in right]
new = [i for i in right if i not in left]
left = [i for i in left if i not in old]
right = [i for i in right if i not in new]
return old, left == right, new
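def _demo_diff():
    # Illustrative sketch only (not part of the original script): "c" was
    # removed, "d" was added, and the shared entries come back in a different
    # order, so the middle flag is False.
    assert diff(["a", "b", "c"], ["b", "a", "d"]) == (["c"], False, ["d"])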
def yies(obj: list) -> str:
return "ies" if len(obj) > 1 else "y"
def log_depends(log: TextIO, depend: str, difference: Tuple[List[str], bool, List[str]]) -> None:
old, common, new = difference
if not common:
log.write(" - order %s dependencies lexicographically on origin\n" % depend)
if old:
log.write(" - remove unused %s dependenc%s:\n" % (depend, yies(old)))
for i in sorted(old):
log.write(" - %s\n" % i)
if new:
log.write(" - add new %s dependenc%s:\n" % (depend, yies(new)))
for i in sorted(new):
log.write(" - %s\n" % i)
def log_uses(log: TextIO, difference: Tuple[List[str], bool, List[str]]) -> None:
old, common, new = difference
if not common:
log.write(" - sort cran uses arguments lexicographically\n")
for arg in old:
if arg == "auto-plist":
log.write(" - manually generate pkg-plist\n")
elif arg == "compiles":
log.write(" - port no longer needs to compile\n")
else:
raise PortError("Log: unknown cran argument: %s" % arg)
for arg in new:
if arg == "auto-plist":
log.write(" - automatically generate pkg-plist\n")
elif arg == "compiles":
log.write(" - mark port as needing to compile\n")
else:
raise PortError("Log: unknown cran argument: %s" % arg)
def log_license(log: TextIO, old: PortLicense, new: PortLicense) -> None:
if list(old) != list(sorted(new)):
log.write(" - update license to: %s\n" % " ".join(sorted(new)))
elif old.combination != new.combination:
if new.combination is None:
log.write(" - remove license combination\n")
else:
log.write(" - update license combination\n")
def generate_update_log(old: CranPort, new: CranPort) -> None:
assert (old.portversion or old.distversion) != new.distversion
with open(new.portdir / "commit.svn", "w", encoding="utf-8") as log:
log.write("%s: updated to version %s\n\n" % (new.origin, new.distversion))
if old.portrevision is not None:
log.write(" - removed PORTREVISION due to version bump\n")
if old.maintainer != new.maintainer:
log.write(" - update maintainer\n")
if old.comment != new.comment:
log.write(" - updated comment to align with CRAN package\n")
if list(sorted(old.license)) != list(sorted(new.license)) or old.license.combination != new.license.combination:
log.write(" - updated license to align with CRAN package\n")
if old.license.file is None and new.license.file is not None:
log.write(" - added license file from CRAN package\n")
elif old.license.file is not None and new.license.file is None:
log.write(" - removed license file (no longer in CRAN package)\n")
for depend in ("build", "lib", "run", "test"):
old_depends = getattr(old.depends, depend)
new_depends = getattr(new.depends, depend)
log_depends(log, depend, diff([i.origin for i in old_depends], sorted(i.origin for i in new_depends)))
if old.description != new.description:
log.write(" - update description to align with CRAN package\n")
if old.website != new.website:
log.write(" - update website URL to align with CRAN package\n")
if new.version in new.changelog:
assert old.portname is not None
port = make_cran_port(new.portname, version=new.version)
assert port.version == new.version
if port.version in port.changelog and port.changelog[port.version] == new.changelog[new.version]:
log.write(" - changelog not updated\n")
else:
log.write(" - changelog:\n")
for line in new.changelog[new.version]:
log.write(" -")
length = 4
for word in line.split(" "):
length += len(word) + 1
if length > 75:
log.write("\n ")
length = 5 + len(word)
log.write(" " + word)
log.write("\n")
else:
log.write(" - no changelog provided\n")
log.write("\nGenerated by:\tportcran (%s)\n" % __version__)
def update_category(portsdir: Path, category: str, name: str) -> None:
entry = " SUBDIR += %s\n" % name
makefile = portsdir / category / "Makefile"
tmpfile = portsdir / category / ".Makefile.portcran"
with makefile.open() as old:
with tmpfile.open("w") as new:
has_subdir = False
drain = False
for line in old.readlines():
if not drain:
if line == entry:
drain = True
if line.lstrip().startswith("SUBDIR"):
has_subdir = True
if line > entry:
new.write(entry)
drain = True
elif has_subdir:
new.write(entry)
drain = True
new.write(line)
tmpfile.rename(makefile)
def generate_create_log(cran: CranPort) -> None:
with open(cran.portdir / ".." / ".." / "commit.svn", "w") as log:
log.write("%s: %s\n" % (cran.origin, cran.comment))
log.write("\nGenerated by:\tportcran (%s)\n" % __version__)
def main() -> None:
command = Command(__summary__)
@command("update", "update a CRAN port")
def update(args: Namespace) -> None:
port = Ports.get_port_by_name(Cran.PKGNAMEPREFIX + args.name)
assert isinstance(port, CranPort)
cran = make_cran_port(args.name, portdir=port._portdir)
cran.generate()
generate_update_log(port, cran)
update.add_argument("name", help="name of the CRAN package")
update.add_argument("-o", "--output", help="output directory")
@command("create", "create a CRAN port")
def create(args: Namespace) -> None:
if args.address is not None:
Platform.address = args.address
categories = args.categories.split(",")
for category in categories:
if category not in Ports.categories:
print("err: %s in not a ports category" % category)
exit(ERR_CATEGORY)
portsdir = Ports.dir if args.portsdir is None else Path(args.portsdir)
category = categories[0]
name = Cran.PKGNAMEPREFIX + args.name
portdir = portsdir / category / name
cran = make_cran_port(args.name, portdir)
cran.categories = categories
cran.maintainer = Platform.address
try:
port = Ports.get_port_by_name(name)
print("err: CRAN port %s already exists at %s" % (args.name, port.origin))
exit(ERR_EXISTS)
except PortError:
pass
portdir.mkdir()
update_category(portsdir, category, name)
cran.generate()
generate_create_log(cran)
create.add_argument("name", help="name of the CRAN package")
create.add_argument("-a", "--address", help="creator's email address")
create.add_argument("-c", "--categories", default="math", help="comma separated list of the CRAN port's categories")
create.add_argument("-p", "--portsdir", help="output ports directory")
command.execute(argv[1:])
if __name__ == "__main__":
try:
main()
except PortError as ex:
print("err: %s" % ex)
exit(ERR_GENERAL)
| 40.566929 | 120 | 0.595885 | 1,294 | 10,304 | 4.657651 | 0.2017 | 0.042476 | 0.007964 | 0.004646 | 0.211548 | 0.167745 | 0.12444 | 0.105857 | 0.096565 | 0.043803 | 0 | 0.002426 | 0.279794 | 10,304 | 253 | 121 | 40.727273 | 0.809729 | 0.007085 | 0 | 0.146226 | 0 | 0 | 0.178138 | 0.003324 | 0 | 0 | 0 | 0 | 0.023585 | 1 | 0.080189 | false | 0.004717 | 0.037736 | 0.004717 | 0.146226 | 0.028302 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c155e8b957ea1abd8dd89360b9558b67dc020499 | 1,243 | py | Python | src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py | richardk53/gluon-ts | 5bde492198c0348b550ac6f7269f1740a699ec30 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py | richardk53/gluon-ts | 5bde492198c0348b550ac6f7269f1740a699ec30 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py | richardk53/gluon-ts | 5bde492198c0348b550ac6f7269f1740a699ec30 | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
from utils.utils import compute_conv_output_img_dims
def test_compute_conv_dims_out():
for width_img in [63, 64, 65, 66]:
dims_img = (width_img, width_img)
inp = torch.randn((10, 1,) + dims_img)
for padding in [0, 1, 2]:
for dilation in [1, 2, 3]:
for stride in [1, 2, 3]:
for kernel_size in [2, 3, 4, 5]:
conv = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
computed_img_dims_out = compute_conv_output_img_dims(
dims_img=dims_img,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
actual_img_dims_out = conv(inp).shape[2:]
assert computed_img_dims_out == actual_img_dims_out
| 38.84375 | 77 | 0.442478 | 127 | 1,243 | 4.031496 | 0.314961 | 0.095703 | 0.078125 | 0.078125 | 0.367188 | 0.242188 | 0.242188 | 0.242188 | 0.242188 | 0.242188 | 0 | 0.044304 | 0.491553 | 1,243 | 31 | 78 | 40.096774 | 0.765823 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1561171d3885a4dc3c76906c27aa5632df77a77 | 589 | py | Python | OOP/deep_dive_tkinter/many_widget_example.py | Amaranese/python-exercises-notes-solutions-projects | 58f7677ecb97971733d9f4ff87fda75e23d7c0cb | [
"Unlicense"
] | 1 | 2021-12-03T12:38:33.000Z | 2021-12-03T12:38:33.000Z | OOP/deep_dive_tkinter/many_widget_example.py | Amaranese/python-exercises-notes-solutions-projects | 58f7677ecb97971733d9f4ff87fda75e23d7c0cb | [
"Unlicense"
] | null | null | null | OOP/deep_dive_tkinter/many_widget_example.py | Amaranese/python-exercises-notes-solutions-projects | 58f7677ecb97971733d9f4ff87fda75e23d7c0cb | [
"Unlicense"
] | null | null | null | import tkinter as tk
parent = tk.Tk()
# tk.WidgetName(parent_frame, options)
tk.Entry(parent, width=25).pack()
tk.Button(parent, text="LOOKOUT!").pack()
tk.Checkbutton(parent, text='RememberMe', variable=tk.IntVar()).pack()
tk.Label(parent, text="What's Your Name?").pack()
tk.OptionMenu(parent, tk.IntVar(), "Select Age", "15+", "25+", "40+", "60+").pack()
tk.Scrollbar(parent, orient=tk.VERTICAL).pack()
tk.Radiobutton(parent, text='Democratic', variable=tk.IntVar(), value=3).pack()
tk.Radiobutton(parent, text='Republican', variable=tk.IntVar(), value=5).pack()
parent.mainloop()
| 32.722222 | 83 | 0.702886 | 85 | 589 | 4.858824 | 0.470588 | 0.101695 | 0.116223 | 0.11138 | 0.130751 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022059 | 0.076401 | 589 | 17 | 84 | 34.647059 | 0.737132 | 0.061121 | 0 | 0 | 0 | 0 | 0.139746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c156565f017d48828a6c04509f6eaa61d605a332 | 432 | py | Python | hardhat/recipes/x11/driver/xf86-video-nouveau.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/x11/driver/xf86-video-nouveau.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/x11/driver/xf86-video-nouveau.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | from ..base import X11DriverBaseRecipe
class Xf86VideoNouveauRecipe(X11DriverBaseRecipe):
def __init__(self, *args, **kwargs):
super(Xf86VideoNouveauRecipe, self).__init__(*args, **kwargs)
self.sha256 = '6d9242ba139c3df7afefffb455573b52' \
'f4427920b978161c00483c64a6da47cb'
self.name = 'xf86-video-nouveau'
self.version = '1.0.13'
self.depends = ['xorg-server']
| 33.230769 | 69 | 0.664352 | 36 | 432 | 7.75 | 0.722222 | 0.071685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.172619 | 0.222222 | 432 | 12 | 70 | 36 | 0.657738 | 0 | 0 | 0 | 0 | 0 | 0.229167 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1588d562ae990566fc09dd0f8d1a7453c6a6f20 | 3,563 | py | Python | fem_dsa/networks/autoencoders.py | idealab-isu/DSA | b9157eb9307c0ff06d91ff2bdcd8f70df5b896cb | [
"BSD-3-Clause"
] | 3 | 2022-01-18T01:33:34.000Z | 2022-03-22T20:46:16.000Z | DiffNet/networks/autoencoders.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 1 | 2022-03-30T10:16:47.000Z | 2022-03-30T10:16:47.000Z | DiffNet/networks/autoencoders.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 2 | 2021-12-01T20:53:24.000Z | 2021-12-02T06:42:39.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Encoder(nn.Module):
def __init__(self, in_channels=3, dim=64, n_downsample=3, encoder_type='convolutional'):
super(Encoder, self).__init__()
# Initial convolution block
layers = [
nn.ReflectionPad2d(3),
nn.Conv2d(in_channels, dim*2, 7),
nn.InstanceNorm2d(dim),
nn.LeakyReLU(0.2, inplace=True),
]
# Downsampling
for i in range(n_downsample):
if i <= 3:
layers += [
nn.Conv2d(dim*2*(i+1), dim * (i+2)*2, 4, stride=2, padding=1),
nn.InstanceNorm2d(dim * (i+2)*2),
nn.ReLU(inplace=True),
]
else:
layers += [
nn.Conv2d(dim*2*(5), dim * (5)*2, 4, stride=2, padding=1),
nn.InstanceNorm2d(dim * (5)*2),
nn.ReLU(inplace=True),
]
self.model_blocks = nn.Sequential(*layers, nn.Tanh())
def forward(self, x):
x = self.model_blocks(x)
return x
class Decoder(nn.Module):
def __init__(self, out_channels=3, dim=64, n_upsample=3, encoder_type='convolutional', activation='relu'):
super(Decoder, self).__init__()
layers = []
dim = dim
# Upsampling
for i in reversed(range(n_upsample)):
# print(i)
if i > 3:
print('Arjuna')
layers += [
nn.ConvTranspose2d(dim * (5)*2, dim * (5)*2, 4, stride=2, padding=1),
nn.InstanceNorm2d(dim * (5)*2),
nn.LeakyReLU(0.2, inplace=True),
]
else:
layers += [
nn.ConvTranspose2d(dim * (i + 2)*2, dim * (i + 1)*2, 4, stride=2, padding=1),
nn.InstanceNorm2d(dim * (i + 1)*2),
nn.LeakyReLU(0.2, inplace=True),
]
# Output layer
# layers += [nn.ReflectionPad2d(3), nn.Conv2d(dim, out_channels, 7)]
layers += [nn.ReflectionPad2d(4), nn.Conv2d(dim * (i + 1)*2, out_channels, 3), nn.Conv2d(out_channels, out_channels, 7)]
self.model_blocks = nn.Sequential(*layers)
if activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'relu':
self.activation = nn.ReLU()
def forward(self, x):
# print(x.shape)
x = self.model_blocks(x)
#x = self.activation(x)
return x
class AE(nn.Module):
"""docstring for AE"""
def __init__(self, in_channels, out_channels, dims=64, n_downsample=4):
super(AE, self).__init__()
self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='regular')
self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample, activation='relu')
def forward(self, x):
code = self.encoder(x)
out = self.decoder(code)
return out
class VAE(nn.Module):
"""docstring for AE"""
def __init__(self, in_channels, out_channels, dims=64, n_downsample=3):
super(VAE, self).__init__()
self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='variational')
self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample)
def forward(self, x):
mu, z = self.encoder(x)
out = self.decoder(z)
return out | 33.299065 | 128 | 0.541959 | 435 | 3,563 | 4.28046 | 0.183908 | 0.059076 | 0.05102 | 0.019334 | 0.562836 | 0.440924 | 0.33029 | 0.305048 | 0.305048 | 0.305048 | 0 | 0.034884 | 0.324165 | 3,563 | 107 | 129 | 33.299065 | 0.738372 | 0.058939 | 0 | 0.302632 | 0 | 0 | 0.020683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.263158 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1598a3545cd8dc90345c280e6f51a6897b9912a | 2,215 | py | Python | week02/03.MoreTesting/fractions/tests_collect_fractions.py | TsvetomirTsvetkov/Python-Course-101 | 1c5ea4631128c22effe3c4ee5a18c43f5e79d463 | [
"MIT"
] | null | null | null | week02/03.MoreTesting/fractions/tests_collect_fractions.py | TsvetomirTsvetkov/Python-Course-101 | 1c5ea4631128c22effe3c4ee5a18c43f5e79d463 | [
"MIT"
] | null | null | null | week02/03.MoreTesting/fractions/tests_collect_fractions.py | TsvetomirTsvetkov/Python-Course-101 | 1c5ea4631128c22effe3c4ee5a18c43f5e79d463 | [
"MIT"
] | null | null | null | # tests_collect_fractions.py
import unittest
from collect_fractions import (
validate_input_collect,
lcm,
collect_fractions
)
class TestValidateInputCollect(unittest.TestCase):
def test_validation_passes_with_correct_input(self):
fractions = [(1, 3), (4, 5)]
validate_input_collect(fractions)
def test_validation_raises_exception_with_empty_list(self):
fractions = []
exc = None
try:
validate_input_collect(fractions)
except Exception as err:
exc = err
self.assertIsNotNone(exc)
self.assertEqual(str(exc), 'List cannot be empty.')
def test_validation_raises_exception_if_fractions_is_not_of_type_list(self):
fractions = ((1, 3), (4, 5))
exc = None
try:
validate_input_collect(fractions)
except Exception as err:
exc = err
self.assertIsNotNone(exc)
self.assertEqual(str(exc), 'Argument can only be of type "list".')
def test_validation_raises_exception_if_length_of_element_is_not_two(self):
fractions = [(1, 2), (1, 3, 4)]
exc = None
try:
validate_input_collect(fractions)
except Exception as err:
exc = err
self.assertIsNotNone(exc)
self.assertEqual(str(exc), 'Tuple can only contain 2 elements.')
def test_validation_raises_exception_if_one_of_the_elements_of_the_tuples_is_not_integer(self):
fractions = [(1, 5), (1, 2.0)]
exc = None
try:
validate_input_collect(fractions)
except Exception as err:
exc = err
self.assertIsNotNone(exc)
self.assertEqual(str(exc), 'Tuple can only contain integers.')
def test_validation_raises_exception_if_one_of_the_elements_has_denominator_zero(self):
fractions = [(1, 2), (1, 0)]
exc = None
try:
validate_input_collect(fractions)
except Exception as err:
exc = err
self.assertIsNotNone(exc)
self.assertEqual(str(exc), 'Cannot devide by zero.')
class TestCollectFractions(unittest.TestCase):
def test_collect_fractions_passes_with_only_one_element_in_list(self):
fractions = [(1, 7)]
self.assertEqual((1, 7), collect_fractions(fractions))
def test_collect_fraction_passes_with_more_than_one_element_in_list(self):
fractions = [(1, 4), (1, 2)]
self.assertEqual((3, 4), collect_fractions(fractions))
if __name__ == '__main__':
unittest.main() | 25.170455 | 96 | 0.751242 | 308 | 2,215 | 5.068182 | 0.237013 | 0.122998 | 0.089686 | 0.111467 | 0.580397 | 0.539398 | 0.474055 | 0.435618 | 0.435618 | 0.435618 | 0 | 0.017433 | 0.145372 | 2,215 | 88 | 97 | 25.170455 | 0.807184 | 0.011738 | 0 | 0.484375 | 0 | 0 | 0.069927 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.125 | false | 0.046875 | 0.03125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c159e41ff48b6f66e8bdd24ff1ed589656d0c172 | 3,278 | py | Python | exporter/management/commands/exporter.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | null | null | null | exporter/management/commands/exporter.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | 170 | 2021-02-12T12:52:37.000Z | 2022-03-28T14:37:05.000Z | exporter/management/commands/exporter.py | open-contracting/data-registry | 5a73e7f2334c6af5be23070493842b494b3e5357 | [
"BSD-3-Clause"
] | null | null | null | import gzip
import logging
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
from yapw.methods.blocking import ack
from exporter.util import Export, create_client
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Start a worker to export files from collections in Kingfisher Process.
Data is exported as gzipped line-delimited JSON files, with one file per year and one ``full.jsonl.gz`` file.
Multiple workers can run at the same time.
"""
def handle(self, *args, **options):
create_client().consume(callback, "exporter_init")
def callback(state, channel, method, properties, input_message):
collection_id = input_message.get("collection_id")
job_id = input_message.get("job_id")
export = Export(job_id)
dump_file = export.directory / "full.jsonl"
try:
export.directory.mkdir(parents=True)
except FileExistsError:
        for f in export.directory.glob("*"):
            if f.is_file():
                f.unlink()
export.lock()
id = 0
page = 1
files = {}
    # acknowledge message processing now to avoid connection losses;
    # the rest can run for hours and is irreversible anyway
ack(state, channel, method.delivery_tag)
# load data from kf-process and save
while True:
with connections["kingfisher_process"].cursor() as cursor:
logger.debug("Processing page %s with id > %s", page, id)
cursor.execute(
"""
SELECT d.id, d.data, d.data->>'date'
FROM compiled_release c
JOIN data d ON (c.data_id = d.id)
WHERE collection_id = %s
AND d.id > %s
ORDER BY d.id
LIMIT %s
""",
[collection_id, id, settings.EXPORTER_PAGE_SIZE],
)
records = cursor.fetchall()
if not records:
break
with open(dump_file, "a") as full:
files[dump_file] = full
for r in records:
id = r[0]
full.write(r[1])
full.write("\n")
# annual and monthly dump
if r[2] is not None and len(r[2]) > 9:
year_path = export.directory / f"{int(r[2][:4])}.jsonl"
if year_path not in files:
files[year_path] = year_path.open("a")
files[year_path].write(r[1])
files[year_path].write("\n")
month_path = export.directory / f"{int(r[2][:4])}_{r[2][5:7]}.jsonl"
if month_path not in files:
files[month_path] = month_path.open("a")
files[month_path].write(r[1])
files[month_path].write("\n")
page = page + 1
# last page
if len(records) < settings.EXPORTER_PAGE_SIZE:
break
for path, file in files.items():
file.close()
with path.open("rb") as f_in:
with gzip.open(f"{path}.gz", "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
path.unlink()
export.unlock()
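def _demo_keyset_pagination():
    # Illustrative sketch only (not part of the original command): the loop
    # above pages with "WHERE d.id > %s ... ORDER BY d.id LIMIT %s" (keyset
    # pagination), so each page is an index seek rather than an OFFSET scan.
    rows = list(range(1, 11))  # stand-in for the data ids
    last_id, page_size, pages = 0, 4, []
    while True:
        page = [r for r in rows if r > last_id][:page_size]
        if not page:
            break
        pages.append(page)
        last_id = page[-1]
    assert pages == [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]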
| 29.531532 | 113 | 0.556742 | 413 | 3,278 | 4.307506 | 0.382567 | 0.026981 | 0.011804 | 0.019112 | 0.068578 | 0.02923 | 0.02923 | 0.02923 | 0 | 0 | 0 | 0.007852 | 0.339536 | 3,278 | 110 | 114 | 29.8 | 0.813857 | 0.125381 | 0 | 0.031746 | 0 | 0 | 0.066693 | 0.021437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031746 | false | 0 | 0.126984 | 0 | 0.174603 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c160f505df5dab1a29a92764a36839b1cc74f021 | 3,357 | py | Python | test_triplegan.py | AmirHosseinAmeli/Triple-GAN | 127948d9e22767d315a4b3ca58fc4a56d92ff9d3 | [
"MIT"
] | 29 | 2020-09-03T08:35:47.000Z | 2022-02-10T18:39:29.000Z | test_triplegan.py | AmirHosseinAmeli/Triple-GAN | 127948d9e22767d315a4b3ca58fc4a56d92ff9d3 | [
"MIT"
] | 6 | 2020-12-22T14:43:14.000Z | 2022-03-12T00:55:24.000Z | test_triplegan.py | AmirHosseinAmeli/Triple-GAN | 127948d9e22767d315a4b3ca58fc4a56d92ff9d3 | [
"MIT"
] | 8 | 2020-10-01T04:03:40.000Z | 2022-03-21T10:23:40.000Z | import copy
import os
import pickle
import torch
import torch.nn as nn
import numpy as np
from library import inputs, eval_inception_score
from Utils.checkpoints import save_context, Logger
from Utils import flags
from Utils import config
import Torture
FLAGS = flags.FLAGS
KEY_ARGUMENTS = config.load_config(FLAGS.config_file)
model = FLAGS.old_model
dirname = os.path.dirname(model)
basename = os.path.basename(model)
config_path = os.path.join(dirname, "..", "source", "configs_dict.pkl")
summary_path = os.path.join(dirname, "..", "summary")
with open(config_path, "rb") as f:
new_dict = pickle.load(f)
new_dict["gpu"] = FLAGS.gpu
FLAGS.set_dict(new_dict)
FLAGS.old_model = "loaded"
text_logger, MODELS_FOLDER, SUMMARIES_FOLDER = save_context(__file__, KEY_ARGUMENTS)
torch.manual_seed(1234)
torch.cuda.manual_seed(1235)
np.random.seed(1236)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
itr = inputs.get_data_iter(batch_size=100, subset=1000)
itr_u = inputs.get_data_iter(batch_size=100)
netG, optim_G = inputs.get_generator_optimizer()
netD, optim_D = inputs.get_discriminator_optimizer()
netC, optim_c = inputs.get_classifier_optimizer()
netC_T, _ = inputs.get_classifier_optimizer()
netG, netD, netC = netG.to(device), netD.to(device), netC.to(device)
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
netC = nn.DataParallel(netC)
netC_T = nn.DataParallel(netC_T)
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(
netG=netG,
netD=netD,
netC=netC,
netC_T=netC_T,
optim_G=optim_G,
optim_D=optim_D,
optim_c=optim_c,
)
checkpoint_io.load_file(model)
logger = Logger(log_dir=SUMMARIES_FOLDER)
# with torch.no_grad():
# netG.eval()
# data, label = itr.__next__()
# sample_z = torch.randn(FLAGS.bs_g, FLAGS.g_z_dim).to(device)
# tlabel = label[: FLAGS.bs_g // 10]
# tlabel = torch.cat([tlabel for _ in range(10)], 0)
# x_fake = netG(sample_z, tlabel)
# logger.add_imgs(x_fake, "imgtest", nrow=FLAGS.bs_g // 10)
# itr_test = inputs.get_data_iter(batch_size=100, train=False, infinity=False)
# netC_T.eval()
# total, correct = 0, 0
# for images, labels in itr_test:
# images, labels = images.to(device), labels.to(device)
# outputs = netC_T(images)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# print(total, correct, correct / total)
# Inception score
with torch.no_grad():
netG.eval()
img_list = []
for _ in range(500):
sample_z = torch.randn(100, FLAGS.g_z_dim).to(device)
data, label = itr.__next__()
# print(label.shape, sample_z.shape)
x_fake = netG(sample_z.to(device), label.to(device))
img_list.append(x_fake.data.cpu().numpy() * 0.5 + 0.5)
img_list = np.concatenate(img_list, axis=0)
img_list = (np.transpose(img_list, [0, 2, 3, 1]) * 255).astype(np.uint8)
new_img_list = []
for i in range(50000):
new_img_list.append(img_list[i])
with open("image.pkl", "wb") as f:
pickle.dump(new_img_list, f)
exit()
print(img_list.shape)
print(eval_inception_score.get_inception_score(new_img_list))
| 32.278846 | 84 | 0.712541 | 514 | 3,357 | 4.412451 | 0.303502 | 0.037037 | 0.017637 | 0.022487 | 0.107143 | 0.074515 | 0.03836 | 0 | 0 | 0 | 0 | 0.021075 | 0.151921 | 3,357 | 103 | 85 | 32.592233 | 0.775553 | 0.231159 | 0 | 0 | 0 | 0 | 0.024247 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.152778 | 0 | 0.152778 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c164aad97b718794ec2487936b78ec7212cf88c1 | 1,523 | py | Python | Library/operations.py | marcelodaher/ArraySim | f42db96e30acff6f3ce3829dc89a79ef5473b4db | [
"MIT"
] | 1 | 2019-12-06T16:48:10.000Z | 2019-12-06T16:48:10.000Z | Library/operations.py | marcelodaher/ArraySim | f42db96e30acff6f3ce3829dc89a79ef5473b4db | [
"MIT"
] | null | null | null | Library/operations.py | marcelodaher/ArraySim | f42db96e30acff6f3ce3829dc89a79ef5473b4db | [
"MIT"
] | null | null | null | # coding=utf-8
import numpy as np
def colKRproduct(A,B):
'''
    columnwise Khatri-Rao product between matrices A and B
'''
if A.shape[1] != B.shape[1]:
raise TypeError("A and B must have the same number of columns")
q = A.shape[1]
C = np.zeros([A.shape[0]*B.shape[0],q])
for i in np.arange(q):
C[:,i] = np.kron(A[:,i],B[:,i])
return C
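def _demo_colKRproduct():
    # Illustrative sketch only (not part of the original module): the
    # columnwise Khatri-Rao product of two 2x2 matrices stacks the Kronecker
    # product of matching columns into a 4x2 result.
    A = np.array([[1., 2.], [3., 4.]])
    B = np.array([[5., 6.], [7., 8.]])
    C = colKRproduct(A, B)
    assert C.shape == (4, 2)
    assert np.allclose(C[:, 0], np.kron(A[:, 0], B[:, 0]))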
def colKRproduct_conj_self(A):
return np.apply_along_axis(lambda x: np.kron(x.conj(),x),0,A)
def Xi(nMicX,nMicY):
    r'''
    Returns the permutation matrix \Xi
    '''
Xi = np.zeros([nMicX*nMicY,nMicX*nMicY])
print("XI() NOT IMPLEMENTED")
return Xi
def S2Z(S,nMicX,nMicY):
Z = np.zeros([nMicX*nMicY,nMicX*nMicY], dtype = S.dtype)
for x in np.arange(nMicX):
for y in np.arange(nMicX):
Z[:,y+x*nMicY] = np.reshape(
S[y*nMicY:(y+1)*nMicY,x*nMicX:(x+1)*nMicX],
newshape = [nMicX*nMicY],
order="F")
return Z
def spark(A):
from itertools import combinations as comb
from numpy import linalg
A = np.array(A)
At = A.T
[m,n] = At.shape
if n > m: return 0
for k in range (1,n+1):
row_combos = comb(range(m),k)
for rows in row_combos:
R = np.array([At[row] for row in rows])
rank = linalg.matrix_rank(R)
if rank < k: return k
return n+1
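def _demo_spark():
    # Illustrative sketch only (not part of the original module): spark is the
    # size of the smallest linearly dependent column subset.  The 3x3 identity
    # has full spark (n + 1 = 4); a repeated column gives spark 2.
    assert spark(np.eye(3)) == 4
    assert spark(np.array([[1., 1., 0.], [0., 0., 1.], [0., 0., 0.]])) == 2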
| 27.196429 | 76 | 0.512147 | 231 | 1,523 | 3.34632 | 0.359307 | 0.090556 | 0.03881 | 0.043984 | 0.069858 | 0.069858 | 0 | 0 | 0 | 0 | 0 | 0.014141 | 0.349967 | 1,523 | 56 | 77 | 27.196429 | 0.766667 | 0.066316 | 0 | 0 | 0 | 0 | 0.048616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0.026316 | 0.342105 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1653d9ca159307ad4091c89f53debf9a3453ffc | 1,337 | py | Python | gtsfm/runner/run_scene_optimizer_olssonloader.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 122 | 2021-02-07T23:01:58.000Z | 2022-03-30T13:10:35.000Z | gtsfm/runner/run_scene_optimizer_olssonloader.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 273 | 2021-01-30T16:45:26.000Z | 2022-03-16T15:02:33.000Z | gtsfm/runner/run_scene_optimizer_olssonloader.py | swershrimpy/gtsfm | 8d301eb3ef9172345a1ac1369fd4e19764d28946 | [
"Apache-2.0"
] | 13 | 2021-03-12T03:01:27.000Z | 2022-03-11T03:16:54.000Z | import argparse
import os
from pathlib import Path
import gtsfm.utils.logger as logger_utils
from gtsfm.loader.loader_base import LoaderBase
from gtsfm.loader.olsson_loader import OlssonLoader
from gtsfm.runner.gtsfm_runner_base import GtsfmRunnerBase
DATA_ROOT = Path(__file__).resolve().parent.parent.parent / "tests" / "data"
logger = logger_utils.get_logger()
class GtsfmRunnerOlssonLoader(GtsfmRunnerBase):
def __init__(self):
super(GtsfmRunnerOlssonLoader, self).__init__(tag="GTSFM on Dataset in Olsson's Lund format")
def construct_argparser(self) -> argparse.ArgumentParser:
parser = super(GtsfmRunnerOlssonLoader, self).construct_argparser()
parser.add_argument("--dataset_root", type=str, default=os.path.join(DATA_ROOT, "set1_lund_door"), help="")
parser.add_argument("--image_extension", type=str, default="JPG", help="")
return parser
def construct_loader(self) -> LoaderBase:
loader = OlssonLoader(
self.parsed_args.dataset_root,
image_extension=self.parsed_args.image_extension,
max_frame_lookahead=self.parsed_args.max_frame_lookahead,
max_resolution=self.parsed_args.max_resolution,
)
return loader
if __name__ == "__main__":
runner = GtsfmRunnerOlssonLoader()
runner.run()
| 31.833333 | 115 | 0.729993 | 156 | 1,337 | 5.929487 | 0.410256 | 0.043243 | 0.060541 | 0.036757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000905 | 0.173523 | 1,337 | 41 | 116 | 32.609756 | 0.836199 | 0 | 0 | 0 | 0 | 0 | 0.078534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.25 | 0 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c165a6d6b497f214d3ee7b9ab319db0cb8d9588f | 384 | py | Python | src/reverse/setup.py | fugue/zim-example | 861b197ddc1074375bb9437b3282ab3e517b9019 | [
"MIT"
] | null | null | null | src/reverse/setup.py | fugue/zim-example | 861b197ddc1074375bb9437b3282ab3e517b9019 | [
"MIT"
] | null | null | null | src/reverse/setup.py | fugue/zim-example | 861b197ddc1074375bb9437b3282ab3e517b9019 | [
"MIT"
] | 2 | 2021-03-17T03:02:52.000Z | 2021-07-21T23:31:08.000Z | import os.path
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), "requirements.txt")) as f:
requirements = f.read().strip()
setup(
name="reverse",
version="0.0.0",
description="Reverse data",
packages=find_packages(exclude=["tests"]),
package_data={"reverse": ["metadata/*"]},
install_requires=requirements,
)
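# With this setup script in place, the package can be installed from this
# directory with e.g. `pip install .` (or `pip install -e .` for an editable
# install); requirements.txt is read at install time, as shown above.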
| 25.6 | 76 | 0.690104 | 48 | 384 | 5.354167 | 0.645833 | 0.070039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009119 | 0.143229 | 384 | 14 | 77 | 27.428571 | 0.772036 | 0 | 0 | 0 | 0 | 0 | 0.161458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c165ec6055b3d0599812a0a06fa513f8948722c9 | 3,204 | py | Python | tutorial_metarl/tasks/CompositionalTwoArmedBandit.py | akjagadish/tutorial-metarl | 8810eafa783749c70a0575e805810a098b3df0fb | [
"MIT"
] | null | null | null | tutorial_metarl/tasks/CompositionalTwoArmedBandit.py | akjagadish/tutorial-metarl | 8810eafa783749c70a0575e805810a098b3df0fb | [
"MIT"
] | null | null | null | tutorial_metarl/tasks/CompositionalTwoArmedBandit.py | akjagadish/tutorial-metarl | 8810eafa783749c70a0575e805810a098b3df0fb | [
"MIT"
] | null | null | null | import torch
import numpy as np
import math
class CompositionalTwoArmedBandit():
def __init__(self, probs, ctx_dim, num_arms, num_ctx=400, max_ctx=1000):
self.probs = np.asarray(probs)
self.num_arms = num_arms
self.ctx_dim = ctx_dim
self.num_ctx = num_ctx
self.max_ctx = max_ctx
self.context = self.make_contexts(ctx_dim, num_ctx, max_ctx)
def sample(self, num_episodes=1000, num_trials=100, prob=None, cxt_per_epoch=False, repeats=None):
if cxt_per_epoch:
# generate unique contexts
self.context = self.make_contexts(self.ctx_dim, self.num_ctx, self.max_ctx)
# group them into high and rewarding
highrwd_context = self.context[:int(self.num_ctx/2)]
lowrwd_context = self.context[int(self.num_ctx/2):]
# make copies and generate samples for both contexts
highsamples = self.make_bag_of_tasks(num_episodes, repeats=repeats)
lowsamples = highsamples.copy()
np.random.shuffle(lowsamples)
# set low and high probs
low_prob, high_prob = self.probs
probs = self.probs.copy()
X, Y = [], []
ctx = torch.zeros(self.num_arms, self.ctx_dim)
for hsample, lsample in zip(highsamples, lowsamples):
# change high and low rewarding arm
np.random.shuffle(probs)
# sample contexts and assign to respective arms
ctx[probs == low_prob] = lowrwd_context[lsample]
ctx[probs == high_prob] = highrwd_context[hsample]
x, y = self._sample_one_episode(ctx.reshape(-1), probs, num_trials)
X.append(x)
Y.append(y)
Y = torch.stack(Y)
X = torch.stack(X)
return X, Y
def _sample_one_episode(self, x, probs, num_trials):
X, Y = [], []
low_prob, high_prob = self.probs
for _ in range(num_trials):
y = np.zeros(self.num_arms)
y[probs == low_prob] = np.random.choice([0, 1], size=(1,), p=self.probs[::-1])
y[probs == high_prob] = np.random.choice([0, 1], size=(1,), p=self.probs)
Y.append(torch.as_tensor(y))
X.append(torch.as_tensor(x).type(torch.FloatTensor))
return torch.stack(X), torch.stack(Y)
def make_bag_of_tasks(self, num_episodes, repeats=None):
num_contexts_per_group = int(self.num_ctx/2)
if repeats is None:
repeats = int(num_episodes/num_contexts_per_group)
samples = np.repeat(np.arange(num_contexts_per_group), repeats)
samples = samples[:num_episodes]
np.random.shuffle(samples)
return samples
def make_contexts(self, ctx_dim, num_ctx, max_ctx):
sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
while len(np.unique(sample_contexts, axis=0))<num_ctx: # such that we sample unique contexts
sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
sample_contexts = np.unique(sample_contexts, axis=0)[:num_ctx]
np.random.shuffle(sample_contexts)
return torch.tensor(sample_contexts).type(torch.FloatTensor)
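# Minimal usage sketch (illustrative values, not from the original file; the
# boolean masks over `probs` assume num_arms == 2, and num_episodes should be
# at least num_ctx/2 so that make_bag_of_tasks yields a nonzero repeat count):
#   task = CompositionalTwoArmedBandit(probs=[0.2, 0.8], ctx_dim=10,
#                                      num_arms=2, num_ctx=4)
#   X, Y = task.sample(num_episodes=4, num_trials=5)
#   # X: (4, 5, num_arms*ctx_dim) context inputs, Y: (4, 5, num_arms) rewards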
| 41.61039 | 102 | 0.626717 | 441 | 3,204 | 4.335601 | 0.222222 | 0.034519 | 0.026151 | 0.020397 | 0.290795 | 0.196653 | 0.15272 | 0.15272 | 0.084728 | 0.084728 | 0 | 0.012319 | 0.265293 | 3,204 | 76 | 103 | 42.157895 | 0.799915 | 0.077715 | 0 | 0.103448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.051724 | 0 | 0.224138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c16896197cec1995065f5c34607ce687f11e89f6 | 2,916 | py | Python | scripts/example.py | alexboden/nba-who-has-more | 590ba8bd062b96ff866c13988eb79a8c7ff0f488 | [
"MIT"
] | null | null | null | scripts/example.py | alexboden/nba-who-has-more | 590ba8bd062b96ff866c13988eb79a8c7ff0f488 | [
"MIT"
] | null | null | null | scripts/example.py | alexboden/nba-who-has-more | 590ba8bd062b96ff866c13988eb79a8c7ff0f488 | [
"MIT"
] | null | null | null | from nba_api.stats.static import players
from nba_api.stats import endpoints
from nba_api.stats.library.parameters import SeasonAll
from nba_api.stats.endpoints import playercareerstats
from nba_api.stats.endpoints import commonplayerinfo
from nba_api.stats.endpoints import playergamelog
import pandas as pd
import time
from random import *
start_time = time.time()
#list of all players
player_dict = players.get_players()
def games_with_x_or_more_points(seasons, x, player_id):
count = 0
for s in seasons:
time.sleep(0.6)
gamelog_player = playergamelog.PlayerGameLog(player_id = player_id, season = s)
df_player_games = gamelog_player.get_data_frames()[0]
box_scores_points = df_player_games.loc[:, "PTS"]
for points in box_scores_points:
if(points >= x):
count += 1
return count
def get_player_id(fullname):
player = [player for player in player_dict if player['full_name'] == fullname][0]
return player['id']
def get_player_seasons(player_id):
player_info = commonplayerinfo.CommonPlayerInfo(player_id=player_id)
available_seasons = player_info.available_seasons.get_dict()
seasons = []
for season in available_seasons["data"]:
for s in season:
year = s[1:5];
if not year in seasons:
seasons.append(year)
return seasons
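# e.g. get_player_id("LeBron James") returns 2544, the stats.nba.com player id
# (the same id appears in the commented-out example at the bottom of this file).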
all_time_great_list_file = open("NBA/alltimegreats.txt","r")
ALL_TIMERS = []
while(True):
    line = all_time_great_list_file.readline()[3:].strip()  # slice off the first three characters (the list's numbering prefix)
if not line:
break
ALL_TIMERS.append(line)
player1 = ALL_TIMERS[randint(0, 99)]
player2 = ALL_TIMERS[randint(0, 99)]
print(player1)
print(player2)
while(player1 == player2):
player2 = ALL_TIMERS[randint(0, 99)]
player1_id = get_player_id(player1)
player2_id = get_player_id(player2)
player1_seasons = get_player_seasons(player1_id)
player2_seasons = get_player_seasons(player2_id)
ready = input()  # pause until the user presses Enter before running the slow per-season queries
print(player1 + " has " + str(games_with_x_or_more_points(player1_seasons, 30, player1_id)) + " games with 30 or more points")
print(player2 + " has " +str( games_with_x_or_more_points(player2_seasons, 30, player2_id)) + " games with 30 or more points")
# career = playercareerstats.PlayerCareerStats(player_id=player['id'])
# career_df = career.get_data_frames()[0]
# df_player_games.to_csv(filename)
# nba_players = players.get_players()
# for p in celtics_players:
# player_dict = [player for player in nba_players if player['full_name'] == p][0]
# career = playercareerstats.PlayerCareerStats(player_id=player_dict['id'])
# career_df = career.get_data_frames()[0]
# print(career_df)
# bron = player_info.available_seasons.get_dict()
# player_info = playercareerstats.career_totals_regular_season(per_mode36='totals', player_id=2544)
print("--- %s seconds ---" % (time.time() - start_time)) | 25.80531 | 126 | 0.718793 | 410 | 2,916 | 4.834146 | 0.243902 | 0.056509 | 0.030272 | 0.045409 | 0.284057 | 0.254289 | 0.083754 | 0.058527 | 0 | 0 | 0 | 0.023304 | 0.175926 | 2,916 | 113 | 127 | 25.80531 | 0.801498 | 0.20439 | 0 | 0.070175 | 0 | 0 | 0.054569 | 0.009095 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.175439 | 0 | 0.280702 | 0.087719 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c168f756bc02752d155d2b864b3e1da8b5fa59b8 | 2,653 | py | Python | data/Process_MIR1k.py | carrieeeeewithfivee/tf2_Vocal_Separation_UNet | 5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c | [
"MIT"
] | null | null | null | data/Process_MIR1k.py | carrieeeeewithfivee/tf2_Vocal_Separation_UNet | 5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c | [
"MIT"
] | 1 | 2022-01-02T06:54:27.000Z | 2022-01-02T12:09:13.000Z | data/Process_MIR1k.py | carrieeeeewithfivee/tf2_Vocal_Separation_UNet | 5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c | [
"MIT"
] | null | null | null | import os
from librosa.core import load, stft, istft, magphase
from librosa.output import write_wav
from concurrent.futures import ThreadPoolExecutor
from time import time
import asyncio
import os,glob
import numpy as np
from multiprocessing import cpu_count
#Thanks to https://github.com/jnzhng/keras-unet-vocal-separation
SAMPLE_RATE = 8192
WINDOW_SIZE = 1024
HOP_LENGTH = 768
def downsample(input_path, output_path):
wav, _ = load(input_path, sr=SAMPLE_RATE)
write_wav(output_path, wav, SAMPLE_RATE, norm=True)
print(f"Saving {output_path}")
def load_as_mag(file):
wav, _ = load(file, sr=None)
spectrogram = stft(wav, n_fft=WINDOW_SIZE, hop_length=HOP_LENGTH)
mag, _ = magphase(spectrogram)
return mag.astype(np.float32)
def save_to_npz(base, sample):
nps = {}
mix = load_as_mag(f'{base}/{sample}/mix.wav')
vocal = load_as_mag(f'{base}/{sample}/vocal.wav')
inst = load_as_mag(f'{base}/{sample}/inst.wav')
mix_max = mix.max()
mix_norm = mix / mix_max
vocal_norm = vocal / mix_max
inst_norm = inst / mix_max
#print(f"Saving {sample}")
try:
np.savez_compressed(f'MIR-1K_resized/{sample}.npz', mix=mix_norm, vocal=vocal_norm, inst=inst_norm)
except Exception as e:
print(e)
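# The saved archives can be read back with np.load, e.g. (illustrative name):
#   arrs = np.load('MIR-1K_resized/MIR-1K_01.npz')
#   mix, vocal, inst = arrs['mix'], arrs['vocal'], arrs['inst']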
if __name__ == '__main__':
voise = 'MIR-1K/voise'
bg = 'MIR-1K/bg'
mix = 'MIR-1K/mix'
name = 0
resampled_data = 'MIR-1K_resampled_data'
base = 'MIR-1K'
foldernames = []
for filename in sorted(glob.glob(os.path.join(voise, '*.wav'))):
foldernames.append(os.path.split(filename)[-1].replace('.wav',''))
dirs = foldernames
with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
for i in range(len(dirs)):
target_dir = 'MIR-1K_resampled_data/{}_{:0>2d}/'.format(base, i+1)
os.makedirs(target_dir, exist_ok=True)
pool.submit(downsample, f'{mix}/{dirs[i]}.wav', target_dir + 'mix.wav')
pool.submit(downsample, f'{bg}/{dirs[i]}.wav', target_dir + 'inst.wav')
pool.submit(downsample, f'{voise}/{dirs[i]}.wav', target_dir + 'vocal.wav')
# ## Save wav files to npz
# 1. Load wave files from `corpus_resized`.
# 2. Apply Short-time Fourier transform (STFT) to audio trios
# 3. Apply normalization to magnitudes and save as npz dict in `numpy/`
dirs = sorted(list(os.walk('MIR-1K_resampled_data'))[0][1])
print(dirs)
with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
#print("!!!")
for i in range(len(dirs)):
#print("!!!")
pool.submit(save_to_npz, 'MIR-1K_resampled_data', dirs[i]) | 35.851351 | 107 | 0.656238 | 390 | 2,653 | 4.276923 | 0.328205 | 0.026978 | 0.021583 | 0.043165 | 0.196043 | 0.113909 | 0.056355 | 0.056355 | 0.056355 | 0 | 0 | 0.016053 | 0.201659 | 2,653 | 74 | 108 | 35.851351 | 0.771483 | 0.115718 | 0 | 0.070175 | 0 | 0 | 0.150193 | 0.092426 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.22807 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c1693dff2b16a43c1fe7913423163831050a96a1 | 3,195 | py | Python | utils/utils.py | bo-miao/anomaly_classification | 08829b3cdc488c6c7867f02950b5e22b6a5d5435 | [
"Apache-2.0"
] | null | null | null | utils/utils.py | bo-miao/anomaly_classification | 08829b3cdc488c6c7867f02950b5e22b6a5d5435 | [
"Apache-2.0"
] | null | null | null | utils/utils.py | bo-miao/anomaly_classification | 08829b3cdc488c6c7867f02950b5e22b6a5d5435 | [
"Apache-2.0"
] | null | null | null | from utils import lr_scheduler, metric, prefetch, summary
import os, sys
import time
import numpy as np
from collections import OrderedDict
import glob
import math
import copy
import tqdm
from sklearn.metrics import roc_auc_score, roc_curve, auc
import matplotlib.pyplot as plt
from torch.cuda.amp import autocast
import cv2
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
rng = np.random.RandomState(2020)
def get_the_number_of_params(model, is_trainable=False):
"""get the number of the model"""
if is_trainable:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
return sum(p.numel() for p in model.parameters())
def AUC(anomal_scores, labels):
frame_auc = 0
try:
frame_auc = roc_auc_score(y_true=np.squeeze(labels, axis=0), y_score=np.squeeze(anomal_scores))
except:
print("AUC Cal ERROR: ", labels, anomal_scores)
return frame_auc
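# e.g. AUC(np.array([0.1, 0.9, 0.8]), np.array([[0, 1, 1]])) == 1.0
# (note: `labels` carries a leading batch axis of size 1 that gets squeezed).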
def evaluate_resnet(model, test_batch, args):
single_time = metric.AverageMeter('Time', ':6.3f')
progress = metric.ProgressMeter(len(test_batch), single_time, prefix="Evaluation: ")
model.eval()
counter = 0
tp = 0
for k, (images, labels) in enumerate(test_batch):
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
label = labels if args.label else None
label = label.view(-1)
input_image = images.detach()
a = time.time()
with autocast():
logit = model.forward(input_image)
if args.evaluate_time:
single_time.update((time.time() - a) * 1000)
progress.print(counter)
print("Single batch time cost {}ms".format(1000 * (time.time() - a)))
class_vector = F.softmax(logit, 1).data.squeeze()
        assert len(class_vector) == len(label), "prediction and label batch sizes must match"
probs, idx = class_vector.sort(1, True)
idx = idx[:,0]
tp += torch.sum(idx.view(-1)==label).item()
counter += len(label)
accuracy = tp / counter
print("INFERENCE ACCURACY IS {}".format(accuracy))
return accuracy
def visualize(recon, gt):
b, c, h, w = recon.size()
for i in range(b):
img1, img2 = recon[i], gt[i]
img = torch.cat((img1, img2), dim=2)
img = 255. * (img + 1.) / 2.
img = img.squeeze(0).byte().cpu().numpy().transpose((1, 2, 0))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2.resize(img, (600, 300))
frame, name = img, str(int(time.time()*1000))
cv2.imwrite(os.path.join("/data/miaobo/tmp", name+".jpg"), frame)
return True
def visualize_single(image):
b, c, h, w = image.size()
for i in range(b):
img = image[i]
img = 255. * (img + 1.) / 2.
img = img.byte().cpu().numpy().transpose((1, 2, 0))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
frame, name = img, str(int(time.time()*1000))
cv2.imwrite(os.path.join("/data/miaobo/tmp", name+".jpg"), frame)
return True
| 30.428571 | 103 | 0.638498 | 459 | 3,195 | 4.359477 | 0.363834 | 0.01999 | 0.010995 | 0.013993 | 0.209895 | 0.209895 | 0.193903 | 0.176912 | 0.176912 | 0.14093 | 0 | 0.02806 | 0.23036 | 3,195 | 104 | 104 | 30.721154 | 0.785685 | 0.008451 | 0 | 0.144578 | 0 | 0 | 0.047438 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 1 | 0.060241 | false | 0 | 0.253012 | 0 | 0.385542 | 0.048193 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c169f12d80ecf64a50d7329d9a77f916c0b26871 | 1,960 | py | Python | src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py | tlechauveCLS/kpi_mpc | 4dc61d210c2b97e6ac240e54a8d96c35cf9123de | [
"MIT"
] | null | null | null | src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py | tlechauveCLS/kpi_mpc | 4dc61d210c2b97e6ac240e54a8d96c35cf9123de | [
"MIT"
] | null | null | null | src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py | tlechauveCLS/kpi_mpc | 4dc61d210c2b97e6ac240e54a8d96c35cf9123de | [
"MIT"
] | 1 | 2022-03-23T07:48:27.000Z | 2022-03-23T07:48:27.000Z | #!/home1/datawork/agrouaze/conda_envs2/envs/py2.7_cwave/bin/python
# coding: utf-8
"""
"""
import sys
print(sys.executable)
import subprocess
import logging
from dateutil import rrule
import datetime
if __name__ == '__main__':
root = logging.getLogger ()
if root.handlers:
for handler in root.handlers:
root.removeHandler (handler)
import argparse
parser = argparse.ArgumentParser (description='start prun')
parser.add_argument ('--verbose',action='store_true',default=False)
args = parser.parse_args ()
if args.verbose:
logging.basicConfig (level=logging.DEBUG,format='%(asctime)s %(levelname)-5s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
else:
logging.basicConfig (level=logging.INFO,format='%(asctime)s %(levelname)-5s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
prunexe = '/appli/prun/bin/prun'
listing = '/home1/scratch/agrouaze/list_kpi_1d_v2_prun_test.txt' # written below
# call prun
opts = ' --split-max-lines=3 --background -e '
listing_content = []
sta = datetime.datetime(2015,1,1)
    #sta = datetime.datetime(2020,6,1) # for test 2, which uses the cross-assignments of partitions
logging.info('start year: %s',sta)
sto = datetime.datetime.today()
fid = open(listing,'w')
cpt = 0
for unit in ['S1A','S1B']:
for wv in ['wv1','wv2']:
logging.info('%s',unit)
for dd in rrule.rrule(rrule.DAILY,dtstart=sta,until=sto):
fid.write('%s %s %s\n'%(unit,wv,dd.strftime('%Y%m%d')))
cpt +=1
fid.close()
logging.info('listing written ; %s nb lines: %s',listing,cpt)
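    # Each listing line has the form "<unit> <wv> <YYYYMMDD>", e.g.
    # "S1A wv1 20150101"; prun then splits the listing into chunks of
    # 3 lines per job (--split-max-lines=3).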
pbs = '/home1/datahome/agrouaze/git/kpi_mpc/src/kpi_WV_hs/compute_kpi_1d_v2.pbs'
cmd = prunexe+opts+pbs+' '+listing
logging.info('cmd to cast = %s',cmd)
st = subprocess.check_call(cmd,shell=True)
logging.info('status cmd = %s',st) | 40 | 102 | 0.626531 | 269 | 1,960 | 4.468401 | 0.513011 | 0.054908 | 0.03827 | 0.049917 | 0.076539 | 0.076539 | 0.076539 | 0.076539 | 0.076539 | 0.076539 | 0 | 0.021484 | 0.216327 | 1,960 | 49 | 103 | 40 | 0.761068 | 0.101531 | 0 | 0.046512 | 0 | 0 | 0.251572 | 0.070898 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.139535 | 0 | 0.139535 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c16a520b3532e245375dff9d61f50950a6a91c7f | 20,482 | py | Python | pysrc/simulserver.py | juliusbierk/simultant | 9d454b58797399f60812c4d8c482a57e82b5dba7 | [
"MIT"
] | null | null | null | pysrc/simulserver.py | juliusbierk/simultant | 9d454b58797399f60812c4d8c482a57e82b5dba7 | [
"MIT"
] | null | null | null | pysrc/simulserver.py | juliusbierk/simultant | 9d454b58797399f60812c4d8c482a57e82b5dba7 | [
"MIT"
] | null | null | null | import asyncio
import concurrent
import functools
import json
import numpy as np
import torch
from aiohttp import web
from aiohttp.web_runner import GracefulExit
import aiohttp_cors
import logging
import csv
import multiprocessing
import queue
import pickle
# Local imports:
from torchfcts import function_from_code, get_default_args, check_code_get_args, get_f_expr_or_ode, get_const_bools
from torchfit import torch_fit
if __name__ == '__main__':
import dbfcts as db # we do not need a database connection for spawned processes
logging.basicConfig(level=logging.WARN)
logging.root.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
HOST = '127.0.0.1'
PORT = 7555
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
sys_print = print
def print(*args):
    # shadow the built-in print so every message is flushed immediately
    # (useful when stdout is piped, e.g. when the server runs as a subprocess)
    sys_print(*args, flush=True)
async def index(request):
return web.json_response({'running': True})
async def check_code(request):
data = await request.json()
d = check_code_get_args(data['code'], data['name_underscore'], data['expr_mode'], data['ode_dim'],
data['ode_dim_select'])
return web.json_response(d)
async def add_model(request):
data = await request.json()
if data['expr_mode'] and 'ode_dim' in data:
del data['ode_dim']
del data['ode_dim_select']
f = function_from_code(data['code'], data['name_underscore'])
kwargs = get_default_args(f, data['expr_mode'], data.get('ode_dim'))
consts = get_const_bools(f)
data['args'] = [{'name': k, 'value': v, 'const': consts[k]} for k, v in kwargs.items()]
await db.create_model(data['name'], data)
return web.json_response({'success': True})
async def delete_model(request):
data = await request.json()
await db.delete_model(data['name'])
return web.json_response({'success': True})
async def delete_data(request):
data = await request.json()
await db.delete_data(data['parent'])
return web.json_response({'success': True})
async def model_exist_check(request):
data = await request.json()
print(data['name'], await db.get_models_names())
return web.json_response({'exists': data['name'] in await db.get_models_names()})
async def model_list(request):
return web.json_response(await db.get_all_models())
async def data_list(request):
return web.json_response(await db.get_data_names())
async def plot_code(request):
data = await request.json()
if data['content']['expr_mode']:
mask, res, x = plot_code_py(data)
else: # ODEs can be slow to solve, so we spin up a new process to not block the async loop
with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
future = asyncio.wrap_future(executor.submit(plot_code_py, data))
mask, res, x = await future
return web.json_response({'x': x[mask].numpy().tolist(), 'y': res[mask].numpy().tolist()})
def plot_code_py(data):
content = data['content']
f_name = content['name_underscore']
f = function_from_code(content['code'], f_name)
kwargs = get_default_args(f, content['expr_mode'], content.get('ode_dim'))
f = get_f_expr_or_ode(content['code'], content['expr_mode'], f_name, content.get('ode_dim_select'))
# if not content['expr_mode']:
# kwargs['y0'] = torch.tensor(kwargs['y0'], dtype=torch.double)
for k in kwargs:
kwargs[k] = torch.tensor(kwargs[k], dtype=torch.double)
if 'xlim' in data:
x = torch.linspace(data['xlim'][0], data['xlim'][1], 250, dtype=torch.double)
else:
x = torch.linspace(0, 10, 250, dtype=torch.double)
with torch.no_grad():
res = f(x, **kwargs)
mask = torch.isfinite(res)
return mask, res, x
async def plot_data(request):
data = await request.json()
plot_data = []
max_n = data.get('max_n', 250)
for content in data['content']:
dataset = await db.get_data_content(content['id'])
if len(dataset['x']) > max_n:
skip = 1 + int(len(dataset['x']) / max_n)
else:
skip = 1
x = dataset['x'][::skip]
y = dataset['y'][::skip]
plot_data.append({'x': x, 'y': y, 'name': dataset['name'], 'mode': 'markers',
'type': 'scattergl'})
return web.json_response(plot_data)
async def upload_data(request):
data = await request.post()
example = None
filenames = []
has_header = json.loads(data['has_header'])
commit_data = json.loads(data['commit_data'])
multiple_x_axes = json.loads(data['multiple_x_axes'])
for fname in data:
if not fname.startswith('file_'):
continue
f = data[fname].file.read().decode('latin-1')
fname = fname[5:]
filenames.append(fname)
if not commit_data and len(filenames) > 1:
continue
sniffer = csv.Sniffer()
dialect = sniffer.sniff(f)
if has_header is None:
has_header = sniffer.has_header(f)
rows = [r for r in csv.reader(f.split('\n'), dialect=dialect) if len(r) > 0]
if has_header:
header = rows[0]
rows = rows[1:]
else:
header = ['x'] + [f'#{i}' for i in range(1, len(rows[0]))]
if commit_data:
try:
num_rows = np.array([[np.nan if x.strip() == '' else np.double(x) for x in r] for r in rows])
except ValueError:
return web.json_response({'success': False, 'error': 'Data contains non-numerical entries.'})
if multiple_x_axes:
for i in range(0, num_rows.shape[1], 2):
x = num_rows[:, i]
y = num_rows[:, i + 1]
mask = ~np.isnan(y)
if any(np.isnan(x[mask])):
return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'})
dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]),
'orig_x': list(x[mask]), 'orig_y': list(y[mask])}
await db.create_dataset(header[i + 1], fname, dataset)
else:
x = num_rows[:, 0]
for i in range(1, num_rows.shape[1]):
y = num_rows[:, i]
mask = ~np.isnan(y)
if any(np.isnan(x[mask])):
return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'})
dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]),
'orig_x': list(x[mask]), 'orig_y': list(y[mask])}
await db.create_dataset(header[i], fname, dataset)
else:
cut_horizontal = False
cut_vertical = False
if len(rows[0]) > 7:
rows = [r[:7] + ['⋯'] for r in rows]
header = header[:7] + ['⋯']
cut_horizontal = True
if len(rows) > 7:
rows = rows[:7] + [['<center>⋮</center>'] * len(rows[0])]
cut_vertical = True
if cut_horizontal and cut_vertical:
rows[-1][-1] = '⋱'
example = {'header': header, 'has_header': has_header, 'data': rows, 'fname': fname}
if commit_data:
return web.json_response({'success': True, 'error': None})
else:
res = {'filenames': filenames, 'example': example}
return web.json_response(res)
async def shutdown(request):
print('Stopping python server')
fit_process.terminate()
raise GracefulExit
async def stop_spurious_running_fits_and_empty_stop_queue(n_max=5):
# stop any fits that might be running (not that any should be...)
for _ in range(n_max):
interrupt_queue.put(True)
await asyncio.sleep(0.01)
while True:
try:
interrupt_queue.get_nowait()
except queue.Empty:
break
async def load_fit_models_data(fit_info):
# Get model code
models = {}
for model_id, d in fit_info['models'].items():
m = await db.get_models_content(d['name'])
models[model_id] = {'code': m['code'], 'expr_mode': m['expr_mode'], 'name_underscore': m['name_underscore'],
'ode_dim': m.get('ode_dim'), 'ode_dim_select': m.get('ode_dim_select')}
# Get data
data = []
for data_id, d in fit_info['data'].items():
if d['in_use']:
db_data = await db.get_data_content(d['id'])
data.append({'x': db_data['x'], 'y': db_data['y'], 'weight': d['weight'], 'model': d['model'],
'parameters': d['parameters']})
return fit_info, data, models
async def run_fit(request):
if request.method == 'POST':
await stop_spurious_running_fits_and_empty_stop_queue()
run_fit_queue.put(await load_fit_models_data(await request.json()))
return web.json_response({'status': 'started'})
return web.json_response({'error': 'must be a POST request'})
async def interrupt_fit(request):
if request.method == 'POST':
interrupt_queue.put(True)
return web.json_response({'status': 'interrupting'})
return web.json_response({'error': 'must be a POST request'})
async def fit_result(request):
try:
fit, r2 = result_queue.get_nowait()
# Empty iteration queue:
await asyncio.sleep(0.01)
try:
while True:
status_queue.get_nowait()
except queue.Empty:
pass
except queue.Empty:
# No fit result yet, check if there is a loss update:
d = None
try:
while True:
d = status_queue.get_nowait()
except queue.Empty:
pass
return web.json_response({'status': 'no-fit', 'info': d})
return web.json_response({'status': 'success', 'fit': fit, 'r2': r2})
class PickleableF:
def __init__(self, m):
self.m = m
def __call__(self, *args, **kwargs):
m = self.m
f = get_f_expr_or_ode(m['code'], m['expr_mode'], m['name_underscore'], m.get('ode_dim_select'))
return list(f(*args, **kwargs).numpy())
async def plot_fit(request):
data = await request.json()
plot_data, is_fitted = await make_plot(data)
res = {'plots': plot_data, 'is_fitted': is_fitted}
return web.json_response(res)
async def make_plot(data):
plot_data = []
max_n = data.get('max_n', 250)
# Generate functions
models = {}
for model_id, d in data['models'].items():
m = await db.get_models_content(d['name'])
models[model_id] = PickleableF(m)
models[model_id].expr_mode = m['expr_mode']
models[model_id].ode_dim = m.get('ode_dim')
# Plot data
xmin = float('infinity')
xmax = float('-infinity')
for d_id in data['data']:
d = data['data'][d_id]
if d['in_use']:
dataset = await db.get_data_content(d['id'])
if len(dataset['x']) > max_n:
skip = 1 + int(len(dataset['x']) / max_n)
else:
skip = 1
x = dataset['x'][::skip]
y = dataset['y'][::skip]
if min(x) < xmin:
xmin = min(x)
if max(x) > xmax:
xmax = max(x)
plot_data.append({'x': x, 'y': y, 'name': dataset['name'], 'mode': 'markers',
'type': 'scattergl', 'legendgroup': d_id})
# Plot fits
x = np.linspace(xmin, xmax, 250)
x_list = list(x)
x_torch = torch.from_numpy(x)
is_fitted = False
with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
for i, d_id in enumerate(data['data']):
d = data['data'][d_id]
if d['in_use']:
f = models[d['model']]
is_fitted = True
kwargs = {}
for p in d['parameters']:
p_id = d['parameters'][p]
parameter = data['parameters'][p_id]
if parameter['const']:
kwargs[p] = parameter['value']
elif parameter.get('fit') is None:
kwargs[p] = parameter['value']
is_fitted = False
else:
kwargs[p] = parameter['fit']
for p in kwargs:
kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double)
if not f.expr_mode:
kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim)
# Run function evaluation in parallel, without blocking the server loop:
future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs))
c = DEFAULT_PLOTLY_COLORS[i % len(DEFAULT_PLOTLY_COLORS)]
plot_data.append(
{'x': x_list, 'future': future, 'mode': 'lines', 'showlegend': False, 'legendgroup': d_id,
'line': {'color': c} if is_fitted else {'color': c, 'dash': 'dash'}})
for d in plot_data:
if 'future' in d:
d['y'] = await d['future']
del d['future']
return plot_data, is_fitted
async def make_download(data):
download_data = []
# Generate functions
models = {}
for model_id, d in data['models'].items():
m = await db.get_models_content(d['name'])
models[model_id] = PickleableF(m)
models[model_id].expr_mode = m['expr_mode']
models[model_id].ode_dim = m.get('ode_dim')
# Get data and range
datasets = {}
xmin = float('infinity')
xmax = float('-infinity')
for d_id in data['data']:
d = data['data'][d_id]
if d['in_use']:
dataset = await db.get_data_content(d['id'])
datasets[d_id] = dataset
x = dataset['x']
if min(x) < xmin:
xmin = min(x)
if max(x) > xmax:
xmax = max(x)
# Generate fits and store data
x = np.linspace(xmin, xmax, 250)
x_list = list(x)
x_torch = torch.from_numpy(x)
with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
for i, d_id in enumerate(data['data']):
d = data['data'][d_id]
if d['in_use']:
dataset = datasets[d_id]
store = {
'name': dataset['name'],
'x_data': dataset['x'],
'y_data': dataset['y']
}
f = models[d['model']]
kwargs = {}
list_of_parameters = []
for p in d['parameters']:
p_id = d['parameters'][p]
parameter = data['parameters'][p_id]
if parameter['const']:
kwargs[p] = parameter['value']
elif parameter.get('fit') is None:
kwargs[p] = parameter['value']
else:
kwargs[p] = parameter['fit']
info = {'name': p,
'type': parameter['type'],
'value:': kwargs[p],
'is_const': parameter['const']}
if parameter['type'] == 'detached':
info['detached_name'] = parameter['name']
list_of_parameters.append(info)
store['parameters'] = list_of_parameters
for p in kwargs:
kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double)
if not f.expr_mode:
kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim)
# Run function evaluation in parallel, without blocking the server loop:
future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs))
store['x_fit'] = x_list
store['future'] = future
download_data.append(store)
for d in download_data:
if 'future' in d:
d['y_fit'] = await d['future']
del d['future']
return download_data
async def download_fit(request):
data = await request.json()
download_data = await make_download(data)
return web.json_response(download_data, dumps=functools.partial(json.dumps, indent=4))
def transform_y0_kwargs_for_ode(kwargs, dim):
y0 = np.ones(dim)
for i in range(dim):
y0[i] = kwargs[f'y0[{i}]']
del kwargs[f'y0[{i}]']
kwargs['y0'] = torch.from_numpy(y0)
return kwargs
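# e.g. transform_y0_kwargs_for_ode({'y0[0]': 1.0, 'y0[1]': 0.5, 'k': 2.0}, 2)
# returns {'k': 2.0, 'y0': tensor([1.0, 0.5], dtype=torch.float64)}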
def fitter(input_queue, output_queue, status_queue, interrupt_queue):
print('Fitting queue running')
while True:
fit_info, data, models = input_queue.get(True)
logger.debug('Got fit to be run')
# First get all parameters
parameter_names = []
values = []
const_index = 0
for parameter_id, d in fit_info['parameters'].items():
if not d['const']:
parameter_names.append(parameter_id)
values.append(d['value'])
const_index += 1
for parameter_id, d in fit_info['parameters'].items():
if d['const']:
parameter_names.append(parameter_id)
values.append(d['value'])
logger.debug(f'#parameters = {len(fit_info["parameters"])}')
logger.debug(f'#fit parameters = {const_index}')
for d in data:
d['parameter_indeces'] = {k: parameter_names.index(v) for k, v in d['parameters'].items()}
if const_index == 0:
logger.info('No parameters to be fitted')
output_queue.put(None)
continue
# with open('cache.pkl', 'wb') as f:
# pickle.dump((parameter_names, values, const_index, models, data), f)
method = fit_info.get('method')
fit, r2 = torch_fit(parameter_names, values, const_index, models, data, status_queue, interrupt_queue,
method=method)
output_queue.put((fit, r2))
if __name__ == '__main__':
multiprocessing.freeze_support()
# with open('cache.pkl', 'rb') as f:
# torch_fit(*pickle.load(f))
# exit()
# Fitter
run_fit_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
status_queue = multiprocessing.Queue()
interrupt_queue = multiprocessing.Queue()
fit_process = multiprocessing.Process(target=fitter,
args=(run_fit_queue, result_queue, status_queue, interrupt_queue))
fit_process.daemon = True
fit_process.start()
# Web Server
app = web.Application()
cors = aiohttp_cors.setup(app, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
routes = [('/', index),
('/check_code', check_code),
('/plot_code', plot_code),
('/add_model', add_model),
('/delete_model', delete_model),
('/delete_data', delete_data),
('/model_exist_check', model_exist_check),
('/model_list', model_list),
('/upload_data', upload_data),
('/data_list', data_list),
('/plot_data', plot_data),
('/run_fit', run_fit),
('/interrupt_fit', interrupt_fit),
('/plot_fit', plot_fit),
('/fit_result', fit_result),
('/download_fit', download_fit),
              ('/exit', shutdown),
]
methods = ['GET', 'POST', 'DELETE']
for uri, f in routes:
resource = cors.add(app.router.add_resource(uri))
for m in methods:
cors.add(resource.add_route(m, f))
print('Python server started')
try:
web.run_app(app, host=HOST, port=PORT, shutdown_timeout=0.0)
finally:
fit_process.terminate()
| 33.412724 | 117 | 0.553803 | 2,573 | 20,482 | 4.223475 | 0.139915 | 0.019049 | 0.027514 | 0.044446 | 0.434435 | 0.374804 | 0.341401 | 0.298702 | 0.272936 | 0.246066 | 0 | 0.013218 | 0.309296 | 20,482 | 612 | 118 | 33.46732 | 0.75493 | 0.043257 | 0 | 0.383772 | 0 | 0 | 0.113757 | 0.002709 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0.004386 | 0.037281 | 0 | 0.116228 | 0.015351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c16b54a8fb917e5a067468f0c78cd337a4b77c6b | 4,312 | py | Python | streak/api_get.py | srevinsaju/streak | ff21f39b06da3010568940d335c32bd7d357ca69 | [
"MIT"
] | 2 | 2022-03-07T20:18:46.000Z | 2022-03-08T12:48:04.000Z | streak/api_get.py | srevinsaju/streak | ff21f39b06da3010568940d335c32bd7d357ca69 | [
"MIT"
] | null | null | null | streak/api_get.py | srevinsaju/streak | ff21f39b06da3010568940d335c32bd7d357ca69 | [
"MIT"
] | null | null | null | from flask import jsonify, make_response, request
from . import app
from .api_post import engine, login
from .core import utility_funcs
from sqlalchemy.orm import sessionmaker
from sqlalchemy_cockroachdb import run_transaction
from .api_post import login_required
@app.route("/api/v1/tasks/list")
@login_required
def list():
user_uuid = request.environ["user_id"]
d = []
tasks = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_tasks(session, user_uuid),
)
for task in tasks:
d.append(
{
"id": task.task_id,
"name": task.task_name,
"description": task.task_description,
"schedule": str(task.schedule),
"timestamp": str(task.timestamp),
}
)
return jsonify(d)
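# Example response body (one entry per task of the authenticated user):
# [{"id": ..., "name": ..., "description": ..., "schedule": "...",
#   "timestamp": "..."}]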
@app.route("/api/v1/task/<task_uuid>")
@login_required
def meta(task_uuid):
user_uuid = request.environ["user_id"]
Session = sessionmaker(bind=engine)
with Session() as session:
task = utility_funcs.get_task(session, user_uuid, task_uuid)
return {
"id": task.task_id,
"name": task.task_name,
"description": task.task_description,
"schedule": str(task.schedule),
"timestamp": str(task.timestamp),
}
@app.route("/api/v1/task/<task_uuid>/completed")
@login_required
def get_completed(task_uuid):
user_uuid = request.environ["user_id"]
is_completed = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.has_task_completed(
session, task_id=task_uuid, user_id=user_uuid
),
)
return {"completed": is_completed}
@app.route("/api/v1/task/<task_uuid>/current-streak")
@login_required
def get_current_streak(task_uuid):
user_uuid = request.environ["user_id"]
streak = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.task_streak_status(
session, task_id=task_uuid, user_id=user_uuid
),
)
return {"streak": streak}
def _get_info_fmt(session, user_uuid):
user = utility_funcs.get_user(session, user_uuid)
return {
"id": str(user.user_id),
"username": user.username,
"name": user.name,
"last_seen": user.last_seen,
"last_checked_events": user.last_checked_events,
}
@app.route("/api/v1/users/<user_id>")
@login_required
def get_info(user_uuid):
return run_transaction(
sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid)
)
@app.route("/api/v1/self")
@login_required
def get_self_info():
user_uuid = request.environ["user_id"]
return run_transaction(
sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid)
)
@app.route("/api/v1/users/<friend_id>/friend_status")
@login_required
def friend_status(friend_id):
user_uuid = request.environ["user_id"]
print(friend_id, user_uuid, friend_id == str(user_uuid))
if friend_id == str(user_uuid):
return make_response("Cannot make friends with yourself", 403)
return {
"friends": run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.check_friend(session, user_uuid, friend_id),
)
}
@app.route("/api/v1/streaks/maximum")
@login_required
def max_streak():
user_uuid = request.environ["user_id"]
all, month, year = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_max_streak(session, user_uuid),
)
return {"all_time": all, "month": month, "year": year}
@app.route("/api/v1/task/<task_id>/maximum")
@login_required
def max_streak_task(task_id):
user_uuid = request.environ["user_id"]
all, month, year = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_max_streak_task(session, user_uuid, task_id),
)
return {"all_time": all, "month": month, "year": year}
@app.route("/api/v1/events")
@login_required
def get_notifications():
user_uuid = request.environ["user_id"]
return run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_notifications(session, user_uuid),
)
| 28 | 87 | 0.661874 | 541 | 4,312 | 5.007394 | 0.147874 | 0.073828 | 0.040605 | 0.047988 | 0.598007 | 0.558509 | 0.512366 | 0.472868 | 0.433001 | 0.365449 | 0 | 0.003854 | 0.217764 | 4,312 | 153 | 88 | 28.183007 | 0.799288 | 0 | 0 | 0.419355 | 0 | 0 | 0.12013 | 0.049165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08871 | false | 0 | 0.056452 | 0.008065 | 0.241935 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c16c66d300e2ec1188a948c8172e2c9116bd68b9 | 2,831 | py | Python | octopus/modules/account/dao.py | tuub/magnificent-octopus | 62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9 | [
"Apache-2.0"
] | null | null | null | octopus/modules/account/dao.py | tuub/magnificent-octopus | 62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9 | [
"Apache-2.0"
] | null | null | null | octopus/modules/account/dao.py | tuub/magnificent-octopus | 62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9 | [
"Apache-2.0"
] | 2 | 2019-12-17T14:55:17.000Z | 2020-02-03T12:35:24.000Z | from octopus.modules.es import dao
from datetime import datetime
from octopus.modules.account.exceptions import NonUniqueAccountException
def query_filter(q):
"""Function used by the query endpoint to ensure only the relevant account data is returned"""
# q is an esprit.models.Query object
# this limits the query to certain fields in the source, so that things like password
# hashes and activation/reset tokens are never sent to the client
q.include_source(["id", "email", "created_date", "last_updated", "role"])
class BasicAccountDAO(dao.ESDAO):
__type__ = 'account'
@classmethod
def pull_by_email(cls, email):
q = AccountQuery(email=email)
accs = cls.object_query(q=q.query())
if len(accs) > 1:
raise NonUniqueAccountException("There is more than one user account with the email {x}".format(x=email))
elif len(accs) == 1:
return accs[0]
else:
return None
@classmethod
def get_by_reset_token(cls, reset_token, not_expired=True):
q = AccountQuery(reset_token=reset_token)
accs = cls.object_query(q=q.query())
if len(accs) > 1:
raise NonUniqueAccountException("There is more than one user account with the reset token {x}".format(x=reset_token))
elif len(accs) == 0:
return None
acc = accs[0]
if acc.is_reset_expired() and not_expired:
return None
return acc
@classmethod
def get_by_activation_token(cls, activation_token, not_expired=True):
q = AccountQuery(activation_token=activation_token)
accs = cls.object_query(q=q.query())
if len(accs) > 1:
raise NonUniqueAccountException("There is more than one user account with the activation token {x}".format(x=activation_token))
elif len(accs) == 0:
return None
acc = accs[0]
if acc.is_activation_expired() and not_expired:
return None
return acc
class AccountQuery(object):
def __init__(self, email=None, reset_token=None, activation_token=None):
self.email = email
self.reset_token = reset_token
self.activation_token = activation_token
def query(self):
q = {
"query" : {
"bool" : {
"must" : []
}
}
}
if self.email is not None:
q["query"]["bool"]["must"].append({"term" : {"email.exact" : self.email}})
if self.reset_token is not None:
q["query"]["bool"]["must"].append({"term" : {"reset_token.exact" : self.reset_token}})
if self.activation_token is not None:
q["query"]["bool"]["must"].append({"term" : {"activation_token.exact" : self.activation_token}})
return q | 36.766234 | 139 | 0.620276 | 359 | 2,831 | 4.749304 | 0.270195 | 0.070381 | 0.018768 | 0.032845 | 0.380059 | 0.380059 | 0.342522 | 0.342522 | 0.296774 | 0.277419 | 0 | 0.004373 | 0.273048 | 2,831 | 77 | 140 | 36.766234 | 0.824101 | 0.096079 | 0 | 0.327869 | 0 | 0 | 0.13127 | 0.008621 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0 | 0.04918 | 0 | 0.344262 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
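# Example (illustrative): AccountQuery(email="a@example.com").query() yields
# {"query": {"bool": {"must": [{"term": {"email.exact": "a@example.com"}}]}}}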