hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7b8609ea8c6c4e15219756fff731c7fbf1f2405 | 866 | py | Python | app/routers/__init__.py | 01xu10/pity | ac4aafba47d916ac8731ba087ff26eb06f90d61c | [
"Apache-2.0"
] | 1 | 2021-11-11T14:12:36.000Z | 2021-11-11T14:12:36.000Z | app/routers/__init__.py | 01xu10/pity | ac4aafba47d916ac8731ba087ff26eb06f90d61c | [
"Apache-2.0"
] | null | null | null | app/routers/__init__.py | 01xu10/pity | ac4aafba47d916ac8731ba087ff26eb06f90d61c | [
"Apache-2.0"
] | null | null | null | from fastapi import Header
from starlette import status

from app.excpetions.RequestException import AuthException, PermissionException
from app.middleware.Jwt import UserToken
from config import Config

FORBIDDEN = "Sorry, you do not have sufficient permissions"


class Permission:
    def __init__(self, role: int = Config.GUEST):
        self.role = role

    def __call__(self, token: str = Header(...)):
        if not token:
            raise AuthException(status.HTTP_200_OK, "User identity authentication failed, please check")
        try:
            user_info = UserToken.parse_token(token)
            if user_info.get("role", 0) < self.role:
                raise PermissionException(status.HTTP_200_OK, FORBIDDEN)
        except PermissionException as e:
            raise e
        except Exception as e:
            raise AuthException(status.HTTP_200_OK, str(e))
        return user_info
| 32.074074 | 79 | 0.650115 | 100 | 866 | 5.45 | 0.48 | 0.044037 | 0.07156 | 0.082569 | 0.121101 | 0.121101 | 0 | 0 | 0 | 0 | 0 | 0.015949 | 0.275982 | 866 | 26 | 80 | 33.307692 | 0.85327 | 0 | 0 | 0 | 0 | 0 | 0.038095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.238095 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ba5eeccf839d34334c3273069700ce6031b16d | 764 | py | Python | losses.py | rfonod/deepsleep2 | fa7703b6a55d6620a3f53bc463c59bde4f2f2d83 | [
"MIT"
] | null | null | null | losses.py | rfonod/deepsleep2 | fa7703b6a55d6620a3f53bc463c59bde4f2f2d83 | [
"MIT"
] | null | null | null | losses.py | rfonod/deepsleep2 | fa7703b6a55d6620a3f53bc463c59bde4f2f2d83 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn


class CustomBCELoss(nn.Module):
    def __init__(self):
        super(CustomBCELoss, self).__init__()
        self.loss = nn.BCELoss()

    def forward(self, y_hat, y):
        y_hat = y_hat.view(-1)
        y = y.view(-1)
        y_hat = y_hat[y > -0.5]
        y = y[y > -0.5]
        return self.loss(y_hat, y)


class CustomBCEWithLogitsLoss(nn.Module):
    def __init__(self):
        super(CustomBCEWithLogitsLoss, self).__init__()
        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, y_hat, y):
        y_hat = y_hat.view(-1)
        y = y.view(-1)
        y_hat = y_hat[y > -0.5]
        y = y[y > -0.5]
        return self.loss(y_hat, y)
| 20.105263 | 55 | 0.515707 | 105 | 764 | 3.485714 | 0.219048 | 0.131148 | 0.136612 | 0.087432 | 0.63388 | 0.535519 | 0.404372 | 0.404372 | 0.404372 | 0.404372 | 0 | 0.024341 | 0.354712 | 764 | 38 | 56 | 20.105263 | 0.718053 | 0 | 0 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7bad4267f3dae5546f7f7b439c0829f830e0fe8 | 1,171 | py | Python | molecule/default/tests/test_hetzner_robot_state.py | nl2go/hetzner-firewall | 68f9ea8fd5502fbd8a1130aa74b957b521f1a18c | [
"MIT"
] | 10 | 2019-11-20T11:10:54.000Z | 2020-12-14T11:59:23.000Z | molecule/default/tests/test_hetzner_robot_state.py | nl2go/hetzner-firewall | 68f9ea8fd5502fbd8a1130aa74b957b521f1a18c | [
"MIT"
] | 1 | 2019-12-11T10:36:21.000Z | 2019-12-11T10:36:21.000Z | molecule/default/tests/test_hetzner_robot_state.py | nl2go/hetzner-firewall | 68f9ea8fd5502fbd8a1130aa74b957b521f1a18c | [
"MIT"
] | 1 | 2020-11-21T14:23:09.000Z | 2020-11-21T14:23:09.000Z | import os

import requests
import unittest

from requests.auth import HTTPBasicAuth


class DefaultTest(unittest.TestCase):

    hetzner_robot_base_url = os.getenv(
        'HETZNER_ROBOT_BASE_URL', 'http://localhost:3000'
    )
    auth = HTTPBasicAuth('robot', 'secret')

    def test_firewall_templates_unchanged(self):
        response = requests.get(self.hetzner_robot_base_url +
                                "/firewall/template", auth=self.auth)
        self.assertEqual(len(response.json()), 1)
        self.assertDictEqual(response.json()[0], {
            'firewall_template': {
                'id': 1,
                'name': 'Existing Template',
                'whitelist_hos': True,
                'is_default': False,
                'rules': {
                    'input': [{
                        'action': 'accept',
                        'ip_version': 'ipv4',
                        'name': 'Allow all'}]
                }}})

    def test_firewall_amount_unchanged(self):
        response = requests.get(self.hetzner_robot_base_url +
                                "/firewall", auth=self.auth)
        self.assertEqual(len(response.json()), 0)
| 33.457143 | 69 | 0.537148 | 109 | 1,171 | 5.568807 | 0.486239 | 0.079077 | 0.105437 | 0.125206 | 0.345964 | 0.345964 | 0.345964 | 0.345964 | 0.207578 | 0.207578 | 0 | 0.011734 | 0.345004 | 1,171 | 34 | 70 | 34.441176 | 0.779661 | 0 | 0 | 0.068966 | 0 | 0 | 0.164816 | 0.018787 | 0 | 0 | 0 | 0 | 0.103448 | 1 | 0.068966 | false | 0 | 0.137931 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7bb54e469408b65070753ecfc360eaaedddf1cc | 3,245 | py | Python | tools/memory_inspector/memory_inspector/backends/memdump_parser.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/memory_inspector/memory_inspector/backends/memdump_parser.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/memory_inspector/memory_inspector/backends/memdump_parser.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This parser turns the am memdump output into a |memory_map.Map| instance."""

import base64
import logging
import re

from memory_inspector.core import memory_map


def Parse(content):
  """Parses the output of memdump.

  memdump (see chrome/src/tools/memdump) is a Linux/Android binary meant to be
  executed on the target device which extracts memory map information about one
  or more processes. In principle it can be seen as an alternative to cat-ing
  /proc/PID/smaps, but with extra features (multiprocess accounting and resident
  pages reporting).

  The expected memdump output looks like this:
  ------------------------------------------------------------------------------
  [ PID=1234]
  1000-2000 r-xp 0 private_unevictable=4096 private=8192 shared_app=[] \
      shared_other_unevictable=4096 shared_other=4096 "/lib/foo.so" [v///fv0D]
  ... other entries like the one above.
  ------------------------------------------------------------------------------
  The output is extremely similar to /proc/PID/smaps, with the following notes:
  - unevictable has pretty much the same meaning of "dirty", in VM terms.
  - private and shared_other are cumulative. This means that the "clean" part
    must be calculated as the difference of (private - private_unevictable).
  - The final field [v///fv0D] is a base64 encoded bitmap which contains the
    information about which pages inside the mapping are resident (present).

  See tests/android_backend_test.py for a more complete example.

  Args:
    content: string containing the memdump output.

  Returns:
    An instance of |memory_map.Map|.
  """
  RE = (r'^([0-9a-f]+)-([0-9a-f]+)\s+'
        r'([rwxps-]{4})\s+'
        r'([0-9a-f]+)\s+'
        r'private_unevictable=(\d+) private=(\d+) '
        r'shared_app=(.*?) '
        r'shared_other_unevictable=(\d+) shared_other=(\d+) '
        r'\"(.*)\" '
        r'\[([a-zA-Z0-9+/=-_:]*)\]$')
  map_re = re.compile(RE)
  skip_first_n_lines = 1

  maps = memory_map.Map()
  for line in content.splitlines():
    line = line.rstrip('\r\n')
    if skip_first_n_lines > 0:
      skip_first_n_lines -= 1
      continue
    m = map_re.match(line)
    if not m:
      logging.warning('Skipping unrecognized memdump line "%s"' % line)
      continue
    start = int(m.group(1), 16)
    end = int(m.group(2), 16) - 1  # end addr is inclusive in memdump output.
    if (start > end):
      # Sadly, this actually happened. Probably a kernel bug, see b/17402069.
      logging.warning('Skipping unfeasible mmap "%s"' % line)
      continue
    entry = memory_map.MapEntry(
        start=start,
        end=end,
        prot_flags=m.group(3),
        mapped_file=m.group(10),
        mapped_offset=int(m.group(4), 16))
    entry.priv_dirty_bytes = int(m.group(5))
    entry.priv_clean_bytes = int(m.group(6)) - entry.priv_dirty_bytes
    entry.shared_dirty_bytes = int(m.group(8))
    entry.shared_clean_bytes = int(m.group(9)) - entry.shared_dirty_bytes
    entry.resident_pages = [ord(c) for c in base64.b64decode(m.group(11))]
    maps.Add(entry)
  return maps
| 36.875 | 80 | 0.642219 | 468 | 3,245 | 4.350427 | 0.455128 | 0.02947 | 0.030943 | 0.027505 | 0.058939 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031502 | 0.197843 | 3,245 | 87 | 81 | 37.298851 | 0.750672 | 0.530354 | 0 | 0.069767 | 0 | 0 | 0.184932 | 0.073288 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.093023 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c205d1562ec0fb93d3ebdfa9f3056c3f608f16 | 809 | py | Python | lib/reda/testing/test_container_ert.py | j-hase/reda | b6419c39842cfbdd9380a27a5c6e9a04ccaeb294 | [
"MIT"
] | null | null | null | lib/reda/testing/test_container_ert.py | j-hase/reda | b6419c39842cfbdd9380a27a5c6e9a04ccaeb294 | [
"MIT"
] | null | null | null | lib/reda/testing/test_container_ert.py | j-hase/reda | b6419c39842cfbdd9380a27a5c6e9a04ccaeb294 | [
"MIT"
] | null | null | null | """Container tests"""
import pandas as pd

import reda


def test_init():
    """test initializing an empty ERT container"""
    container = reda.ERT()


def test_init_with_data():
    """test initializing an ERT container and provide good data"""
    df = pd.DataFrame(
        [
            # normals
            (0, 1, 2, 4, 3, 1.1),
            (0, 1, 2, 5, 4, 1.2),
            (0, 1, 2, 6, 5, 1.3),
            (0, 1, 2, 7, 6, 1.4),
            (0, 2, 3, 5, 4, 1.5),
            (0, 2, 3, 6, 5, 1.6),
            (0, 2, 3, 7, 6, 1.7),
            (0, 3, 4, 6, 5, 1.8),
            (0, 3, 4, 7, 6, 1.9),
            (0, 4, 5, 7, 6, 2.0),
        ],
        columns=['timestep', 'a', 'b', 'm', 'n', 'r'],
    )
    container_good = reda.ERT(data=df)
    assert container_good.data.shape[0] == df.shape[0]
| 25.28125 | 66 | 0.430161 | 130 | 809 | 2.630769 | 0.315385 | 0.02924 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.141454 | 0.370828 | 809 | 31 | 67 | 26.096774 | 0.530452 | 0.150803 | 0 | 0 | 0 | 0 | 0.019374 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c3510ee0d82cf4e8f4fa6ca5d22a1a4c273690 | 13,191 | py | Python | client/tools/swarming_tasks_cost.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/swarming_client/tools/swarming_tasks_cost.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/swarming_client/tools/swarming_tasks_cost.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

"""Calculate statistics about tasks.

Saves the data fetched from the server into a json file to enable reprocessing
the data without having to always fetch from the server.
"""

import datetime
import json
import logging
import optparse
import os
import subprocess
import sys
import urllib

CLIENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
    __file__.decode(sys.getfilesystemencoding()))))

_EPOCH = datetime.datetime.utcfromtimestamp(0)

# Type of bucket to use.
MAJOR_OS, MAJOR_OS_ASAN, MINOR_OS, MINOR_OS_GPU = range(4)


def do_bucket(items, bucket_type):
  """Categorizes the tasks based on one of the bucket types defined above."""
  out = {}
  for task in items:
    if 'heartbeat:1' in task['tags']:
      # Skip heartbeats.
      continue
    is_asan = 'asan:1' in task['tags']
    os_tag = None
    gpu_tag = None
    for t in task['tags']:
      if t.startswith('os:'):
        os_tag = t[3:]
        if os_tag == 'Linux':
          # GPU tests still specify Linux.
          # TODO(maruel): Fix the recipe.
          os_tag = 'Ubuntu'
      elif t.startswith('gpu:'):
        gpu_tag = t[4:]

    if bucket_type in (MAJOR_OS, MAJOR_OS_ASAN):
      if os_tag:
        os_tag = os_tag.split('-')[0]
    tag = os_tag or ''
    if bucket_type == MINOR_OS_GPU and gpu_tag and gpu_tag != 'none':
      tag += ' gpu:' + gpu_tag
    if bucket_type == MAJOR_OS_ASAN and is_asan:
      tag += ' ASan'
    out.setdefault(tag, []).append(task)

    # Also create global buckets for ASan.
    if bucket_type == MAJOR_OS_ASAN:
      tag = '(any OS) ASan' if is_asan else '(any OS) Not ASan'
      out.setdefault(tag, []).append(task)
  return out


def seconds_to_timedelta(seconds):
  """Converts seconds into datetime.timedelta, stripping sub-second precision.

  This is for presentation, where subsecond values for summaries are not useful.
  """
  return datetime.timedelta(seconds=round(seconds))


def parse_time_option(value):
  """Converts time as an option into a datetime.datetime.

  Returns None if not specified.
  """
  if not value:
    return None
  try:
    return _EPOCH + datetime.timedelta(seconds=int(value))
  except ValueError:
    pass
  for fmt in (
      '%Y-%m-%d',
      '%Y-%m-%d %H:%M',
      '%Y-%m-%dT%H:%M',
      '%Y-%m-%d %H:%M:%S',
      '%Y-%m-%dT%H:%M:%S',
      '%Y-%m-%d %H:%M:%S.%f',
      '%Y-%m-%dT%H:%M:%S.%f'):
    try:
      return datetime.datetime.strptime(value, fmt)
    except ValueError:
      pass
  raise ValueError('Failed to parse %s' % value)


def parse_time(value):
  """Converts serialized time from the API to datetime.datetime."""
  for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'):
    try:
      return datetime.datetime.strptime(value, fmt)
    except ValueError:
      pass
  raise ValueError('Failed to parse %s' % value)


def average(items):
  if not items:
    return 0.
  return sum(items) / len(items)


def median(items):
  return percentile(items, 50)


def percentile(items, percent):
  """Uses NIST method."""
  if not items:
    return 0.
  rank = percent * .01 * (len(items) + 1)
  rank_int = int(rank)
  rest = rank - rank_int
  if rest and rank_int < len(items) - 1:
    return items[rank_int] + rest * (items[rank_int+1] - items[rank_int])
  return items[min(rank_int, len(items) - 1)]


def sp(dividend, divisor):
  """Returns the percentage for dividend/divisor, safely."""
  if not divisor:
    return 0.
  return 100. * float(dividend) / float(divisor)


def fetch_data(options):
  """Fetches TaskResultSummary as JSON from options.swarming and writes it to
  options.json.
  """
  if not options.start:
    # Defaults to 25 hours ago.
    options.start = datetime.datetime.utcnow() - datetime.timedelta(
        seconds=25*60*60)
  else:
    options.start = parse_time_option(options.start)
  if not options.end:
    options.end = options.start + datetime.timedelta(days=1)
  else:
    options.end = parse_time_option(options.end)

  url = 'tasks/list?' + urllib.urlencode(
      {
        'start': int((options.start - _EPOCH).total_seconds()),
        'end': int((options.end - _EPOCH).total_seconds()),
      })
  cmd = [
    sys.executable, os.path.join(CLIENT_DIR, 'swarming.py'),
    'query',
    '-S', options.swarming,
    '--json', options.json,
    '--limit', '0',
    '--progress',
    url,
  ]
  if options.verbose:
    cmd.extend(('--verbose', '--verbose', '--verbose'))
  logging.info('%s', ' '.join(cmd))
  subprocess.check_call(cmd)
  print('')


def stats(tasks, show_cost):
  """Calculates and prints statistics about the tasks as a list of JSON encoded
  TaskResultSummary.
  """
  # Split tasks into 3 buckets.
  # - 'rn' means ran, not idempotent
  # - 'ri' means ran, idempotent
  # - 'dd' means deduplicated.
  rn = [
    i for i in tasks
    if not i.get('deduped_from') and not i.get('properties_hash')
  ]
  ri = [
    i for i in tasks if not i.get('deduped_from') and i.get('properties_hash')
  ]
  dd = [i for i in tasks if i.get('deduped_from')]

  # Noteworthy results.
  failures = [i for i in tasks if i.get('failure')]
  internal_failures = [i for i in tasks if i.get('internal_failure')]
  two_tries = [
    i for i in tasks if i.get('try_number') == '2' and not i.get('deduped_from')
  ]
  # TODO(maruel): 'state'

  # Summations.
  duration_rn = sum(i.get('duration', 0.) for i in rn)
  duration_ri = sum(i.get('duration', 0.) for i in ri)
  duration_dd = sum(i.get('duration', 0.) for i in dd)
  duration_total = duration_rn + duration_ri + duration_dd
  cost_rn = sum(sum(i.get('costs_usd') or [0.]) for i in rn)
  cost_ri = sum(sum(i.get('costs_usd') or [0.]) for i in ri)
  cost_dd = sum(i.get('cost_saved_usd', 0.) for i in dd)
  cost_total = cost_rn + cost_ri + cost_dd
  pendings = sorted(
    (parse_time(i['started_ts']) - parse_time(i['created_ts'])).total_seconds()
    for i in tasks if i.get('started_ts') and not i.get('deduped_from')
  )
  pending_total = datetime.timedelta(seconds=round(sum(pendings), 2))
  pending_avg = datetime.timedelta(seconds=round(average(pendings), 2))
  pending_med = datetime.timedelta(seconds=round(median(pendings), 2))
  pending_p99 = datetime.timedelta(seconds=round(percentile(pendings, 99), 2))

  # Calculate percentages to understand load relativeness.
  percent_rn_nb_total = sp(len(rn), len(tasks))
  percent_ri_nb_total = sp(len(ri), len(tasks))
  percent_dd_nb_total = sp(len(dd), len(tasks))
  percent_dd_nb_rel = sp(len(dd), len(ri) + len(dd))
  percent_rn_duration_total = sp(duration_rn, duration_total)
  percent_ri_duration_total = sp(duration_ri, duration_total)
  percent_dd_duration_total = sp(duration_dd, duration_total)
  percent_dd_duration_rel = sp(duration_dd, duration_dd + duration_ri)
  percent_rn_cost_total = sp(cost_rn, cost_total)
  percent_ri_cost_total = sp(cost_ri, cost_total)
  percent_dd_cost_total = sp(cost_dd, cost_total)
  percent_dd_cost_rel = sp(cost_dd, cost_dd + cost_ri)
  reliability = 100. - sp(len(internal_failures), len(tasks))
  percent_failures = sp(len(failures), len(tasks))
  percent_two_tries = sp(len(two_tries), len(tasks))

  # Print results as a table.
  if rn:
    cost = ' %7.2f$ (%5.1f%%)' % (cost_rn, percent_rn_cost_total)
    print(
        ' %6d (%5.1f%%) %18s (%5.1f%%)%s '
        'Real tasks executed, not idempotent' % (
            len(rn), percent_rn_nb_total,
            seconds_to_timedelta(duration_rn), percent_rn_duration_total,
            cost if show_cost else ''))
  if ri:
    cost = ' %7.2f$ (%5.1f%%)' % (cost_ri, percent_ri_cost_total)
    print(
        ' %6d (%5.1f%%) %18s (%5.1f%%)%s '
        'Real tasks executed, idempotent' % (
            len(ri), percent_ri_nb_total,
            seconds_to_timedelta(duration_ri), percent_ri_duration_total,
            cost if show_cost else ''))
  if ri and rn:
    cost = ' %7.2f$ ' % (cost_rn + cost_ri)
    print(
        ' %6d %18s %s '
        'Real tasks executed, all types' % (
            len(rn) + len(ri),
            seconds_to_timedelta(duration_rn + duration_ri),
            cost if show_cost else ''))
  if dd:
    cost = ' %7.2f$*(%5.1f%%)' % (cost_dd, percent_dd_cost_total)
    print(
        ' %6d*(%5.1f%%) %18s*(%5.1f%%)%s *Wasn\'t run, '
        'previous results reused' % (
            len(dd), percent_dd_nb_total,
            seconds_to_timedelta(duration_dd), percent_dd_duration_total,
            cost if show_cost else ''))
    cost = ' (%5.1f%%)' % (percent_dd_cost_rel)
    print(
        ' (%5.1f%%) (%5.1f%%)%s '
        ' (relative to idempotent tasks only)' % (
            percent_dd_nb_rel, percent_dd_duration_rel,
            cost if show_cost else ''))
  if int(bool(rn)) + int(bool(ri)) + int(bool(dd)) > 1:
    cost = ' %7.2f$' % (cost_total)
    print(
        ' %6d %18s%s '
        'Total tasks' % (
            len(tasks), seconds_to_timedelta(duration_total),
            cost if show_cost else ''))
  print(
      ' Reliability: %7g%% Internal errors: %-4d' % (
          reliability, len(internal_failures)))
  print(
      ' Tasks failures: %-4d (%5.3f%%)' % (
          len(failures), percent_failures))
  print(
      ' Retried: %-4d (%5.3f%%) (Upgraded an internal failure '
      'to a successful task)' %
      (len(two_tries), percent_two_tries))
  print(
      ' Pending Total: %13s Avg: %7s Median: %7s P99%%: %7s' % (
          pending_total, pending_avg, pending_med, pending_p99))


def present_task_types(items, bucket_type, show_cost):
  cost = ' Usage Cost $USD' if show_cost else ''
  print(' Nb of Tasks Total Duration%s' % cost)
  buckets = do_bucket(items, bucket_type)
  for index, (bucket, tasks) in enumerate(sorted(buckets.iteritems())):
    if index:
      print('')
    print('%s:' % (bucket or '<None>'))
    stats(tasks, show_cost)
  if buckets:
    print('')
    print('Global:')
    stats(items, show_cost)


def present_users(items):
  users = {}
  for task in items:
    user = ''
    for tag in task['tags']:
      if tag.startswith('user:'):
        if tag[5:]:
          user = tag[5:]
          break
      if tag == 'purpose:CI':
        user = 'CI'
        break
      if tag == 'heartbeat:1':
        user = 'heartbeat'
        break
    if user:
      users.setdefault(user, 0)
      users[user] += 1

  maxlen = max(len(i) for i in users)
  maxusers = 100
  for index, (name, tasks) in enumerate(
      sorted(users.iteritems(), key=lambda x: -x[1])):
    if index == maxusers:
      break
    print('%3d %-*s: %d' % (index + 1, maxlen, name, tasks))


def main():
  parser = optparse.OptionParser(description=sys.modules['__main__'].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
      help='Swarming server to use')
  parser.add_option(
      '--start', help='Starting date in UTC; defaults to 25 hours ago')
  parser.add_option(
      '--end', help='End date in UTC; defaults to --start+1 day')
  parser.add_option(
      '--no-cost', action='store_false', dest='cost', default=True,
      help='Strip $ from display')
  parser.add_option(
      '--users', action='store_true', help='Display top users instead')
  parser.add_option(
      '--json', default='tasks.json',
      help='File containing raw data; default: %default')
  parser.add_option('-v', '--verbose', action='count', default=0)

  group = optparse.OptionGroup(parser, 'Grouping')
  group.add_option(
      '--major-os', action='store_const',
      dest='bucket', const=MAJOR_OS, default=MAJOR_OS,
      help='Classify by OS type, independent of OS version (default)')
  group.add_option(
      '--minor-os', action='store_const',
      dest='bucket', const=MINOR_OS,
      help='Classify by minor OS version')
  group.add_option(
      '--gpu', action='store_const',
      dest='bucket', const=MINOR_OS_GPU,
      help='Classify by minor OS version and GPU type when requested')
  group.add_option(
      '--asan', action='store_const',
      dest='bucket', const=MAJOR_OS_ASAN,
      help='Classify by major OS version and ASAN')
  parser.add_option_group(group)

  options, args = parser.parse_args()
  if args:
    parser.error('Unsupported argument %s' % args)
  logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)

  if options.swarming:
    fetch_data(options)
  elif not os.path.isfile(options.json):
    parser.error('--swarming is required.')

  with open(options.json, 'rb') as f:
    items = json.load(f)['items']
  first = items[-1]
  last = items[0]
  print(
      'From %s to %s (%s)' % (
          first['created_ts'].split('.')[0],
          last['created_ts'].split('.')[0],
          parse_time(last['created_ts']) - parse_time(first['created_ts'])
      ))
  print('')
  if options.users:
    present_users(items)
  else:
    present_task_types(items, options.bucket, options.cost)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| 32.094891 | 80 | 0.626412 | 1,870 | 13,191 | 4.256684 | 0.193583 | 0.008543 | 0.010553 | 0.006156 | 0.227513 | 0.165704 | 0.115327 | 0.104271 | 0.072362 | 0.057035 | 0 | 0.014044 | 0.22811 | 13,191 | 410 | 81 | 32.173171 | 0.767727 | 0.105527 | 0 | 0.199377 | 0 | 0.003115 | 0.187212 | 0 | 0 | 0 | 0 | 0.004878 | 0 | 1 | 0.040498 | false | 0.009346 | 0.024922 | 0.003115 | 0.11215 | 0.05919 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c3702efe624cd5195333a8550b95ce520c60b7 | 1,641 | py | Python | elasticdl/python/tests/embedding_test_module.py | sorrycc/elasticdl | 01439e0bf7bba6ebfffe265916fd41370a59c29d | [
"MIT"
] | 2 | 2021-07-07T16:31:50.000Z | 2021-11-08T09:23:01.000Z | elasticdl/python/tests/embedding_test_module.py | sorrycc/elasticdl | 01439e0bf7bba6ebfffe265916fd41370a59c29d | [
"MIT"
] | null | null | null | elasticdl/python/tests/embedding_test_module.py | sorrycc/elasticdl | 01439e0bf7bba6ebfffe265916fd41370a59c29d | [
"MIT"
] | 1 | 2021-08-18T18:14:38.000Z | 2021-08-18T18:14:38.000Z | import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Dense, Flatten

from elasticdl.python.elasticdl.layers.embedding import Embedding


class CustomModel(tf.keras.Model):
    def __init__(self, output_dim=16):
        super(CustomModel, self).__init__(name="embedding_test_model")
        self.output_dim = output_dim
        self.embedding_1 = Embedding(output_dim)
        self.embedding_2 = Embedding(output_dim)
        self.concat = Concatenate()
        self.dense = Dense(1, input_shape=(output_dim * 3,))
        self.flatten = Flatten()

    def call(self, inputs, training=False):
        f1 = self.embedding_1(inputs["f1"])
        f2 = self.embedding_1(inputs["f2"])
        f3 = self.embedding_2(inputs["f3"])
        x = self.concat([f1, f2, f3])
        x = self.dense(x)
        return self.flatten(x)


def loss(predictions, labels):
    return tf.reduce_mean(tf.square(predictions - labels))


def dataset_fn(dataset, training=True):
    def _parse_data(record):
        feature_description = {
            "f1": tf.io.FixedLenFeature([1], tf.int64),
            "f2": tf.io.FixedLenFeature([1], tf.int64),
            "f3": tf.io.FixedLenFeature([1], tf.int64),
            "label": tf.io.FixedLenFeature([1], tf.int64),
        }
        r = tf.io.parse_single_example(record, feature_description)
        return {"f1": r["f1"], "f2": r["f2"], "f3": r["f3"]}, r["label"]

    dataset = dataset.map(_parse_data)
    return dataset


def optimizer(lr=0.1):
    return tf.optimizers.SGD(lr)


def eval_metrics_fn(predictions, labels):
    return {"mse": tf.reduce_mean(tf.square(predictions - labels))}
| 32.176471 | 72 | 0.642901 | 214 | 1,641 | 4.766355 | 0.327103 | 0.052941 | 0.07451 | 0.078431 | 0.178431 | 0.178431 | 0.072549 | 0 | 0 | 0 | 0 | 0.031783 | 0.213894 | 1,641 | 50 | 73 | 32.82 | 0.758915 | 0 | 0 | 0 | 0 | 0 | 0.034735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.189189 | false | 0 | 0.081081 | 0.081081 | 0.459459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c3d5996e5e43293a4950394b8c3859c6567912 | 2,474 | py | Python | tareas/Proteins.py | phabel-LD/python_class-2 | ac21d0d0e0e46c345dbcdb8cc0f47c11c17bff6b | [
"MIT"
] | null | null | null | tareas/Proteins.py | phabel-LD/python_class-2 | ac21d0d0e0e46c345dbcdb8cc0f47c11c17bff6b | [
"MIT"
] | null | null | null | tareas/Proteins.py | phabel-LD/python_class-2 | ac21d0d0e0e46c345dbcdb8cc0f47c11c17bff6b | [
"MIT"
] | null | null | null | '''
NAME
    ProteinDB.py
VERSION
    [1.0]
AUTHOR
    Daianna Gonzalez Padilla <daianna@lcg.unam.mx>
DESCRIPTION
    This program takes a PDB file and returns certain residues of a protein chain
CATEGORY
    Protein Data Bank files analysis
USAGE
    None
ARGUMENTS
    This program doesn't take arguments
INPUT
    The input of the function is the path to the file, the chain name and the residue name
OUTPUT
    Returns a list whose elements are lists with the residue name and its ID
EXAMPLES
    Example 1: get_them=get_residue('C:/Users/hp/Downloads/1kcw.pdb','A', 'CYS')
               print(get_them)
               and returns [['CYS', 155], ['CYS', 181], ['CYS', 221], ['CYS', 257], ['CYS', 319]
               ['CYS', 338], ['CYS', 515], ['CYS', 541], ['CYS', 618], ['CYS', 680],
               ['CYS', 699], ['CYS', 855], ['CYS', 881], ['CYS', 1021]]
SOURCE
    https://github.com/daianna21/python_class/blob/master/tareas/ProteinDB.py
'''

from Bio import PDB


def get_residue(path, chain_name, res_name):
    """
    This function gets a PDB file, the name of a protein's model chain and a certain residue
    and returns the IDs of those residues in the specified chain
    Parameters:
        path (str): absolute path to the file
        chain_name (str): single letter name of the protein model chain
        res_name (str): three letter name of the residue to search
    Returns:
        residues (list): list of lists with the residue name and the ID of each one
    """
    # List to save the residues
    residues = []
    # Create the parser and ignore warnings
    parser = PDB.PDBParser(QUIET=True)
    # Create the structure from the file
    struct = parser.get_structure('protein', path)
    # For each model, get each chain and search the given one
    for model in struct:
        for chain in model:
            if chain == model[chain_name]:
                # Search the residues in the chain and store them with their IDs
                for residue in chain:
                    if residue.get_resname() == res_name:
                        residues.append([residue.get_resname(), residue.get_id()[1]])
                        # print(residue)
    return(residues)


# Example
get_them = get_residue('C:/Users/hp/Downloads/1kcw.pdb', 'A', 'THR')
print(get_them)
| 35.342857 | 99 | 0.59135 | 329 | 2,474 | 4.392097 | 0.398176 | 0.024913 | 0.029066 | 0.026298 | 0.094118 | 0.094118 | 0.058131 | 0.058131 | 0.058131 | 0.058131 | 0 | 0.030071 | 0.31447 | 2,474 | 69 | 100 | 35.855072 | 0.821934 | 0.694422 | 0 | 0 | 0 | 0 | 0.065183 | 0.047695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c79ba1be51d3a3e2b95f71ea5d3121848438e2 | 2,482 | py | Python | chunkflow/chunk/validate.py | julesberman/chunkflow | c6af0d036bc2f308c64c591d49c94c414c569241 | [
"Apache-2.0"
] | 36 | 2019-03-20T21:58:15.000Z | 2022-03-28T08:40:59.000Z | chunkflow/chunk/validate.py | julesberman/chunkflow | c6af0d036bc2f308c64c591d49c94c414c569241 | [
"Apache-2.0"
] | 96 | 2019-01-23T14:49:18.000Z | 2022-03-21T19:54:20.000Z | chunkflow/chunk/validate.py | julesberman/chunkflow | c6af0d036bc2f308c64c591d49c94c414c569241 | [
"Apache-2.0"
] | 7 | 2019-03-25T19:32:19.000Z | 2021-07-20T19:39:03.000Z | import logging

import numpy as np
from skimage.feature import match_template


def validate_by_template_matching(img: np.ndarray):
    """ Detect 3d black boxes by template matching.

    1. binarize the image. the voxels inside the black box will be false, and the outside will be true
    2. The template is 7x7x2 with one section true and the other false.
    3. sliding the template through the array, and detect the matching regions.
    4. rotate the template to be 7x2x7 and 2x7x7, do the same detection.
    5. if we can find multiple matchings in all the x,y,z direction, there is probably a black box.

    Note that this is not always effective. If the black box is large enough to reach both sides,
    the detection will fail.

    Parameters
    -----------
    img:
        3D image volume.
    """
    logging.info("validation by template matching...")
    if np.issubdtype(img.dtype, np.floating):
        logging.warning(
            'do not support image with floating data type, will skip the validation.'
        )
        return True

    img = img.astype(dtype=np.bool)
    score_threshold = 0.9
    num_threshold = 100
    evidence_point = 0

    temp = np.zeros((7, 7, 2), dtype=np.bool)
    temp[:, :, 0] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    temp = np.zeros((7, 7, 2), dtype=np.bool)
    temp[:, :, 1] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    temp = np.zeros((2, 7, 7), dtype=np.bool)
    temp[0, :, :] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    temp = np.zeros((2, 7, 7), dtype=np.bool)
    temp[1, :, :] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    temp = np.zeros((7, 2, 7), dtype=np.bool)
    temp[:, 0, :] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    temp = np.zeros((7, 2, 7), dtype=np.bool)
    temp[:, 1, :] = True
    result = match_template(img, temp)
    if np.count_nonzero(result > score_threshold) > num_threshold:
        evidence_point += 1

    if evidence_point > 4:
        return False
    else:
        return True
| 32.657895 | 102 | 0.647865 | 358 | 2,482 | 4.385475 | 0.310056 | 0.035669 | 0.049045 | 0.057325 | 0.466242 | 0.466242 | 0.466242 | 0.466242 | 0.466242 | 0.466242 | 0 | 0.028373 | 0.247381 | 2,482 | 75 | 103 | 33.093333 | 0.812099 | 0.249799 | 0 | 0.541667 | 0 | 0 | 0.05814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.0625 | 0 | 0.145833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c93b57b7fde5b9ea9b125108f651e2630bb6e2 | 4,021 | py | Python | tests/conftest.py | xlevus/aiosql | 00f59ab6d6b13d2a2d4a4153c0c03f3b9d601ad0 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/conftest.py | xlevus/aiosql | 00f59ab6d6b13d2a2d4a4153c0c03f3b9d601ad0 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/conftest.py | xlevus/aiosql | 00f59ab6d6b13d2a2d4a4153c0c03f3b9d601ad0 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import csv
import sqlite3
from pathlib import Path
from typing import NamedTuple

import pytest

BLOGDB_PATH = Path(__file__).parent / "blogdb"
USERS_DATA_PATH = BLOGDB_PATH / "data/users_data.csv"
BLOGS_DATA_PATH = BLOGDB_PATH / "data/blogs_data.csv"


def populate_sqlite3_db(db_path):
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    cur.executescript(
        """
        create table users (
            userid integer not null primary key,
            username text not null,
            firstname integer not null,
            lastname text not null
        );
        create table blogs (
            blogid integer not null primary key,
            userid integer not null,
            title text not null,
            content text not null,
            published date not null default CURRENT_DATE,
            foreign key(userid) references users(userid)
        );
        """
    )
    with USERS_DATA_PATH.open() as fp:
        users = list(csv.reader(fp))
        cur.executemany(
            """
            insert into users (
                username,
                firstname,
                lastname
            ) values (?, ?, ?);""",
            users,
        )
    with BLOGS_DATA_PATH.open() as fp:
        blogs = list(csv.reader(fp))
        cur.executemany(
            """
            insert into blogs (
                userid,
                title,
                content,
                published
            ) values (?, ?, ?, ?);""",
            blogs,
        )
    conn.commit()
    conn.close()


@pytest.fixture()
def sqlite3_db_path(tmpdir):
    db_path = str(Path(tmpdir.strpath) / "blogdb.db")
    populate_sqlite3_db(db_path)
    return db_path


@pytest.fixture()
def sqlite3_conn(sqlite3_db_path):
    conn = sqlite3.connect(sqlite3_db_path)
    yield conn
    conn.close()


@pytest.fixture
def pg_conn(postgresql):
    """Runs the sqitch plan and loads seed data before returning db connection."""
    with postgresql:
        # Loads data from blogdb fixture data
        with postgresql.cursor() as cur:
            cur.execute(
                """
                create table users (
                    userid serial not null primary key,
                    username varchar(32) not null,
                    firstname varchar(255) not null,
                    lastname varchar(255) not null
                );"""
            )
            cur.execute(
                """
                create table blogs (
                    blogid serial not null primary key,
                    userid integer not null references users(userid),
                    title varchar(255) not null,
                    content text not null,
                    published date not null default CURRENT_DATE
                );"""
            )
        with postgresql.cursor() as cur:
            with USERS_DATA_PATH.open() as fp:
                cur.copy_from(fp, "users", sep=",", columns=["username", "firstname", "lastname"])
            with BLOGS_DATA_PATH.open() as fp:
                cur.copy_from(
                    fp, "blogs", sep=",", columns=["userid", "title", "content", "published"]
                )
    return postgresql


@pytest.fixture()
def pg_dsn(pg_conn):
    p = pg_conn.get_dsn_parameters()
    return f"postgres://{p['user']}@{p['host']}:{p['port']}/{p['dbname']}"


class UserBlogSummary(NamedTuple):
    title: str
    published: str


@pytest.fixture
def _record_classes():
    return {"UserBlogSummary": UserBlogSummary}


@pytest.fixture(params=["class", "import_path"])
def record_classes(request, _record_classes):
    if request.param == "class":
        return _record_classes
    elif request.param == "import_path":
        return {
            name: f"{klass.__module__}.{klass.__name__}" for name, klass in _record_classes.items()
        }
    raise RuntimeError("Unknown record_class type")
| 28.118881 | 99 | 0.535439 | 415 | 4,021 | 5.026506 | 0.26747 | 0.060403 | 0.033557 | 0.032598 | 0.341323 | 0.193672 | 0.193672 | 0.161074 | 0.088207 | 0.060403 | 0 | 0.007862 | 0.367322 | 4,021 | 142 | 100 | 28.316901 | 0.812107 | 0.028351 | 0 | 0.236111 | 0 | 0 | 0.122151 | 0.04086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097222 | false | 0 | 0.097222 | 0.013889 | 0.319444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c99f94d94f8ae11bef335d3dcfc4aa68248ef5 | 2,321 | py | Python | codes/metrics.py | NilLau/NilLau.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | null | null | null | codes/metrics.py | NilLau/NilLau.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | 6 | 2019-06-20T10:05:10.000Z | 2019-07-08T04:53:01.000Z | codes/metrics.py | 117ami/117ami.github.io | e55768be0be4d6549b24c702554c11e64958d4c7 | [
"MIT"
] | null | null | null | import sklearn
import sklearn.metrics
import collections


def _calculate(y_true, y_pred):
    tp = collections.defaultdict(int)  # True Positive
    fp = collections.defaultdict(int)  # False Positive
    fn = collections.defaultdict(int)  # False Negative
    for l, p in zip(y_true, y_pred):
        if l == p:
            tp[p] += 1
        else:
            fp[p] += 1
            fn[l] += 1
    return tp, fp, fn


def precision_recall(y_true, y_pred):
    tp, fp, fn = _calculate(y_true, y_pred)
    labels = sorted(list(set(y_true)))
    print("{:>10} {:>10} {:>10} {:>10}".format(
        '', 'precision', 'recall', 'f1_score'))
    for l in labels:
        # Be careful, tp[x], fp[x] can be zero at the same time.
        prec = 0 if tp[l] == 0 else tp[l] / (tp[l] + fp[l])  # precision
        rec = tp[l] / (tp[l] + fn[l])  # recall
        f1 = 0 if prec * rec == 0 else 2 * (prec * rec) / (prec + rec)
        print("{:>10} {:>10.4f} {:>10.4f} {:>10.4f}".format(l, prec, rec, f1))


def micro_f1_score(y_true, y_pred):
    tp, fp, fn = _calculate(y_true, y_pred)
    labels = sorted(list(set(y_true)))
    c_tp = sum(tp.values())
    c_fp = sum(fp.values())
    c_fn = sum(fn.values())
    prec = 0 if c_tp == 0 else c_tp / (c_tp + c_fp)
    rec = c_tp / (c_tp + c_fn)
    print("f1_score micro ", 0 if prec * rec ==
          0 else 2 * (prec * rec) / (prec + rec))


def weighted_f1_score(y_true, y_pred):
    tp, fp, fn = _calculate(y_true, y_pred)
    labels = sorted(list(set(y_true)))
    cnt = collections.Counter(y_true)
    weighted_score = 0
    for l in labels:
        prec = 0 if tp[l] == 0 else tp[l] / (tp[l] + fp[l])  # precision
        rec = tp[l] / (tp[l] + fn[l])  # recall
        f1 = 0 if prec * rec == 0 else 2 * (prec * rec) / (prec + rec)
        weighted_score += f1 * cnt[l] / len(y_true)
    print("f1_score_weighted", weighted_score)


y_true = [0, 0, 1, 1, 1, 2, 2, 2]
y_pred = [0, 0, 2, 1, 0, 1, 1, 0]
print(sklearn.metrics.classification_report(y_true, y_pred))
precision_recall(y_true, y_pred)
micro_f1_score(y_true, y_pred)
print("micro average", sklearn.metrics.f1_score(y_true, y_pred, average='micro'))
weighted_f1_score(y_true, y_pred)
print("weighted average", sklearn.metrics.f1_score(y_true, y_pred, average='weighted'))
| 35.166667 | 87 | 0.577768 | 381 | 2,321 | 3.338583 | 0.16273 | 0.078616 | 0.066038 | 0.110063 | 0.488994 | 0.461478 | 0.430031 | 0.375 | 0.375 | 0.375 | 0 | 0.039044 | 0.260664 | 2,321 | 65 | 88 | 35.707692 | 0.702214 | 0.056872 | 0 | 0.264151 | 0 | 0 | 0.073395 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.056604 | 0 | 0.150943 | 0.132075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7c9a78549473f6bf05e499c3c3874ea7ed52f30 | 5,296 | py | Python | jetbot/motor.py | santaimpersonator/jetbot | dc06680316b1d6c6dbf27b93ec695d6a52d0a193 | [
"MIT"
] | null | null | null | jetbot/motor.py | santaimpersonator/jetbot | dc06680316b1d6c6dbf27b93ec695d6a52d0a193 | [
"MIT"
] | null | null | null | jetbot/motor.py | santaimpersonator/jetbot | dc06680316b1d6c6dbf27b93ec695d6a52d0a193 | [
"MIT"
] | null | null | null | # Modified by SparkFun Electronics June 2021
# Author: Wes Furuya
#
# Do you like this code? Help support SparkFun and buy a SparkFun jetbot kit!
# https://www.sparkfun.com/products/15365
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/license>
#
#==================================================================================
# Copyright (c) 2021 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#==================================================================================

import atexit
import qwiic
from Adafruit_MotorHAT import Adafruit_MotorHAT

import traitlets
from traitlets.config.configurable import Configurable

# Scan for devices on I2C bus
addresses = qwiic.scan()


class Motor(Configurable):
    value = traitlets.Float()

    # config
    alpha = traitlets.Float(default_value=1.0).tag(config=True)
    beta = traitlets.Float(default_value=0.0).tag(config=True)

    # Adafruit Hardware
    if 96 in addresses:
        def __init__(self, driver, channel, *args, **kwargs):
            super(Motor, self).__init__(*args, **kwargs)  # initializes traitlets
            self._driver = driver
            self._motor = self._driver.getMotor(channel)
            if(channel == 1):
                self._ina = 1
                self._inb = 0
            else:
                self._ina = 2
                self._inb = 3
            atexit.register(self._release)

        @traitlets.observe('value')
        def _observe_value(self, change):
            self._write_value(change['new'])

        def _write_value(self, value):
            """Sets motor value between [-1, 1]"""
            mapped_value = int(255.0 * (self.alpha * value + self.beta))
            speed = min(max(abs(mapped_value), 0), 255)
            self._motor.setSpeed(speed)
            if mapped_value < 0:
                self._motor.run(Adafruit_MotorHAT.FORWARD)
                # The two lines below are required for the Waveshare JetBot Board only
                self._driver._pwm.setPWM(self._ina, 0, 0)
                self._driver._pwm.setPWM(self._inb, 0, speed*16)
            else:
                self._motor.run(Adafruit_MotorHAT.BACKWARD)
                # The two lines below are required for the Waveshare JetBot Board only
                self._driver._pwm.setPWM(self._ina, 0, speed*16)
                self._driver._pwm.setPWM(self._inb, 0, 0)

        def _release(self):
            """Stops motor by releasing control"""
            self._motor.run(Adafruit_MotorHAT.RELEASE)
            # The two lines below are required for the Waveshare JetBot Board only
            self._driver._pwm.setPWM(self._ina, 0, 0)
            self._driver._pwm.setPWM(self._inb, 0, 0)

    # SparkFun Hardware
    elif 93 in addresses:
        def __init__(self, driver, channel, *args, **kwargs):
            super(Motor, self).__init__(*args, **kwargs)  # initializes traitlets
            self._driver = driver
            atexit.register(self._release)
            self.channel = channel

        @traitlets.observe('value')
        def _observe_value(self, change):
            self._write_value(change['new'])

        def _write_value(self, value):
            """Sets motor value between [-1, 1]"""
            speed = int(255 * (self.alpha * value + self.beta))
            # Set Motor Controls: .set_drive( motor number, direction, speed)
            # Motor Number: A = 0, B = 1
            # Direction: FWD = 0, BACK = 1
            # Speed: (-255) - 255 (neg. values reverse direction of motor)
            if self.channel == 1:
                self._motor = self._driver.set_drive(self.channel-1, 0, speed)
            elif self.channel == 2:
                self._motor = self._driver.set_drive(self.channel-1, 0, speed)
            self._driver.enable()

        def _release(self):
            """Stops motor by releasing control"""
            self._driver.disable()
| 40.427481 | 86 | 0.619335 | 662 | 5,296 | 4.832326 | 0.344411 | 0.04689 | 0.024383 | 0.035636 | 0.374179 | 0.301657 | 0.301657 | 0.301657 | 0.292591 | 0.263832 | 0 | 0.018723 | 0.263784 | 5,296 | 130 | 87 | 40.738462 | 0.801744 | 0.45355 | 0 | 0.440678 | 0 | 0 | 0.005662 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.084746 | 0 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ca3c511e408446381f4b30a33fe34343bdda15 | 594 | py | Python | mafia/roles/mafia/mafiarole.py | Mysteryjuju/MafiaGame | 97d7bd1e494a8288ac14e264c1fcee226db118f1 | [
"MIT"
] | null | null | null | mafia/roles/mafia/mafiarole.py | Mysteryjuju/MafiaGame | 97d7bd1e494a8288ac14e264c1fcee226db118f1 | [
"MIT"
] | null | null | null | mafia/roles/mafia/mafiarole.py | Mysteryjuju/MafiaGame | 97d7bd1e494a8288ac14e264c1fcee226db118f1 | [
"MIT"
] | null | null | null | from mafia.roles.baserole import BaseRole
from mafia.misc.utils import Alignment, Colors


class MafiaRole(BaseRole):
    """
    Mafia role base
    """

    def __init__(self):
        """
        Initializer
        """
        super().__init__()
        self.alignment = Alignment.MAFIA
        self.color = Colors.MAFIA_COLOR
        self.goal = "Kill all the town members and all your opponents."
        self.special_attributes = ["Suggest a target to eliminate with the \"-suggest X\" command",
                                   "You can talk to the Mafia during the night"]
| 28.285714 | 99 | 0.606061 | 69 | 594 | 5.072464 | 0.666667 | 0.051429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.304714 | 594 | 20 | 100 | 29.7 | 0.847458 | 0.045455 | 0 | 0 | 0 | 0 | 0.280303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7cb6d2b04fa5055e612e1c6136d5fe5299b54b6 | 2,883 | py | Python | src/app/crud/hospital.py | FelisCatusKR/DBMS-term-project | bdcf3671b7189df934552df6cfd45464e3b858f3 | [
"MIT"
] | null | null | null | src/app/crud/hospital.py | FelisCatusKR/DBMS-term-project | bdcf3671b7189df934552df6cfd45464e3b858f3 | [
"MIT"
] | null | null | null | src/app/crud/hospital.py | FelisCatusKR/DBMS-term-project | bdcf3671b7189df934552df6cfd45464e3b858f3 | [
"MIT"
] | null | null | null | from typing import List, Optional

from sqlalchemy import cast
from sqlalchemy.orm import Session
from geoalchemy2 import *
from fastapi.encoders import jsonable_encoder

from app.schemas.hospital import HospitalCreate, HospitalUpdate
from app.models.hospital import Hospital


def read(db: Session, hospital_id: int) -> Optional[Hospital]:
    return db.query(Hospital).filter(Hospital.id == hospital_id).first()


def read_by_distance(
    db: Session,
    q: Optional[str],
    lon: float,
    lat: float,
    radius: int,
    skip: int,
    limit: int,
) -> List[Optional[Hospital]]:
    point = f"SRID=4326;POINT({lon} {lat})"
    distance = Hospital.geom.distance_centroid(point)
    if q is None:
        return (
            db.query(Hospital)
            .filter(func.ST_DWithin(cast(Hospital.geom, Geography), cast(point, Geography), radius))
            .order_by(distance)
            .offset(skip)
            .limit(limit)
            .all()
        )
    else:
        return (
            db.query(Hospital)
            .filter(Hospital.name.like(f"%{q}%"))
            .filter(func.ST_DWithin(cast(Hospital.geom, Geography), cast(point, Geography), radius))
            .order_by(distance)
            .offset(skip)
            .limit(limit)
            .all()
        )


def create(db: Session, hospital_in: HospitalCreate) -> Hospital:
    geom = f"SRID=4326;POINT({hospital_in.lon} {hospital_in.lat})"
    db_hospital = Hospital(
        name=hospital_in.name,
        addr=hospital_in.addr,
        tel=hospital_in.tel,
        lon=hospital_in.lon,
        lat=hospital_in.lat,
        geom=geom,
        strCnd=hospital_in.strCnd,
        course_bitmask=hospital_in.course_bitmask,
        dutyTime1s=hospital_in.dutyTime1s,
        dutyTime1c=hospital_in.dutyTime1c,
        dutyTime2s=hospital_in.dutyTime2s,
        dutyTime2c=hospital_in.dutyTime2c,
        dutyTime3s=hospital_in.dutyTime3s,
        dutyTime3c=hospital_in.dutyTime3c,
        dutyTime4s=hospital_in.dutyTime4s,
        dutyTime4c=hospital_in.dutyTime4c,
        dutyTime5s=hospital_in.dutyTime5s,
        dutyTime5c=hospital_in.dutyTime5c,
        dutyTime6s=hospital_in.dutyTime6s,
        dutyTime6c=hospital_in.dutyTime6c,
        dutyTime7s=hospital_in.dutyTime7s,
        dutyTime7c=hospital_in.dutyTime7c,
        dutyTime8s=hospital_in.dutyTime8s,
        dutyTime8c=hospital_in.dutyTime8c,
    )
    db.add(db_hospital)
    db.commit()
    db.refresh(db_hospital)
    return db_hospital


def update(db: Session, hospital: Hospital, hospital_in: HospitalUpdate) -> Hospital:
    hospital_data = jsonable_encoder(hospital)
    update_data = hospital_in.dict(skip_defaults=True)
    for field in hospital_data:
        if field in update_data:
            setattr(hospital, field, update_data[field])
    db.add(hospital)
    db.commit()
    db.refresh(hospital)
    return hospital
| 31.681319 | 100 | 0.661117 | 329 | 2,883 | 5.638298 | 0.279635 | 0.150943 | 0.027493 | 0.033962 | 0.190297 | 0.152022 | 0.114286 | 0.114286 | 0.114286 | 0.114286 | 0 | 0.01867 | 0.238293 | 2,883 | 90 | 101 | 32.033333 | 0.826047 | 0 | 0 | 0.197531 | 0 | 0 | 0.029483 | 0.01873 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049383 | false | 0 | 0.08642 | 0.012346 | 0.197531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ce442a9002b2c8fc1dfecde39741edffd73ae3 | 4,400 | py | Python | master/nouvelle-0.90/nouvelle-0.90/Nouvelle/BaseHTTP.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/nouvelle-0.90/nouvelle-0.90/Nouvelle/BaseHTTP.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/nouvelle-0.90/nouvelle-0.90/Nouvelle/BaseHTTP.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | """ Nouvelle.BaseHTTP
Glue for using Nouvelle with Python's builtin BaseHTTPServer module.
This provides a Page class that lets objects be attached as children
to it, a RequestHandler that dispatches HTTP requests to a root Page,
and a simple main function that makes it quick and easy to start a
server with a particular Page at its root.
"""
#
# Nouvelle web framework
# Copyright (C) 2003-2004 Micah Dowty <micahjd@users.sourceforge.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import Nouvelle
from Nouvelle import tag
import BaseHTTPServer, urlparse
class Page:
"""A web resource that renders a tree of tag instances from its 'document'
attribute, and can have child resources attached to it.
"""
serializerFactory = Nouvelle.Serializer
responseCode = 200
def handleRequest(self, request, args):
"""Given a RequestHandler instance, send back an HTTP response code,
headers, and a rendition of this page.
"""
request.send_response(self.responseCode)
self.sendHeaders(request)
context = {
'owner': self,
'request': request,
'args': args,
}
self.preRender(context)
rendered = str(self.serializerFactory().render(self.document, context))
request.wfile.write(rendered)
def sendHeaders(self, request):
"""Send back HTTP headers for a given request"""
request.send_header('Content-Type', 'text/html')
request.end_headers()
def preRender(self, context):
"""Called prior to rendering each request, subclasses can use this to annotate
'context' with extra information or perform other important setup tasks.
"""
pass
def addChild(self, name, page):
"""Add the given Page instance as a child under this one in the URL tree"""
if not hasattr(self, 'children'):
self.children = {}
self.children[name] = page
def findChild(self, name):
"""Return the named child of this Page. By default this looks in
self.children, and if a page isn't found returns Error404.
"""
if not name:
# Ignore empty path segments
return self
if hasattr(self, 'children') and self.children.has_key(name):
return self.children[name]
return Error404()
class Error404(Page):
"""A 404 error, resource not found"""
responseCode = 404
document = tag('html')[
tag('head')[
tag('title')[ "404 - Resource not found" ],
],
tag('body')[
tag('h1')[ "404" ],
tag('h3')[ "Resource not found" ],
],
]
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
# Parse the path we were given as a URL...
scheme, host, path, parameters, query, fragment = urlparse.urlparse(self.path)
# Find the page corresponding with our URL's path
page = self.rootPage
for segment in path.split("/"):
page = page.findChild(segment)
# Split the query into key-value pairs
args = {}
for pair in query.split("&"):
if pair.find("=") >= 0:
key, value = pair.split("=", 1)
args.setdefault(key, []).append(value)
else:
args[pair] = []
page.handleRequest(self, args)
def main(rootPage, port=8080):
handler = RequestHandler
handler.rootPage = rootPage
BaseHTTPServer.HTTPServer(('', port), handler).serve_forever()
### The End ###
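# A minimal usage sketch with the classes defined above (the page content is
# hypothetical). A Page subclass only needs a 'document' attribute; main() wires
# a RequestHandler to the root page and serves forever.
class Hello(Page):
    document = tag('html')[
        tag('body')[ tag('h1')[ "Hello from Nouvelle" ] ],
    ]

if __name__ == '__main__':
    main(Hello(), port=8080)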
| 34.375 | 86 | 0.624773 | 544 | 4,400 | 5.042279 | 0.435662 | 0.030623 | 0.013124 | 0.02078 | 0.036456 | 0.036456 | 0.02479 | 0 | 0 | 0 | 0 | 0.017817 | 0.285682 | 4,400 | 127 | 87 | 34.645669 | 0.854916 | 0.442045 | 0 | 0.032787 | 0 | 0 | 0.053478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0.016393 | 0.04918 | 0 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7ce8420f210b43e94c8dc6f9cc5ae505b5727f0 | 1,165 | py | Python | 2016/4/rooms.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2016/4/rooms.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2016/4/rooms.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import re
from collections import Counter
from operator import itemgetter
from string import ascii_lowercase
def is_valid_room(name, checksum):
counts = Counter(name.replace('-', ''))
in_order = sorted(sorted(counts.items(), key=itemgetter(0)), key=itemgetter(1), reverse=True)
check = ''.join(a[0] for a in in_order[:5])
return check == checksum
LETTER_TO_LOC = {}
for loc, letter in enumerate(ascii_lowercase):
LETTER_TO_LOC[letter] = loc
def decrypt_name(name, sector):
output = ''
for c in name:
if c == '-':
output += ' '
else:
output += ascii_lowercase[(LETTER_TO_LOC[c]+sector)%26]
return output.strip()
room_re = re.compile(r'([a-z-]+)(\d+)\[(\w{5})\]')
valid_rooms = []
sector_sum = 0
with open('input.txt', 'r') as f:
for line in f:
m = room_re.match(line)
assert m
if is_valid_room(m[1], m[3]):
sector = int(m[2])
sector_sum += int(sector)
valid_rooms.append((decrypt_name(m[1], sector), sector))
print(sector_sum)
for room in valid_rooms:
if 'north' in room[0]:
print(room) | 25.888889 | 97 | 0.612017 | 170 | 1,165 | 4.047059 | 0.423529 | 0.061047 | 0.047965 | 0.063953 | 0.072674 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015748 | 0.23691 | 1,165 | 45 | 98 | 25.888889 | 0.758155 | 0.018026 | 0 | 0 | 0 | 0 | 0.037587 | 0.021853 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.057143 | false | 0 | 0.114286 | 0 | 0.228571 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
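# A quick check of decrypt_name above, using the worked example from the Advent
# of Code 2016 day 4 statement (as best recalled): sector 343 shifts each letter
# by 343 % 26 == 5 positions, and dashes become spaces.
assert decrypt_name('qzmt-zixmtkozy-ivhz', 343) == 'very encrypted name'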
a7cf9f2fc80d8141cb47a3804740140932036277 | 17,900 | py | Python | kmip/core/messages/messages.py | smira/PyKMIP | 54f3688a14bcc442b270765e21732b77d18b2a72 | [
"Apache-2.0"
] | null | null | null | kmip/core/messages/messages.py | smira/PyKMIP | 54f3688a14bcc442b270765e21732b77d18b2a72 | [
"Apache-2.0"
] | null | null | null | kmip/core/messages/messages.py | smira/PyKMIP | 54f3688a14bcc442b270765e21732b77d18b2a72 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.messages import contents
from kmip.core.messages.contents import AsynchronousCorrelationValue
from kmip.core.messages.contents import BatchErrorContinuationOption
from kmip.core.factories.payloads.request import RequestPayloadFactory
from kmip.core.factories.payloads.response import ResponsePayloadFactory
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
class RequestHeader(Struct):
def __init__(self,
protocol_version=None,
maximum_response_size=None,
asynchronous_indicator=None,
authentication=None,
batch_error_cont_option=None,
batch_order_option=None,
time_stamp=None,
batch_count=None):
super(RequestHeader, self).__init__(tag=Tags.REQUEST_HEADER)
self.protocol_version = protocol_version
self.maximum_response_size = maximum_response_size
self.asynchronous_indicator = asynchronous_indicator
self.authentication = authentication
self.batch_error_cont_option = batch_error_cont_option
self.batch_order_option = batch_order_option
self.time_stamp = time_stamp
self.batch_count = batch_count
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RequestHeader, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
self.protocol_version = contents.ProtocolVersion()
self.protocol_version.read(tstream, kmip_version=kmip_version)
# Read the maximum response size if it is present
if self.is_tag_next(Tags.MAXIMUM_RESPONSE_SIZE, tstream):
self.maximum_response_size = contents.MaximumResponseSize()
self.maximum_response_size.read(tstream, kmip_version=kmip_version)
# Read the asynchronous indicator if it is present
if self.is_tag_next(Tags.ASYNCHRONOUS_INDICATOR, tstream):
self.asynchronous_indicator = contents.AsynchronousIndicator()
self.asynchronous_indicator.read(
tstream,
kmip_version=kmip_version
)
# Read the authentication if it is present
if self.is_tag_next(Tags.AUTHENTICATION, tstream):
self.authentication = contents.Authentication()
self.authentication.read(tstream, kmip_version=kmip_version)
# Read the batch error continuation option if it is present
if self.is_tag_next(Tags.BATCH_ERROR_CONTINUATION_OPTION, tstream):
self.batch_error_cont_option = BatchErrorContinuationOption()
self.batch_error_cont_option.read(
tstream,
kmip_version=kmip_version
)
# Read the batch order option if it is present
if self.is_tag_next(Tags.BATCH_ORDER_OPTION, tstream):
self.batch_order_option = contents.BatchOrderOption()
self.batch_order_option.read(tstream, kmip_version=kmip_version)
# Read the time stamp if it is present
if self.is_tag_next(Tags.TIME_STAMP, tstream):
self.time_stamp = contents.TimeStamp()
self.time_stamp.read(tstream, kmip_version=kmip_version)
self.batch_count = contents.BatchCount()
self.batch_count.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the contents of a request header to the stream
self.protocol_version.write(tstream, kmip_version=kmip_version)
if self.maximum_response_size is not None:
self.maximum_response_size.write(
tstream,
kmip_version=kmip_version
)
if self.asynchronous_indicator is not None:
self.asynchronous_indicator.write(
tstream,
kmip_version=kmip_version
)
if self.authentication is not None:
self.authentication.write(tstream, kmip_version=kmip_version)
if self.batch_error_cont_option is not None:
self.batch_error_cont_option.write(
tstream,
kmip_version=kmip_version
)
if self.batch_order_option is not None:
self.batch_order_option.write(tstream, kmip_version=kmip_version)
if self.time_stamp is not None:
self.time_stamp.write(tstream, kmip_version=kmip_version)
self.batch_count.write(tstream, kmip_version=kmip_version)
# Write the length and value of the request header
self.length = tstream.length()
super(RequestHeader, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
class ResponseHeader(Struct):
def __init__(self,
protocol_version=None,
time_stamp=None,
batch_count=None):
super(ResponseHeader, self).__init__(tag=Tags.RESPONSE_HEADER)
self.protocol_version = protocol_version
self.time_stamp = time_stamp
self.batch_count = batch_count
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(ResponseHeader, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
self.protocol_version = contents.ProtocolVersion()
self.protocol_version.read(tstream, kmip_version=kmip_version)
self.time_stamp = contents.TimeStamp()
self.time_stamp.read(tstream, kmip_version=kmip_version)
self.batch_count = contents.BatchCount()
self.batch_count.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the contents of a response header to the stream
self.protocol_version.write(tstream, kmip_version=kmip_version)
self.time_stamp.write(tstream, kmip_version=kmip_version)
self.batch_count.write(tstream, kmip_version=kmip_version)
# Write the length and value of the response header
self.length = tstream.length()
super(ResponseHeader, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
def validate(self):
if self.time_stamp is not None:
# TODO (peter-hamilton) conduct type check
self.time_stamp.validate()
if self.batch_count is not None:
# TODO (peter-hamilton) conduct type check
self.batch_count.validate()
class RequestBatchItem(Struct):
def __init__(self,
operation=None,
unique_batch_item_id=None,
request_payload=None,
message_extension=None):
super(RequestBatchItem, self).__init__(tag=Tags.REQUEST_BATCH_ITEM)
self.payload_factory = RequestPayloadFactory()
self.operation = operation
self.unique_batch_item_id = unique_batch_item_id
self.request_payload = request_payload
self.message_extension = message_extension
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RequestBatchItem, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
# Read the batch item operation
self.operation = contents.Operation()
self.operation.read(tstream, kmip_version=kmip_version)
# Read the unique batch item ID if it is present
if self.is_tag_next(Tags.UNIQUE_BATCH_ITEM_ID, tstream):
self.unique_batch_item_id = contents.UniqueBatchItemID()
self.unique_batch_item_id.read(tstream, kmip_version=kmip_version)
# Dynamically create the request payload class that belongs to the
# operation
self.request_payload = self.payload_factory.create(
self.operation.value)
self.request_payload.read(tstream, kmip_version=kmip_version)
# Read the message extension if it is present
if self.is_tag_next(Tags.MESSAGE_EXTENSION, tstream):
self.message_extension = contents.MessageExtension()
self.message_extension.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the contents of the batch item to the stream
self.operation.write(tstream, kmip_version=kmip_version)
if self.unique_batch_item_id is not None:
self.unique_batch_item_id.write(tstream, kmip_version=kmip_version)
self.request_payload.write(tstream, kmip_version=kmip_version)
if self.message_extension is not None:
self.message_extension.write(tstream, kmip_version=kmip_version)
# Write the length and value of the batch item
self.length = tstream.length()
super(RequestBatchItem, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
class ResponseBatchItem(Struct):
def __init__(self,
operation=None,
unique_batch_item_id=None,
result_status=None,
result_reason=None,
result_message=None,
async_correlation_value=None,
response_payload=None,
message_extension=None):
super(ResponseBatchItem, self).__init__(tag=Tags.RESPONSE_BATCH_ITEM)
self.payload_factory = ResponsePayloadFactory()
self.operation = operation
self.unique_batch_item_id = unique_batch_item_id
self.result_status = result_status
self.result_reason = result_reason
self.result_message = result_message
self.async_correlation_value = async_correlation_value
self.response_payload = response_payload
self.message_extension = message_extension
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(ResponseBatchItem, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
# Read the batch item operation if it is present
if self.is_tag_next(Tags.OPERATION, tstream):
self.operation = contents.Operation()
self.operation.read(tstream, kmip_version=kmip_version)
# Read the unique batch item ID if it is present
if self.is_tag_next(Tags.UNIQUE_BATCH_ITEM_ID, tstream):
self.unique_batch_item_id = contents.UniqueBatchItemID()
self.unique_batch_item_id.read(tstream, kmip_version=kmip_version)
# Read the batch item result status
self.result_status = contents.ResultStatus()
self.result_status.read(tstream, kmip_version=kmip_version)
# Read the batch item result reason if it is present
if self.is_tag_next(Tags.RESULT_REASON, tstream):
self.result_reason = contents.ResultReason()
self.result_reason.read(tstream, kmip_version=kmip_version)
# Read the batch item result message if it is present
if self.is_tag_next(Tags.RESULT_MESSAGE, tstream):
self.result_message = contents.ResultMessage()
self.result_message.read(tstream, kmip_version=kmip_version)
# Read the batch item asynchronous correlation value if it is present
if self.is_tag_next(Tags.ASYNCHRONOUS_CORRELATION_VALUE, tstream):
self.async_correlation_value = AsynchronousCorrelationValue()
self.async_correlation_value.read(
tstream,
kmip_version=kmip_version
)
if self.operation is not None:
# Dynamically create the response payload class that belongs to the
# operation
expected = self.payload_factory.create(self.operation.value)
if self.is_tag_next(expected.tag, tstream):
self.response_payload = expected
self.response_payload.read(tstream, kmip_version=kmip_version)
# Read the message extension if it is present
if self.is_tag_next(Tags.MESSAGE_EXTENSION, tstream):
self.message_extension = contents.MessageExtension()
self.message_extension.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the contents of the batch item to the stream
if self.operation is not None:
self.operation.write(tstream, kmip_version=kmip_version)
if self.unique_batch_item_id is not None:
self.unique_batch_item_id.write(tstream, kmip_version=kmip_version)
self.result_status.write(tstream, kmip_version=kmip_version)
if self.result_reason is not None:
self.result_reason.write(tstream, kmip_version=kmip_version)
if self.result_message is not None:
self.result_message.write(tstream, kmip_version=kmip_version)
if self.async_correlation_value is not None:
self.async_correlation_value.write(
tstream,
kmip_version=kmip_version
)
if self.response_payload is not None:
self.response_payload.write(tstream, kmip_version=kmip_version)
if self.message_extension is not None:
self.message_extension.write(tstream, kmip_version=kmip_version)
# Write the length and value of the batch item
self.length = tstream.length()
super(ResponseBatchItem, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
def validate(self):
pass
class RequestMessage(Struct):
def __init__(self, request_header=None, batch_items=None,):
super(RequestMessage, self).__init__(tag=Tags.REQUEST_MESSAGE)
self.request_header = request_header
self.batch_items = batch_items
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RequestMessage, self).read(
istream,
kmip_version=kmip_version
)
self.request_header = RequestHeader()
self.request_header.read(istream, kmip_version=kmip_version)
self.batch_items = []
for _ in range(self.request_header.batch_count.value):
batch_item = RequestBatchItem()
batch_item.read(istream, kmip_version=kmip_version)
self.batch_items.append(batch_item)
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the request header and all batch items
self.request_header.write(tstream, kmip_version=kmip_version)
for batch_item in self.batch_items:
batch_item.write(tstream, kmip_version=kmip_version)
# Write the TTLV encoding of the request message
self.length = tstream.length()
super(RequestMessage, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
class ResponseMessage(Struct):
def __init__(self, response_header=None, batch_items=None,):
super(ResponseMessage, self).__init__(tag=Tags.RESPONSE_MESSAGE)
self.response_header = response_header
self.batch_items = batch_items
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(ResponseMessage, self).read(
istream,
kmip_version=kmip_version
)
self.response_header = ResponseHeader()
self.response_header.read(istream, kmip_version=kmip_version)
self.batch_items = []
for _ in range(self.response_header.batch_count.value):
batch_item = ResponseBatchItem()
batch_item.read(istream, kmip_version=kmip_version)
self.batch_items.append(batch_item)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
# Write the response header and all batch items
self.response_header.write(tstream, kmip_version=kmip_version)
for batch_item in self.batch_items:
batch_item.write(tstream, kmip_version=kmip_version)
# Write the TTLV encoding of the response message
self.length = tstream.length()
super(ResponseMessage, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer)
def validate(self):
pass
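# A minimal round-trip sketch with the classes above. The ProtocolVersion and
# BatchCount constructor arguments are assumptions about the contents module,
# which is not shown here.
def _roundtrip_example():
    header = RequestHeader(protocol_version=contents.ProtocolVersion(1, 0),
                           batch_count=contents.BatchCount(0))
    message = RequestMessage(request_header=header, batch_items=[])
    ostream = BytearrayStream()
    message.write(ostream)                        # TTLV-encode the whole message
    parsed = RequestMessage()
    parsed.read(BytearrayStream(ostream.buffer))  # parse the same bytes back
    return parsed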
| 38.744589 | 79 | 0.672961 | 2,099 | 17,900 | 5.489757 | 0.084802 | 0.137464 | 0.085915 | 0.126009 | 0.714918 | 0.681593 | 0.63716 | 0.610778 | 0.575458 | 0.542827 | 0 | 0.002405 | 0.256648 | 17,900 | 461 | 80 | 38.828633 | 0.863595 | 0.121397 | 0 | 0.535604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002169 | 0 | 1 | 0.065015 | false | 0.006192 | 0.027864 | 0 | 0.111455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d37c4fbaba187b1d8c21abcc7fcbbd619740c7 | 1,852 | py | Python | spree/rest/traversal/fields.py | spreecode/python-spree-rest | 877bd2c5dc8fc7efc6c04675939f5b389e5ffd24 | [
"MIT"
] | null | null | null | spree/rest/traversal/fields.py | spreecode/python-spree-rest | 877bd2c5dc8fc7efc6c04675939f5b389e5ffd24 | [
"MIT"
] | null | null | null | spree/rest/traversal/fields.py | spreecode/python-spree-rest | 877bd2c5dc8fc7efc6c04675939f5b389e5ffd24 | [
"MIT"
] | null | null | null | """
This module contains :mod:`marshmallow` fields that are designed
to work with the Pyramid Traversal approach of :mod:`spree.rest.traversal` module.
"""
from marshmallow import fields
class NodeRef(fields.Field):
"""Field that takes the value from ``self.context['node'].ref``.
It is only processed on load: a ``load_only`` parameter is forced,
as well as a ``missing`` parameter, which is set to ``True``
in order to always run the deserialization method.
"""
def __init__(self, *args, **kwargs):
"""
You can pass any parameters acceptable for a generic :class:`fields.Field`,
except a few of them, which are:
``load_only``: always set to True; serialization should instead be supported
by creating a specific schema that overrides this field with a specific, typed one.
``missing``: always set to True, in order to always trigger this field's
deserialization.
``required``: always set to True.
:param args:
:param kwargs:
"""
kwargs.update({
'load_only': True,
'missing': True,
'required': True
})
super(NodeRef, self).__init__(*args, **kwargs)
def _deserialize(self, value, attr, data):
"""
Returns the value of ``self.context['node'].ref``, which is expected to be
the ``ref`` attribute of a :class:`spree.rest.traversal.endpoints.APIEntityEndpoint`.
:param value: Value for deserialization, most likely the ``missing`` ``True`` value.
This value is not used.
:param attr: Name of the deserialized field
:param data: All deserialized data
:return: :class:`APIEndpoint` ``ref`` value
:rtype: str
"""
return self.context['node'].ref
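# A minimal usage sketch (the schema and node object are hypothetical). Depending
# on the marshmallow version, load() returns either a plain dict or a
# (data, errors) pair; per the docstring above, the field should yield the
# traversal node's ``ref`` either way.
from marshmallow import Schema

class EntitySchema(Schema):
    """Hypothetical schema illustrating NodeRef."""
    parent_ref = NodeRef()

def load_with_node(node, payload):
    # 'node' is expected to be a traversal endpoint exposing a ``ref`` attribute.
    schema = EntitySchema()
    schema.context = {'node': node}
    return schema.load(payload)   # deserialized data carries node.ref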
| 37.04 | 99 | 0.618251 | 230 | 1,852 | 4.926087 | 0.443478 | 0.030891 | 0.031774 | 0.047661 | 0.092674 | 0.092674 | 0.042365 | 0 | 0 | 0 | 0 | 0 | 0.274838 | 1,852 | 49 | 100 | 37.795918 | 0.843634 | 0.681425 | 0 | 0 | 0 | 0 | 0.071066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d43ff738d4e7d8fe031340508bb25b108d9d48 | 48,990 | py | Python | qiskit_chemistry/parser/_inputparser.py | dongreenberg/qiskit-chemistry | 93dd3374056054eeff5557285fe62c46daef248f | [
"Apache-2.0"
] | null | null | null | qiskit_chemistry/parser/_inputparser.py | dongreenberg/qiskit-chemistry | 93dd3374056054eeff5557285fe62c46daef248f | [
"Apache-2.0"
] | null | null | null | qiskit_chemistry/parser/_inputparser.py | dongreenberg/qiskit-chemistry | 93dd3374056054eeff5557285fe62c46daef248f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_chemistry import QiskitChemistryError
from qiskit_chemistry.drivers import local_drivers, get_driver_configuration
import json
import os
from collections import OrderedDict
import logging
import copy
import pprint
import ast
from qiskit_aqua import (local_pluggables_types,
PluggableType,
get_pluggable_configuration,
local_pluggables,
get_backends_from_provider)
from qiskit_aqua.parser import JSONSchema
from qiskit_chemistry.core import local_chemistry_operators, get_chemistry_operator_configuration
logger = logging.getLogger(__name__)
class InputParser(object):
"""Common input file parser."""
OPERATOR = 'operator'
DRIVER = 'driver'
AUTO_SUBSTITUTIONS = 'auto_substitutions'
_OLD_ENABLE_SUBSTITUTIONS = 'enable_substitutions'
_START_COMMENTS = ['#', '%']
_START_SECTION = '&'
_END_SECTION = '&end'
_PROPVALUE_SEPARATOR = '='
_OPTIMIZER = 'optimizer'
_VARIATIONAL_FORM = 'variational_form'
_UNKNOWN = 'unknown'
_HDF5_INPUT = 'hdf5_input'
_DRIVER_NAMES = None
_DEFAULT_PROPERTY_ORDER = [JSONSchema.NAME, _UNKNOWN]
_BACKEND_PROPERTY_ORDER = [JSONSchema.PROVIDER, JSONSchema.NAME, _UNKNOWN]
def __init__(self, input=None):
"""Create InputParser object."""
self._sections = OrderedDict()
self._original_sections = OrderedDict()
self._filename = None
self._inputdict = None
if input is not None:
if isinstance(input, dict):
self._inputdict = input
elif isinstance(input, str):
self._filename = input
else:
raise QiskitChemistryError("Invalid parser input type.")
self._section_order = [JSONSchema.NAME, JSONSchema.PROBLEM,
InputParser.DRIVER, InputParser._UNKNOWN,
InputParser.OPERATOR, PluggableType.ALGORITHM.value]
for pluggable_type in local_pluggables_types():
if pluggable_type not in [PluggableType.INPUT, PluggableType.ALGORITHM]:
self._section_order.append(pluggable_type.value)
self._section_order.append(JSONSchema.BACKEND)
jsonfile = os.path.join(os.path.dirname(__file__), 'substitutions.json')
with open(jsonfile) as json_file:
self._substitutions = json.load(json_file)
self._json_schema = JSONSchema(os.path.join(
os.path.dirname(__file__), 'input_schema.json'))
# copy the algorithm, backend and problem sections from the Aqua schema
self._json_schema.copy_section_from_aqua_schema(PluggableType.ALGORITHM.value)
self._json_schema.copy_section_from_aqua_schema(JSONSchema.BACKEND)
self._json_schema.copy_section_from_aqua_schema(JSONSchema.PROBLEM)
self._json_schema.schema['properties'][JSONSchema.PROBLEM]['properties'][InputParser.AUTO_SUBSTITUTIONS] = {
"type": "boolean",
"default": "true"
}
self._json_schema.populate_problem_names()
self._json_schema.commit_changes()
# logger.debug('Resolved Schema Input: {}'.format(json.dumps(self._json_schema.schema, sort_keys=True, indent=4)))
def _order_sections(self, sections):
sections_sorted = OrderedDict(sorted(list(sections.items()),
key=lambda x: self._section_order.index(
x[0])
if x[0] in self._section_order else self._section_order.index(InputParser._UNKNOWN)))
for section, values in sections_sorted.items():
if not self.section_is_driver(section) and 'properties' in values and isinstance(values['properties'], dict):
_property_order = InputParser._BACKEND_PROPERTY_ORDER if section == JSONSchema.BACKEND else InputParser._DEFAULT_PROPERTY_ORDER
sections_sorted[section]['properties'] = OrderedDict(sorted(list(values['properties'].items()),
key=lambda x: _property_order.index(x[0])
if x[0] in _property_order
else _property_order.index(InputParser._UNKNOWN)))
return sections_sorted
def parse(self):
"""Parse the data."""
if self._inputdict is None:
if self._filename is None:
raise QiskitChemistryError("Missing input file")
section = None
self._sections = OrderedDict()
contents = ''
with open(self._filename, 'rt', encoding="utf8", errors='ignore') as f:
for line in f:
contents += line
section = self._process_line(section, line)
contents = contents.strip().replace('\n', '').replace('\r', '')
if not self._sections and len(contents) > 0:
# check if the input file contained a dictionary literal
try:
v = ast.literal_eval(contents)
if isinstance(v, dict):
self._inputdict = json.loads(json.dumps(v))
self._load_parser_from_dict()
except:
pass
else:
self._load_parser_from_dict()
# check for old enable_substitutions name
old_enable_substitutions = self.get_section_property(JSONSchema.PROBLEM, InputParser._OLD_ENABLE_SUBSTITUTIONS)
if old_enable_substitutions is not None:
self.delete_section_property(JSONSchema.PROBLEM, InputParser._OLD_ENABLE_SUBSTITUTIONS)
self.set_section_property(JSONSchema.PROBLEM, InputParser.AUTO_SUBSTITUTIONS, old_enable_substitutions)
self._json_schema.update_backend_schema()
self._json_schema.update_pluggable_input_schemas(self)
self._update_driver_input_schemas()
self._update_operator_input_schema()
self._sections = self._order_sections(self._sections)
self._original_sections = copy.deepcopy(self._sections)
def _load_parser_from_dict(self):
self._sections = OrderedDict()
for section_name, value in self._inputdict.items():
section_name = JSONSchema.format_section_name(section_name).lower()
self._sections[section_name] = OrderedDict()
self._sections[section_name]['properties'] = OrderedDict()
self._sections[section_name]['data'] = ''
if isinstance(value, dict):
for k, v in value.items():
self._sections[section_name]['properties'][k] = v
contents = ''
properties = self._sections[section_name]['properties']
lastIndex = len(properties) - 1
for i, (k, v) in enumerate(properties.items()):
contents += '{}{}{}'.format(k,
InputParser._PROPVALUE_SEPARATOR, v)
if i < lastIndex:
contents += '\n'
self._sections[section_name]['data'] = contents
elif isinstance(value, list) or isinstance(value, str):
lines = []
if isinstance(value, list):
lines = value
self._sections[section_name]['data'] = '\n'.join(
str(e) for e in value)
else:
lines = value.splitlines()
self._sections[section_name]['data'] = value
for line in lines:
k, v = self._get_key_value(line)
if k is not None and v is not None:
self._sections[section_name]['properties'][k] = v
else:
raise QiskitChemistryError(
"Invalid parser input type for section {}".format(section_name))
def is_modified(self):
"""
Returns true if data has been changed
"""
original_section_names = set(self._original_sections.keys())
section_names = set(self._sections.keys())
if original_section_names != section_names:
return True
for section_name in section_names:
original_section = self._original_sections[section_name]
section = self._sections[section_name]
if self.section_is_text(section_name):
original_data = original_section['data'] if 'data' in original_section else None
data = section['data'] if 'data' in section else None
if original_data != data:
return True
else:
original_properties = original_section['properties'] if 'properties' in original_section else None
properties = section['properties'] if 'properties' in section else None
if original_properties != properties:
return True
return False
@staticmethod
def is_pluggable_section(section_name):
section_name = JSONSchema.format_section_name(section_name)
for pluggable_type in local_pluggables_types():
if section_name == pluggable_type.value:
return True
return False
def get_section_types(self, section_name):
return self._json_schema.get_section_types(section_name)
def get_property_types(self, section_name, property_name):
return self._json_schema.get_property_types(section_name, property_name)
def get_default_sections(self):
properties = self._json_schema.get_default_sections()
driver_name = self.get_section_property(
InputParser.DRIVER, JSONSchema.NAME)
if driver_name is not None:
properties[driver_name.lower()] = {
"type": "object"
}
return properties
def get_default_section_names(self):
sections = self.get_default_sections()
return list(sections.keys()) if sections is not None else []
def get_section_default_properties(self, section_name):
return self._json_schema.get_section_default_properties(section_name)
def allows_additional_properties(self, section_name):
return self._json_schema.allows_additional_properties(section_name)
def get_property_default_values(self, section_name, property_name):
return self._json_schema.get_property_default_values(section_name, property_name)
def get_property_default_value(self, section_name, property_name):
return self._json_schema.get_property_default_value(section_name, property_name)
def get_filename(self):
"""Return the filename."""
return self._filename
@staticmethod
def get_operator_problems(input_name):
config = get_chemistry_operator_configuration(input_name)
if 'problems' in config:
return config['problems']
return []
@staticmethod
def get_algorithm_problems(algo_name):
return JSONSchema.get_algorithm_problems(algo_name)
def _update_operator_input_schema(self):
# find operator
default_name = self.get_property_default_value(InputParser.OPERATOR, JSONSchema.NAME)
operator_name = self.get_section_property(InputParser.OPERATOR, JSONSchema.NAME, default_name)
if operator_name is None:
# find the first valid input for the problem
problem_name = self.get_section_property(
JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(
JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError(
"No algorithm 'problem' section found on input.")
for name in local_chemistry_operators():
if problem_name in self.get_operator_problems(name):
# set to the first operator that solves the problem
operator_name = name
break
if operator_name is None:
# just remove from the schema if none solves the problem
if InputParser.OPERATOR in self._json_schema.schema['properties']:
del self._json_schema.schema['properties'][InputParser.OPERATOR]
return
if default_name is None:
default_name = operator_name
config = {}
try:
config = get_chemistry_operator_configuration(operator_name)
except:
pass
input_schema = config['input_schema'] if 'input_schema' in config else {}
properties = input_schema['properties'] if 'properties' in input_schema else {}
properties[JSONSchema.NAME] = {'type': 'string'}
required = input_schema['required'] if 'required' in input_schema else []
additionalProperties = input_schema['additionalProperties'] if 'additionalProperties' in input_schema else True
if default_name is not None:
properties[JSONSchema.NAME]['default'] = default_name
required.append(JSONSchema.NAME)
if InputParser.OPERATOR not in self._json_schema.schema['properties']:
self._json_schema.schema['properties'][InputParser.OPERATOR] = {
'type': 'object'}
self._json_schema.schema['properties'][InputParser.OPERATOR]['properties'] = properties
self._json_schema.schema['properties'][InputParser.OPERATOR]['required'] = required
self._json_schema.schema['properties'][InputParser.OPERATOR]['additionalProperties'] = additionalProperties
def _merge_dependencies(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is None:
return
config = get_pluggable_configuration(PluggableType.ALGORITHM, algo_name)
pluggable_dependencies = [] if 'depends' not in config else config['depends']
pluggable_defaults = {} if 'defaults' not in config else config['defaults']
for pluggable_type in local_pluggables_types():
if pluggable_type not in [PluggableType.INPUT, PluggableType.ALGORITHM] and \
pluggable_type.value not in pluggable_dependencies:
# remove pluggables from input that are not in the dependencies
if pluggable_type.value in self._sections:
del self._sections[pluggable_type.value]
section_names = self.get_section_names()
for pluggable_type in pluggable_dependencies:
pluggable_name = None
new_properties = {}
if pluggable_type in pluggable_defaults:
for key, value in pluggable_defaults[pluggable_type].items():
if key == JSONSchema.NAME:
pluggable_name = pluggable_defaults[pluggable_type][key]
else:
new_properties[key] = value
if pluggable_name is None:
continue
if pluggable_type not in section_names:
self.set_section(pluggable_type)
if self.get_section_property(pluggable_type, JSONSchema.NAME) is None:
self.set_section_property(pluggable_type, JSONSchema.NAME, pluggable_name)
if pluggable_name == self.get_section_property(pluggable_type, JSONSchema.NAME):
properties = self.get_section_properties(pluggable_type)
if new_properties:
new_properties.update(properties)
else:
new_properties = properties
self.set_section_properties(pluggable_type, new_properties)
def _update_driver_input_schemas(self):
# find driver name
default_name = self.get_property_default_value(InputParser.DRIVER, JSONSchema.NAME)
driver_name = self.get_section_property(InputParser.DRIVER, JSONSchema.NAME, default_name)
if driver_name is not None:
driver_name = driver_name.strip().lower()
for name in local_drivers():
name_orig = name
name = name.lower()
if driver_name is not None and driver_name == name:
config = get_driver_configuration(name_orig)
input_schema = copy.deepcopy(config['input_schema']) if 'input_schema' in config else {'type': 'object'}
if '$schema' in input_schema:
del input_schema['$schema']
if 'id' in input_schema:
del input_schema['id']
self._json_schema.schema['properties'][driver_name] = input_schema
else:
if name in self._json_schema.schema['properties']:
del self._json_schema.schema['properties'][name]
@staticmethod
def _load_driver_names():
if InputParser._DRIVER_NAMES is None:
InputParser._DRIVER_NAMES = [name.lower() for name in local_drivers()]
def _merge_default_values(self):
section_names = self.get_section_names()
if JSONSchema.NAME not in section_names:
self.set_section(JSONSchema.NAME)
if PluggableType.ALGORITHM.value in section_names:
if JSONSchema.PROBLEM not in section_names:
self.set_section(JSONSchema.PROBLEM)
self._json_schema.update_backend_schema()
self._json_schema.update_pluggable_input_schemas(self)
self._merge_dependencies()
self._update_driver_sections()
self._update_driver_input_schemas()
self._update_operator_input_schema()
# do not merge any pluggable that doesn't have a default name in the schema
default_section_names = []
pluggable_type_names = [pluggable_type.value for pluggable_type in local_pluggables_types()]
for section_name in self.get_default_section_names():
if section_name in pluggable_type_names:
if self.get_property_default_value(section_name, JSONSchema.NAME) is not None:
default_section_names.append(section_name)
else:
default_section_names.append(section_name)
section_names = set(self.get_section_names()) | set(default_section_names)
for section_name in section_names:
if section_name not in self._sections:
self.set_section(section_name)
new_properties = self.get_section_default_properties(section_name)
if new_properties is not None:
if self.section_is_text(section_name):
text = self.get_section_text(section_name)
if (text is None or len(text) == 0) and \
isinstance(new_properties, str) and \
len(new_properties) > 0 and \
text != new_properties:
self.set_section_data(section_name, new_properties)
else:
properties = self.get_section_properties(section_name)
new_properties.update(properties)
self.set_section_properties(section_name, new_properties)
self._sections = self._order_sections(self._sections)
def validate_merge_defaults(self):
self._merge_default_values()
self._json_schema.validate(self.to_JSON())
self._validate_algorithm_problem()
self._validate_operator_problem()
def _validate_algorithm_problem(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is None:
return
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
problems = InputParser.get_algorithm_problems(algo_name)
if problem_name not in problems:
raise QiskitChemistryError("Problem: {} not in the list of problems: {} for algorithm: {}.".format(
problem_name, problems, algo_name))
def _validate_operator_problem(self):
operator_name = self.get_section_property(InputParser.OPERATOR, JSONSchema.NAME)
if operator_name is None:
return
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
problems = InputParser.get_operator_problems(operator_name)
if problem_name not in problems:
raise QiskitChemistryError(
"Problem: {} not in the list of problems: {} for operator: {}.".format(problem_name, problems, operator_name))
def to_JSON(self):
json_dict = OrderedDict()
for section_name in self.get_section_names():
if self.section_is_text(section_name):
json_dict[section_name] = self.get_section_text(section_name)
else:
json_dict[section_name] = self.get_section_properties(
section_name)
return json_dict
def to_dictionary(self):
dict = OrderedDict()
for section_name in self.get_section_names():
if self.section_is_text(section_name):
dict[section_name] = self.get_section_text(section_name).splitlines()
else:
dict[section_name] = self.get_section_properties(section_name)
return dict
def commit_changes(self):
self._original_sections = copy.deepcopy(self._sections)
def save_to_file(self, file_name):
if file_name is None:
raise QiskitChemistryError('Missing file path')
file_name = file_name.strip()
if len(file_name) == 0:
raise QiskitChemistryError('Missing file path')
prev_filename = self.get_filename()
sections = copy.deepcopy(self.get_sections())
if prev_filename is not None:
prev_dirname = os.path.dirname(os.path.realpath(prev_filename))
dirname = os.path.dirname(os.path.realpath(file_name))
if prev_dirname != dirname:
InputParser._from_relative_to_abs_paths(
sections, prev_filename)
contents = ''
lastIndex = len(sections) - 1
for i, (section_name, section) in enumerate(sections.items()):
contents += '{}{}'.format(InputParser._START_SECTION, section_name)
if self.section_is_text(section_name):
value = section['data']
if value is not None:
contents += '\n{}'.format(str(value))
else:
if 'properties' in section:
for k, v in section['properties'].items():
contents += '\n {}{}{}'.format(
k, InputParser._PROPVALUE_SEPARATOR, str(v))
contents += '\n{}'.format(InputParser._END_SECTION)
if i < lastIndex:
contents += '\n\n'
with open(file_name, 'w') as f:
print(contents, file=f)
def export_dictionary(self, file_name):
if file_name is None:
raise QiskitChemistryError('Missing file path')
file_name = file_name.strip()
if len(file_name) == 0:
raise QiskitChemistryError('Missing file path')
value = json.loads(json.dumps(self.to_dictionary()))
value = pprint.pformat(value, indent=4)
with open(file_name, 'w') as f:
print(value, file=f)
@staticmethod
def _from_relative_to_abs_paths(sections, filename):
directory = os.path.dirname(filename)
for _, section in sections.items():
if 'properties' in section:
for key, value in section['properties'].items():
if key == InputParser._HDF5_INPUT:
if value is not None and not os.path.isabs(value):
value = os.path.abspath(
os.path.join(directory, value))
InputParser._set_section_property(
sections, section[JSONSchema.NAME], key, value, ['string'])
def section_is_driver(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
InputParser._load_driver_names()
return section_name in InputParser._DRIVER_NAMES
def section_is_text(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
types = self.get_section_types(section_name)
if len(types) > 0:
return 'string' in types
return False
def get_sections(self):
return self._sections
def get_section(self, section_name):
"""Return a Section by name.
Args:
section_name (str): the name of the section, case insensitive
Returns:
Section: The section with this name
Raises:
QiskitChemistryError: if the section does not exist.
"""
section_name = JSONSchema.format_section_name(section_name).lower()
try:
return self._sections[section_name]
except KeyError:
raise QiskitChemistryError('No section "{0}"'.format(section_name))
def get_section_text(self, section_name):
section = self.get_section(section_name)
if section is None:
return ''
if 'data' in section:
return section['data']
return ''
def get_section_properties(self, section_name):
section = self.get_section(section_name)
if section is None:
return {}
if 'properties' in section:
return section['properties']
return {}
def get_section_property(self, section_name, property_name, default_value=None):
"""Return a property by name.
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
default_value : default value in case it is not found
Returns:
Value: The property value
"""
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
if section_name in self._sections:
section = self._sections[section_name]
if 'properties' in section and property_name in section['properties']:
return section['properties'][property_name]
return default_value
def get_section_data(self, section_name, default_value=None):
"""
Return a section's data.
Args:
section_name (str): the name of the section, case insensitive
default_value : default value in case it is not found
Returns:
Value: data value
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name in self._sections:
section = self._sections[section_name]
if 'data' in section:
return section['data']
return default_value
def set_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name not in self._sections:
self._sections[section_name] = OrderedDict(
[(JSONSchema.NAME, section_name)])
self._sections[section_name]['properties'] = OrderedDict()
self._sections[section_name]['data'] = ''
self._sections = self._order_sections(self._sections)
def delete_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name not in self._sections:
return
del self._sections[section_name]
# update schema
self._json_schema.rollback_changes()
self._json_schema.update_backend_schema()
self._json_schema.update_pluggable_input_schemas(self)
self._update_driver_input_schemas()
self._update_operator_input_schema()
def set_section_properties(self, section_name, properties):
self.delete_section_properties(section_name)
for property_name, value in properties.items():
self.set_section_property(section_name, property_name, value)
def set_section_property(self, section_name, property_name, value):
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
value = self._json_schema.check_property_value(section_name, property_name, value)
types = self.get_property_types(section_name, property_name)
parser_temp = copy.deepcopy(self)
InputParser._set_section_property(parser_temp._sections, section_name, property_name, value, types)
msg = self._json_schema.validate_property(parser_temp.to_JSON(), section_name, property_name)
if msg is not None:
raise QiskitChemistryError("{}.{}: Value '{}': '{}'".format(section_name, property_name, value, msg))
# check if this provider is loadable and valid
if JSONSchema.BACKEND == section_name and property_name == JSONSchema.PROVIDER:
get_backends_from_provider(value)
InputParser._set_section_property(self._sections, section_name, property_name, value, types)
if property_name == JSONSchema.NAME:
if InputParser.OPERATOR == section_name:
self._update_operator_input_schema()
# remove properties that are not valid for this section
default_properties = self.get_section_default_properties(section_name)
if isinstance(default_properties, dict):
properties = self.get_section_properties(section_name)
for property_name in list(properties.keys()):
if property_name != JSONSchema.NAME and property_name not in default_properties:
self.delete_section_property(section_name, property_name)
elif JSONSchema.PROBLEM == section_name:
self._update_algorithm_problem()
self._update_operator_problem()
elif JSONSchema.BACKEND == section_name:
self._json_schema.update_backend_schema()
elif InputParser.is_pluggable_section(section_name):
self._json_schema.update_pluggable_input_schemas(self)
# remove properties that are not valid for this section
default_properties = self.get_section_default_properties(section_name)
if isinstance(default_properties, dict):
properties = self.get_section_properties(section_name)
for property_name in list(properties.keys()):
if property_name != JSONSchema.NAME and property_name not in default_properties:
self.delete_section_property(section_name, property_name)
if section_name == PluggableType.ALGORITHM.value:
self._update_dependency_sections()
elif value is not None:
value = str(value).lower().strip()
if len(value) > 0 and self.section_is_driver(value):
self._update_driver_input_schemas()
self._update_driver_sections()
self._sections = self._order_sections(self._sections)
def _update_algorithm_problem(self):
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is not None and problem_name in InputParser.get_algorithm_problems(algo_name):
return
for algo_name in local_pluggables(PluggableType.ALGORITHM):
if problem_name in self.get_algorithm_problems(algo_name):
# set to the first algorithm to solve the problem
self.set_section_property(
PluggableType.ALGORITHM.value, JSONSchema.NAME, algo_name)
return
# no algorithm solves this problem, so remove the section
self.delete_section(PluggableType.ALGORITHM.value)
def _update_operator_problem(self):
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
operator_name = self.get_section_property(
InputParser.OPERATOR, JSONSchema.NAME)
if operator_name is not None and problem_name in InputParser.get_operator_problems(operator_name):
return
for operator_name in local_chemistry_operators():
if problem_name in self.get_operator_problems(operator_name):
# set to the first operator that solves the problem
self.set_section_property(InputParser.OPERATOR, JSONSchema.NAME, operator_name)
return
# no operator solves this problem, so remove the section
self.delete_section(InputParser.OPERATOR)
def _update_dependency_sections(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
config = {} if algo_name is None else get_pluggable_configuration(PluggableType.ALGORITHM, algo_name)
classical = config['classical'] if 'classical' in config else False
pluggable_dependencies = [] if 'depends' not in config else config['depends']
pluggable_defaults = {} if 'defaults' not in config else config['defaults']
for pluggable_type in local_pluggables_types():
# remove pluggables from input that are not in the dependencies
if pluggable_type not in [PluggableType.INPUT, PluggableType.ALGORITHM] and \
pluggable_type.value not in pluggable_dependencies and \
pluggable_type.value in self._sections:
del self._sections[pluggable_type.value]
for pluggable_type in pluggable_dependencies:
pluggable_name = None
if pluggable_type in pluggable_defaults:
if JSONSchema.NAME in pluggable_defaults[pluggable_type]:
pluggable_name = pluggable_defaults[pluggable_type][JSONSchema.NAME]
if pluggable_name is not None and pluggable_type not in self._sections:
self.set_section_property(pluggable_type, JSONSchema.NAME, pluggable_name)
# update default values for new dependency pluggable types
self.set_section_properties(pluggable_type, self.get_section_default_properties(pluggable_type))
# update the backend section based on whether the algorithm is classical
if classical:
if JSONSchema.BACKEND in self._sections:
del self._sections[JSONSchema.BACKEND]
else:
if JSONSchema.BACKEND not in self._sections:
self.set_section_properties(JSONSchema.BACKEND, self.get_section_default_properties(JSONSchema.BACKEND))
# reorder sections
self._sections = self._order_sections(self._sections)
def _update_driver_sections(self):
driver_name = self.get_section_property(InputParser.DRIVER, JSONSchema.NAME)
if driver_name is not None:
driver_name = driver_name.strip().lower()
for name in local_drivers():
name = name.lower()
if driver_name is not None and driver_name == name:
continue
if name in self._sections:
del self._sections[name]
if driver_name is not None and driver_name not in self._sections:
self.set_section(driver_name)
value = self.get_section_default_properties(driver_name)
if isinstance(value, dict):
for property_name, property_value in value.items():
self.set_section_property(
driver_name, property_name, property_value)
else:
if value is None:
types = self.get_section_types(driver_name)
if 'null' not in types:
if 'string' in types:
value = ''
elif 'object' in types:
value = {}
elif 'array' in types:
value = []
self.set_section_data(driver_name, value)
@staticmethod
def _set_section_property(sections, section_name, property_name, value, types):
"""
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
value : property value
types : schema valid types
"""
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
value = JSONSchema.get_value(value, types)
if section_name not in sections:
sections[section_name] = OrderedDict(
[(JSONSchema.NAME, section_name)])
if 'properties' not in sections[section_name]:
sections[section_name]['properties'] = OrderedDict()
# name should come first
if JSONSchema.NAME == property_name and property_name not in sections[section_name]['properties']:
new_dict = OrderedDict([(property_name, value)])
new_dict.update(sections[section_name]['properties'])
sections[section_name]['properties'] = new_dict
else:
sections[section_name]['properties'][property_name] = value
# rebuild data
contents = ''
properties = sections[section_name]['properties']
lastIndex = len(properties) - 1
for i, (key, value) in enumerate(properties.items()):
contents += '{}{}{}'.format(key,
InputParser._PROPVALUE_SEPARATOR, value)
if i < lastIndex:
contents += '\n'
sections[section_name]['data'] = contents
def delete_section_property(self, section_name, property_name):
"""
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
"""
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
rebuild_data = False
if section_name in self._sections and \
'properties' in self._sections[section_name] and \
property_name in self._sections[section_name]['properties']:
del self._sections[section_name]['properties'][property_name]
rebuild_data = True
if rebuild_data:
contents = ''
properties = self._sections[section_name]['properties']
lastIndex = len(properties) - 1
for i, (key, value) in enumerate(properties.items()):
contents += '{}{}{}'.format(key,
InputParser._PROPVALUE_SEPARATOR, value)
if i < lastIndex:
contents += '\n'
self._sections[section_name]['data'] = contents
def delete_section_properties(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name in self._sections:
del self._sections[section_name]
def set_section_data(self, section_name, value):
"""
Sets a section's data.
Args:
section_name (str): the name of the section, case insensitive
value : value to set
"""
section_name = JSONSchema.format_section_name(section_name).lower()
value = self._json_schema.check_section_value(section_name, value)
self._sections[section_name] = OrderedDict(
[(JSONSchema.NAME, section_name)])
self._sections[section_name]['data'] = value
properties = OrderedDict()
if value is not None:
lines = str(value).splitlines()
for line in lines:
k, v = self._get_key_value(line)
if k is not None and v is not None:
properties[k] = v
self._sections[section_name]['properties'] = properties
def delete_section_data(self, section_name):
"""
Deletes a section's data.
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name in self._sections:
self._sections[section_name]['data'] = ''
self._sections[section_name]['properties'] = OrderedDict()
def get_section_names(self):
"""Return all the names of the sections."""
return list(self._sections.keys())
def is_substitution_allowed(self):
auto_substitutions = self.get_property_default_value(
JSONSchema.PROBLEM, InputParser.AUTO_SUBSTITUTIONS)
auto_substitutions = self.get_section_property(
JSONSchema.PROBLEM, InputParser.AUTO_SUBSTITUTIONS, auto_substitutions)
if auto_substitutions is None:
auto_substitutions = True
return auto_substitutions
def check_if_substitution_key(self, section_name, property_names):
result = [(property_name, False) for property_name in property_names]
if not self.is_substitution_allowed():
return result
section_name = JSONSchema.format_section_name(section_name).lower()
property_names = [JSONSchema.format_property_name(
property_name) for property_name in property_names]
section_property_name = self.get_property_default_value(
section_name, JSONSchema.NAME)
section_property_name = self.get_section_property(
section_name, JSONSchema.NAME, section_property_name)
for key in self._substitutions.keys():
key_items = key.split('.')
if len(key_items) == 3 and \
key_items[0] == section_name and \
key_items[1] == section_property_name and \
key_items[2] in property_names:
result[property_names.index(key_items[2])] = (
key_items[2], True)
continue
return result
def process_substitutions(self, substitutions=None):
if substitutions is not None and not isinstance(substitutions, dict):
raise QiskitChemistryError(
'Invalid substitution parameter: {}'.format(substitutions))
if not self.is_substitution_allowed():
return {}
result = {}
for key, value in self._substitutions.items():
key_items = key.split('.')
if len(key_items) != 3:
raise QiskitChemistryError(
'Invalid substitution key: {}'.format(key))
name = self.get_property_default_value(
key_items[0], JSONSchema.NAME)
name = self.get_section_property(
key_items[0], JSONSchema.NAME, name)
if name != key_items[1]:
continue
value_set = False
value_items = value.split('.')
if len(value_items) == 3:
name = self.get_section_property(
value_items[0], JSONSchema.NAME)
if name == value_items[1]:
v = self.get_property_default_value(
value_items[0], value_items[2])
v = self.get_section_property(
value_items[0], value_items[2], v)
if v is not None:
self.set_section_property(
key_items[0], key_items[2], v)
result[key] = v
value_set = True
if value_set or substitutions is None:
continue
if value in substitutions:
self.set_section_property(
key_items[0], key_items[2], substitutions[value])
result[key] = substitutions[value]
return result
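    # Substitution keys and values follow a dotted "section.name.property"
    # pattern; e.g. (illustrative names only) the key
    # 'operator.hamiltonian.qubit_mapping' with value
    # 'algorithm.vqe.qubit_mapping' copies the algorithm's setting into the
    # operator section whenever auto-substitution is enabled.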
def _process_line(self, section, line):
stripLine = line.strip()
if len(stripLine) == 0:
if section is not None:
section['data'].append(line)
return section
if stripLine.lower().startswith(InputParser._END_SECTION):
if section is not None:
self._sections[section[JSONSchema.NAME]
] = self._process_section(section)
return None
if stripLine.startswith(InputParser._START_SECTION):
if section is not None:
                raise QiskitChemistryError(
                    'New section "{0}" starting before the end of previous section "{1}"'.format(
                        line, section[JSONSchema.NAME]))
return OrderedDict([(JSONSchema.NAME, stripLine[1:].lower()), ('data', [])])
if section is None:
return section
section['data'].append(line)
return section
def _process_section(self, section):
contents = ''
sep_pos = -len(os.linesep)
lastIndex = len(section['data']) - 1
for i, line in enumerate(section['data']):
key, value = self._get_key_value(line)
if key is not None and value is not None:
if 'properties' not in section:
section['properties'] = OrderedDict()
section['properties'][key] = value
if i == lastIndex:
if len(line) >= len(os.linesep) and line[sep_pos:] == os.linesep:
line = line[:sep_pos]
contents += line
section['data'] = contents
return section
@staticmethod
def _get_key_value(line):
stripLine = line.strip()
pos = -1
for start_comment in InputParser._START_COMMENTS:
pos = stripLine.find(start_comment)
if pos >= 0:
break
if pos == 0:
return (None, None)
if pos > 0:
stripLine = stripLine[:pos].strip()
pos = stripLine.find(InputParser._PROPVALUE_SEPARATOR)
if pos > 0:
key = stripLine[0:pos].strip()
value = stripLine[pos + 1:].strip()
return (key, JSONSchema.get_value(value))
return (None, None)
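# A minimal illustration of the property-line format parsed above (assuming
# '=' is InputParser._PROPVALUE_SEPARATOR and '#' is among _START_COMMENTS):
#
#     InputParser._get_key_value('  basis = sto3g  # inline comment')
#     # -> ('basis', 'sto3g')
#     InputParser._get_key_value('# full-line comment')
#     # -> (None, None)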
| 43.162996 | 143 | 0.621025 | 5,316 | 48,990 | 5.445636 | 0.058315 | 0.075996 | 0.02273 | 0.023041 | 0.608829 | 0.523438 | 0.454731 | 0.399841 | 0.344364 | 0.310926 | 0 | 0.001861 | 0.298142 | 48,990 | 1,134 | 144 | 43.201058 | 0.840071 | 0.067238 | 0 | 0.386445 | 0 | 0 | 0.041109 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070155 | false | 0.002378 | 0.014269 | 0.009512 | 0.174792 | 0.004756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d594f1ab2c2cdb126fc52377da1fe717c592c1 | 18,281 | py | Python | text/src/autogluon/text/text_prediction/modules/basic_prediction.py | RuohanW/autogluon | fa349db5e75a18cd3af7d9d3f1064eb34e92aca1 | [
"Apache-2.0"
] | 1 | 2020-03-20T08:01:35.000Z | 2020-03-20T08:01:35.000Z | text/src/autogluon/text/text_prediction/modules/basic_prediction.py | RuohanW/autogluon | fa349db5e75a18cd3af7d9d3f1064eb34e92aca1 | [
"Apache-2.0"
] | null | null | null | text/src/autogluon/text/text_prediction/modules/basic_prediction.py | RuohanW/autogluon | fa349db5e75a18cd3af7d9d3f1064eb34e92aca1 | [
"Apache-2.0"
] | 2 | 2021-02-13T04:41:33.000Z | 2021-07-10T07:14:59.000Z | import numpy as np
import mxnet as mx
from mxnet.gluon import nn, HybridBlock
from mxnet.util import use_np
from autogluon_contrib_nlp.utils.config import CfgNode
from autogluon_contrib_nlp.layers import get_activation, get_norm_layer
from .. import constants as _C
@use_np
class BasicMLP(HybridBlock):
def __init__(self, in_units,
mid_units,
out_units,
num_layers=1,
normalization='layer_norm',
norm_eps=1E-5,
dropout=0.1,
data_dropout=False,
activation='leaky',
weight_initializer=None,
bias_initializer=None,
prefix=None, params=None):
"""
data -> [dropout] * (0/1) -> [Dense -> Normalization -> ACT] * N -> dropout -> Dense -> out
Parameters
----------
in_units
mid_units
out_units
num_layers
Number of intermediate layers
normalization
norm_eps
dropout
activation
"""
super().__init__(prefix=prefix, params=params)
self.in_units = in_units
self.data_dropout = data_dropout
if mid_units < 0:
mid_units = in_units
with self.name_scope():
self.proj = nn.HybridSequential()
with self.proj.name_scope():
if num_layers > 0 and data_dropout:
self.proj.add(nn.Dropout(dropout))
for i in range(num_layers):
self.proj.add(nn.Dense(units=mid_units,
in_units=in_units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_bias=False))
self.proj.add(get_norm_layer(normalization,
axis=-1,
epsilon=norm_eps,
in_channels=mid_units))
self.proj.add(get_activation(activation))
in_units = mid_units
self.proj.add(nn.Dropout(dropout))
self.proj.add(nn.Dense(units=out_units,
in_units=in_units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
flatten=False))
def hybrid_forward(self, F, x):
return self.proj(x)
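# A minimal instantiation sketch for the block above (all shapes illustrative):
#
#     mlp = BasicMLP(in_units=64, mid_units=128, out_units=10, num_layers=2)
#     mlp.initialize()
#     out = mlp(mx.np.ones((4, 64)))   # -> shape (4, 10)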
@use_np
class CategoricalFeatureNet(HybridBlock):
def __init__(self, num_class, out_units, cfg=None, prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
if cfg is None:
cfg = CategoricalFeatureNet.get_cfg()
else:
cfg = CategoricalFeatureNet.get_cfg().clone_merge(cfg)
self.cfg = cfg
embed_initializer = mx.init.create(*cfg.initializer.embed)
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
with self.name_scope():
self.embedding = nn.Embedding(input_dim=num_class,
output_dim=cfg.emb_units,
weight_initializer=embed_initializer)
self.proj = BasicMLP(in_units=cfg.emb_units,
mid_units=cfg.mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
data_dropout=cfg.data_dropout,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.emb_units = 32
cfg.mid_units = 64
cfg.num_layers = 1
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'leaky'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.embed = ['xavier', 'gaussian', 'in', 1.0]
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
return cfg
else:
raise NotImplementedError
def hybrid_forward(self, F, feature):
embed = self.embedding(feature)
return self.proj(embed)
@use_np
class NumericalFeatureNet(HybridBlock):
def __init__(self, input_shape, out_units, cfg=None, prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
if cfg is None:
cfg = NumericalFeatureNet.get_cfg()
self.input_shape = input_shape
self.need_first_reshape = isinstance(input_shape, (list, tuple)) and len(input_shape) != 1
self.in_units = int(np.prod(input_shape))
self.cfg = NumericalFeatureNet.get_cfg().clone_merge(cfg)
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
with self.name_scope():
if self.cfg.input_centering:
self.data_bn = nn.BatchNorm(in_channels=self.in_units)
self.proj = BasicMLP(in_units=self.in_units,
mid_units=cfg.mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
data_dropout=cfg.data_dropout,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.input_centering = False
cfg.mid_units = 128
cfg.num_layers = 1
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'leaky'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
else:
raise NotImplementedError
return cfg
def hybrid_forward(self, F, features):
if self.need_first_reshape:
features = F.np.reshape(features, (-1, self.in_units))
if self.cfg.input_centering:
features = self.data_bn(features)
return self.proj(features)
@use_np
class FeatureAggregator(HybridBlock):
def __init__(self, num_fields, out_shape, in_units,
cfg=None, prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
if cfg is None:
cfg = FeatureAggregator.get_cfg()
self.cfg = FeatureAggregator.get_cfg().clone_merge(cfg)
self.num_fields = num_fields
if isinstance(out_shape, list):
out_shape = tuple(out_shape)
self.out_shape = out_shape
self.in_units = in_units
weight_initializer = mx.init.create(*self.cfg.initializer.weight)
bias_initializer = mx.init.create(*self.cfg.initializer.bias)
out_units = int(np.prod(out_shape))
with self.name_scope():
if self.cfg.agg_type == 'mean':
in_units = in_units
elif self.cfg.agg_type == 'concat':
in_units = in_units * num_fields
else:
raise NotImplementedError
mid_units = in_units if cfg.mid_units < 0 else cfg.mid_units
self.proj = BasicMLP(in_units=in_units,
mid_units=mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
dropout=cfg.dropout,
data_dropout=cfg.data_dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.agg_type = 'concat'
cfg.mid_units = -1
cfg.num_layers = 0
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'tanh'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
else:
raise NotImplementedError
return cfg
def hybrid_forward(self, F, field_proj_features):
"""
Parameters
----------
field_proj_features
List of projection features. All elements must have the same shape.
Returns
-------
scores
Shape (batch_size,) + out_shape
"""
        if len(field_proj_features) == 1:
            # a single field needs no aggregation
            agg_features = field_proj_features[0]
else:
if self.cfg.agg_type == 'mean':
agg_features = F.np.stack(field_proj_features)
agg_features = F.np.mean(agg_features, axis=0)
elif self.cfg.agg_type == 'concat':
agg_features = F.np.concatenate(field_proj_features, axis=-1)
else:
# TODO(sxjscience) May try to implement more advanced pooling methods for
# multimodal data.
raise NotImplementedError
scores = self.proj(agg_features)
if len(self.out_shape) != 1:
scores = F.np.reshape(scores, (-1,) + self.out_shape)
return scores
@use_np
class BERTForTabularBasicV1(HybridBlock):
"""The basic model for tabular classification + regression with
BERT (and its variants like ALBERT, MobileBERT, ELECTRA, etc.)
as the backbone for handling text data.
Here, we use the backbone network to extract the contextual embeddings and use
another dense layer to map the contextual embeddings to the class scores.
Input:
TextField + EntityField --> TextNet -------> TextFeature
...
CategoricalField --> CategoricalNet --> CategoricalFeature ==> AggregateNet --> logits/scores
...
NumericalField ----> NumericalNet ----> NumericalFeature
"""
def __init__(self, text_backbone,
feature_field_info,
label_shape=None,
cfg=None,
prefix=None,
params=None):
"""
Parameters
----------
text_backbone
Backbone network for handling the text data
feature_field_info
The field information of the training data. Each will be a tuple:
- (field_type, attributes)
label_shape
The shape of the label/number of classes. If we need a scalar, it will be an empty tuple "()".
cfg
The configuration of the network
prefix
params
"""
super().__init__(prefix=prefix, params=params)
self.cfg = BERTForTabularBasicV1.get_cfg()
if cfg is not None:
self.cfg = self.cfg.clone_merge(cfg)
assert self.cfg.text_net.pool_type == 'cls'
feature_units = self.cfg.feature_units
if feature_units == -1:
feature_units = text_backbone.units
if isinstance(label_shape, int):
out_shape = (label_shape,)
elif label_shape is None:
out_shape = ()
else:
out_shape = label_shape
with self.name_scope():
self.text_backbone = text_backbone
self.feature_field_info = feature_field_info
self.categorical_fields = []
self.numerical_fields = []
self.categorical_networks = None
self.numerical_network = None
numerical_elements = None
num_features = 1
for i, (field_type_code, field_attrs) in enumerate(self.feature_field_info):
if field_type_code == _C.CATEGORICAL:
if self.categorical_networks is None:
self.categorical_networks = nn.HybridSequential()
with self.categorical_networks.name_scope():
self.categorical_networks.add(
CategoricalFeatureNet(num_class=field_attrs['prop'].num_class,
out_units=feature_units,
                                                  cfg=self.cfg.categorical_net))  # merged cfg handles cfg=None
num_features += 1
elif field_type_code == _C.NUMERICAL:
if numerical_elements is None:
numerical_elements = int(np.prod(field_attrs['prop'].shape))
else:
numerical_elements += int(np.prod(field_attrs['prop'].shape))
if numerical_elements is not None:
self.numerical_network = NumericalFeatureNet(input_shape=(numerical_elements,),
out_units=feature_units,
                                                         cfg=self.cfg.numerical_net)
num_features += 1
else:
self.numerical_network = None
self.agg_layer = FeatureAggregator(num_fields=num_features,
out_shape=out_shape,
in_units=feature_units,
                                           cfg=self.cfg.agg_net)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.feature_units = -1 # -1 means not given and we will use the units of BERT
# TODO(sxjscience) Use a class to store the TextNet
cfg.text_net = CfgNode()
cfg.text_net.use_segment_id = True
cfg.text_net.pool_type = 'cls'
cfg.agg_net = FeatureAggregator.get_cfg()
cfg.categorical_net = CategoricalFeatureNet.get_cfg()
cfg.numerical_net = NumericalFeatureNet.get_cfg()
cfg.initializer = CfgNode()
cfg.initializer.weight = ['truncnorm', 0, 0.02]
cfg.initializer.bias = ['zeros']
return cfg
else:
raise NotImplementedError
def initialize_with_pretrained_backbone(self, backbone_params_path, ctx=None):
self.text_backbone.load_parameters(backbone_params_path, ctx=ctx)
self.agg_layer.initialize(ctx=ctx)
if self.categorical_networks is not None:
self.categorical_networks.initialize(ctx=ctx)
if self.numerical_network is not None:
self.numerical_network.initialize(ctx=ctx)
def hybrid_forward(self, F, features):
"""
Parameters
----------
features
A list of field data
Returns
-------
logits_or_scores
Shape (batch_size,) + out_shape
"""
field_features = []
text_contextual_features = dict()
categorical_count = 0
numerical_samples = []
for i, (field_type_code, field_attrs) in enumerate(self.feature_field_info):
if field_type_code == _C.TEXT:
batch_token_ids, batch_valid_length, batch_segment_ids, _ = features[i]
if self.cfg.text_net.use_segment_id:
contextual_embedding, pooled_output = self.text_backbone(batch_token_ids,
batch_segment_ids,
batch_valid_length)
else:
contextual_embedding = self.text_backbone(batch_token_ids, batch_valid_length)
pooled_output = contextual_embedding[:, 0, :]
text_contextual_features[i] = contextual_embedding
field_features.append(pooled_output)
elif field_type_code == _C.ENTITY:
# TODO Implement via segment-pool
raise NotImplementedError('Currently not supported')
elif field_type_code == _C.CATEGORICAL:
batch_sample = features[i]
extracted_feature = self.categorical_networks[categorical_count](batch_sample)
categorical_count += 1
field_features.append(extracted_feature)
elif field_type_code == _C.NUMERICAL:
batch_sample = features[i]
numerical_samples.append(F.np.reshape(
batch_sample, (-1, int(np.prod(field_attrs['prop'].shape)))))
if len(numerical_samples) > 0:
if len(numerical_samples) == 1:
numerical_feature = self.numerical_network(numerical_samples[0])
else:
numerical_feature = self.numerical_network(F.np.concatenate(numerical_samples,
axis=-1))
field_features.append(numerical_feature)
return self.agg_layer(field_features)
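# A minimal wiring sketch (illustrative; it assumes `backbone` is a pretrained
# text network exposing a `.units` attribute, and that `field_info` matches the
# training data, e.g. [(_C.TEXT, {...}), (_C.CATEGORICAL, {'prop': ...})]):
#
#     model = BERTForTabularBasicV1(backbone, field_info, label_shape=3)
#     model.initialize_with_pretrained_backbone('backbone.params', ctx=mx.cpu())
#     logits = model(features)   # -> shape (batch_size, 3)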
| 42.415313 | 106 | 0.537936 | 1,851 | 18,281 | 5.061588 | 0.133441 | 0.020173 | 0.014089 | 0.01046 | 0.451169 | 0.386914 | 0.301206 | 0.277511 | 0.248586 | 0.23898 | 0 | 0.006204 | 0.382802 | 18,281 | 430 | 107 | 42.513953 | 0.82416 | 0.091789 | 0 | 0.4375 | 0 | 0 | 0.013802 | 0 | 0 | 0 | 0 | 0.006977 | 0.002976 | 1 | 0.044643 | false | 0 | 0.020833 | 0.002976 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d7ae3d758a461732da2b916d6af6e8af35be88 | 11,676 | py | Python | mobilib/area.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | mobilib/area.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | mobilib/area.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | """Tools for areal spatial objects (primarily polygons)."""
import collections
import math
import operator
from typing import List, Tuple, Optional, Dict, Iterable
import numpy as np
import pandas as pd
import geopandas as gpd
import shapely.ops
import shapely.geometry
import sklearn.cluster
from mobilib.core import AnyPolygon
def equalize_polygons(polygons: gpd.GeoSeries,
target_area: float,
subdivisions: Optional[gpd.GeoSeries] = None,
unsafe_geom: bool = False,
) -> Tuple[gpd.GeoSeries, pd.DataFrame]:
"""Merge and/or split polygons so that their surface area closely matches the target figure.
The criterion being minimized is ``abs(1 - area / target_area)``.
:param polygons: Polygons to adjust. If a polygon is larger than target_area
and subdivisions are available, it is split into its subdivisions, which
are reaggregated into compact shapes to match target_area.
If a polygon is smaller than target_area, it is aggregated with its
neighbors.
:param target_area: An areal measure in the units of the polygons' CRS
that should be closely matched by the output polygons.
:param subdivisions: If given, large polygons will be subdivided into
aggregates of these units. This would usually be one of the lower
hierarchical levels of division (e.g. U.S. counties when polygons are
U.S. states).
:param unsafe_geom: Account for the fact that the geometries of polygons
and subdivisions do not match exactly; use boundaries of polygons
where possible, boundaries of subdivisions to subdivide them.
:returns: A GeoSeries with the newly created polygons, and a DataFrame
mapping its index (id) to the index of the original polygons (orig_id).
"""
# target_area = polygons.area.quantile(.5)
print('target area', target_area)
# --- polygon aggregation
agg_poly = gpd.GeoSeries(
aggregate(polygons.tolist(), target_area),
crs=polygons.crs,
)
# --- polygon splitting
areas = agg_poly.area
area_quots = (areas / target_area).round(0)
subdiv_map = gpd.sjoin(
gpd.GeoDataFrame(geometry=subdivisions.representative_point()),
gpd.GeoDataFrame(geometry=agg_poly),
how='inner',
op='intersects'
)['index_right']
ok_polygons = agg_poly[area_quots <= 1]
poly_i = len(ok_polygons)
out_id_map = list(zip(range(poly_i), ok_polygons.index))
subdiv_polygons = []
print(poly_i, end=' \r')
for too_big_id, too_big_poly in agg_poly[area_quots > 1].items():
agg_subdivs = aggregate(
subdivisions[subdiv_map[subdiv_map == too_big_id].index].tolist(),
target_area,
)
if unsafe_geom:
agg_subdivs = match_geometry(agg_subdivs, too_big_poly)
subdiv_polygons.extend(agg_subdivs)
out_id_map.extend(zip(
range(poly_i, poly_i + len(agg_subdivs)),
[too_big_id] * len(agg_subdivs)
))
poly_i += len(agg_subdivs)
print(poly_i, end=' \r')
return gpd.GeoSeries(pd.concat((
ok_polygons,
gpd.GeoSeries(subdiv_polygons, crs=ok_polygons.crs),
), ignore_index=True)), pd.DataFrame.from_records(out_id_map, columns=['id', 'orig_id'])
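# A minimal usage sketch for the function above (names illustrative; both
# layers are assumed to share a projected CRS with metric units):
#
#     new_polys, id_map = equalize_polygons(
#         states, target_area=5e10, subdivisions=counties,
#     )
#     # `id_map` maps each new polygon id back to its originating state.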
def match_geometry(subdivisions: List[AnyPolygon],
main_polygon: AnyPolygon,
) -> List[AnyPolygon]:
"""Match the subdivisions so that they fully subdivide the main polygon, without outreach."""
if np.isclose(main_polygon.area, sum(subdiv.area for subdiv in subdivisions)):
return subdivisions
main_polygon = main_polygon.buffer(0)
cut_subdivs = [subdiv.intersection(main_polygon) for subdiv in subdivisions]
remnant = main_polygon.difference(shapely.ops.unary_union(subdivisions))
if remnant.is_empty:
return cut_subdivs
else:
# return cut_subdivs + [remnant]
# break the remnant down to individual components and eliminate them
remnant_parts = remnant.geoms if hasattr(remnant, 'geoms') else [remnant]
subdiv_comps = {}
for remnant_part in remnant_parts:
max_inters_len = -1
to_merge_subdiv_i = 0
for i, cut_subdiv in enumerate(cut_subdivs):
inters_len = cut_subdiv.intersection(remnant_part).length
if inters_len > max_inters_len:
max_inters_len = inters_len
to_merge_subdiv_i = i
subdiv_comps.setdefault(to_merge_subdiv_i, []).append(remnant_part)
return [
shapely.ops.unary_union([cut_subdiv] + subdiv_comps.get(i, []))
for i, cut_subdiv in enumerate(cut_subdivs)
]
def aggregate(polygons: List[AnyPolygon],
target_area: float,
areas: Optional[List[float]] = None,
neighbour_table: Optional[List[Tuple[int, int]]] = None,
) -> List[AnyPolygon]:
if areas is None:
areas = [poly.area for poly in polygons]
max_addable_area = target_area * 1.5
tot_weight = sum(a if a < max_addable_area else max_addable_area for a in areas)
    # guard against a zero cluster count for very small total weights
    tgt_n = max(1, int(round(tot_weight / target_area)))
    if len(polygons) <= tgt_n:
        return polygons
    elif tgt_n == 1:
        # wrap in a list to honour the declared List[AnyPolygon] return type
        return [shapely.ops.unary_union(polygons)]
else:
group_labels = _centroid_kmeans([poly.centroid for poly in polygons], tgt_n, areas)
# TODO here, the neighbour table would be used to optimize the aggregates
        return _union_groups(polygons, _create_groups(group_labels))
def _to_neighbour_dict(neighbour_table: List[Tuple[int, int]]) -> Dict[int, List[int]]:
neighbour_dict = {}
for i1, i2 in neighbour_table:
neighbour_dict.setdefault(i1, []).append(i2)
neighbour_dict.setdefault(i2, []).append(i1)
return neighbour_dict
def _centroid_kmeans(centroids: List[shapely.geometry.Point],
tgt_n: int,
weights: List[float],
                     ) -> List[int]:
coors = np.array([point.coords[0] for point in centroids])
clusterer = sklearn.cluster.KMeans(n_clusters=tgt_n, random_state=1711)
return clusterer.fit_predict(coors, sample_weight=np.array(weights)).tolist()
def _create_groups(group_labels: List[int]) -> List[List[int]]:
groups = [[] for i in range(max(group_labels) + 1)]
for i, label in enumerate(group_labels):
groups[label].append(i)
return groups
def _optimize_aggregates(polygons: List[AnyPolygon],
group_labels: List[int],
neighbour_dict: Dict[int, List[int]],
target_area: float,
) -> List[int]:
groups = _create_groups(group_labels)
neighbour_edges = _neighbour_edge_lengths(polygons, neighbour_dict)
aggregates = _union_groups(polygons, groups)
candidates = list(sorted(_aggregate_updates(
polygons, groups, group_labels, aggregates, neighbour_edges, target_area
), key=operator.itemgetter(1)))
raise NotImplementedError
# candidates.sort(key=operator.itemgetter(1))
# while candidates:
# # get the best change and perform it
# best_change, best_crit = candidates.pop()
# move_i, agg_from_i, agg_to_i = best_change
# labels[move_i] = agg_to_i
# aggregates[agg_from_i] = aggregates[agg_from_i].difference(polygons[move_i])
# aggregates[agg_to_i] = aggregates[agg_to_i].union(polygons[move_i])
# update = (agg_from_i, agg_to_i)
# # remove all changes concerning these two aggregates
# candidates = [
# (change, crit) for change, crit in candidates
# if change[1] not in update and change[2] not in update
# ]
# # add them back
# for i1, i2 in neighbour_table:
# TODO
def _aggregate_updates(polygons: List[AnyPolygon],
groups: List[List[int]],
group_labels: List[int],
aggregates: List[AnyPolygon],
neighbour_edges: Dict[int, Dict[int, float]],
target_area: float,
) -> Iterable[Tuple[Tuple[int, int, int], float]]:
# find all changes that are for the better
for grp1_i, group1 in enumerate(groups):
for poly1_i in group1:
poly_cf = polygons[poly1_i].length
poly_area = polygons[poly1_i].area
inward_cf = 0
outward_cfs = collections.defaultdict(float)
for poly2_i, edge_len in neighbour_edges[poly1_i].items():
if group_labels[poly2_i] == grp1_i:
inward_cf += edge_len
else:
outward_cfs[group_labels[poly2_i]] += edge_len
for grp2_i, outward_cf in outward_cfs.items():
# criterion for moving poly1_i from grp1_i to grp2_i
crit_delta = aggregation_criterion_delta(
area=poly_area,
cf=poly_cf,
inward_cf=inward_cf,
outward_cf=outward_cf,
from_agg=aggregates[grp1_i],
to_agg=aggregates[grp2_i],
target_area=target_area,
)
                # lower criterion is better, so a negative delta is an improvement
                if crit_delta < 0:
                    yield (poly1_i, grp1_i, grp2_i), crit_delta
def aggregation_criterion(polygon: AnyPolygon, target_area: float) -> float:
    """Deviation from the target area, weighted by shape non-compactness.

    The second factor divides the polygon perimeter by the perimeter of a
    circle of equal area (inverse Polsby-Popper compactness), so compact
    shapes whose area is close to target_area score lowest.
    """
    area = polygon.area
    return abs(1 - area / target_area) * polygon.length / (2 * math.sqrt(math.pi * area))
def aggregation_criterion_delta(area: float,
cf: float,
inward_cf: float,
outward_cf: float,
from_agg: AnyPolygon,
to_agg: AnyPolygon,
target_area: float,
) -> float:
from_area_after = from_agg.area - area
to_area_after = to_agg.area + area
return (
(
abs(1 - from_area_after / target_area)
* (from_agg.length + 2 * inward_cf - cf)
/ (2 * math.sqrt(math.pi * from_area_after))
)
+ (
abs(1 - to_area_after / target_area)
            # the receiving aggregate's perimeter grows from to_agg.length
            * (to_agg.length + cf - 2 * outward_cf)
/ (2 * math.sqrt(math.pi * to_area_after))
)
- aggregation_criterion(from_agg, target_area=target_area)
- aggregation_criterion(to_agg, target_area=target_area)
)
def _neighbour_edge_lengths(polygons: List[AnyPolygon],
neighbour_dict: Dict[int, List[int]],
) -> Dict[int, Dict[int, float]]:
return {
i1: {i2: _shared_edge_length(polygons[i1], polygons[i2]) for i2 in neighs}
for i1, neighs in neighbour_dict.items()
}
def _shared_edge_length(poly1: AnyPolygon, poly2: AnyPolygon) -> float:
inters = poly1.intersection(poly2)
if inters.geom_type not in ('LineString', 'MultiLineString'):
        raise ValueError('expected a shared linear boundary, got %s' % inters.geom_type)
return inters.length
def _union_groups(polygons: List[AnyPolygon],
groups: List[List[int]],
) -> List[AnyPolygon]:
return [shapely.ops.unary_union([polygons[i] for i in group]) for group in groups]
def representative_points(geometry: gpd.GeoSeries) -> gpd.GeoSeries:
"""Return a GeoSeries of representative points of input geometries."""
return geometry.map(lambda x: x.representative_point())
| 41.551601 | 97 | 0.621017 | 1,432 | 11,676 | 4.844274 | 0.212291 | 0.041805 | 0.014127 | 0.011532 | 0.124982 | 0.063428 | 0.030272 | 0.009803 | 0 | 0 | 0 | 0.008311 | 0.288969 | 11,676 | 280 | 98 | 41.7 | 0.827271 | 0.200668 | 0 | 0.095 | 0 | 0 | 0.00977 | 0 | 0 | 0 | 0 | 0.003571 | 0 | 1 | 0.07 | false | 0 | 0.055 | 0.01 | 0.205 | 0.015 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d940aacea208c1f81a4ef776df0c9e2e28197e | 18,061 | py | Python | notebooks/c10_GANs/GANs.py | lixuekai2001/ml_for_log_data | 1e01c4c6c9a3ee6e20c5cfe8db44029c0aeaedd8 | [
"Apache-2.0"
] | 9 | 2020-09-24T06:34:03.000Z | 2021-08-18T14:43:11.000Z | notebooks/c10_GANs/GANs.py | lixuekai2001/ml_for_log_data | 1e01c4c6c9a3ee6e20c5cfe8db44029c0aeaedd8 | [
"Apache-2.0"
] | null | null | null | notebooks/c10_GANs/GANs.py | lixuekai2001/ml_for_log_data | 1e01c4c6c9a3ee6e20c5cfe8db44029c0aeaedd8 | [
"Apache-2.0"
] | 6 | 2020-10-14T07:13:20.000Z | 2021-12-23T01:59:41.000Z | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: py37_pytorch
# language: python
# name: conda-env-py37_pytorch-py
# ---
# # GANs
# In this notebook, we are going to create a generative adversarial network (GAN) from scratch! Specifically, we will build and train a GAN that can generate hand-written images of digits (0-9).
# Generative Adversarial Networks are considered the state of the art for the generation of photorealistic images.
#
# Take a look at the images below. Can you distinguish which images are real and which ones are fakes?
#
# <table>
# <tr>
# <th>
# <img src='./this_person_does_not_exist/person1.jpeg' height=200 width=200/>
# </th>
#
# <th>
# <img src='./this_person_does_not_exist/person2.jpeg' height=200 width=200/>
# </th>
# <th>
# <img src='./this_person_does_not_exist/person3.jpeg' height=200 width=200/>
# </th>
# </tr>
# <tr>
# <th>
# <img src='./this_person_does_not_exist/person4.jpeg' height=200 width=200/>
# </th>
# <th>
# <img src='./this_person_does_not_exist/person5.jpeg' height=200 width=200/>
# </th>
# <th>
# <img src='./this_person_does_not_exist/person6.jpeg' height=200 width=200/>
# </th>
# </tr>
# </table>
#
# Actually all images are fake... Those images were created by a GAN named StyleGAN2 (Dec 2019) - Karras et al. and Nvidia
#
# Source: https://thispersondoesnotexist.com/
#
# Also see: https://thisxdoesnotexist.com/
#
# ## How does a GAN work?
#
# GANs are a hot topic that is evolving very fast. However, we will explore the basic concept of GANs as proposed by Ian Goodfellow in 2014. [Original paper in GANs](https://arxiv.org/pdf/1406.2661.pdf)
#
# In a GAN the **discriminator** (also called sometimes the **critic**) is a binary classifier that will be trained to distinguish if an image is fake or real. The **generator** will be trying to generate images that fool the **discriminator**. At the beginning both the **generator** $G$ and **discriminator** $D$ will be taking random guesses. Both $G$ and $D$ will be learning from each other's feedback. The input of the $G$ will be a random vector $z$.
#
# <img src='Gan.png' height=400 width=400 />
#
# More formally, $G$ and $D$ play a minimax game, each trying to outdo its adversary:
#
# $\min_D \max_G \{ -\mathbb{E}_{\mathbf{x} \sim \text{Data}} \log D(\mathbf{x}) - \mathbb{E}_{\mathbf{z} \sim \text{Noise}} \log(1 - D(G(\mathbf{z}))) \}$
#
#
#
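# To make the link to the code below concrete, here is a minimal sketch
# (illustrative only; it assumes `disc`, `gen`, a batch of `real` images and a
# noise batch `z` already exist) of how this minimax game decomposes into the
# two binary cross-entropy losses used later in this notebook:
#
# ```python
# bce = nn.BCEWithLogitsLoss()
# pred_real = disc(real)
# pred_fake = disc(gen(z).detach())   # detach: this step must not update G
# d_loss = (bce(pred_real, torch.ones_like(pred_real))
#           + bce(pred_fake, torch.zeros_like(pred_fake))) / 2
# g_loss = bce(disc(gen(z)), torch.ones_like(pred_real))  # G wants fakes -> 1
# ```
#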
# ## Import Libraries
# We will begin by importing some useful packages and the MNIST dataset which will be used to build and train our GAN.
# +
import torch
from torch import nn
from torchvision import transforms
from torchvision.datasets import MNIST # Training dataset
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
from deep_ml_curriculum.torchsummaryX import summary
torch.manual_seed(2020) # Set for testing purposes, please do not change!
# -
# Function for visualizing images: Given a tensor of images, number of images, and size per image, plots and prints the images in a uniform grid.
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):
image_unflat = image_tensor.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
# ## MNIST Dataset
#
# The training images your discriminator will be using is from a dataset called [MNIST](http://yann.lecun.com/exdb/mnist/). It contains 60,000 images of handwritten digits (28x28 pixels), from 0 to 9, like these. Additionally, these images are also in black-and-white so only one dimension, or "color channel", is needed to represent them. Colored images are usually in the RGB space and need 3 channels to represent them.
#
# 
#
# Source Image:[MNIST Example](https://en.wikipedia.org/wiki/MNIST_database#/media/File:MnistExamples.png)
# License: [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0)
#
# #### Batches
# While you could train your model after generating one image, it is extremely inefficient and leads to less stable training. In GANs, and in machine learning in general, you will process multiple images per training step. These are called batches.
#
# This means that your generator will generate an entire batch of images and receive the discriminator's feedback on each before updating the model. The same goes for the discriminator, it will calculate its loss on the entire batch of generated images as well as on the reals before the model is updated.
#
# # Generator
#
# The first step is to build the generator component. We need a function that creates a single block of the generator's neural network. Each block consists of a linear transformation to map to another shape, batch normalization for stabilization, and finally a non-linear activation function (a ReLU here) so the output can be transformed in complex ways. Activations and batch normalization are covered in more detail later in the course.
#
#
def get_generator_block(input_dim, output_dim):
return nn.Sequential(
nn.Linear(input_dim, output_dim),
nn.BatchNorm1d(output_dim),
nn.ReLU(inplace=True),
)
# - Generator Class Values:
# - z_dim: the dimension of the noise vector
# - im_dim: the dimension of the output image. MNIST images are 28 x 28 = 784.
# - hidden_dim: the inner dimension
#
# - Forward Function: Function for completing a forward pass of the generator: Given a noise vector, returns a generated image.
# - noise: a noise tensor with dimensions (n_samples, z_dim)
#
class Generator(nn.Module):
def __init__(self, z_dim=10, im_dim=784, hidden_dim=128):
super(Generator, self).__init__()
# Build the neural network
self.gen = nn.Sequential(
get_generator_block(z_dim, hidden_dim),
get_generator_block(hidden_dim, hidden_dim * 2),
get_generator_block(hidden_dim * 2, hidden_dim * 4),
get_generator_block(hidden_dim * 4, hidden_dim * 8),
nn.Linear(hidden_dim * 8, im_dim),
# Scale data from 0-1
nn.Sigmoid(),
)
def forward(self, noise):
return self.gen(noise)
# <div class="alert alert-success" style="font-size:100%">
#
# **Exercise 1:** <br>
# Complete the function below `get_noise` for creating noise vectors: Given the dimensions (n_samples, z_dim), creates a tensor of that shape filled with random numbers from the normal distribution.
#
# Parameters:
# - n_samples: the number of samples in the batch, a scalar
# - z_dim: the dimension of the noise vector, a scalar
# - device: the device type
#
# **Hint:** `torch.randn` might be useful.
# </div>
# You can click in the button below the reveal the solution for exercise 1
#
# <details>
# <summary>
# <font size="4" color="darkblue"><b>See the solution for Exercise 1</b></font>
# </summary>
#
# ```python
# def get_noise(n_samples, z_dim, device=device):
# return torch.randn((n_samples, z_dim)).to(device)
#
# noise = get_noise(4, 128)
# gen = Generator(128)
# gen(noise)
# ```
#
# </details>
# ## Discriminator
#
# The second component we need to construct is the discriminator. Similarly to the generator component, we will need a function that builds a neural network block for the discriminator. Instead of `nn.ReLU` we use `nn.LeakyReLU`, which helps to avoid the vanishing-gradient problem in the network.
def get_discriminator_block(input_dim, output_dim):
return nn.Sequential(
nn.Linear(input_dim, output_dim),
nn.LeakyReLU(negative_slope=0.2)
)
# - Discriminator Class Values:
# - im_dim: the dimension of the images. A flatten image of 28x28 would have a size of 784.
# - hidden_dim: the inner dimension
#
# - Forward Function: Function for completing a forward pass of the generator: Given a noise vector, returns a generated image.
# - noise: a noise tensor with dimensions (n_samples, z_dim)
class Discriminator(nn.Module):
def __init__(self, im_dim=784, hidden_dim=128):
super(Discriminator, self).__init__()
self.disc = nn.Sequential(
get_discriminator_block(im_dim, hidden_dim * 2),
get_discriminator_block(hidden_dim * 2, hidden_dim * 1),
get_discriminator_block(hidden_dim * 1, hidden_dim//2),
# Here we want to have a 1-dimension tensor representing fake/real
nn.Linear(hidden_dim//2, 1),
)
def forward(self, image):
return self.disc(image)
# ## Training
#
# First, we will set some parameters:
# * criterion: the loss function
# * n_epochs: the number of times you iterate through the entire dataset when training
# * z_dim: the dimension of the noise vector
# * display_step: how often to display/visualize the images
# * batch_size: the number of images per forward/backward pass
# * lr: the learning rate
# * device: the device type
#
# Next, you will load the MNIST dataset as tensors using a dataloader.
# +
# Set your parameters
criterion = nn.BCEWithLogitsLoss()
n_epochs = 20
z_dim = 64
display_step = 500
batch_size = 128
lr = 1e-5
device = "cuda" if torch.cuda.is_available() else 'cpu'
print(device)
# Load MNIST dataset as tensors
dataloader = DataLoader(
MNIST("../../data/processed/MNIST", download=False, transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True,
)
# -
# Let's initialize our generator, discriminator, and optimizers.
gen = Generator(z_dim).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = Discriminator().to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
# Notice that our generator has many more parameters. It's much easier to be a critic, so to keep it balanced, we give it a smaller "brain"
noise = torch.randn((2, z_dim)).to(device)
summary(gen, noise)
z = torch.randn((2, 1, 28 *28)).to(device)
summary(disc, z)
# Before we start training our GAN, we will need to create some functions to calculate the discriminator's loss and the generator's loss. This is how the discriminator and generator will know how they are doing and improve themselves. Since the generator is needed when calculating the discriminator's loss, you will need to call `.detach()` on the generator result to ensure that only the discriminator is updated!
# `get_disc_loss` will return the loss of a discriminator given a generator and real images
# - Parameters:
# - gen: the generator model, which returns an image given z-dimensional noise
# - disc: the discriminator model, which returns a single-dimensional prediction of real/fake
# - criterion:
# the loss function, which should be used to compare the discriminator's predictions to the ground truth reality of the images (e.g. fake = 0, real = 1)
# - real: a batch of real images
# - num_images: the number of images the generator should produce, which is also the length of the real images
# - z_dim: the dimension of the noise tensor
# - device: the device type
# +
def get_noise(n_samples, z_dim, device='cpu'):
return torch.randn((n_samples, z_dim)).to(device)
def get_disc_loss(gen, disc, criterion, real, num_images, z_dim, device):
"""Train the discriminator on a batch of real and fake images"""
# 1. Create noise vectors and generate a batch of num_images fake images.
# Make sure to pass the device argument to the noise.
noise = get_noise(num_images, z_dim, device)
# Don't forget to detach the generator!
fake_images = gen(noise).detach() # detach to avoid training G on these labels
# 2. Train Fake Images
# Get the discriminator's prediction of the fake image and calculate the loss.
pred_fake = disc(fake_images)
# Remember the loss function you set earlier? You need a 'ground truth' tensor in order to calculate the loss.
# All of these are fake, so the label is 0
ground_truth_fake = torch.zeros_like(pred_fake)
    loss_fake = criterion(pred_fake, ground_truth_fake)
# Repeat the process with `ground_truth_real`
# Train Real Images
pred_real = disc(real)
ground_truth_real = torch.ones_like(pred_real)
loss_real = criterion(pred_real, ground_truth_real)
    # backward() is called once on the combined loss in the training loop;
    # calling it here as well would accumulate the gradients twice.
    disc_loss = (loss_real + loss_fake) / 2
return disc_loss
# -
# `get_gen_loss` will return the loss of a generator given a discriminator
# - Parameters:
# - gen: the generator model, which returns an image given z-dimensional noise
# - disc: the discriminator model, which returns a single-dimensional prediction of real/fake
# - criterion: the loss function, which should be used to compare
# the discriminator's predictions to the ground truth reality of the images
# (e.g. fake = 0, real = 1)
# - num_images: the number of images the generator should produce,
# which is also the length of the real images
# - z_dim: the dimension of the noise tensor
# - device: the device type
def get_gen_loss(gen, disc, criterion, num_images, z_dim, device):
"""Train the generator to fool the discriminator."""
# Create noise vectors and generate a batch of fake images.
noise = get_noise(num_images, z_dim, device)
# Get the discriminator's prediction of the fake image.
fake_images = gen(noise)
# Get the discriminator's prediction of the fake image.
pred_fake = disc(fake_images)
# Target vectors with 1`s. In this case, 1 represents real
# From the perspective of the generator, "true" or 1 is the answer it wants
target = torch.ones_like(pred_fake)
# Calculate the generator's loss.
    gen_loss = criterion(pred_fake, target)
    # backward() is called in the training loop, not here.
    return gen_loss
# # Training Time !
# For each epoch, we will process the entire dataset in batches. For every batch, we will need to update the discriminator and generator using their loss.
#
# <div class="alert alert-info" style="font-size:100%">
#
# **Warning:** <br>
# Note that you may see a loss to be greater than 1, this is okay since binary cross entropy loss can be any positive number for a sufficiently confident wrong guess.
#
# It’s also often the case that the discriminator will outperform the generator, especially at the start, and balancing them is difficult. The most important thing is that neither one gets too good (that is, near-perfect accuracy), which is remarkably hard to do in a standard GAN.
#
# **Computation Time:** On a GPU, this should take about 15 seconds per 500 steps, on average, while on CPU it will take roughly 1.5 minutes.
# </div>
#
#
#
cur_step = 0
mean_generator_loss = 0
mean_discriminator_loss = 0
test_generator = True # Whether the generator should be tested
for epoch in tqdm(range(n_epochs), unit='epoch'):
print('epoch', epoch)
# Dataloader returns the batches
for real, _ in tqdm(dataloader, desc='train'):
cur_batch_size = len(real)
# Flatten the batch of real images from the dataset
real = real.view(cur_batch_size, -1).to(device)
## Update Discriminator ##
# Zero out the gradients before backpropagation
disc_opt.zero_grad()
# Calculate discriminator loss
disc_loss = get_disc_loss(
gen, disc, criterion, real, cur_batch_size, z_dim, device
)
        # Update gradients (the fakes were detached, so no retain_graph is needed)
        disc_loss.backward()
# Update optimizer
disc_opt.step()
# For testing purposes, we keep track of the generator weights
if test_generator:
old_generator_weights = gen.gen[0][0].weight.detach().clone()
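        ## Update Generator ##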
# Zero out the gradients
gen_opt.zero_grad()
# Calculate the generator loss, assigning it to gen_loss
gen_loss = get_gen_loss(gen, disc, criterion, cur_batch_size, z_dim, device)
        # Backprop through the generator (update the gradients and optimizer)
        gen_loss.backward()
gen_opt.step()
# We make sure that your code changes the generator weights
if test_generator:
assert torch.any(
gen.gen[0][0].weight.detach().clone() != old_generator_weights
)
# Keep track of the average discriminator loss
mean_discriminator_loss += disc_loss.item() / display_step
# Keep track of the average generator loss
mean_generator_loss += gen_loss.item() / display_step
## Visualization code ##
if cur_step % display_step == 0 and cur_step > 0:
print(
'Step {}: Generator loss: {}, discriminator loss: {}'.format(cur_step, mean_generator_loss, mean_discriminator_loss)
)
fake_noise = get_noise(cur_batch_size, z_dim, device=device)
fake = gen(fake_noise)
print("Fake images")
show_tensor_images(fake)
print("Real images")
show_tensor_images(real)
mean_generator_loss = 0
mean_discriminator_loss = 0
cur_step += 1
# # Applications:
#
# - Anomaly Detection: e.g. MadGan https://arxiv.org/abs/1901.04997
# - Synthetic Data: Use the generator to help in the training, when you don't have enougth data. This is used a lot in medical data where you have few data points. In the end the discrimator
# - Adversarial Examples: Are something we may have to harden our models again
# - Privacy Preserving: Instead of handing over real patient data
# - Super Resolution: Deep Rocks SR
| 40.224944 | 458 | 0.700626 | 2,690 | 18,061 | 4.595539 | 0.235316 | 0.024268 | 0.00728 | 0.009626 | 0.262983 | 0.22739 | 0.200291 | 0.174891 | 0.145284 | 0.123605 | 0 | 0.016726 | 0.208848 | 18,061 | 448 | 459 | 40.314732 | 0.848415 | 0.661093 | 0 | 0.117647 | 0 | 0 | 0.021218 | 0.004449 | 0 | 0 | 0 | 0 | 0.007353 | 1 | 0.073529 | false | 0 | 0.066176 | 0.036765 | 0.205882 | 0.036765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7d9ea2df8609be0b637480364da1b5f5ab37477 | 4,414 | py | Python | query_broker/src/qb_pull/pull_query.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | 1 | 2021-06-10T02:56:43.000Z | 2021-06-10T02:56:43.000Z | query_broker/src/qb_pull/pull_query.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | null | null | null | query_broker/src/qb_pull/pull_query.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | 1 | 2020-03-25T23:55:58.000Z | 2020-03-25T23:55:58.000Z | import json
from mysystem import *
from utils import trans2json
import pull_global_vars as gv
from pull_util import *
from hash import Hash
#from redis_oper import write2redis
import base64
import time
MEDIA_REQ_TIMEOUT = 3
def query_hash(data):
result_hash_list = []
    start_time = time.time()
    if 'url' in data['params']:
        if data['params']['url']['hash'] is not None and data['params']['url']['hash'] != '':
ret_code, result = query_vddb_async(
data['params']['url']['hash'], data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
    if data['params']['thunder_hash'] is not None and data['params']['thunder_hash'] != '':
ret_code, result = query_vddb_async(
data['params']['thunder_hash'], data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
    if 'seed_file' in data['params']:
seed_file_hash = ''
if data['params']['seed_file']['hash'] != '':
seed_file_hash = data['params']['seed_file']['hash']
else:
ret_code, bt_file_name = download_file(
data['params']['seed_file']['path'], gv.file_tmpdir)
if ret_code:
client_id = data['params']['additional_info']['client_id']
with open(bt_file_name, 'rb') as fp:
seed_file_content = fp.read()
seed_file_hash = Hash(
filename=bt_file_name, content=seed_file_content).value
data['params']['seed_file']['hash'] = seed_file_hash
try:
os.remove(bt_file_name)
except OSError:
g_logger.error(trans2json(
"delete bt file %s error %s" % (bt_file_name, traceback.format_exc())))
ret_code, result = query_vddb_async(seed_file_hash, data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
    if 'files' in data['params']:
hash_list = []
data_list = []
for i in data['params']['files']:
dna_hash = i['hash']
hash_list.append(dna_hash)
data_list.append(data)
        # wrap in list(): under Python 3, map() returns an iterator without len()
        result_list = list(map(query_vddb_async, hash_list, data_list))
for i in range(len(result_list)):
if result_list[i][0] == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return result_list[i][0], result_list[i][1]
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return 3, None
def url_scheme(url):
scheme = None
parts = url.split('://', 1)
if len(parts) >= 2:
scheme = parts[0]
return scheme
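# Illustrative behaviour of the helper above:
#
#     url_scheme('http://example.com/file.torrent')  -> 'http'
#     url_scheme('magnet:?xt=urn:btih:...')          -> None  (no '://' part)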
def query_vddb_async(req_hash, data):
g_logger.debug(trans2json("query vddb async by hash %s" % str(req_hash)))
    # NOTE: the original code rebound the name `mysystem` here; because the
    # assignment makes `mysystem` a local variable, the right-hand side would
    # raise UnboundLocalError. Use a distinct local name instead.
    system_client = mysystem(gv.mysystem_user, gv.mysystem_passwd,
                             gv.mysystem_url, False, MEDIA_REQ_TIMEOUT, g_logger)
    uuid = data['params']['external_id']
    ret, status_listing = system_client.query(req_hash, uuid)
working_cnt = 0
copyrighted_cnt = 0
uncopyrighted_cnt = 0
status_cnt = len(status_listing)
for status in status_listing:
if status['status'] == STATUS_COPYRIGHTED:
copyrighted_cnt += 1
if status['status'] == STATUS_UNCOPYRIGHTED:
uncopyrighted_cnt += 1
if status['status'] == STATUS_WORKING:
working_cnt += 1
    # none of the hashes could be detected at all
if ret == STATUS_UNDETECTED:
ret_code = 2
return ret_code, status_listing
if status_cnt > 0:
if copyrighted_cnt == status_cnt or working_cnt == status_cnt or uncopyrighted_cnt == status_cnt:
ret_code = 1
return ret_code, status_listing
return 4, None
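# Return-code conventions used above, inferred from the call sites (the
# original module defines no named constants for them, so treat this summary
# as documentation only):
#   1 -> every queried item reports one consistent status (all copyrighted,
#        all uncopyrighted, or all still working)
#   2 -> the hash is entirely undetected by the backend
#   3 -> query_hash exhausted all hashes without a conclusive result
#   4 -> mixed or otherwise inconclusive statuses for this hash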
| 39.410714 | 105 | 0.596964 | 563 | 4,414 | 4.397869 | 0.207815 | 0.050889 | 0.047254 | 0.024233 | 0.42811 | 0.37399 | 0.3437 | 0.325929 | 0.298465 | 0.265347 | 0 | 0.015152 | 0.282284 | 4,414 | 111 | 106 | 39.765766 | 0.766414 | 0.101269 | 0 | 0.1875 | 0 | 0 | 0.086616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.010417 | 0.083333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7dc48471ad360dc8b2e30a52e38cd140a5af543 | 2,120 | py | Python | tests/unit_tests/test_rm/test_cobalt.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 47 | 2015-03-16T01:08:11.000Z | 2022-02-02T10:36:39.000Z | tests/unit_tests/test_rm/test_cobalt.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 1,856 | 2015-01-02T09:32:20.000Z | 2022-03-31T21:45:06.000Z | tests/unit_tests/test_rm/test_cobalt.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 28 | 2015-06-10T18:15:14.000Z | 2021-11-07T04:36:45.000Z | #!/usr/bin/env python3
# pylint: disable=protected-access, unused-argument, no-value-for-parameter
__copyright__ = 'Copyright 2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import os
from unittest import mock, TestCase
from radical.pilot.agent.resource_manager import RMInfo
from radical.pilot.agent.resource_manager.cobalt import Cobalt
# ------------------------------------------------------------------------------
#
class CobaltTestCase(TestCase):
# --------------------------------------------------------------------------
#
@mock.patch.object(Cobalt, '__init__', return_value=None)
def test_init_from_scratch(self, mocked_init):
os.environ['COBALT_PARTNAME'] = '1' # node id -> node name: 'nid00001'
rm_cobalt = Cobalt(cfg=None, log=None, prof=None)
rm_info = rm_cobalt._init_from_scratch(RMInfo({'cores_per_node': 1}))
self.assertEqual(rm_info.node_list[0]['node_name'], 'nid00001')
self.assertEqual(rm_info.node_list[0]['cores'], [0]) # list of cores
# --------------------------------------------------------------------------
#
@mock.patch.object(Cobalt, '__init__', return_value=None)
def test_init_from_scratch_error(self, mocked_init):
rm_cobalt = Cobalt(cfg=None, log=None, prof=None)
with self.assertRaises(RuntimeError):
# `cores_per_node` not defined
rm_cobalt._init_from_scratch(RMInfo({'cores_per_node': None}))
for cobalt_env_var in ['COBALT_NODEFILE', 'COBALT_PARTNAME']:
if cobalt_env_var in os.environ:
del os.environ[cobalt_env_var]
with self.assertRaises(RuntimeError):
# both $COBALT_NODEFILE and $COBALT_PARTNAME are not set
rm_cobalt._init_from_scratch(RMInfo({'cores_per_node': 1}))
# ------------------------------------------------------------------------------
if __name__ == '__main__':
tc = CobaltTestCase()
tc.test_init_from_scratch()
tc.test_init_from_scratch_error()
# ------------------------------------------------------------------------------
| 33.650794 | 80 | 0.558019 | 225 | 2,120 | 4.888889 | 0.36 | 0.050909 | 0.095455 | 0.069091 | 0.459091 | 0.411818 | 0.346364 | 0.291818 | 0.291818 | 0.189091 | 0 | 0.011905 | 0.167925 | 2,120 | 62 | 81 | 34.193548 | 0.611678 | 0.289151 | 0 | 0.214286 | 0 | 0 | 0.120563 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a7dc9e1650e34ca20939ec399664cf109bf4022c | 5,859 | py | Python | magnet_tools.py | carpeanon/distance_metric | de2a61c7b7e97398cdb35115bb026b046c88d944 | [
"MIT"
] | 115 | 2016-09-21T15:22:59.000Z | 2020-07-01T05:09:07.000Z | magnet_tools.py | berooo/tf-magnet-loss | d654b7a296b5f71d0e78a510e1b5fcd3ea0c5f65 | [
"MIT"
] | 8 | 2016-11-15T08:44:38.000Z | 2019-07-04T09:41:37.000Z | magnet_tools.py | berooo/tf-magnet-loss | d654b7a296b5f71d0e78a510e1b5fcd3ea0c5f65 | [
"MIT"
] | 32 | 2016-09-26T01:08:07.000Z | 2020-05-06T01:27:14.000Z | from math import ceil
import numpy as np
from sklearn.cluster import KMeans
def compute_reps(extract_fn, X, chunk_size):
"""Compute representations for input in chunks."""
chunks = int(ceil(float(X.shape[0]) / chunk_size))
reps = []
for i in range(chunks):
start = i * chunk_size
stop = start + chunk_size
chunk_reps = extract_fn(X[start:stop])
reps.append(chunk_reps)
return np.vstack(reps)
class ClusterBatchBuilder(object):
"""Sample minibatches for magnet loss."""
def __init__(self, labels, k, m, d):
self.num_classes = np.unique(labels).shape[0]
self.labels = labels
self.k = k
self.m = m
self.d = d
self.centroids = None
self.assignments = np.zeros_like(labels, int)
self.cluster_assignments = {}
self.cluster_classes = np.repeat(range(self.num_classes), k)
self.example_losses = None
self.cluster_losses = None
self.has_loss = None
def update_clusters(self, rep_data, max_iter=20):
"""
Given an array of representations for the entire training set,
recompute clusters and store example cluster assignments in a
quickly sampleable form.
"""
# Lazily allocate array for centroids
if self.centroids is None:
self.centroids = np.zeros([self.num_classes * self.k, rep_data.shape[1]])
for c in range(self.num_classes):
class_mask = self.labels == c
class_examples = rep_data[class_mask]
kmeans = KMeans(n_clusters=self.k, init='k-means++', n_init=1, max_iter=max_iter)
kmeans.fit(class_examples)
# Save cluster centroids for finding impostor clusters
start = self.get_cluster_ind(c, 0)
stop = self.get_cluster_ind(c, self.k)
self.centroids[start:stop] = kmeans.cluster_centers_
# Update assignments with new global cluster indexes
self.assignments[class_mask] = self.get_cluster_ind(c, kmeans.predict(class_examples))
# Construct a map from cluster to example indexes for fast batch creation
for cluster in range(self.k * self.num_classes):
cluster_mask = self.assignments == cluster
self.cluster_assignments[cluster] = np.flatnonzero(cluster_mask)
def update_losses(self, indexes, losses):
"""
Given a list of examples indexes and corresponding losses
store the new losses and update corresponding cluster losses.
"""
# Lazily allocate structures for losses
if self.example_losses is None:
self.example_losses = np.zeros_like(self.labels, float)
self.cluster_losses = np.zeros([self.k * self.num_classes], float)
self.has_loss = np.zeros_like(self.labels, bool)
# Update example losses
indexes = np.array(indexes)
self.example_losses[indexes] = losses
        # Flag these examples as measured (assigning the raw losses here would
        # unset the flag whenever a loss happens to be exactly zero)
        self.has_loss[indexes] = True
# Find affected clusters and update the corresponding cluster losses
clusters = np.unique(self.assignments[indexes])
for cluster in clusters:
cluster_inds = self.assignments == cluster
cluster_example_losses = self.example_losses[cluster_inds]
            # Take the average loss in the cluster over examples for which we have measured a loss
self.cluster_losses[cluster] = np.mean(cluster_example_losses[self.has_loss[cluster_inds]])
def gen_batch(self):
"""
Sample a batch by first sampling a seed cluster proportionally to
the mean loss of the clusters, then finding nearest neighbor
"impostor" clusters, then sampling d examples uniformly from each cluster.
The generated batch will consist of m clusters each with d consecutive
examples.
"""
# Sample seed cluster proportionally to cluster losses if available
if self.cluster_losses is not None:
p = self.cluster_losses / np.sum(self.cluster_losses)
seed_cluster = np.random.choice(self.num_classes * self.k, p=p)
else:
seed_cluster = np.random.choice(self.num_classes * self.k)
        # Get impostor clusters by ranking centroids by distance
sq_dists = ((self.centroids[seed_cluster] - self.centroids) ** 2).sum(axis=1)
        # Ensure only clusters of a different class from the seed are chosen
sq_dists[self.get_class_ind(seed_cluster) == self.cluster_classes] = np.inf
# Get top impostor clusters and add seed
clusters = np.argpartition(sq_dists, self.m-1)[:self.m-1]
clusters = np.concatenate([[seed_cluster], clusters])
# Sample examples uniformly from cluster
batch_indexes = np.empty([self.m * self.d], int)
for i, c in enumerate(clusters):
x = np.random.choice(self.cluster_assignments[c], self.d, replace=False)
start = i * self.d
stop = start + self.d
batch_indexes[start:stop] = x
# Translate class indexes to index for classes within the batch
class_inds = self.get_class_ind(clusters)
batch_class_inds = []
inds_map = {}
class_count = 0
for c in class_inds:
if c not in inds_map:
inds_map[c] = class_count
class_count += 1
batch_class_inds.append(inds_map[c])
return batch_indexes, np.repeat(batch_class_inds, self.d)
def get_cluster_ind(self, c, i):
"""
Given a class index and a cluster index within the class
return the global cluster index
"""
return c * self.k + i
def get_class_ind(self, c):
"""
Given a cluster index return the class index.
"""
        return c // self.k  # integer division; `/` would yield float indices on Python 3
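# Hedged usage sketch (added; the k/m/d values and random embeddings are assumptions):
#
#   labels = np.repeat(range(4), 25)                  # 4 classes, 25 examples each
#   builder = ClusterBatchBuilder(labels, k=2, m=3, d=4)
#   reps = np.random.rand(100, 16)                    # stand-in for learned embeddings
#   builder.update_clusters(reps)
#   batch_idx, batch_classes = builder.gen_batch()    # m * d = 12 example indexes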
| 38.045455 | 103 | 0.636286 | 768 | 5,859 | 4.708333 | 0.223958 | 0.033462 | 0.030973 | 0.014934 | 0.076051 | 0.024336 | 0.024336 | 0.024336 | 0.024336 | 0.024336 | 0 | 0.003085 | 0.280765 | 5,859 | 153 | 104 | 38.294118 | 0.855007 | 0.258577 | 0 | 0 | 0 | 0 | 0.002178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.035714 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38ea9b8192ecf74f7a1c5e4457b0e4608d455f97 | 5,126 | py | Python | payment_multisafepay_official/models/payment_transaction.py | stesi/official-odoo-integration | 969fd6f8773f4120189b2574bb8669bb43801bc9 | [
"MIT"
] | null | null | null | payment_multisafepay_official/models/payment_transaction.py | stesi/official-odoo-integration | 969fd6f8773f4120189b2574bb8669bb43801bc9 | [
"MIT"
] | 1 | 2021-11-29T10:48:54.000Z | 2022-03-15T09:57:36.000Z | payment_multisafepay_official/models/payment_transaction.py | stesi/official-odoo-integration | 969fd6f8773f4120189b2574bb8669bb43801bc9 | [
"MIT"
] | 6 | 2020-12-13T07:27:58.000Z | 2021-12-27T03:00:00.000Z | from odoo import models, fields, _
from odoo.addons.payment.models.payment_acquirer import ValidationError
import logging
import pprint
from datetime import datetime
_logger = logging.getLogger(__name__)
class MultiSafepayPaymentTransaction(models.Model):
_inherit = 'payment.transaction'
multisafepay_order_id = fields.Char(string='Order ID in MultiSafepay')
    def _multisafepay_form_get_tx_from_data(self, data):
        """Find the payment.transaction matching the MultiSafepay order id."""
multisafepay_order_id = data.get('transactionid')
if multisafepay_order_id is None:
raise ValidationError('Invalid transaction id')
reference = multisafepay_order_id.split('_')[0]
tx = self.search([('reference', '=', reference)])
if not tx or len(tx) > 1:
error_msg = _('received data for reference %s') % (pprint.pformat(reference))
if not tx:
error_msg += _('; no order found')
else:
                error_msg += _('; multiple orders found')
_logger.info(error_msg)
raise ValidationError(error_msg)
return tx
def _multisafepay_form_get_invalid_parameters(self, data):
return []
    def _multisafepay_form_validate(self, data):
        """Fetch the order from MultiSafepay and map its status to the transaction state."""
multisafepay_client = self.acquirer_id.get_multisafepay_client()
order = multisafepay_client.order.get(data.get('transactionid'))
if self.handle_refund_transactions(order):
return
if self.state == 'done':
return True
if not order.get('success', False):
error_message = order.get('error_info', 'Request failed')
self._set_transaction_error(error_message)
return True
if not order.get('data').get('order_id'):
self._set_transaction_cancel()
return True
order_status = order.get('data').get('status', False)
self.write({
'acquirer_reference': order.get('data').get('transaction_id', 'undefined'),
'multisafepay_order_id': order.get('data').get('order_id', 'undefined'),
})
if order_status in ['void', 'declined', ] and data.get('type') == 'cancel':
self._set_transaction_cancel()
return True
if order_status in ['completed', 'shipped']:
self._set_transaction_done()
return True
if order_status in ['initialized', 'uncleared', ]:
self._set_transaction_pending()
return True
self._set_transaction_error('Transaction status: ' + order_status)
return True
def update_order(self):
if not self.invoice_ids:
return
multisafepay_client = self.acquirer_id.get_multisafepay_client()
multisafepay_client.order.update(self.multisafepay_order_id, {
'invoice_id': self.invoice_ids[0].id
})
    def handle_refund_transactions(self, order):
        """Mark matching refund invoices as paid for refund-type orders."""
if order.get('data', {}).get('payment_details', {}).get('type', '') in ['PAYPAL', 'AFTERPAY']:
costs = order.get('data').get('costs', [])
if not costs or order.get('data').get('status', False) != 'completed':
return False
for cost in costs:
if cost.get('status', 'void') == 'void':
continue
invoice = self.env['account.move'].sudo().search(
[('multisafepay_refund_id', '=', cost.get('transaction_id'))], limit=1)
if not invoice:
continue
invoice.set_refund_paid()
return True
else:
related_transactions = order.get('data').get('related_transactions', [])
if not related_transactions:
return False
for related_tx in related_transactions:
if related_tx.get('status', False) == 'completed':
invoice = self.env['account.move'].sudo().search(
[('multisafepay_refund_id', '=', related_tx.get('transaction_id'))], limit=1)
if not invoice:
continue
invoice.set_refund_paid()
return True
class StockPicking(models.Model):
_inherit = 'stock.picking'
    def send_to_shipper(self):
        """Push tracking details to MultiSafepay once the picking ships."""
super(StockPicking, self).send_to_shipper()
order = self.env['sale.order'].sudo().search([('name', 'ilike', self.origin)], limit=1)
multisafepay_transactions = list(filter(lambda tx: tx.provider == 'multisafepay', order.transaction_ids))
if not multisafepay_transactions:
return
multisafepay_client = multisafepay_transactions[0].acquirer_id.get_multisafepay_client()
for multisafepay_tx in multisafepay_transactions:
multisafepay_client.order.update(multisafepay_tx.multisafepay_order_id, {
"status": "shipped",
"tracktrace_code": self.carrier_tracking_ref,
"tracktrace_url": self.carrier_tracking_url,
"ship_date": datetime.now().strftime("%d-%m-%Y"),
"carrier": self.carrier_id.name,
})
| 38.541353 | 113 | 0.602224 | 543 | 5,126 | 5.432781 | 0.237569 | 0.026102 | 0.03661 | 0.045763 | 0.214576 | 0.204068 | 0.122034 | 0.122034 | 0.086102 | 0.086102 | 0 | 0.001906 | 0.283652 | 5,126 | 132 | 114 | 38.833333 | 0.801471 | 0 | 0 | 0.301887 | 0 | 0 | 0.142021 | 0.01268 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.04717 | 0.009434 | 0.301887 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38ec57b31d0a6997ccf276c07dc3ba95ee1b7f78 | 3,598 | py | Python | espnet2/utils/nested_dict_action.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | espnet2/utils/nested_dict_action.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | espnet2/utils/nested_dict_action.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | import argparse
import copy
import yaml
class NestedDictAction(argparse.Action):
"""Action class to append items to dict object.
Examples:
>>> parser = argparse.ArgumentParser()
>>> _ = parser.add_argument('--conf', action=NestedDictAction,
... default={'a': 4})
>>> parser.parse_args(['--conf', 'a=3', '--conf', 'c=4'])
Namespace(conf={'a': 3, 'c': 4})
>>> parser.parse_args(['--conf', 'c.d=4'])
Namespace(conf={'a': 4, 'c': {'d': 4}})
>>> parser.parse_args(['--conf', 'c.d=4', '--conf', 'c=2'])
Namespace(conf={'a': 4, 'c': 2})
>>> parser.parse_args(['--conf', '{d: 5, e: 9}'])
Namespace(conf={'d': 5, 'e': 9})
"""
_syntax = """Syntax:
{op} <key>=<yaml-string>
{op} <key>.<key2>=<yaml-string>
{op} <python-dict>
{op} <yaml-string>
e.g.
{op} a=4
{op} a.b={{c: true}}
{op} {{"c": True}}
{op} {{a: 34.5}}
"""
def __init__(
self,
option_strings,
dest,
nargs=None,
default=None,
choices=None,
required=False,
help=None,
metavar=None,
):
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
default=copy.deepcopy(default),
type=None,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_strings=None):
# --{option} a.b=3 -> {'a': {'b': 3}}
if "=" in values:
indict = copy.deepcopy(getattr(namespace, self.dest, {}))
key, value = values.split("=", maxsplit=1)
            if value.strip() != "":
value = yaml.load(value, Loader=yaml.Loader)
if not isinstance(indict, dict):
indict = {}
keys = key.split(".")
d = indict
for idx, k in enumerate(keys):
if idx == len(keys) - 1:
d[k] = value
else:
if not isinstance(d.setdefault(k, {}), dict):
# Remove the existing value and recreates as empty dict
d[k] = {}
d = d[k]
# Update the value
setattr(namespace, self.dest, indict)
else:
try:
# At the first, try eval(), i.e. Python syntax dict.
# e.g. --{option} "{'a': 3}" -> {'a': 3}
# This is workaround for internal behaviour of configargparse.
value = eval(values, {}, {})
if not isinstance(value, dict):
syntax = self._syntax.format(op=option_strings)
mes = f"must be interpreted as dict: but got {values}\n{syntax}"
raise argparse.ArgumentTypeError(self, mes)
except Exception:
# and the second, try yaml.load
value = yaml.load(values, Loader=yaml.Loader)
if not isinstance(value, dict):
syntax = self._syntax.format(op=option_strings)
mes = f"must be interpreted as dict: but got {values}\n{syntax}"
raise argparse.ArgumentError(self, mes)
d = getattr(namespace, self.dest, None)
if isinstance(d, dict):
d.update(value)
else:
# Remove existing params, and overwrite
setattr(namespace, self.dest, value)
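# Standalone usage sketch (added; mirrors the doctest in the class docstring):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--conf', action=NestedDictAction, default={})
#   args = parser.parse_args(['--conf', 'optim.lr=0.01', '--conf', 'seed=7'])
#   # args.conf == {'optim': {'lr': 0.01}, 'seed': 7}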
| 33.626168 | 84 | 0.480545 | 391 | 3,598 | 4.352941 | 0.30179 | 0.045828 | 0.035253 | 0.044653 | 0.235605 | 0.195652 | 0.168038 | 0.168038 | 0.141011 | 0.141011 | 0 | 0.011921 | 0.370484 | 3,598 | 106 | 85 | 33.943396 | 0.739514 | 0.256809 | 0 | 0.123288 | 0 | 0 | 0.116226 | 0.009973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027397 | false | 0 | 0.041096 | 0 | 0.09589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38ece89f4ee3a113e85bb525e2c7c95aa81878b8 | 3,447 | py | Python | webtest.py | manzino0705/Hungry_Genie | 5934f0c278a6bedb9a0ca844b34a24cf3ac6f88d | [
"MIT"
] | null | null | null | webtest.py | manzino0705/Hungry_Genie | 5934f0c278a6bedb9a0ca844b34a24cf3ac6f88d | [
"MIT"
] | null | null | null | webtest.py | manzino0705/Hungry_Genie | 5934f0c278a6bedb9a0ca844b34a24cf3ac6f88d | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import time
host_url = 'http://172.30.1.36:5050'
#options = webdriver.ChromeOptions()
#options.add_experimental_option("excludeSwitches", ["enable-logging"])
driver = webdriver.Chrome('C:/Users/joowon/Desktop/Hungry_Genie/chromedriver.exe')
'''driver.maximize_window()
driver.get(host_url)'''
event_items = dict()
recipe_titles = []
def stockPage():
driver.get(host_url+'/pic')
time.sleep(3)
driver.get(host_url+'/inventory')
def recommendPage():
current_url = driver.current_url
tail = current_url.split('/')[-1]
if not tail.startswith('recipe'):
driver.get(host_url+'/recipe')
recipeEventItem()
def possiblePage(query):
current_url = driver.current_url
tail = current_url.split('/')[-1]
if not tail.startswith('possible'):
driver.get(host_url+'/possible?food='+query)
def openWindow():
current_url = driver.current_url
tail = current_url.split('/')[-1]
if tail=='':
driver.maximize_window()
def closeWindow():
driver.minimize_window()
driver.get(host_url)
def quit():
driver.quit()
def recipeEventItem():
global event_items, recipe_titles
event_items = dict()
recipe_title1=driver.find_element_by_xpath('/html/body/div[2]/div/div[1]/div/div[1]/h1[1]').text
recipe_title2=driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div[1]/div[1]/h2[1]').text
recipe_buttons1 = driver.find_element_by_xpath('/html/body/div[2]/div/div[1]/div/div[2]/button')
recipe_buttons2 = driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div[1]/div[2]/button')
recipe_titles = [recipe_title1, recipe_title2]
event_items = {recipe_title1: recipe_buttons1,recipe_title2: recipe_buttons2}
print("event_items : ",event_items)
card_bodies = driver.find_elements_by_id('naver')
for body in card_bodies:
product_title = body.find_elements_by_tag_name('h5')
if product_title:
product_a = body.find_element_by_tag_name('a')
event_items[product_title[0].text] = product_a
print("event_items",event_items)
def possibleEvent():
a = driver.find_element_by_tag_name('a')
print('a tag click')
a.click()
def clickEvent(input_text):
global event_items
print(event_items)
count_max = 0
pick_item = ''
for k, v in event_items.items():
count = 0
for noun in k.split():
if input_text.find(noun)!=-1:
count+=1
if count_max<count:
count_max = count
pick_item = k
if pick_item=='':
return False
else:
event_items[pick_item].send_keys(Keys.ENTER)
return True
def bigR(query):
driver.get(host_url+'/big_recipe?food='+query)
def bigR2(query):
max_count = 0
pick = ""
for title in recipe_titles:
count = 0
for t in title.split():
if query.find(t)!=-1:
count += 1
if count>max_count:
max_count = count
pick = title
if pick=="":
return "false"
else:
return pick
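def mainPage():
    # Assumed helper: the __main__ block below calls mainPage(), but the
    # original file never defined it; opening the service root is a guess.
    driver.get(host_url)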
if __name__=='__main__':
mainPage()
print(driver.current_url.split('/'))
'''card_bodies = driver.find_elements_by_class_name('card-body')
event_items = dict()
for body in card_bodies:
recipe_title = body.find_elements_by_tag_name('h2')
if recipe_title:
recipe_button = body.find_element_by_tag_name('button')
event_items[recipe_title[0].text] = recipe_button
else:
product_title = body.find_elements_by_tag_name('h5')
if product_title:
product_a = body.find_element_by_tag_name('a')
event_items[product_title[0].text] = product_a
event_items['새우 볶음밥'].click()'''
| 26.312977 | 100 | 0.72846 | 528 | 3,447 | 4.484848 | 0.231061 | 0.071791 | 0.043919 | 0.047297 | 0.392314 | 0.336571 | 0.29223 | 0.26098 | 0.26098 | 0.26098 | 0 | 0.01953 | 0.123586 | 3,447 | 130 | 101 | 26.515385 | 0.764316 | 0.030461 | 0 | 0.152174 | 0 | 0.043478 | 0.141928 | 0.087035 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119565 | false | 0 | 0.043478 | 0 | 0.206522 | 0.054348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38efcbeb70f0ee864f425aaf1929758117aaf7cd | 7,723 | py | Python | generator.py | miethe/DnD-Character-Generator | 8716124f1feb21029373619d8919d8ff4ec7091b | [
"MIT"
] | 3 | 2019-03-22T01:21:08.000Z | 2022-01-05T10:40:29.000Z | generator.py | miethe/DnD-Character-Generator | 8716124f1feb21029373619d8919d8ff4ec7091b | [
"MIT"
] | 11 | 2019-03-22T12:39:13.000Z | 2019-03-22T18:57:44.000Z | generator.py | miethe/DnD-Character-Generator | 8716124f1feb21029373619d8919d8ff4ec7091b | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.distributions import OneHotCategorical
from torchvision.transforms import Compose
from data import Vocabulary, OneHot, Genders, Races, ToTensor
from utils import load_model
class Generator:
"""Base Generator class that can load trained model and require every subclass to implement `generate` method"""
def __init__(self, model_path, device="cpu"):
self.model = load_model(model_path, device=device)
self.device = device
def generate(self, num_samples):
raise NotImplementedError
class RNNCellGenerator(Generator):
def __init__(self, model_path, device="cpu"):
super().__init__(model_path, device)
self.vocab = Vocabulary()
self.races = Races()
self.genders = Genders()
self.to_tensor = ToTensor()
self.name_transform = Compose([self.vocab, OneHot(self.vocab.size), ToTensor()])
self.race_transform = Compose([self.races, OneHot(self.races.size), ToTensor()])
self.gender_transform = Compose([self.genders, OneHot(self.genders.size), ToTensor()])
def _init_random_input(self):
"""Helper function that initialize random letter, race and gender"""
letter = np.random.choice(self.vocab.start_letters)
race = np.random.choice(self.races.available_races)
gender = np.random.choice(self.genders.available_genders)
return letter, race, gender
def _transform_input(self, letter, race, gender):
"""Helper function to transform input into tensors"""
letter_tensor = self.name_transform(letter).to(self.device)
race_tensor = self.race_transform(race).to(self.device)
gender_tensor = self.gender_transform(gender).to(self.device)
return letter_tensor, race_tensor, gender_tensor
def generate(self, num_samples):
with torch.no_grad():
print("_" * 20)
for _ in range(num_samples):
hx, cx = self.model.init_states(batch_size=1, device=self.device)
letter, race, gender = self._init_random_input()
letter_t, race_t, gender_t = self._transform_input(letter, race, gender)
input = torch.cat([letter_t, race_t, gender_t], 1)
outputs = [letter]
while True:
output, hx, cx = self.model(input, hx, cx)
sample = OneHotCategorical(logits=output).sample()
index = torch.argmax(sample)
char = self.vocab.idx2char[index.item()]
outputs.append(char)
input = torch.cat([sample, race_t, gender_t], 1)
if char == '.' or len(outputs) == 50:
break
print("Start letter: {}, Race: {}, Gender: {}".format(letter, race, gender))
print("Generated sample: {}".format(''.join(map(str, outputs))))
print("_" * 20)
class RNNLayerGenerator(Generator):
def __init__(self, model_path, device="cpu", max_len=50, verbose=1):
super().__init__(model_path, device)
self.max_len = max_len
self.verbose = verbose
self.vocab = Vocabulary()
self.races = Races()
self.genders = Genders()
self.to_tensor = ToTensor()
self.name_transform = Compose([self.vocab, OneHot(self.vocab.size), ToTensor()])
self.race_transform = Compose([self.races, OneHot(self.races.size), ToTensor()])
self.gender_transform = Compose([self.genders, OneHot(self.genders.size), ToTensor()])
def _init_random_input(self, skip_ran_gen=[]):
"""Helper function that initialize random letter, race and gender"""
ran_opt = ['letter', 'race', 'gender']
letter = ''
gender = ''
race = ''
if not skip_ran_gen:
letter = np.random.choice(self.vocab.start_letters)
race = np.random.choice(self.races.available_races)
gender = np.random.choice(self.genders.available_genders)
else:
for i in ran_opt:
if i not in skip_ran_gen:
                    if i == 'letter':
                        letter = np.random.choice(self.vocab.start_letters)
                    elif i == 'race':
                        race = np.random.choice(self.races.available_races)
                    elif i == 'gender':
                        gender = np.random.choice(self.genders.available_genders)
return letter, race, gender
def _transform_input(self, letter, race, gender):
"""Helper function to transform input into tensors"""
letter_tensor = self.name_transform(letter).to(self.device)
race_tensor = self.race_transform(race).to(self.device)
gender_tensor = self.gender_transform(gender).to(self.device)
return letter_tensor, race_tensor, gender_tensor
def _expand_dims(self, *tensors):
"""Add dimension along 0-axis to tensors"""
return [torch.unsqueeze(t, 0) for t in tensors]
def sample(self, letter, race, gender):
"""Sample name from start letter, race and gender"""
with torch.no_grad():
assert letter in self.vocab.start_letters, "Invalid letter"
assert race in self.races.available_races, "Invalid race"
assert gender in self.genders.available_genders, "Invalid gender"
# Prepare inputs
letter_t, race_t, gender_t = self._transform_input(letter, race, gender)
letter_t, race_t, gender_t = self._expand_dims(letter_t, race_t, gender_t)
# Merge all input tensors
input = torch.cat([letter_t, race_t, gender_t], 2)
outputs = [letter]
# Initialize hidden states
hx, cx = self.model.init_states(batch_size=1, device=self.device)
while True:
output, hx, cx = self.model(input, hx, cx, lengths=torch.tensor([1]))
sample = OneHotCategorical(logits=output).sample()
index = torch.argmax(sample)
char = self.vocab.get_char(index.item())
if char == '.' or len(outputs) == self.max_len:
break
outputs.append(char)
input = torch.cat([sample, race_t, gender_t], 2)
name = ''.join(map(str, outputs))
return name
def generate(self, num_samples, in_race, in_gender):
"""Sample random names"""
gen_names = []
ran_gen_names = []
        if in_race != '':
            ran_gen_names.append('race')
        if in_gender != '':
            ran_gen_names.append('gender')
for _ in range(num_samples):
letter, race, gender = self._init_random_input(ran_gen_names)
race = race + in_race
gender = gender + in_gender
gen_name = self.sample(letter, race, gender)
gen_names.append([gen_name, race, gender])
return gen_names
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-mp", "--model_path")
parser.add_argument("-race")
parser.add_argument("-number")
parser.add_argument("-gender")
args = parser.parse_args()
if args.number:
number = int(args.number)
else:
number = 5
if args.race:
race = args.race
else:
race = ''
if args.gender:
gender = args.gender
else:
gender = ''
dnd = RNNLayerGenerator(model_path="./models/rnn_layer_epoch_250.pt")
tuples = dnd.generate(number, race, gender)
for name_tuple in tuples:
print (name_tuple[0])
| 36.77619 | 116 | 0.604169 | 916 | 7,723 | 4.898472 | 0.168122 | 0.035659 | 0.046356 | 0.036104 | 0.595052 | 0.561177 | 0.535547 | 0.508357 | 0.473145 | 0.435257 | 0 | 0.004352 | 0.285899 | 7,723 | 209 | 117 | 36.952153 | 0.809248 | 0.064483 | 0 | 0.473333 | 0 | 0 | 0.031468 | 0.004316 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.08 | false | 0 | 0.046667 | 0 | 0.193333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f14e67101448d388b5a97f778bba52d64293ce | 4,010 | py | Python | CookiesPool/tester.py | coffeeTeaOne/CookiesPool | f5c2d16e6eaad5edabf2feaa671c30d2424ff925 | [
"Apache-2.0"
] | null | null | null | CookiesPool/tester.py | coffeeTeaOne/CookiesPool | f5c2d16e6eaad5edabf2feaa671c30d2424ff925 | [
"Apache-2.0"
] | null | null | null | CookiesPool/tester.py | coffeeTeaOne/CookiesPool | f5c2d16e6eaad5edabf2feaa671c30d2424ff925 | [
"Apache-2.0"
] | null | null | null | import asyncio
import random
from CookiesPool.generator import Getter
from CookeisLog.logger import ProxyLogger
from ConnDB.db_redis import RedisClient
try:
    from aiohttp import ClientError
except ImportError:
    from aiohttp import ClientProxyConnectionError as ProxyConnectionError
class Tester(object):
    # Home-page authentication check
    # TEST_URL1 = 'https://m.weibo.cn/comments/hotflow?id=4315958677377740&mid=4315958677377740&max_id_type=0'
    # Pagination data-fetch tests
    # TEST_URL2 = 'https://m.weibo.cn/comments/hotflow?id=4315958677377740&mid=4315958677377740&max_id=13854002826337244&max_id_type=0'
    # TEST_URL3 = 'https://m.weibo.cn/comments/hotflow?id=4315958677377740&mid=4315958677377740&max_id=156226683202&max_id_type=0'
    # TEST_URL4 = 'https://m.weibo.cn/comments/hotflow?id=4319937411346068&mid=4319937411346068&max_id=138145067263628&max_id_type=0'
    # Sina Weibo test URL
TEST_URL = 'https://m.weibo.cn/api/config'
def __init__(self):
"""
实例化
:param total:池总数
"""
self.redis = RedisClient()
self.log = ProxyLogger().logger
def test_single_cookies(self, cookies=None):
"""
测试单个代理
:param proxy:
:return:
"""
import requests
        # Home-page verification
try:
            # allow_redirects=False keeps the request from following redirects
res = requests.get(self.TEST_URL, cookies=cookies, timeout=10, allow_redirects=False).json()
print(res)
if res['data']['login']:
return True
else:
# self.redis.del_ip(str(cookies))
                # Re-acquire cookies: generated from a random username/password pair fetched from MySQL
# self.redis.save(str(Getter().run()))
return False
except requests.exceptions.ConnectTimeout as e:
            self.log.error('Request timed out! {}'.format(str(e.args)))
return False
except Exception as e:
            print('delete cookies 2')
# self.redis.del_ip(str(cookies))
# self.redis.save(str(Getter().run()))
return False
def run(self, code):
"""
测试主函数
:return:
"""
        self.log.info('Tester started running!')
try:
count = self.redis.get_count()
except:
print(4)
            self.log.error('Database connection error!')
return False
        self.log.info('Currently ' + str(count) + ' cookies remaining')
mid_num = 5 - count
        # Top up the pool when it holds fewer than 5 cookies
if mid_num > 0:
while mid_num:
try:
result = Getter().run(code)
self.redis.save(key=list(result.keys())[0],value=list(result.values())[0])
                    self.log.info('Cookies inserted successfully!')
                    mid_num -= 1
except Exception as e:
                    self.log.error('Cookies were not stored in Redis or fetching cookies failed! ' + str(e.args))
try:
            # Fetch all cookies
all_cookies = self.redis.get_all()
            # self.log.info('IPs in the pool: ' + ','.join(test_proxies))
import json
for cookies in all_cookies:
cookies = json.loads(cookies)
flag = self.test_single_cookies(cookies=cookies)
if flag:
                    print('{} tested successfully!'.format(str(cookies)))
                    self.log.info('{} tested successfully!'.format(str(cookies)))
continue
else:
                    # Remove these cookies from the pool
self.redis.del_ip(str(cookies))
                    # Re-acquire cookies: generated from a random username/password pair fetched from MySQL
result = Getter().run(code)
self.redis.save(key=list(result.keys())[0], value=list(result.values())[0])
print(cookies)
# self.redis.del_ip(str(cookies))
return True
except Exception as e:
            self.log.error('Tester encountered an error: ' + str(e.args))
return False
if __name__ == '__main__':
Tester().run(code='sina')
| 35.803571 | 136 | 0.536908 | 411 | 4,010 | 5.119221 | 0.328467 | 0.047053 | 0.026141 | 0.030894 | 0.361692 | 0.323194 | 0.30038 | 0.257605 | 0.168251 | 0.168251 | 0 | 0.07362 | 0.349626 | 4,010 | 111 | 137 | 36.126126 | 0.733129 | 0.233915 | 0 | 0.333333 | 0 | 0 | 0.055634 | 0.010211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.304348 | 0.072464 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f3007b186b5848cb06120c188c05e489d11a9b | 4,235 | py | Python | run_imagenet_eval.py | LiliMeng/hamiltonian-revnet | fc7481cef70b579627f229ab542abd3a1761637f | [
"MIT"
] | null | null | null | run_imagenet_eval.py | LiliMeng/hamiltonian-revnet | fc7481cef70b579627f229ab542abd3a1761637f | [
"MIT"
] | null | null | null | run_imagenet_eval.py | LiliMeng/hamiltonian-revnet | fc7481cef70b579627f229ab542abd3a1761637f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Evaluates a CNN on ImageNet.
Author: Mengye Ren (mren@cs.toronto.edu)
Usage:
./run_imagenet_eval.py --id [EXPERIMENT ID] \
--logs [LOGS FOLDER] \
--results [SAVE FOLDER]
Flags:
--id: Experiment ID, optional for new experiment.
--logs: Path to logs folder, default is ./logs/public.
--results: Path to save folder, default is ./results/imagenet.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import os
import tensorflow as tf
from tqdm import tqdm
from resnet.configs.config_factory import get_config_from_json
from resnet.data_tfrecord.imagenet_data import ImagenetData
from resnet.data_tfrecord.image_processing import inputs
from resnet.models import get_model
from resnet.utils import logger, ExperimentLogger
flags = tf.flags
flags.DEFINE_string("id", None, "Experiment ID")
flags.DEFINE_string("results", "./results/imagenet", "Saving folder")
flags.DEFINE_string("logs", "./logs/public", "Logging folder")
flags.DEFINE_integer("ckpt_num", -1, "Checkpoint step number")
FLAGS = tf.flags.FLAGS
log = logger.get()
NUM_GPU = 1
NUM_VALID = 50000
BSIZE = 50
NUM_BATCH = NUM_VALID // BSIZE
def _get_config():
save_folder = os.path.join(FLAGS.results, FLAGS.id)
return get_config_from_json(os.path.join(save_folder, "conf.json"))
def _get_model(config, inp, label):
with log.verbose_level(2):
with tf.name_scope("Valid"):
with tf.variable_scope("Model"):
mvalid = get_model(
config.model_class,
config,
inp=inp,
label=label,
is_training=False,
inference_only=True)
return mvalid
def _get_dataset(config):
"""Prepares a dataset input tensors."""
num_preprocess_threads = FLAGS.num_preprocess_threads * NUM_GPU
dataset = ImagenetData(subset="validation")
images, labels = inputs(
dataset,
cycle=True,
batch_size=BSIZE,
num_preprocess_threads=num_preprocess_threads)
return images, labels
def evaluate(sess, model, num_batch=100):
"""Runs evaluation."""
num_correct = 0.0
count = 0
for bidx in tqdm(range(num_batch)):
correct = model.eval_step(sess)
num_correct += np.sum(correct)
count += correct.size
acc = (num_correct / count)
return acc
def eval_model(config, model, save_folder, logs_folder=None, ckpt_num=-1):
log.info("Config: {}".format(config.__dict__))
exp_logger = ExperimentLogger(logs_folder)
# Initializes variables.
with tf.Session() as sess:
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if ckpt_num == -1:
ckpt = tf.train.latest_checkpoint(save_folder)
elif ckpt_num >= 0:
ckpt = os.path.join(save_folder, "model.ckpt-{}".format(ckpt_num))
else:
raise ValueError("Invalid checkpoint number {}".format(ckpt_num))
if not os.path.exists(ckpt + ".meta"):
raise ValueError("Checkpoint not exists")
saver.restore(sess, ckpt)
    # Note: both passes read the validation pipeline; the shorter 100-batch pass
    # is logged under the "train" metric.
    train_acc = evaluate(sess, model, num_batch=100)
val_acc = evaluate(sess, model, num_batch=NUM_BATCH)
niter = int(ckpt.split("-")[-1])
exp_logger.log_train_acc(niter, train_acc)
exp_logger.log_valid_acc(niter, val_acc)
return val_acc
def main():
config = _get_config()
exp_id = FLAGS.id
save_folder = os.path.realpath(
os.path.abspath(os.path.join(FLAGS.results, exp_id)))
if FLAGS.logs is not None:
logs_folder = os.path.realpath(
os.path.abspath(os.path.join(FLAGS.logs, exp_id)))
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
else:
logs_folder = None
# Evaluates a model.
with tf.Graph().as_default():
np.random.seed(0)
tf.set_random_seed(1234)
# Configures dataset objects.
log.info("Building dataset")
inp, label = _get_dataset(config)
# Builds models.
log.info("Building models")
model = _get_model(config, inp, label)
eval_model(config, model, save_folder, logs_folder, ckpt_num=FLAGS.ckpt_num)
if __name__ == "__main__":
main()
| 29.006849 | 80 | 0.684298 | 580 | 4,235 | 4.77931 | 0.301724 | 0.02381 | 0.018038 | 0.016234 | 0.147908 | 0.094877 | 0.063492 | 0.063492 | 0.034632 | 0.034632 | 0 | 0.008252 | 0.198819 | 4,235 | 145 | 81 | 29.206897 | 0.808724 | 0.145927 | 0 | 0.020202 | 0 | 0 | 0.072423 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.10101 | 0 | 0.212121 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f38a04cae9e65a24a4b76feb3b9b41d32401c7 | 2,126 | py | Python | setup.py | cgohlke/psdtags | 5e15d5cb6ea894b437248a9ff08b3c222a5b5ed6 | [
"BSD-3-Clause"
] | 5 | 2022-01-15T01:29:19.000Z | 2022-02-04T01:18:40.000Z | setup.py | cgohlke/psdtags | 5e15d5cb6ea894b437248a9ff08b3c222a5b5ed6 | [
"BSD-3-Clause"
] | 2 | 2022-02-17T02:38:59.000Z | 2022-02-17T23:43:13.000Z | setup.py | cgohlke/psdtags | 5e15d5cb6ea894b437248a9ff08b3c222a5b5ed6 | [
"BSD-3-Clause"
] | null | null | null | # psdtags/setup.py
"""Psdtags package setuptools script."""
import sys
import re
from setuptools import setup
with open('psdtags/psdtags.py') as fh:
code = fh.read()
version = re.search(r"__version__ = '(.*?)'", code).groups()[0]
description = re.search(r'"""(.*)\.(?:\r\n|\r|\n)', code).groups()[0]
readme = re.search(
r'(?:\r\n|\r|\n){2}"""(.*)"""(?:\r\n|\r|\n){2}[__version__|from]',
code,
re.MULTILINE | re.DOTALL,
).groups()[0]
readme = '\n'.join(
[description, '=' * len(description)] + readme.splitlines()[1:]
)
license = re.search(
r'(# Copyright.*?(?:\r\n|\r|\n))(?:\r\n|\r|\n)+""',
code,
re.MULTILINE | re.DOTALL,
).groups()[0]
license = license.replace('# ', '').replace('#', '')
if 'sdist' in sys.argv:
with open('LICENSE', 'w') as fh:
fh.write('BSD 3-Clause License\n\n')
fh.write(license)
with open('README.rst', 'w') as fh:
fh.write(readme)
setup(
name='psdtags',
version=version,
description=description,
long_description=readme,
author='Christoph Gohlke',
author_email='cgohlke@uci.edu',
license='BSD',
url='https://www.lfd.uci.edu/~gohlke/',
project_urls={
'Bug Tracker': 'https://github.com/cgohlke/psdtags/issues',
'Source Code': 'https://github.com/cgohlke/psdtags',
# 'Documentation': 'https://',
},
packages=['psdtags'],
entry_points={'console_scripts': ['psdtags = psdtags.psdtags:main']},
python_requires='>=3.8',
install_requires=['numpy>=1.19.2'],
extras_require={
'all': [
'matplotlib>=3.3',
'tifffile>=2021.11.2',
'imagecodecs>=2021.11.20',
]
},
platforms=['any'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
)
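# Typical invocations (the sdist step regenerates LICENSE and README.rst above):
#   python setup.py sdist
#   python -m pip install .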
| 26.911392 | 73 | 0.565381 | 251 | 2,126 | 4.7251 | 0.426295 | 0.016863 | 0.015177 | 0.020236 | 0.155143 | 0.079258 | 0.072513 | 0 | 0 | 0 | 0 | 0.024082 | 0.218721 | 2,126 | 78 | 74 | 27.25641 | 0.689946 | 0.0381 | 0 | 0.095238 | 0 | 0.015873 | 0.400883 | 0.074583 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f4b64178e6d43fe8a47b6b17ce1936dd0fe7e2 | 1,542 | py | Python | raylab/utils/checkpoints.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 29 | 2020-05-05T13:25:33.000Z | 2022-01-03T14:12:29.000Z | raylab/utils/checkpoints.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 215 | 2019-11-26T12:59:39.000Z | 2022-02-01T12:38:31.000Z | raylab/utils/checkpoints.py | angelolovatto/raylab | ebaea8df1a391fb844e75df62ccf1e2e07311d88 | [
"MIT"
] | 7 | 2020-06-12T01:42:02.000Z | 2021-05-27T03:40:42.000Z | """Utilities for handling experiment results."""
import os.path as osp
import pickle
import warnings
from ray.rllib.utils import merge_dicts
from raylab.agents.registry import get_agent_cls
def get_agent_from_checkpoint(checkpoint, agent_name, env=None, **config_kwargs):
"""Instatiate and restore agent class from checkpoint."""
config = get_config_from_checkpoint(checkpoint, **config_kwargs)
agent_cls = get_agent_cls(agent_name)
agent = agent_cls(env=env, config=config)
agent.restore(checkpoint)
return agent
def get_config_from_checkpoint(checkpoint, use_eval_config=True, config_overrides=None):
"""Find and load configuration for checkpoint file."""
config = {}
# Load configuration from file
config_dir = osp.dirname(checkpoint)
config_path = osp.join(config_dir, "params.pkl")
if not osp.exists(config_path):
config_path = osp.join(config_dir, "../params.pkl")
if not osp.exists(config_path):
raise ValueError(
"Could not find params.pkl in either the checkpoint dir or "
"its parent directory."
)
with open(config_path, "rb") as file:
config = pickle.load(file)
if use_eval_config:
if "evaluation_config" not in config:
warnings.warn("Evaluation agent requested but none in config.")
eval_conf = config.get("evaluation_config", {})
config = merge_dicts(config, eval_conf)
if config_overrides:
config = merge_dicts(config, config_overrides)
return config
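# Hedged usage sketch (added; the checkpoint path and agent name below are
# placeholders, not values from this codebase):
#
#   agent = get_agent_from_checkpoint(
#       "/tmp/exp/checkpoint_100/checkpoint-100",  # placeholder path
#       "MyAgent",                                 # placeholder registry name
#       env="CartPole-v1",
#   )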
| 33.521739 | 88 | 0.70428 | 202 | 1,542 | 5.173267 | 0.346535 | 0.047847 | 0.0689 | 0.044019 | 0.176077 | 0.112919 | 0.112919 | 0.112919 | 0.112919 | 0.112919 | 0 | 0 | 0.20428 | 1,542 | 45 | 89 | 34.266667 | 0.851671 | 0.112192 | 0 | 0.0625 | 0 | 0 | 0.135994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.15625 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f7550244967de23617ebb78ca7b6a5afdab9a3 | 2,690 | py | Python | rkn/utils/csv_reader.py | OlefirenkoK/monitoring | ad5c4f1445083d04c967acaedfad025ecc6573b7 | [
"Apache-2.0"
] | null | null | null | rkn/utils/csv_reader.py | OlefirenkoK/monitoring | ad5c4f1445083d04c967acaedfad025ecc6573b7 | [
"Apache-2.0"
] | null | null | null | rkn/utils/csv_reader.py | OlefirenkoK/monitoring | ad5c4f1445083d04c967acaedfad025ecc6573b7 | [
"Apache-2.0"
] | null | null | null | import logging
import csv
import os
import re
from molly.conf import settings
from rkn.utils.constants import RKN_DUMP
IP_REGEX = re.compile(r'(\d{1,3})[.](\d{1,3})[.](\d{1,3})[.](\d{1,3})')
# DOMAIN_REGEX = re.compile(r'^(?P<subset>\*\.)?(?P<base>([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,})$')
DOMAIN_REGEX = re.compile(r'^(?P<subset>\*\.)?(?P<base>(\w+(-\w+)*\.)+\w{2,})$', re.IGNORECASE)
logger = logging.getLogger(__name__)
class IncorrectSchemaError(Exception):
"""Raise if given fields are incorrect"""
class RknAnalyzer:
VALID_SCHEMA_LENGTH = 6
@classmethod
def analyze(cls, fields):
try:
ip_list, domain = cls._parse_normal_schema(fields)
except IncorrectSchemaError:
ip_list, domain = cls._parse_incorrect_schema(fields)
return ip_list, domain
@classmethod
def _parse_normal_schema(cls, fields):
if len(fields) != cls.VALID_SCHEMA_LENGTH:
raise IncorrectSchemaError
ip_list, domain = cls._serialization_to_ip_list(fields[0]), cls._serialization_to_domain(fields[1])
return ip_list, domain
@classmethod
def _parse_incorrect_schema(cls, fields):
if len(fields) == cls.VALID_SCHEMA_LENGTH - 1:
ip_list, domain = None, cls._serialization_to_domain(fields[0])
elif len(fields) > cls.VALID_SCHEMA_LENGTH:
ip_list, domain = cls._serialization_to_ip_list(fields[0]), cls._serialization_to_domain(fields[1])
else:
ip_list, domain = None, None
return ip_list, domain
@staticmethod
def _serialization_to_ip_list(field):
ip_list = IP_REGEX.findall(field)
return ip_list
@staticmethod
def _serialization_to_domain(field):
match = DOMAIN_REGEX.match(field)
if match:
domain = match.groupdict().get('base')
else:
domain = None
return domain
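# Worked example (added) for a well-formed 6-field dump row; note that
# IP_REGEX.findall returns octet tuples rather than dotted strings:
#
#   fields = ['1.2.3.4 | 5.6.7.8', 'example.com', '', '', '', '']
#   RknAnalyzer.analyze(fields)
#   # -> ([('1', '2', '3', '4'), ('5', '6', '7', '8')], 'example.com')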
def is_blocked(mirrors, domain):
raise NotImplementedError
def set_blocked(mirror):
raise NotImplementedError
def check_blocked_mirrors(mirrors):
dump_path = os.path.join(settings.repo_path, RKN_DUMP)
with open(dump_path, encoding='ISO-8859-1') as f:
parser = csv.reader(f, delimiter=';', quotechar='|')
for fields in parser:
_, domain = RknAnalyzer.analyze(fields)
if is_blocked(mirrors, domain):
set_blocked(domain)
def main():
pass
if __name__ == '__main__':
settings = type('Settings', (object, ), {'repo_path': '/tmp/z-i_repo'})
RKN_DUMP = 'dump.csv'
import time
start = time.time()
x = main()
stop = time.time()
print('Time: {} ||| x = {}'.format(stop - start, x))
| 26.633663 | 111 | 0.637175 | 347 | 2,690 | 4.688761 | 0.305476 | 0.051629 | 0.06638 | 0.036878 | 0.326368 | 0.258758 | 0.240934 | 0.195452 | 0.195452 | 0.147511 | 0 | 0.012446 | 0.22342 | 2,690 | 100 | 112 | 26.9 | 0.766395 | 0.049071 | 0 | 0.205882 | 0 | 0.014706 | 0.068966 | 0.037226 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132353 | false | 0.014706 | 0.102941 | 0 | 0.352941 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38f93bfc7d6c06366b05f8d7c0d8ee057288269d | 3,226 | py | Python | DecisionEngine/drdecisions/datarobotapiwrapper/business_logic/datarobot_client.py | joemeree/ai_engineering | 7dba6c808f27d895afc86cf119e876384106fb3c | [
"Apache-2.0"
] | 9 | 2021-04-08T17:45:46.000Z | 2022-02-28T09:43:44.000Z | DecisionEngine/drdecisions/datarobotapiwrapper/business_logic/datarobot_client.py | joemeree/ai_engineering | 7dba6c808f27d895afc86cf119e876384106fb3c | [
"Apache-2.0"
] | 13 | 2020-08-14T15:17:00.000Z | 2022-02-27T20:12:44.000Z | DecisionEngine/drdecisions/datarobotapiwrapper/business_logic/datarobot_client.py | joemeree/ai_engineering | 7dba6c808f27d895afc86cf119e876384106fb3c | [
"Apache-2.0"
] | 3 | 2020-08-14T12:47:28.000Z | 2022-03-23T18:58:13.000Z | from http import HTTPStatus
import pandas as pd
import requests
from spyne import ArgumentError
class DataRobotClient:
def __init__(self, prediction_server):
self.prediction_url = prediction_server.server_url
self.https_session = self.get_https_session(prediction_server)
def get_https_session(self, prediction_server):
https_session = requests.Session()
https_session.headers.update(
{'datarobot-key': prediction_server.datarobot_key,
'Content-Type': 'application/json',
'x-forwarded-proto': 'https'})
https_session.auth = (
prediction_server.datarobot_username, prediction_server.api_token)
return https_session
    def get_predictions(self, features, groupby_ids):
        prediction_url = self.get_prediction_url(groupby_ids)
predictions_response = self._request_predictions(features, prediction_url)
predictions = self._parse_response(
predictions_response, features.index)
return predictions
    def get_prediction_url(self, groupby_ids):
        if len(groupby_ids) == 1:
            full_url = f'{self.prediction_url}/predApi/v1.0/deployments/{groupby_ids[0]}/predictions'
        else:
            full_url = f'{self.prediction_url}/predApi/v1.0/' \
                       f'{groupby_ids[0]}/{groupby_ids[1]}/predict'
return full_url
def _request_predictions(self, features, full_url):
predictions_response = self.https_session.post(
full_url,
data=features.to_json(orient='records'))
if predictions_response.status_code != HTTPStatus.OK:
raise ArgumentError(
faultstring=predictions_response.content.decode('utf-8'))
return predictions_response.json()
@staticmethod
def _parse_response(predictions_json, index):
unordered = {item['rowId']: item['prediction']
for item in predictions_json['data']}
# The order of predictions which are returned by the server does not
# match the order of the rows which were sent for scoring.
# The server uses 'rowId' field to indicate the original order.
ordered = [unordered[key] for key in sorted(unordered.keys())]
return pd.DataFrame({'prediction': ordered}, index=index)
def add_predictions(self, prepared_df, prediction_column):
grouped_predictions = []
if 'deployment_id' in prepared_df.columns:
groupby_columns = ['deployment_id']
else:
groupby_columns = ['project_id', 'model_id']
grouped_features = prepared_df.groupby(groupby_columns)
        for groupby_ids, features in grouped_features:
            # http://pandas.pydata.org/pandas-docs/stable/groupby.html#iterating-through-groups
            ids = [groupby_ids]
            if isinstance(groupby_ids, tuple):
                ids = list(groupby_ids)  # avoid shadowing the built-in `id`
predictions = self.get_predictions(
features,
ids)
grouped_predictions.append(predictions)
prepared_df[prediction_column] = \
pd.concat(grouped_predictions)['prediction']
return prepared_df
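# Hedged usage sketch (added; the prediction_server object and IDs are placeholders):
#
#   client = DataRobotClient(prediction_server)  # needs server_url, datarobot_key,
#                                                # datarobot_username and api_token
#   df = pd.DataFrame({'deployment_id': ['DEPLOYMENT_ID'] * 2,
#                      'feature_a': [1.0, 2.0]})
#   scored = client.add_predictions(df, prediction_column='score')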
| 38.86747 | 101 | 0.65871 | 359 | 3,226 | 5.668524 | 0.334262 | 0.054054 | 0.025061 | 0.011794 | 0.034398 | 0.034398 | 0.034398 | 0.034398 | 0.034398 | 0 | 0 | 0.003738 | 0.253565 | 3,226 | 82 | 102 | 39.341463 | 0.841362 | 0.082455 | 0 | 0.032258 | 0 | 0 | 0.104604 | 0.051117 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.064516 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38fa60670074ef83930669bbfa53365152ad18f4 | 5,900 | py | Python | weasyl/test/test_comment.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | weasyl/test/test_comment.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | weasyl/test/test_comment.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | import pytest
import unittest
from libweasyl import staff
from libweasyl.models import site
from weasyl import define as d
from weasyl import comment
from weasyl import orm
from weasyl import shout
from weasyl.error import WeasylError
from weasyl.test import db_utils
@pytest.mark.usefixtures('db')
class TestRemoveComment(object):
generation_parameters = [
("submit", db_utils.create_submission_comment, comment.remove,
db_utils.create_submission),
("journal", db_utils.create_journal_comment, comment.remove,
db_utils.create_journal),
("char", db_utils.create_character_comment, comment.remove,
db_utils.create_character),
(None, db_utils.create_shout, shout.remove, db_utils.create_shout),
]
@pytest.fixture(autouse=True, params=generation_parameters)
def setUp(self, request, monkeypatch):
# userid of owner of the journal/submission/character
self.owner = db_utils.create_user()
# userid of the comment poster
self.commenter = db_utils.create_user()
# userid of a moderator
self.moderator = db_utils.create_user()
# userid of another user who isn't a moderator
self.another_user = db_utils.create_user()
# mock out staff.MODS
monkeypatch.setattr(staff, 'MODS', {self.moderator})
(self.feature, self.create_function, self.remove_function, call) = request.param
self.target = call(self.owner) if self.feature is not None else self.owner
self.commentid = self.create_function(self.commenter, self.target)
self.args = {'commentid': self.commentid}
if self.feature is not None:
self.args['feature'] = self.feature
def test_commenter_can_remove(self):
assert self.target == self.remove_function(self.commenter, **self.args)
def test_commenter_can_not_remove_with_replies(self):
# reply to the existing comment
self.create_function(self.another_user, self.target, parentid=self.commentid)
pytest.raises(WeasylError, self.remove_function, self.commenter, **self.args)
def test_owner_can_remove(self):
assert self.target == self.remove_function(self.owner, **self.args)
def test_mod_can_remove(self):
assert self.target == self.remove_function(self.moderator, **self.args)
def test_other_user_can_not_remove(self):
pytest.raises(
WeasylError, self.remove_function, self.another_user, **self.args)
@pytest.mark.usefixtures("db")
class CheckNotificationsTestCase(unittest.TestCase):
def setUp(self):
self.owner = db_utils.create_user()
self.commenter1 = db_utils.create_user()
self.commenter2 = db_utils.create_user()
def count_notifications(self, user):
return (
d.connect().query(site.SavedNotification)
.filter(site.SavedNotification.userid == user)
.count())
def add_and_remove_comments(self, feature, **kwargs):
kwargs['content'] = 'hello'
# commenter1 posts a comment c1 on submission s
c1 = comment.insert(self.commenter1, **kwargs)
self.assertEqual(1, self.count_notifications(self.owner))
# commenter2 posts a reply to c1
c2 = comment.insert(self.commenter2, parentid=c1, **kwargs)
self.assertEqual(1, self.count_notifications(self.commenter1))
# owner posts a reply to c2
c3 = comment.insert(self.owner, parentid=c2, **kwargs)
self.assertEqual(1, self.count_notifications(self.commenter2))
# commenter1 responds to owner
comment.insert(self.commenter1, parentid=c3, **kwargs)
self.assertEqual(2, self.count_notifications(self.owner))
# owner deletes comment thread
comment.remove(self.owner, feature=feature, commentid=c1)
self.assertEqual(0, self.count_notifications(self.owner))
self.assertEqual(0, self.count_notifications(self.commenter1))
self.assertEqual(0, self.count_notifications(self.commenter2))
def test_add_and_remove_submission(self):
s = db_utils.create_submission(self.owner)
self.add_and_remove_comments('submit', submitid=s)
def test_add_and_remove_journal(self):
j = db_utils.create_journal(self.owner)
self.add_and_remove_comments('journal', journalid=j)
def test_add_and_remove_character(self):
c = db_utils.create_character(self.owner)
self.add_and_remove_comments('char', charid=c)
def test_add_and_remove_shout(self):
# commenter1 posts a shout on owner's page
c1 = shout.insert(self.commenter1, orm.Comment(userid=self.owner,
content="hello"))
self.assertEqual(1, self.count_notifications(self.owner))
# commenter2 posts a reply to c1
c2 = shout.insert(self.commenter2, orm.Comment(userid=self.owner,
content="hello", parentid=c1))
self.assertEqual(1, self.count_notifications(self.commenter1))
# owner posts a reply to c2
c3 = shout.insert(self.owner, orm.Comment(userid=self.owner,
content="hello", parentid=c2))
self.assertEqual(1, self.count_notifications(self.commenter2))
# commenter1 responds to owner
shout.insert(self.commenter1, orm.Comment(userid=self.owner,
content="hello", parentid=c3))
self.assertEqual(2, self.count_notifications(self.owner))
# owner deletes comment thread
shout.remove(self.owner, commentid=c1)
self.assertEqual(0, self.count_notifications(self.owner))
self.assertEqual(0, self.count_notifications(self.commenter1))
self.assertEqual(0, self.count_notifications(self.commenter2))
| 41.258741 | 88 | 0.674576 | 717 | 5,900 | 5.387727 | 0.167364 | 0.051256 | 0.060575 | 0.094227 | 0.517732 | 0.466218 | 0.400725 | 0.357494 | 0.327466 | 0.309345 | 0 | 0.012051 | 0.226441 | 5,900 | 142 | 89 | 41.549296 | 0.834356 | 0.087119 | 0 | 0.164948 | 0 | 0 | 0.016763 | 0 | 0 | 0 | 0 | 0 | 0.175258 | 1 | 0.134021 | false | 0 | 0.103093 | 0.010309 | 0.278351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38fac27273e9ff544f7703c73929b2e3e95e3830 | 1,408 | py | Python | 7_banco_de_dados/bd_sqlite.py | AdrianaViabL/Curso-Python-udemy | a4f230354985d0f6026a1e7b4913a8f64e205654 | [
"Apache-2.0"
] | null | null | null | 7_banco_de_dados/bd_sqlite.py | AdrianaViabL/Curso-Python-udemy | a4f230354985d0f6026a1e7b4913a8f64e205654 | [
"Apache-2.0"
] | null | null | null | 7_banco_de_dados/bd_sqlite.py | AdrianaViabL/Curso-Python-udemy | a4f230354985d0f6026a1e7b4913a8f64e205654 | [
"Apache-2.0"
] | null | null | null | import sqlite3
conexao = sqlite3.connect('basedados.db')
cursor = conexao.cursor()  # used to execute SQL commands inside the database
cursor.execute('CREATE TABLE IF NOT EXISTS cliente ('  # creating a table
               'id INTEGER PRIMARY KEY AUTOINCREMENT,'  # table index: name, data type
               'nome TEXT,'  # string-type field
               'peso REAL'  # float-type field
               ')')
# several ways to insert data into the table
# cursor.execute('INSERT INTO cliente (nome, peso) VALUES (?, ?)', ('Maria', 60))  # filling the table
# cursor.execute('INSERT INTO cliente (nome, peso) VALUES (:nome, :peso)',
# {'nome': 'Zezinho', 'peso': 80.6})
# cursor.execute('INSERT INTO cliente VALUES (:id, :nome, :peso)',
# {'id': None, 'nome': 'qualquer', 'peso': 180})
#
# conexao.commit()  # saving the data changed so far
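# another option (added example): executemany for bulk inserts
# cursor.executemany('INSERT INTO cliente (nome, peso) VALUES (?, ?)',
#                    [('Ana', 55.2), ('Bruno', 92.1)])
# conexao.commit()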
# cursor.execute('UPDATE cliente SET nome=:nome WHERE id=:id',
# {'nome': 'Joana', 'id': 7})
#
# cursor.execute('DELETE FROM cliente WHERE id=:id',
# {'id': 7})
# cursor.execute('SELECT * FROM cliente')
cursor.execute('SELECT nome, peso FROM cliente WHERE peso > 80')  # a more specific select
# cursor.execute('DELETE FROM cliente')
# conexao.commit()
for linha in cursor.fetchall():
nome, peso = linha
print(nome, peso)
cursor.close()
conexao.close()
| 37.052632 | 105 | 0.629972 | 178 | 1,408 | 4.983146 | 0.466292 | 0.131905 | 0.064262 | 0.07779 | 0.214205 | 0.11274 | 0.11274 | 0.11274 | 0.11274 | 0 | 0 | 0.012891 | 0.228693 | 1,408 | 37 | 106 | 38.054054 | 0.803867 | 0.644886 | 0 | 0 | 0 | 0 | 0.317895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38fc823d11b23777ebac367516ee14276ea5886e | 1,703 | py | Python | locations/spiders/noted/homebase.py | bealbrown/allhours | f750ee7644246a97bd16879f14115d7845f76b89 | [
"MIT"
] | null | null | null | locations/spiders/noted/homebase.py | bealbrown/allhours | f750ee7644246a97bd16879f14115d7845f76b89 | [
"MIT"
] | null | null | null | locations/spiders/noted/homebase.py | bealbrown/allhours | f750ee7644246a97bd16879f14115d7845f76b89 | [
"MIT"
] | null | null | null | import scrapy
import re
import json
from locations.hourstudy import inputoutput
class ArgosSpider(scrapy.Spider):
name = "homebase"
allowed_domains = ["www.homebase.co.uk"]
download_delay = 0.5
start_urls = (
'https://www.homebase.co.uk/stores',
)
def parse_stores(self, response):
data = re.findall(r"var com_bunnings_locations_mapLocations = [^;]+", response.body_as_unicode())
        json_data = json.loads(data[0].replace("var com_bunnings_locations_mapLocations = ", ''))
properties = {
            'addr_full': json_data[0]['Store']["Address"]["Address"] + json_data[0]['Store']["Address"]["AddressLineTwo"],
'phone': json_data[0]['Store']["Phone"],
'city': json_data[0]['Store']["Address"]["Suburb"],
'state': json_data[0]['Store']["Address"]["State"],
'postcode': json_data[0]['Store']["Address"]["Postcode"],
'country': json_data[0]['Store']["Address"]["Country"],
'ref': json_data[0]['Store']['StoreID'],
'website': response.url,
'lat': float(json_data[0]['Store']['Location']["Latitude"]),
'lon': float(json_data[0]['Store']['Location']["Longitude"]),
}
hours = response.xpath('//time[@itemprop="openingHours"]/@datatime').extract()
if hours != []:
properties['opening_hours'] = "; ".join(x for x in hours)
yield inputoutput(**properties)
def parse(self, response):
urls = response.xpath('//div[@class="store-listing__state__list alpha"]/ul/li/a/@href').extract()
for path in urls:
yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
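# Typical invocation from a Scrapy project checkout (assumed layout):
#   scrapy crawl homebase -o homebase.json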
| 42.575 | 121 | 0.601292 | 194 | 1,703 | 5.123711 | 0.463918 | 0.088531 | 0.090543 | 0.140845 | 0.251509 | 0.054326 | 0 | 0 | 0 | 0 | 0 | 0.009651 | 0.209043 | 1,703 | 39 | 122 | 43.666667 | 0.728285 | 0 | 0 | 0 | 0 | 0 | 0.296536 | 0.101585 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac01e7b5615f5bcc5d827e0f5bf6aa9d3337a73b | 1,884 | py | Python | tests/test_modeling_encoder_decoder.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 83 | 2020-01-23T10:46:42.000Z | 2021-10-31T10:54:14.000Z | tests/test_modeling_encoder_decoder.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 11 | 2021-02-19T18:44:51.000Z | 2022-01-06T01:50:23.000Z | tests/test_modeling_encoder_decoder.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 24 | 2020-01-25T05:11:08.000Z | 2022-01-21T21:13:26.000Z | # coding=utf-8
# Copyright 2018 The Hugging Face Inc. Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from transformers import is_torch_available
from .utils import require_torch, slow
if is_torch_available():
from transformers import BertModel, BertForMaskedLM, Model2Model
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
@require_torch
class EncoderDecoderModelTest(unittest.TestCase):
@slow
def test_model2model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
model = Model2Model.from_pretrained(model_name)
self.assertIsInstance(model.encoder, BertModel)
self.assertIsInstance(model.decoder, BertForMaskedLM)
self.assertEqual(model.decoder.config.is_decoder, True)
self.assertEqual(model.encoder.config.is_decoder, False)
def test_model2model_from_pretrained_not_bert(self):
logging.basicConfig(level=logging.INFO)
with self.assertRaises(ValueError):
_ = Model2Model.from_pretrained("roberta")
with self.assertRaises(ValueError):
_ = Model2Model.from_pretrained("distilbert")
with self.assertRaises(ValueError):
_ = Model2Model.from_pretrained("does-not-exist")
| 36.941176 | 77 | 0.743631 | 233 | 1,884 | 5.866953 | 0.48927 | 0.076811 | 0.109729 | 0.065838 | 0.265545 | 0.176298 | 0.120702 | 0 | 0 | 0 | 0 | 0.011032 | 0.182059 | 1,884 | 50 | 78 | 37.68 | 0.876055 | 0.305732 | 0 | 0.192308 | 0 | 0 | 0.023975 | 0 | 0 | 0 | 0 | 0 | 0.269231 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac01ecf62f629807476fd86ddb9e26b5bacf17fc | 4,493 | py | Python | test.py | YanshuHu/combinatorics-oj1 | 551286aaac63094b74a3bbb00462a1bd696608fd | [
"Apache-2.0"
] | null | null | null | test.py | YanshuHu/combinatorics-oj1 | 551286aaac63094b74a3bbb00462a1bd696608fd | [
"Apache-2.0"
] | null | null | null | test.py | YanshuHu/combinatorics-oj1 | 551286aaac63094b74a3bbb00462a1bd696608fd | [
"Apache-2.0"
] | null | null | null | lst1 = [8,3,9,6,4,7,5,2,1]
lst2 = [10,11,12,8,3,9,6,4,7,5,2,1]
lst3 = [8,9,3,6,7,4,5,2,1]
lst4 = [8,3,9,6,4,7,5,2,1]
lst = [7, 2, 6, 4, 2, 3, 2, 1]
lst5 = [14, 4, 8, 17, 16, 2, 12, 6, 18, 3, 10, 13, 9, 5, 1, 11, 19, 15, 7, 20]
lst6 = [1, 17, 11, 20, 7, 15, 13, 10, 6, 16, 12, 19, 8, 18, 5, 3, 4, 14, 9, 2]
lst7 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
k3 = [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
#k = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,2,0]
k1 = [0,0,0,0,4,0,2,0]
k2 = [0,0,0,0,0,0,0,5]
def main():
add_1(lst1, k2)
def shift_1(lst):
    # Work on a copy: the original version aliased `lst`, and the `del` below
    # silently emptied the caller's list.
    new_lst = list(lst)
    shifted_num = []
    while new_lst:
        # count how many of the remaining elements are smaller than the head
        count = 0
        compare = new_lst[0]
        for i in range(len(new_lst)):
            if new_lst[i] < compare:
                count += 1
        shifted_num.append(count)
        del new_lst[0]
    # the last entry is always 0, so drop it
    shifted_num.pop()
    print("shifted_num: ", shifted_num)
    return shifted_num
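# Worked example of shift_1 (hand-checked): shift_1([3, 1, 2]) counts 2
# smaller elements after 3, then 0 after 1, drops the trailing always-0
# entry, and returns [2, 0] -- an inversion-table-style encoding.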
def add_1(lst1, k):
shifted_num = shift_1(lst1)
limit = len(shifted_num) + 1
limit_list = []
added_list = []
k = k[::-1]
for i in range(limit):
limit_list.append(i + 1)
limit_list.pop(0)
#the list i want to work with
true_list = shifted_num[::-1]
print("here",true_list)
list_len = len(true_list)
print(k)
for i in range(list_len):
if (true_list[i]+k[i]) > limit_list[i]:
true_list[i+1] += 1
a = (true_list[i] + k[i]) - (limit_list[i])
added_list.append(a)
elif (true_list[i]+k[i]) == limit_list[i]:
if len(true_list) != 1:
true_list[i+1] += 1
else:
true_list.append(1)
added_list.append(0)
else:
added_list.append(true_list[i] + k[i])
#print("here", true_list )
#print("limit_list: ", limit_list)
#print("added_list: ", added_list[::-1])
print(added_list[::-1])
return added_list[::-1]
def helper1(lst, start):
new_lst = lst[start:]
index_of_zeros = []
index_of_carry = []
for i in range(len(new_lst)):
if new_lst[i] == 0:
index_of_zeros.append(i+ start)
elif new_lst[i] > 0:
index_of_carry.append(i)
#print(index_of_carry, start)
index_of_carry = index_of_carry[1] + start
a = []
a.append([index_of_carry])
a.append(index_of_zeros)
return a
def subtract_1(lst, k):
shifted_num = shift_1(lst)
limit_list = []
k = k[::-1]
limit = len(shifted_num) + 1
for i in range(limit):
limit_list.append(i + 1)
limit_list.pop(0)
top = limit_list
lst = shifted_num[::-1]
subtract_list = []
list_len = len(lst)
next_borrow = 1
#print(top)
for i in range(list_len):
if (lst[i] - k[i]) >= 0:
subtract_list.append(lst[i] - k[i])
while (lst[i] - k[i]) < 0:
if lst[i+next_borrow] > 0:
lst[i+next_borrow] -= 1
lst[i] = lst[i] + top[i]
if (lst[i] - k[i]) >= 0:
subtract_list.append(lst[i] - k[i])
elif lst[i+next_borrow] == 0:
a = helper1(lst, i)
index_of_carry = a[0][0]
index_of_zeros = a[1][0]
temp = lst[:index_of_carry]
lst[index_of_carry] -= 1
for j in range(len(temp)):
lst[j] += top[j]
if (lst[i] - k[i]) > 0:
subtract_list.append(lst[i] - k[i])
#print("subtract_list:", subtract_list[::-1])
return subtract_list[::-1]
def dict_order(lst):
limit = len(lst) + 1
temp = []
for i in range(limit):
temp.append(-1)
for i in range(len(lst)):
bigger = False
current_carry = lst[i] + 1
if i == 0:
temp[0] = current_carry
for j in temp[:i]:
if j <= (current_carry):
current_carry += 1
temp[i] = current_carry
j = -10
elif current_carry not in temp[:i]:
temp[i] = current_carry
while (current_carry) in temp[:i]:
current_carry += 1
temp[i] = current_carry
left = []
no = []
for i in range(len(temp)):
left.append(i+1)
for i in left:
if i not in temp:
no.append(i)
for i in range(len(temp)):
if temp[i] == -1:
temp[i] = no[0]
return temp
if __name__ == "__main__":
main()
| 29.559211 | 78 | 0.496995 | 748 | 4,493 | 2.823529 | 0.117647 | 0.039773 | 0.052557 | 0.0625 | 0.34233 | 0.26089 | 0.232955 | 0.183239 | 0.147727 | 0.134943 | 0 | 0.092437 | 0.337859 | 4,493 | 151 | 79 | 29.754967 | 0.617479 | 0.056087 | 0 | 0.244444 | 0 | 0 | 0.005906 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0 | 0 | 0.081481 | 0.02963 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac039830abf5089ba22a9b04adc253017a99c08f | 1,332 | py | Python | toontown/speedchat/TTSCResistanceMenu.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 8 | 2017-10-10T11:41:01.000Z | 2021-02-23T12:55:47.000Z | toontown/speedchat/TTSCResistanceMenu.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/speedchat/TTSCResistanceMenu.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from direct.showbase import PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from toontown.chat import ResistanceChat
from TTSCResistanceTerminal import TTSCResistanceTerminal
class TTSCResistanceMenu(SCMenu):
def __init__(self):
SCMenu.__init__(self)
self.accept('resistanceMessagesChanged', self.__resistanceMessagesChanged)
self.__resistanceMessagesChanged()
submenus = []
def destroy(self):
SCMenu.destroy(self)
def clearMenu(self):
SCMenu.clearMenu(self)
def __resistanceMessagesChanged(self):
self.clearMenu()
        try:
            lt = base.localAvatar
        except (NameError, AttributeError):
            # `base` may not exist or may not have a localAvatar yet
            return
phrases = lt.resistanceMessages
for menuIndex in ResistanceChat.resistanceMenu:
menu = SCMenu()
for itemIndex in ResistanceChat.getItems(menuIndex):
textId = ResistanceChat.encodeId(menuIndex, itemIndex)
charges = lt.getResistanceMessageCharges(textId)
if charges > 0:
menu.append(TTSCResistanceTerminal(textId, charges))
textId = ResistanceChat.encodeId(menuIndex, 0)
menuName = ResistanceChat.getMenuName(textId)
self.append(SCMenuHolder(menuName, menu))
| 33.3 | 82 | 0.671171 | 113 | 1,332 | 7.787611 | 0.415929 | 0.034091 | 0.036364 | 0.084091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002026 | 0.259009 | 1,332 | 39 | 83 | 34.153846 | 0.889564 | 0 | 0 | 0 | 0 | 0 | 0.018769 | 0.018769 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.15625 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac058504d526451f2a71f4aed47f17253ac7d617 | 10,381 | py | Python | bin/tweak-json.py | jozefizso/boxcutter-windows | 2df7aa1742d76c4539cd043605ad3653ad1f38b5 | [
"Apache-2.0"
] | null | null | null | bin/tweak-json.py | jozefizso/boxcutter-windows | 2df7aa1742d76c4539cd043605ad3653ad1f38b5 | [
"Apache-2.0"
] | null | null | null | bin/tweak-json.py | jozefizso/boxcutter-windows | 2df7aa1742d76c4539cd043605ad3653ad1f38b5 | [
"Apache-2.0"
] | null | null | null | import json
import os
import re
import shutil
import sys
import time
winrm = True
ssh = False
keep_input_artifact = True
vmx_data_post = False
compression_level = 0
chocolatey = False
add_debugging = True
set_packer_debug = False
add_debug_log = True
add_unzip_vbs = False
add_shell_command = False
add_ssh_uninstaller = False
tools_upload_flavor = False
default_cm = 'nocm'
attach_provisions_iso = False
attach_windows_iso = True
attach_vboxguestadditions_iso = True
attach_shared_folder = False
if add_ssh_uninstaller:
add_debugging = False
add_debug_log = False
vmx_data_post = False
def touch(filename, mtime):
with open(filename, 'a+'):
pass
os.utime(filename, (mtime, mtime))
return 0
def touch_by_file(filename, touch_filename):
touch(filename, os.path.getmtime(touch_filename))
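# Usage sketch for the helpers above (hypothetical paths): give a copy the
# same mtime as its source, as done for the .json/.json.orig pair below.
#   shutil.copyfile('a.json', 'a.json.orig')
#   touch_by_file('a.json.orig', 'a.json')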
if len(sys.argv) < 2:
sys.exit('Usage: ' + sys.argv[0] + ' filename.json')
if len(sys.argv) >= 3:
winrm = True
vmx_data_post = True
json_file_path = sys.argv[1]
orig = json_file_path + '.orig'
print('Updating ' + json_file_path)
if not os.path.isfile(orig):
mtime = os.path.getmtime(json_file_path)
shutil.copyfile(json_file_path, orig)
touch(orig, mtime)
json_file = open(orig, 'rb')
json_data = json.load(json_file)
debug_cmd = 'floppy/zzz-debug-log.cmd'
save_logs_cmd = 'script/save-logs.cmd'
unzip_vbs = 'floppy/unzip.vbs'
wget_exe = '.windows/wget.exe'
download_cmd = 'floppy/_download.cmd'
packer_config_cmd = 'floppy/_packer_config.cmd'
packer_config_local_cmd = 'floppy/_packer_config_local.cmd'
shutdown_seconds = '10'
timeout_seconds = '10000'
if winrm:
winrm_suffix = '_winrm'
else:
winrm_suffix = ''
shutdown_comment = 'Packer Shutdown'
shutdown_command = 'shutdown /s /t %s /f /d p:4:1 /c "%s"' % (shutdown_seconds, shutdown_comment)
cwd = os.getcwd()
provisions_iso = cwd + '/.windows/provisions/provisions.iso'
windows_iso = 'C:/Program Files (x86)/VMware/VMware Workstation/windows.iso'
vboxguestadditions_iso = "C:/Progra~1/Oracle/VirtualBox/VBoxGuestAdditions.iso"
for i, a in enumerate(json_data['builders']):
    if re.search(r'^(vmware|virtualbox)-', a['type']):
del a['keep_failed_build']
#a['output_directory'] = 'output-%s_%s%s' % (a['type'], a['vm_name'], winrm_suffix)
#a['ssh_wait_timeout'] = timeout_seconds + 's'
#a['shutdown_timeout'] = timeout_seconds + 's'
#a['shutdown_command'] = shutdown_command
if add_ssh_uninstaller:
del a['shutdown_timeout']
#del a['shutdown_command']
#a['shutdown_command'] = 'choice /C Y /N /T %s /D Y /M "Waiting %s seconds"' % (timeout_seconds, timeout_seconds)
#a['http_directory'] = 'floppy'
floppy_files = dict.fromkeys(a['floppy_files'], True)
if add_debug_log:
if os.path.exists(debug_cmd):
floppy_files[debug_cmd] = True
if os.path.exists(download_cmd):
floppy_files[download_cmd] = True
if os.path.exists(packer_config_cmd):
floppy_files[packer_config_cmd] = True
if os.path.exists(packer_config_local_cmd):
floppy_files[packer_config_local_cmd] = True
if os.path.exists(wget_exe):
floppy_files[wget_exe] = True
if add_unzip_vbs:
if os.path.exists(unzip_vbs):
floppy_files[unzip_vbs] = True
if not ssh:
if 'floppy/cygwin.bat' in floppy_files:
del floppy_files['floppy/cygwin.bat']
if 'floppy/openssh.bat' in floppy_files:
del floppy_files['floppy/openssh.bat']
a['floppy_files'] = sorted(floppy_files)
    if re.search(r'^vmware-', a['type']):
# to turn off to see if Cygwin is failing because of this
if winrm or add_ssh_uninstaller:
# buggy with winrm
a['tools_upload_flavor'] = ''
# a['disk_type_id'] = "0"
# a['skip_compaction'] = compression_level == 0
if winrm:
a['communicator'] = 'winrm'
a['winrm_username'] = 'vagrant'
a['winrm_password'] = 'vagrant'
a['winrm_timeout'] = timeout_seconds + 's'
if not tools_upload_flavor:
a['tools_upload_flavor'] = ''
if not 'vmx_data' in a:
a['vmx_data'] = {}
if attach_shared_folder:
a['vmx_data']['sharedFolder.maxNum'] = '1'
a['vmx_data']['sharedFolder0.enabled'] = 'TRUE'
a['vmx_data']['sharedFolder0.expiration'] = 'never'
a['vmx_data']['sharedFolder0.guestName'] = 'C'
a['vmx_data']['sharedFolder0.hostPath'] = 'C:\\'
a['vmx_data']['sharedFolder0.present'] = 'TRUE'
a['vmx_data']['sharedFolder0.readAccess'] = 'TRUE'
a['vmx_data']['sharedFolder0.writeAccess'] = 'TRUE'
a['vmx_data']['hgfs.maprootshare'] = 'TRUE'
a['vmx_data']['sound.autodetect'] = 'TRUE'
a['vmx_data']['sound.filename'] = '-1'
#a['vmx_data']['sound.pciSlotNumber'] = '32'
a['vmx_data']['sound.present'] = 'TRUE'
a['vmx_data']['sound.startconnected'] = 'TRUE'
a['vmx_data']['sound.virtualdev'] = 'hdaudio'
# a['vmx_data']['virtualhw.version'] = '10'
if attach_provisions_iso:
if os.path.exists(provisions_iso):
a['vmx_data']['ide1:1.deviceType'] = 'cdrom-image'
a['vmx_data']['ide1:1.fileName'] = provisions_iso
a['vmx_data']['ide1:1.present'] = 'TRUE'
a['vmx_data']['ide1:1.startConnected'] = 'TRUE'
if attach_windows_iso:
if os.path.exists(windows_iso):
a['vmx_data']['scsi0:1.present'] = 'TRUE'
a['vmx_data']['scsi0:1.deviceType'] = 'cdrom-image'
a['vmx_data']['scsi0:1.fileName'] = '{{ user `vmware_windows_iso` }}'
if vmx_data_post:
if not 'vmx_data_post' in a:
a['vmx_data_post'] = {}
a['vmx_data_post']['ethernet0.virtualDev'] = 'vmxnet3'
a['vmx_data_post']['RemoteDisplay.vnc.enabled'] = 'false'
a['vmx_data_post']['RemoteDisplay.vnc.port'] = '5900'
a['vmx_data_post']['scsi0.virtualDev'] = 'lsilogic'
    if re.search(r'^virtualbox-', a['type']):
if not 'vboxmanage' in a:
a['vboxmanage'] = []
if attach_provisions_iso:
if os.path.exists(provisions_iso):
a['vboxmanage'].append([
"storageattach",
"{{.Name}}",
"--storagectl",
"IDE Controller",
"--port",
"1",
"--device",
"1",
"--type",
"dvddrive",
"--medium",
provisions_iso
])
if attach_vboxguestadditions_iso:
if os.path.exists(vboxguestadditions_iso):
# a['guest_additions_url'] = vboxguestadditions_iso
a['vboxmanage'].append([
"storageattach",
"{{.Name}}",
"--storagectl",
"SATA",
"--port",
"1",
"--device",
"0",
"--type",
"dvddrive",
"--medium",
vboxguestadditions_iso
])
# builders: modify iso properties
a['iso_checksum'] = '{{ user `iso_checksum` }}'
a['iso_checksum_type'] = '{{ user `iso_checksum_type` }}'
a['iso_url'] = '{{ user `iso_url` }}/{{ user `iso_name` }}'
for i in json_data['post-processors']:
    if i['type'] == 'vagrant':
        i['keep_input_artifact'] = keep_input_artifact
        i['compression_level'] = compression_level
        #if winrm:
        #    i['output'] = 'winrm-' + i['output']
        #if compression_level == 0:
        #    i['only'] = 'force-vagrant'
        #else:
        # drop any 'only' restriction; pop() avoids a KeyError when it's absent
        i.pop('only', None)
packer_debug_env = 'PACKER_DEBUG=1'
if add_shell_command:
env_vars = [
"CM={{user `cm`}}",
"CM_VERSION={{user `cm_version`}}",
]
if set_packer_debug:
env_vars.append(packer_debug_env)
debug_step = {
"environment_vars": env_vars,
"script": debug_cmd,
"type": "shell",
}
json_data['provisioners'].insert(0, debug_step)
for i, a in enumerate(json_data['provisioners']):
if a['type'] != 'windows-shell':
continue
if winrm:
# use winrm defaults
if 'remote_path' in a:
del a['remote_path']
if 'execute_command' in a:
del a['execute_command']
#a['guest_os_type'] = 'windows'
if 'inline' in a:
if winrm or add_ssh_uninstaller:
if re.search('^rm ', a['inline'][0]):
del json_data['provisioners'][i]
continue
#if winrm:
#a['binary'] = 'true'
if 'script' in a:
continue
if not 'scripts' in a:
continue
#if 'execute_command' in a:
# a['execute_command'] = re.sub(' /c ', ' /q /c ', a['execute_command'])
if set_packer_debug:
if 'environment_vars' in a:
packer_debug = False
for j in a['environment_vars']:
if j == packer_debug_env:
packer_debug = True
break
if not packer_debug:
a['environment_vars'].append(packer_debug_env)
scripts = []
if add_debugging:
if os.path.exists('script/dump-logs.cmd'):
scripts.append('script/dump-logs.cmd')
# don't need any more:
#scripts.append('script/01-install-handle.cmd')
for j in a['scripts']:
if j == 'script/clean.bat':
if add_debugging:
scripts.append('script/save-logs.cmd')
scripts.append('script/save-temp-dirs.cmd')
if chocolatey:
scripts.append('script/nuget.cmd')
#scripts.append('script/reboot.cmd')
scripts.append('script/chocolatey.cmd')
if compression_level == 0:
if j == 'script/clean.bat':
continue
if j == "script/ultradefrag.bat":
continue
if j == "script/uninstall-7zip.bat":
continue
if j == "script/sdelete.bat":
continue
#if not add_ssh_uninstaller:
scripts.append(j)
if add_debug_log:
scripts.append(debug_cmd)
if add_ssh_uninstaller:
if re.search('cygwin', json_file_path):
scripts.append('script/uninstall-cygwin.cmd')
else:
scripts.append('script/uninstall-openssh.cmd')
a['scripts'] = scripts
if 'variables' in json_data:
json_data['variables']['cm'] = default_cm
json_data['variables']['shutdown_command'] = shutdown_command
json_data['variables']['vmware_windows_iso'] = windows_iso
#json_data['variables']['iso_checksum_type'] = 'sha1'
#json_data['variables']['iso_name'] = json_data['variables']['iso_url']
#json_data['variables']['iso_url'] = 'iso'
new_data = json_data
mtime = os.path.getmtime(json_file_path)
new_data = json.dumps(new_data, sort_keys=True, indent=2, separators=(',', ': '))
json_file.close()
json_file = open(json_file_path, 'w')
json_file.write(new_data)
json_file.close()
touch(json_file_path, mtime)
| 28.286104 | 119 | 0.634332 | 1,376 | 10,381 | 4.549419 | 0.188227 | 0.039137 | 0.037061 | 0.024601 | 0.242492 | 0.130831 | 0.084824 | 0.0377 | 0.015655 | 0.015655 | 0 | 0.008291 | 0.209903 | 10,381 | 366 | 120 | 28.363388 | 0.754938 | 0.121761 | 0 | 0.238636 | 0 | 0.003788 | 0.28985 | 0.064729 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007576 | false | 0.007576 | 0.022727 | 0 | 0.034091 | 0.003788 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac06e360125ea87afe87a22384883409be8b9b4a | 414 | py | Python | Week-1/Editorials/Contest_2/problem-1/solution.py | tanayduggad0299/CP-Buddy-Series | 29b85801f216e10e1817ce0769dd2d9d98856163 | [
"MIT"
] | 58 | 2020-08-02T16:38:43.000Z | 2021-04-11T15:17:07.000Z | Week-1/Editorials/Contest_2/problem-1/solution.py | tanayduggad0299/CP-Buddy-Series | 29b85801f216e10e1817ce0769dd2d9d98856163 | [
"MIT"
] | 29 | 2020-08-03T08:48:05.000Z | 2020-10-05T08:25:09.000Z | Week-1/Editorials/Contest_2/problem-1/solution.py | tanayduggad0299/CP-Buddy-Series | 29b85801f216e10e1817ce0769dd2d9d98856163 | [
"MIT"
] | 44 | 2020-08-02T16:51:08.000Z | 2021-03-04T13:51:01.000Z | str1 = input()
for z1 in range(3):
record = []
for i in range((len(str1))-1):
if str1[i] == str1[i+1] and (i-1) not in record:
record.append(i)
x = 0
#print(record)
for j in range(len(record)):
record[j] -= x
x += 2
for w in record:
str1 = str1[0: w:] + str1[w+1 + 1::]
if(len(str1)) > 0:
print(str1)
else:
print("Empty String")
| 18 | 56 | 0.483092 | 67 | 414 | 2.985075 | 0.358209 | 0.105 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073529 | 0.342995 | 414 | 22 | 57 | 18.818182 | 0.661765 | 0.031401 | 0 | 0 | 0 | 0 | 0.03 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac099066ce5ca31f6943b66899eedcd21ff0e142 | 1,244 | py | Python | Example Auth.py | Dropout1337/HWID-Authentication-API | f13c43bd2eba67b54c6902506c37cfc838400690 | [
"MIT"
] | 5 | 2020-10-26T08:37:19.000Z | 2021-07-19T20:05:52.000Z | Example Auth.py | bryonpokemon/HWID-Authentication-API | f13c43bd2eba67b54c6902506c37cfc838400690 | [
"MIT"
] | null | null | null | Example Auth.py | bryonpokemon/HWID-Authentication-API | f13c43bd2eba67b54c6902506c37cfc838400690 | [
"MIT"
] | 2 | 2021-02-11T16:13:04.000Z | 2021-02-23T05:38:41.000Z | import requests
import subprocess
import os
from time import sleep
hwid = subprocess.check_output('wmic csproduct get uuid').decode().split('\n')[1].strip()
class Authentication:
    @staticmethod
    def Check(hwid):
check = requests.get(f'http://127.0.0.1:5000/api/v1/hwid?type=check&hwid={hwid}').text
if 'success' in check:
print(f'[\033[32m+\033[39m] Success Welcome Back, {os.getenv("UserName")}!')
sleep(2)
Program()
elif 'invalid_hwid' in check:
print(f'[\033[91m-\033[39m] Invalid HWID\033[91m:\033[39m {hwid}')
sleep(5)
Main()
def Program():
os.system('cls')
input('Hello World')
def Main():
os.system('cls & title [Authentication] By Dropout')
print(f'''
\033[97m╔═╗╦ ╦╔╦╗╦ ╦╔═╗╔╗╔╔╦╗╦╔═╗╔═╗╔╦╗╦╔═╗╔╗╔\033[39m
\033[37m╠═╣║ ║ ║ ╠═╣║╣ ║║║ ║ ║║ ╠═╣ ║ ║║ ║║║║\033[39m
\033[91m╩ ╩╚═╝ ╩ ╩ ╩╚═╝╝╚╝ ╩ ╩╚═╝╩ ╩ ╩ ╩╚═╝╝╚╝\033[39m
[\033[91mDISCORD\033[39m] 766589097161654272
[\033[91mWEBSITE\033[39m] google.com
''')
Authentication.Check(hwid)
if __name__ == "__main__":
Main() | 32.736842 | 95 | 0.487138 | 159 | 1,244 | 4.327044 | 0.496855 | 0.069767 | 0.039244 | 0.037791 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12978 | 0.30627 | 1,244 | 38 | 96 | 32.736842 | 0.560834 | 0 | 0 | 0.064516 | 0 | 0.064516 | 0.539735 | 0.107616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.129032 | 0 | 0.258065 | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac0a8442d7e7f8d168fa257f509315a2e9b34247 | 6,034 | py | Python | solver/testsolver.py | jiaming-wang/N_SR | 75eb04647ba0e778476fb0714fa20c8226e968b2 | [
"Apache-2.0"
] | 9 | 2020-05-13T14:02:37.000Z | 2021-12-06T06:54:47.000Z | solver/testsolver.py | jiaming-wang/N_SR | 75eb04647ba0e778476fb0714fa20c8226e968b2 | [
"Apache-2.0"
] | 1 | 2021-05-09T13:13:23.000Z | 2021-05-11T12:58:32.000Z | solver/testsolver.py | jiaming-wang/N_SR | 75eb04647ba0e778476fb0714fa20c8226e968b2 | [
"Apache-2.0"
] | 6 | 2020-11-13T08:15:41.000Z | 2021-09-15T17:56:58.000Z | #!/usr/bin/env python
# coding=utf-8
'''
@Author: wjm
@Date: 2020-02-17 22:19:38
LastEditTime: 2021-08-20 23:44:53
@Description: file content
'''
from solver.basesolver import BaseSolver
import os, torch, time, cv2, importlib
import torch.backends.cudnn as cudnn
from data.data import *
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
class Testsolver(BaseSolver):
def __init__(self, cfg):
super(Testsolver, self).__init__(cfg)
net_name = self.cfg['algorithm'].lower()
lib = importlib.import_module('model.' + net_name)
net = lib.Net
self.model = net(
args = self.cfg
)
self.fmap_block = list()
self.input_block = list()
## define hook
def forward_hook(self, module, data_input, data_output):
self.fmap_block.append(data_output)
self.input_block.append(data_input)
def check(self):
self.cuda = self.cfg['gpu_mode']
torch.manual_seed(self.cfg['seed'])
if self.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
if self.cuda:
torch.cuda.manual_seed(self.cfg['seed'])
cudnn.benchmark = True
gups_list = self.cfg['gpus']
self.gpu_ids = []
for str_id in gups_list:
gid = int(str_id)
if gid >=0:
self.gpu_ids.append(gid)
torch.cuda.set_device(self.gpu_ids[0])
self.model_path = os.path.join(self.cfg['checkpoint'], self.cfg['test']['model'])
self.model = self.model.cuda(self.gpu_ids[0])
self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)
self.model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage)['net'])
def test(self):
self.model.eval()
avg_time= []
for batch in self.data_loader:
input, target, bicubic, name = Variable(batch[0]), Variable(batch[1]), Variable(batch[2]), batch[3]
if self.cuda:
input = input.cuda(self.gpu_ids[0])
target = target.cuda(self.gpu_ids[0])
bicubic = bicubic.cuda(self.gpu_ids[0])
if self.cfg['algorithm'] == 'VDSR' or self.cfg['algorithm'] == 'SRCNN':
input = bicubic
## hook
# if self.cuda:
# hadle_hook = self.model.module.res_b1.register_forward_hook(self.forward_hook)
# else:
# hadle_hook = self.model.res_b1.register_forward_hook(self.forward_hook)
t0 = time.time()
with torch.no_grad():
prediction = self.model(input)
t1 = time.time()
            if self.cfg['data']['normalize']:
                # map outputs from [-1, 1] back to [0, 1] before saving
                target = (target + 1) / 2
                prediction = (prediction + 1) / 2
                bicubic = (bicubic + 1) / 2
## remove hook, save feature maps
# hadle_hook.remove()
# self.fmap_block = self.fmap_block[0].squeeze().detach().cpu()
# self.fmap_block = (self.fmap_block*255).numpy().astype(np.uint8)
# for i in range(0, self.fmap_block[0].shape[1]-1):
# plt.imsave('./1/{}.png'.format(str(i)), self.fmap_block[i,:,:], cmap = plt.cm.jet)
# self.fmap_block = list()
# self.input_block = list()
print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
avg_time.append(t1 - t0)
self.save_img(bicubic.cpu().data, name[0][0:-4]+'_bic.png')
self.save_img(target.cpu().data, name[0][0:-4]+'_gt.png')
self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')
print("===> AVG Timer: %.4f sec." % (np.mean(avg_time)))
def eval(self):
self.model.eval()
avg_time= []
for batch in self.data_loader:
input, bicubic, name = Variable(batch[0]), Variable(batch[1]), batch[2]
if self.cuda:
input = input.cuda(self.gpu_ids[0])
bicubic = bicubic.cuda(self.gpu_ids[0])
t0 = time.time()
with torch.no_grad():
prediction = self.model(input)
t1 = time.time()
print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
avg_time.append(t1 - t0)
self.save_img(bicubic.cpu().data, name[0][0:-4]+'_Bic.png')
self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')
print("===> AVG Timer: %.4f sec." % (np.mean(avg_time)))
def save_img(self, img, img_name):
        save_img = img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)
        # save img
        save_dir = os.path.join('results/', self.cfg['test']['type'])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_fn = save_dir + '/' + img_name
        # cast to uint8 first; cv2.imwrite does not handle float64 reliably
        cv2.imwrite(save_fn, cv2.cvtColor((save_img * 255).astype(np.uint8), cv2.COLOR_BGR2RGB),
                    [cv2.IMWRITE_PNG_COMPRESSION, 0])
def run(self):
self.check()
if self.cfg['test']['type'] == 'test':
self.dataset = get_test_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])
self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,
num_workers=self.cfg['threads'])
self.test()
elif self.cfg['test']['type'] == 'eval':
self.dataset = get_eval_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])
self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,
num_workers=self.cfg['threads'])
self.eval()
else:
raise ValueError('Mode error!') | 40.496644 | 117 | 0.552536 | 777 | 6,034 | 4.148005 | 0.2574 | 0.049953 | 0.031027 | 0.023891 | 0.420416 | 0.406143 | 0.373255 | 0.373255 | 0.303134 | 0.303134 | 0 | 0.026403 | 0.296984 | 6,034 | 149 | 118 | 40.496644 | 0.73338 | 0.116838 | 0 | 0.314286 | 0 | 0 | 0.082783 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.085714 | 0 | 0.161905 | 0.038095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac0aa66496d742424259aeb2af9334da135b1f85 | 22,223 | py | Python | t5x/infer.py | ultrons/t5x | e684a307fe62e4a088f457cc592c299cfb070794 | [
"Apache-2.0"
] | null | null | null | t5x/infer.py | ultrons/t5x | e684a307fe62e4a088f457cc592c299cfb070794 | [
"Apache-2.0"
] | null | null | null | t5x/infer.py | ultrons/t5x | e684a307fe62e4a088f457cc592c299cfb070794 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=line-too-long
# pyformat: disable
r"""This script runs inference on a T5X-compatible model.
"""
# pyformat: enable
# pylint:enable=line-too-long
import concurrent.futures
import functools
import hashlib
import json
import os
import re
import shutil
import time
from typing import Any, Callable, Iterator, List, Mapping, Optional, Sequence, Tuple
from absl import logging
import jax
import jax.numpy as jnp
import seqio
from t5x import models
from t5x import multihost_utils
from t5x import partitioning
from t5x import utils
import tensorflow as tf
from tensorflow.io import gfile
# Automatically search for gin files relative to the T5X package.
_DEFAULT_GIN_SEARCH_PATHS = [
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
]
AUTOTUNE = tf.data.experimental.AUTOTUNE
class FailFastThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
"""Wrapper for ThreadPoolExecutor that crashes main thread on exceptions.
NOTE: this class should be used only from the main thread.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._incomplete_futures: List[concurrent.futures.Future] = []
def check_for_exceptions(self, wait: bool = False):
"""Raises any exceptions from complete futures on the main thread."""
still_incomplete_futures = []
for future in self._incomplete_futures:
try:
exception = future.exception(timeout=0 if wait else None)
except concurrent.futures.TimeoutError:
still_incomplete_futures.append(future)
if exception is not None:
raise exception
self._incomplete_futures = still_incomplete_futures
def submit(self, *args, **kwargs) -> concurrent.futures.Future:
"""Submit function to threadpool, capturing the returned future."""
future = super().submit(*args, **kwargs)
self._incomplete_futures.append(future)
self.check_for_exceptions(wait=False)
return future
def shutdown(self, *args, wait: bool = False, **kwargs):
self.check_for_exceptions(wait=wait)
super().shutdown(*args, **kwargs)
def create_task_from_tfexample_file(
paths: Sequence[str], file_type: str, inputs_key: str,
targets_key: Optional[str], features: Mapping[str, seqio.Feature]) -> str:
"""Registers ad-hoc Task for file-based dataset of TFExamples.
Args:
paths: Input file paths; all files should have type `file_type` and contain
binary-serialized TFExample protos.
file_type: Input file type; e.g., 'tfrecord', 'recordio', 'sstable'. For
keyed formats like 'sstable', we ignore the keys and use only the values.
inputs_key: Name of TFExample feature containing the input text for T5X. The
value of this feature should be a UTF8-encoded string.
targets_key: Optional name of a TFExample feature containing the target text
(relevant only in scoring mode). The value of this feature should be a
UTF8-encoded string.
features: Should have entries for keys 'inputs' and (if targets_key is not
None) 'targets', mapping to `seqio.Feature` objects that specify
attributes like vocabulary, add_eos, etc. These attributes are used for
preprocessing and featurizing the input text.
Returns:
Name of the newly-registered Task. This Task has a split named 'infer' that
contains the preprocessed and featurized input dataset.
"""
# tf.io.gfile.glob supports lists, in contrast to gfile.glob.
files = tf.io.gfile.glob(paths)
if files:
logging.info('Using tfexample files %s', files)
else:
# Fail early if there's something wrong with the input file pattern.
raise ValueError('Missing or invalid paths: %s' % paths)
reader = {
'tfrecord':
tf.data.TFRecordDataset,
}[file_type]
# TODO(adarob): Remove after b/180658446 is resolved.
def reserialize_tfexample(x):
def _reserialize(s):
ex = tf.train.Example()
ex.ParseFromString(s)
return ex.SerializeToString()
return tf.compat.v1.py_func(
_reserialize, inp=[x], Tout=tf.string, stateful=False)
def reserialize_reader(filenames):
return reader(filenames).map(
reserialize_tfexample, num_parallel_calls=AUTOTUNE)
feature_description = {inputs_key: tf.io.FixedLenFeature([], tf.string)}
if targets_key:
feature_description[targets_key] = tf.io.FixedLenFeature([], tf.string)
# Create a unique, deterministic task name.
task_id = hashlib.md5(
':'.join(list(paths) +
[inputs_key, targets_key or '']).encode()).hexdigest()[:10]
task = seqio.TaskRegistry.add(
name=f'infer_{task_id}',
source=seqio.TFExampleDataSource({'infer': paths},
feature_description=feature_description,
reader_cls=reserialize_reader),
preprocessors=[
functools.partial(
seqio.preprocessors.rekey,
key_map={
'inputs': inputs_key,
'targets': targets_key
}), seqio.preprocessors.tokenize_and_append_eos
],
output_features=features)
return task.name
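# Illustrative call (hypothetical paths; vocabulary construction elided):
#   task_name = create_task_from_tfexample_file(
#       paths=['/tmp/queries.tfrecord'],
#       file_type='tfrecord',
#       inputs_key='inputs',
#       targets_key=None,
#       features={'inputs': seqio.Feature(vocabulary=vocab)})
#   ds = seqio.get_mixture_or_task(task_name).get_dataset(
#       sequence_length={'inputs': 512}, split='infer', shuffle=False)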
def write_inferences_to_file(
path: str,
inferences: Sequence[Any],
task_ds: tf.data.Dataset,
mode: str,
vocabulary: Optional[seqio.Vocabulary] = None) -> None:
"""Write model predictions, along with pretokenized inputs, to JSONL file.
Args:
path: File path to write to.
inferences: Model inferences, output of either score_batch or predict_batch.
task_ds: Original task dataset. Features from task with suffix
`_pretokenized` are added to the outputs.
mode: Prediction mode, either 'predict', 'score' or 'predict_with_aux'.
vocabulary: Task output vocabulary. Only used in `predict` mode in order to
decode predicted outputs into string.
"""
if mode == 'predict' and not vocabulary:
raise ValueError('`vocabulary` parameter required in `predict` mode')
def _json_compat(value):
if isinstance(value, bytes):
return value.decode('utf-8')
elif isinstance(value, (jnp.bfloat16, jnp.floating)):
return float(value)
elif isinstance(value, jnp.integer):
return float(value)
elif isinstance(value, jnp.ndarray):
return value.tolist()
else:
return value
with gfile.GFile(path, 'w') as f:
for inp, output in zip(task_ds, inferences):
json_dict = {}
pretokenized = {
k: v for k, v in inp.items() if k.endswith('_pretokenized')
}
if pretokenized:
json_dict['input'] = {
k: _json_compat(v.numpy()) for k, v in pretokenized.items()
}
if mode == 'predict':
json_dict['prediction'] = _json_compat(
vocabulary.decode_tf(tf.constant(output)).numpy()) # pytype: disable=attribute-error
elif mode == 'score':
json_dict['score'] = _json_compat(output)
elif mode == 'predict_with_aux':
pred_text, pred_aux = output
json_dict['prediction'] = _json_compat(
vocabulary.decode_tf(tf.constant(pred_text)).numpy()) # pytype: disable=attribute-error
json_dict['aux'] = jax.tree_map(_json_compat, pred_aux)
else:
raise ValueError(f'Invalid mode: {mode}')
json_str = json.dumps(json_dict, cls=seqio.TensorAndNumpyEncoder)
f.write(json_str + '\n')
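# Shape of one output line per mode (illustrative values):
#   predict:          {"input": {...}, "prediction": "<decoded text>"}
#   score:            {"input": {...}, "score": -1.23}
#   predict_with_aux: {"input": {...}, "prediction": "<text>", "aux": {...}}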
WriteFn = Callable[
[str, Sequence[Any], tf.data.Dataset, str, Optional[seqio.Vocabulary]],
None]
def infer(*,
mode: str,
model: models.BaseTransformerModel,
dataset_cfg: utils.DatasetConfig,
restore_checkpoint_cfg: utils.RestoreCheckpointConfig,
partitioner: partitioning.BasePartitioner,
output_dir: str,
checkpoint_period: int,
shard_id: int = 0,
num_shards: int = 1,
run_xprof: bool = True,
merge_epoch_results: bool = True,
write_fn: WriteFn = write_inferences_to_file):
"""Infer function.
Args:
mode: Either 'predict' to decode targets, 'score' to compute the log
likelihood of given targets, or 'predict_with_aux' for both.
model: The model object to use for inference.
dataset_cfg: Specification for the dataset to infer based on.
restore_checkpoint_cfg: Specification for the model parameter checkpoint to
load.
partitioner: Partitioner for model parameters and data across devices.
output_dir: Path to directory to write temporary files and final results.
checkpoint_period: The intermediate results and dataset iterator will be
checkpointed on each multiple of this number of batches to enable
continuation after a failure.
shard_id: Index of dataset shard for this instance to use if splitting the
work across multiple jobs.
num_shards: Total number of dataset shards to split dataset across.
run_xprof: Whether to take an xprof snapshot during run.
merge_epoch_results: Whether to merge results of all epochs into a single
json file.
write_fn: Callable function used to serialized and write inferences out to
files.
"""
if mode not in ('predict', 'score', 'predict_with_aux'):
raise ValueError(
"`mode` must be one of 'predict', 'score' or 'predict_with_aux'. "
f"Got '{mode}'")
# Remove double-slashes in directory path to avoid inconsistencies.
output_dir = re.sub(r'(?<!gs:)([\/]{2,})', '/', output_dir)
ds_vocabs = utils.get_vocabulary(dataset_cfg)
if (ds_vocabs[0] != model.input_vocabulary or
ds_vocabs[1] != model.output_vocabulary):
raise ValueError(
'Model and Task vocabularies do not match.\n'
f'Task Input: {ds_vocabs[0]}, Model Input: {model.input_vocabulary}\n'
f'Task Output: {ds_vocabs[1]}, Model Output: {model.output_vocabulary}')
batch_size = dataset_cfg.batch_size
# Set up dataset.
if dataset_cfg.module:
utils.import_module(dataset_cfg.module)
host_shard_info = seqio.ShardInfo(index=shard_id, num_shards=num_shards)
task_or_mixture = seqio.get_mixture_or_task(dataset_cfg.mixture_or_task_name)
feature_converter = model.FEATURE_CONVERTER_CLS(pack=False) # pytype:disable=not-instantiable
def _get_dataset(dataset_provider):
# TODO(adarob): assert pack is false, shuffle is false, seed?
return dataset_provider.get_dataset(
sequence_length=dataset_cfg.task_feature_lengths,
split=dataset_cfg.split,
shuffle=False,
num_epochs=1,
shard_info=host_shard_info,
use_cached=dataset_cfg.use_cached,
seed=dataset_cfg.seed)
# Each "epoch" should be how often we checkpoint the input dataset and flush
# the inferences to disk.
logging.info('Inferring with checkpoints every %d batches of %d examples.',
checkpoint_period, batch_size)
logging.info('Initializing model, optimizer, and step functions.')
input_shapes = {
k: (batch_size,) + spec.shape for k, spec in feature_converter(
_get_dataset(task_or_mixture),
dataset_cfg.task_feature_lengths).element_spec.items()
}
# Initialize optimizer from the existing checkpoint.
# TODO(adarob): Support inference over multiple checkpoints.
train_step_initializer = utils.TrainStateInitializer(
optimizer_def=model.optimizer_def,
init_fn=model.get_initial_variables,
input_shapes=input_shapes,
partitioner=partitioner)
train_state = train_step_initializer.from_checkpoint([restore_checkpoint_cfg])
if mode == 'predict':
infer_step = model.predict_batch
elif mode == 'predict_with_aux':
infer_step = model.predict_batch_with_aux
else: # mode == 'score'
infer_step = model.score_batch
infer_fn = functools.partial(
utils.get_infer_fn(
infer_step=infer_step,
batch_size=batch_size,
train_state_axes=train_step_initializer.train_state_axes,
partitioner=partitioner),
train_state=train_state)
def infer_task(task: seqio.Task):
tmp_dir = os.path.join(output_dir,
f'tmp-{task.name}-{shard_id:05}-of-{num_shards:05}')
if jax.process_index() == 0:
gfile.makedirs(tmp_dir)
# Use `max_workers=1` to ensure writes occur sequentially.
write_thread_pool = FailFastThreadPoolExecutor(max_workers=1)
logging.info("Loading dataset for task '%s'.", task.name)
ds = _get_dataset(task)
model_ds = feature_converter(
ds, task_feature_lengths=dataset_cfg.task_feature_lengths)
# Zip task and model features.
# (task, model)
infer_ds = tf.data.Dataset.zip((ds, model_ds))
# Create batches the size of each epoch and index them.
# (i, [(task, model)] * epoch_size)
infer_ds = infer_ds.padded_batch(
checkpoint_period * batch_size, drop_remainder=False).enumerate()
infer_ds_iter: Iterator[Tuple[int, Any]] = iter(infer_ds.prefetch(AUTOTUNE))
# Create checkpoint manager and restore state, if applicable.
ckpt_path = os.path.join(tmp_dir, 'input.ckpt')
input_ckpt = tf.train.Checkpoint(ds=infer_ds_iter)
if gfile.glob(ckpt_path + '*'):
logging.info('Restoring input iterator from %s', ckpt_path)
input_ckpt.read(ckpt_path).assert_consumed()
output_fname = f'{task.name}-{mode}.jsonl-{shard_id:05}-of-{num_shards:05}'
logging.info("Starting inference loop for shard %d of %d of task '%s'.",
shard_id, num_shards, task.name)
def _write_epoch_and_canonicalize_ckpt(epoch: int, epoch_path: str,
inferences: Sequence[Any],
task_ds: tf.data.Dataset,
epoch_ckpt_path: str):
write_tick = time.time()
logging.info('Writing epoch %d results to %s', epoch, epoch_path)
write_fn(epoch_path, inferences, task_ds, mode,
task.output_features['targets'].vocabulary)
write_time = time.time() - write_tick
logging.info('Writing completed in %02f seconds (%02f examples/sec).',
write_time,
len(inferences) / write_time)
update_measurement_series('writing_total_sec', epoch, write_time)
update_measurement_series('writing_examples_per_sec', epoch,
len(inferences) / write_time)
# Canonicalize checkpoint.
for fname in gfile.glob(epoch_ckpt_path + '*'):
gfile.rename(
fname, fname.replace(epoch_ckpt_path, ckpt_path), overwrite=True)
# Main Loop over "epochs".
for epoch, epoch_batch in infer_ds_iter:
logging.info('Starting epoch %d', epoch)
epoch_tick = time.time()
# Take an Xprof trace after the first loop has compiled everything.
if epoch == 1:
multihost_utils.sync_devices(f'{task.name}:start_xprof')
utils.start_xprof(seconds=5, maybe_run=run_xprof, description='infer')
# Load the dataset for the next epoch. We can't use `infer_ds_iter`
# directly since `infer_fn` needs to know the exact size of each epoch,
# which may be smaller for the final one.
epoch_ds = tf.data.Dataset.from_tensor_slices(epoch_batch)
epoch_ds.cache().prefetch(AUTOTUNE)
# Unzip epoch dataset in to pretokenized and model datasets.
task_ds = epoch_ds.map(lambda p, m: p, num_parallel_calls=AUTOTUNE)
model_ds = epoch_ds.map(lambda p, m: m, num_parallel_calls=AUTOTUNE)
logging.info('Running inference on %d batches.', checkpoint_period)
# Sort by and strip index.
inferences = [
x[1]
for x in sorted(infer_fn(model_ds.enumerate()), key=lambda x: x[0])
]
if jax.process_index() == 0:
epoch_time = time.time() - epoch_tick
logging.info('Epoch completed in %02f seconds (%02f examples/sec).',
epoch_time,
len(inferences) / epoch_time)
update_measurement_series('inference_total_sec', epoch, epoch_time)
update_measurement_series('inference_examples_per_sec', epoch,
len(inferences) / epoch_time)
epoch_path = os.path.join(tmp_dir, f'{output_fname}-epoch{epoch:05}')
# Store iterator checkpoint in temporary location before writing the
# model output asynchronously. After outputs are written, the checkpoint
# will be moved to the canonical location to be used if restart occurs.
ckpt_tick = time.time()
epoch_ckpt_path = input_ckpt.write(
os.path.join(tmp_dir, f'{epoch}.ckpt'))
logging.info(
'Checkpoint written to temporary location in %02f seconds.',
time.time() - ckpt_tick)
# These will execute sequentially since the ThreadPool size is 1.
write_thread_pool.submit(
_write_epoch_and_canonicalize_ckpt,
epoch=epoch,
epoch_path=epoch_path,
inferences=inferences,
task_ds=task_ds,
epoch_ckpt_path=epoch_ckpt_path)
# Wait for checkpoint to be written before continuing.
multihost_utils.sync_devices(f'{task.name}:checkpoint_epoch{epoch:05}')
logging.info("Finished inference for task '%s'.", task.name)
logging.info('Waiting for epoch writes to complete.')
write_thread_pool.shutdown(wait=True)
if jax.process_index() == 0 and merge_epoch_results:
logging.info('Merging epoch results.')
# Merge epochs into single file.
epoch_paths = sorted(
gfile.glob(os.path.join(tmp_dir, f'{output_fname}-epoch?????')))
      assert int(epoch_paths[-1][-5:]) + 1 == len(epoch_paths), (
          f'Expecting {int(epoch_paths[-1][-5:]) + 1} epoch paths, found '
          f'{len(epoch_paths)}')
output_path = os.path.join(output_dir, output_fname)
with gfile.GFile(output_path, 'wb') as merged:
for epoch_path in epoch_paths:
with gfile.GFile(epoch_path, 'rb') as ef:
shutil.copyfileobj(ef, merged)
logging.info('Results written to %s.', output_path)
logging.info('Deleting temporary files.')
gfile.rmtree(tmp_dir)
# Wait for host 0 to finish writing before exiting.
multihost_utils.sync_devices(f'{task.name}:complete')
for task in seqio.get_subtasks(task_or_mixture):
logging.info("Starting inference for task '%s'", task.name)
infer_task(task)
logging.info('DONE')
def update_measurement_series(series_name: str, step: int, value: float):
"""Not implemented externally."""
del series_name, step, value
if __name__ == '__main__':
# pylint:disable=g-import-not-at-top
from absl import app
from absl import flags
import gin
from t5x import gin_utils
# pylint:enable=g-import-not-at-top
FLAGS = flags.FLAGS
jax.config.parse_flags_with_absl()
flags.DEFINE_integer(
'shard_id',
default=None,
help='Index to use for splitting the Task across multiple inference '
'runs. NB: If set, this overrides --gin.infer.shard_id')
flags.DEFINE_multi_string(
'gin_file',
default=None,
help='Path to gin configuration file. Multiple paths may be passed and '
'will be imported in the given order, with later configurations '
'overriding earlier ones.')
flags.DEFINE_multi_string(
'gin_bindings', default=[], help='Individual gin bindings.')
flags.DEFINE_list(
'gin_search_paths',
default=['.'],
help='Comma-separated list of gin config path prefixes to be prepended '
'to suffixes given via `--gin_file`. If a file appears in. Only the '
'first prefix that produces a valid path for each suffix will be '
'used.')
flags.DEFINE_string(
'tfds_data_dir', None,
'If set, this directory will be used to store datasets prepared by '
'TensorFlow Datasets that are not available in the public TFDS GCS '
'bucket. Note that this flag overrides the `tfds_data_dir` attribute of '
'all `Task`s.')
def main(argv: Sequence[str]):
"""Wrapper for pdb post mortems."""
_main(argv)
def _main(argv: Sequence[str]):
"""True main function."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.tfds_data_dir:
seqio.set_tfds_data_dir_override(FLAGS.tfds_data_dir)
# Create gin-configurable version of `infer`.
infer_using_gin = gin.configurable(infer)
gin_utils.parse_gin_flags(
# User-provided gin paths take precedence if relative paths conflict.
FLAGS.gin_search_paths + _DEFAULT_GIN_SEARCH_PATHS,
FLAGS.gin_file,
FLAGS.gin_bindings)
# See http://yaqs/7882016229479677952 for further gin-config discussion.
def _get_gin_parameter(key: str) -> Any:
value = gin.query_parameter(key)
if isinstance(value, gin.config.ConfigurableReference):
if value.evaluate:
return value.scoped_configurable_fn()
return value.scoped_configurable_fn
return value
shard_id = (
FLAGS.shard_id
if FLAGS.shard_id is not None else _get_gin_parameter('infer.shard_id'))
if shard_id == 0:
gin_utils.summarize_gin_config(
model_dir=_get_gin_parameter('infer.output_dir'),
summary_writer=None,
step=0)
if FLAGS.shard_id is not None:
# We fall back to this flag since XM does not support sweeps over flags
# with '.' in them (it treats them like nested dictionaries).
# TODO(adarob): Figure out a workaround so we can deprecate this flag.
infer_using_gin(shard_id=FLAGS.shard_id)
else:
infer_using_gin()
gin_utils.run(main)
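# Typical launch (all flag values below are placeholders):
#   python -m t5x.infer \
#     --gin_file=t5x/configs/runs/infer.gin \
#     --gin.infer.output_dir=\"'/tmp/infer_out'\" \
#     --shard_id=0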
| 38.24957 | 100 | 0.682536 | 2,938 | 22,223 | 4.981961 | 0.218176 | 0.014279 | 0.005739 | 0.004099 | 0.147435 | 0.089909 | 0.055817 | 0.025005 | 0.025005 | 0.020496 | 0 | 0.006022 | 0.222922 | 22,223 | 580 | 101 | 38.315517 | 0.841566 | 0.267021 | 0 | 0.089189 | 0 | 0 | 0.155435 | 0.021716 | 0 | 0 | 0 | 0.003448 | 0.005405 | 1 | 0.048649 | false | 0.002703 | 0.067568 | 0.005405 | 0.156757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac0b89202a0d8051b2ab4d7be5df86386a3fe043 | 5,850 | py | Python | mesa/batchrunner.py | DanielWeitzenfeld/mesa | 2f36c7c85a3a998b19caf4de83ecb80dc41013f4 | [
"MIT"
] | null | null | null | mesa/batchrunner.py | DanielWeitzenfeld/mesa | 2f36c7c85a3a998b19caf4de83ecb80dc41013f4 | [
"MIT"
] | null | null | null | mesa/batchrunner.py | DanielWeitzenfeld/mesa | 2f36c7c85a3a998b19caf4de83ecb80dc41013f4 | [
"MIT"
] | null | null | null | from itertools import product
import pandas as pd
class BatchRunner(object):
'''
Manage a batch run or parameter sweep of a given model.
This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model- and
agent-level reporters, dictionaries mapping a variable name to a function
which collects some data from the model or its agents at the end of the run
and stores it.
Note that by default, the reporters only collect data at the *end* of the
run. To get step by step data, simply have a reporter store the model's
entire DataCollector object.
'''
model_cls = None
parameter_values = {}
iterations = 1
model_reporters = {}
agent_reporters = {}
model_vars = {}
agent_vars = {}
def __init__(self, model_cls, parameter_values, iterations=1,
max_steps=1000, model_reporters=None, agent_reporters=None):
'''
Create a new BatchRunner for a given model with the given parameters.
Args:
model_cls: The class of model to batch-run.
parameter_values: Dictionary of parameters to their values or
ranges of values. For example:
{"param_1": range(5),
"param_2": [1, 5, 10],
"const_param": 100}
iterations: How many times to run the model at each combination of
parameters.
max_steps: After how many steps to halt each run if it hasn't
halted on its own.
model_reporters: Dictionary of variables to collect on each run at
the end, with variable names mapped to a function to collect
them. For example:
{"agent_count": lambda m: m.schedule.get_agent_count()}
agent_reporters: Like model_reporters, but each variable is now
collected at the level of each agent present in the model at
the end of the run.
'''
self.model_cls = model_cls
self.parameter_values = {param: self.make_iterable(vals)
for param, vals in parameter_values.items()}
self.iterations = iterations
self.max_steps = max_steps
self.model_reporters = model_reporters
self.agent_reporters = agent_reporters
if self.model_reporters:
self.model_vars = {}
if self.agent_reporters:
self.agent_vars = {}
def run_all(self):
'''
Run the model at all parameter combinations and store results.
'''
params = self.parameter_values.keys()
param_ranges = self.parameter_values.values()
run_count = 0
for param_values in list(product(*param_ranges)):
kwargs = dict(zip(params, param_values))
for _ in range(self.iterations):
model = self.model_cls(**kwargs)
self.run_model(model)
# Collect and store results:
if self.model_reporters:
key = tuple(list(param_values) + [run_count])
self.model_vars[key] = self.collect_model_vars(model)
if self.agent_reporters:
                    for agent_id, reports in self.collect_agent_vars(model).items():
key = tuple(list(param_values) + [run_count, agent_id])
self.agent_vars[key] = reports
run_count += 1
def run_model(self, model):
'''
Run a model object to completion, or until reaching max steps.
If your model runs in a non-standard way, this is the method to modify
in your subclass.
'''
while model.running and model.schedule.steps < self.max_steps:
model.step()
def collect_model_vars(self, model):
'''
Run reporters and collect model-level variables.
'''
model_vars = {}
for var, reporter in self.model_reporters.items():
model_vars[var] = reporter(model)
return model_vars
def collect_agent_vars(self, model):
'''
Run reporters and collect agent-level variables.
'''
agent_vars = {}
for agent in model.schedule.agents:
agent_record = {}
for var, reporter in self.agent_reporters.items():
agent_record[var] = reporter(agent)
agent_vars[agent.unique_id] = agent_record
return agent_vars
def get_model_vars_dataframe(self):
'''
Generate a pandas DataFrame from the model-level collected variables.
'''
index_col_names = list(self.parameter_values.keys())
index_col_names.append("Run")
records = []
for key, val in self.model_vars.items():
record = dict(zip(index_col_names, key))
for k, v in val.items():
record[k] = v
records.append(record)
return pd.DataFrame(records)
def get_agent_vars_dataframe(self):
'''
Generate a pandas DataFrame from the agent-level variables collected.
'''
index_col_names = list(self.parameter_values.keys())
index_col_names += ["Run", "AgentID"]
records = []
for key, val in self.agent_vars.items():
record = dict(zip(index_col_names, key))
for k, v in val.items():
record[k] = v
records.append(record)
return pd.DataFrame(records)
@staticmethod
def make_iterable(val):
'''
Helper method to ensure a value is a non-string iterable.
'''
if hasattr(val, "__iter__") and type(val) is not str:
return val
else:
return [val]
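if __name__ == '__main__':
    # Minimal self-contained demo of a parameter sweep. _ToyModel is a
    # hypothetical stand-in (not part of mesa); it only provides what
    # BatchRunner touches: `running`, `schedule.steps`, and `step()`.
    class _ToySchedule:
        def __init__(self):
            self.steps = 0
            self.agents = []

    class _ToyModel:
        def __init__(self, n):
            self.n = n
            self.running = True
            self.schedule = _ToySchedule()

        def step(self):
            self.schedule.steps += 1
            if self.schedule.steps >= self.n:
                self.running = False

    runner = BatchRunner(_ToyModel, {'n': range(1, 4)}, iterations=2,
                         model_reporters={'steps': lambda m: m.schedule.steps})
    runner.run_all()
    print(runner.get_model_vars_dataframe())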
| 37.025316 | 79 | 0.591111 | 720 | 5,850 | 4.645833 | 0.243056 | 0.034978 | 0.023318 | 0.008969 | 0.201495 | 0.189537 | 0.162033 | 0.122571 | 0.122571 | 0.093871 | 0 | 0.004609 | 0.332479 | 5,850 | 157 | 80 | 37.261147 | 0.851985 | 0.342222 | 0 | 0.268293 | 0 | 0 | 0.006076 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.02439 | 0 | 0.292683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac106e7e862898d45dbc929cd42c0f33246f9e8d | 4,511 | py | Python | app/resources/order_item.py | early-month-subsidy/backend-server | 9abe7a0372eab5899dfc593783034583b6652577 | [
"MIT"
] | null | null | null | app/resources/order_item.py | early-month-subsidy/backend-server | 9abe7a0372eab5899dfc593783034583b6652577 | [
"MIT"
] | 5 | 2021-03-18T21:41:18.000Z | 2022-03-11T23:35:36.000Z | app/resources/order_item.py | early-month-subsidy/backend-server | 9abe7a0372eab5899dfc593783034583b6652577 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# Copyright 2018 Vinzor Co.,Ltd.
#
# comment
#
# 18-12-9 leo : Init
from flask_restful import Resource, reqparse
from flask_jwt_extended import jwt_required, get_jwt_identity
from .. import db
from ..models import OrderItem, User, Board, OrderItemStatus
order_item_create_parser = reqparse.RequestParser()
order_item_create_parser.add_argument('quantity', type=int, help='This field cannot be blank', required=True)
order_item_create_parser.add_argument('food_id', type=int, help='This field cannot be blank', required=True)
order_item_update_parser = reqparse.RequestParser()
order_item_update_parser.add_argument('quantity', type=int, required=False)
order_item_update_parser.add_argument('action', required=False)
class OrderItemAll(Resource):
def get(self, board_id):
order_items = OrderItem.find_by_board_id(board_id)
return {
'order_items': [i.to_json() for i in order_items]
}, 200
@jwt_required
def post(self, board_id):
data = order_item_create_parser.parse_args()
current_user = User.find_by_username(get_jwt_identity())
board = Board.find_by_id(board_id)
board.occupation = True
        # traverse the order items already on this board
order_items = OrderItem.find_by_board_id(board_id)
check_items_exists = False
order_item = None
for i in order_items:
if i.owner_id == current_user.id and i.food_id == data['food_id']:
i.quantity = data['quantity']
check_items_exists = True
order_item = i
break
if not check_items_exists:
order_item = OrderItem(quantity=data['quantity'],
owner_id=current_user.id,
board_id=board_id,
food_id=data['food_id'])
try:
db.session.add(order_item)
db.session.add(board)
db.session.commit()
return {
'order_item': order_item.to_json()
}, 200
        except Exception:
            db.session.rollback()
            return {
                'message': 'Something went wrong.'
            }, 500
# TODO: cancel order item by seller
class OrderItemSingle(Resource):
def get(self, order_item_id):
order_item = OrderItem.find_by_id(order_item_id)
return {
'order_item': order_item.to_json()
}, 200
@jwt_required
def put(self, order_item_id):
data = order_item_update_parser.parse_args()
current_user = User.find_by_username(get_jwt_identity())
order_item = OrderItem.find_by_id(order_item_id)
if order_item.owner_id == current_user.id and order_item.status == OrderItemStatus.ORDERING:
action = data['action']
if action == 'increment':
order_item.quantity += 1
elif action == 'decrement':
order_item.quantity -= 1
elif data['quantity']:
order_item.quantity = data['quantity']
else:
return {
'message': 'Unknown action.'
}, 403
try:
db.session.add(order_item)
db.session.commit()
return {
'order_item': order_item.to_json()
}, 200
            except Exception:
                db.session.rollback()
                return {
                    'message': 'Something went wrong.'
                }, 500
else:
return {
'message': 'This order item is not yours or this item is confirmed.'
}, 403
@jwt_required
def delete(self, order_item_id):
current_user = User.find_by_username(get_jwt_identity())
order_item = OrderItem.find_by_id(order_item_id)
if order_item.owner_id == current_user.id and order_item.status == OrderItemStatus.ORDERING:
try:
db.session.delete(order_item)
db.session.commit()
return {
                    'message': 'Order item %s deleted successfully.' % order_item_id
}, 200
            except Exception:
                db.session.rollback()
                return {
                    'message': 'Something went wrong.'
                }, 500
else:
return {
'message': 'This order item is not yours or this item is confirmed.'
}, 403
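# Hypothetical route registration (the actual wiring lives in the app factory):
#   api.add_resource(OrderItemAll, '/boards/<int:board_id>/order_items')
#   api.add_resource(OrderItemSingle, '/order_items/<int:order_item_id>')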
| 36.088 | 109 | 0.569497 | 514 | 4,511 | 4.727626 | 0.22179 | 0.151852 | 0.031687 | 0.034568 | 0.602881 | 0.527984 | 0.447737 | 0.447737 | 0.4107 | 0.363374 | 0 | 0.015116 | 0.340058 | 4,511 | 124 | 110 | 36.379032 | 0.801142 | 0.0317 | 0 | 0.566038 | 0 | 0 | 0.10507 | 0 | 0 | 0 | 0 | 0.008065 | 0 | 1 | 0.04717 | false | 0 | 0.037736 | 0 | 0.207547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac13c016d51c41945f9dfebde6454638f97f236b | 1,083 | py | Python | Z_ALL_FILE/Jy1/DTTEST.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Jy1/DTTEST.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Jy1/DTTEST.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | 1 | 2021-04-29T21:46:02.000Z | 2021-04-29T21:46:02.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
import MySQLdb
from datetime import datetime
db = os.getcwd() + "\\OMDB.csv"
semcol = os.getcwd() + "\\semcols.txt"
cat = os.getcwd() + "\\catdef.txt"
conn= MySQLdb.connect("localhost","root","admin","om2")
x = datetime.now()
y = datetime.strftime(x, "%m-%d-%Y %H:%M:%S")
svpt = os.getcwd() + "\\OMDW.csv"
df = pd.read_csv(svpt)
df['LASTOCCURRENCE'] = pd.to_datetime(df['LASTOCCURRENCE'])
df['LASTOCCURRENCE'] = df['LASTOCCURRENCE'].map(lambda x: x.strftime("%d/%m/%Y %H:%M:%S"))
df = df.assign(NW = y)
df['DUR'] = df.apply(lambda x : pd.to_datetime(x.NW) - pd.to_datetime(x.LASTOCCURRENCE) ,axis=1)
df['DUR'] = df['DUR'].astype('timedelta64[m]')
#df['LASTOCCURRENCE'] = df['LASTOCCURRENCE'].map(lambda x: x.strftime("%d/%m/%Y %H:%M:%S"))
#df = df.assign(NW = y)
#print(df.dtypes)
#df['DUR'] = pd.to_datetime(y - pd.to_datetime(df['LASTOCCURRENCE'])
#df['DUR'] = df.apply(lambda x : y - pd.to_datetime(x.LASTOCCURRENCE)) ,axis=1)
# In[ ]:
df
# In[ ]:
# In[ ]:
# In[ ]:
| 19 | 96 | 0.622345 | 174 | 1,083 | 3.833333 | 0.33908 | 0.167916 | 0.107946 | 0.017991 | 0.446777 | 0.446777 | 0.305847 | 0.209895 | 0.209895 | 0.209895 | 0 | 0.007471 | 0.134811 | 1,083 | 56 | 97 | 19.339286 | 0.704376 | 0.315789 | 0 | 0 | 0 | 0 | 0.246897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.263158 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
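The row-wise apply() above is a common pandas hotspot; a vectorized sketch of the same duration computation, assuming the column names used above (the CSV path is a placeholder):

import pandas as pd
from datetime import datetime

df = pd.read_csv("OMDW.csv")  # placeholder path
df['LASTOCCURRENCE'] = pd.to_datetime(df['LASTOCCURRENCE'])
# Whole datetime columns can be subtracted at once; // 60 gives whole minutes,
# matching the timedelta64[m] cast above.
df['DUR'] = (datetime.now() - df['LASTOCCURRENCE']).dt.total_seconds() // 60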
ac14a45217bf58f1e0eeec0323ce65af3fc1c511 | 3,667 | py | Python | wagtailsnapshotpublisher/panels.py | yohanlebret/wagtail-snapshotpublisher | cec9276c81c9ac91950b4d621868cc16e8935d28 | [
"MIT"
] | null | null | null | wagtailsnapshotpublisher/panels.py | yohanlebret/wagtail-snapshotpublisher | cec9276c81c9ac91950b4d621868cc16e8935d28 | [
"MIT"
] | 1 | 2020-04-20T14:08:21.000Z | 2020-04-20T14:08:25.000Z | wagtailsnapshotpublisher/panels.py | yohanlebret/wagtail-snapshotpublisher | cec9276c81c9ac91950b4d621868cc16e8935d28 | [
"MIT"
] | 1 | 2021-04-11T07:36:35.000Z | 2021-04-11T07:36:35.000Z | """
.. module:: wagtailsnapshotpublisher.panels
"""
from django.forms.utils import pretty_name
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.edit_handlers import EditHandler, FieldPanel
class BaseReadOnlyPanel(EditHandler):
""" BaseReadOnlyPanel """
def render(self):
""" render """
value = getattr(self.instance, self.attr)
if callable(value):
value = value()
return format_html('<div style="padding-top: 1.2em;">{}</div>', value)
def render_as_object(self):
""" render_as_object """
return format_html(
'<fieldset><legend>{}</legend>'
'<ul class="fields"><li><div class="field">{}</div></li></ul>'
'</fieldset>',
self.heading, self.render())
def render_as_field(self):
""" render_as_field """
return format_html(
'<div class="field">'
'<label>{}{}</label>'
'<div class="field-content">{}</div>'
'</div>',
self.heading, _(':'), self.render())
def required_fields(self):
""" required_fields """
fields = []
return fields
class ReadOnlyPanel:
""" ReadOnlyPanel """
def __init__(self, attr, heading=None, classname='', help_text=''):
""" __init__ """
self.attr = attr
self.heading = pretty_name(self.attr) if heading is None else heading
self.classname = classname
self.help_text = help_text
def required_fields(self):
""" required_fields """
raise AttributeError
def bind_to(self, model):
""" bind_to """
return type(str(_('ReadOnlyPanel')), (BaseReadOnlyPanel,),
{'attr': self.attr, 'heading': self.heading,
'classname': self.classname})(heading=self.heading,
classname=self.classname,
help_text=self.help_text)
class BaseEditableOnCreatedPanel(FieldPanel):
""" BaseEditableOnCreatedPanel """
def render_as_object(self):
""" render_as_object """
if self.instance.id is not None:
value = getattr(self.instance, self.attr)
if callable(value):
value = value()
return format_html(
'<fieldset><legend>{}</legend>'
'<ul class="fields"><li><div class="field"><div style="padding-top: 1.2em;">{}</div></div></li></ul>'
'</fieldset>',
self.heading, value)
return super(BaseEditableOnCreatedPanel, self).render_as_object()
class EditableOnCreatedPanel:
""" EditableOnCreatedPanel """
def __init__(self, attr, heading=None, classname='', help_text=''):
""" __init__ """
self.attr = attr
self.heading = pretty_name(self.attr) if heading is None else heading
self.classname = classname
self.help_text = help_text
def required_fields(self):
""" required_fields """
raise AttributeError
def bind_to(self, model):
""" bind_to """
return type(str(_('EditableOnCreatedPanel')), (BaseEditableOnCreatedPanel,),
{'attr': self.attr, 'heading': self.heading,
'classname': self.classname})(field_name=self.attr,
heading=self.heading,
classname=self.classname,
help_text=self.help_text)
| 34.59434 | 117 | 0.546496 | 347 | 3,667 | 5.587896 | 0.204611 | 0.045384 | 0.036101 | 0.055699 | 0.631769 | 0.614234 | 0.570913 | 0.546674 | 0.506447 | 0.452811 | 0 | 0.001604 | 0.31988 | 3,667 | 105 | 118 | 34.92381 | 0.775862 | 0.076084 | 0 | 0.567164 | 0 | 0.014925 | 0.132219 | 0.066869 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164179 | false | 0 | 0.059701 | 0 | 0.402985 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
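A usage sketch for the two panels on a Wagtail page model; the page class and field names are assumptions made for illustration only.

# Hypothetical page model; 'release_notes' and 'release_date' are assumptions.
from wagtail.core.models import Page

class ReleasePage(Page):
    content_panels = Page.content_panels + [
        ReadOnlyPanel('release_notes', heading='Release notes'),
        EditableOnCreatedPanel('release_date', heading='Release date'),
    ]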
ac156b3a9f605a47bf78ef0b7495d1155d8e5150 | 2,499 | py | Python | alibabacloud/utils/ini_helper.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | 21 | 2018-12-20T07:34:13.000Z | 2020-03-05T14:32:08.000Z | alibabacloud/utils/ini_helper.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | 22 | 2018-12-21T13:22:33.000Z | 2020-06-29T08:37:09.000Z | alibabacloud/utils/ini_helper.py | wallisyan/alibabacloud-python-sdk-v2 | 6e024c97cded2403025a7dd8fea8261e41872156 | [
"Apache-2.0"
] | 12 | 2018-12-29T05:45:55.000Z | 2022-01-05T09:59:30.000Z | # Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from alibabacloud.exceptions import ClientException, ConfigNotFoundException
from alibabacloud.vendored import six
# parse ini file
def _parse_nested(config_value):
parsed = {}
for line in config_value.splitlines():
line = line.strip()
if not line:
continue
key, value = line.split('=', 1)
parsed[key.strip()] = value.strip()
return parsed
def raw_config_parse(config_filename, parse_subsections=True):
config = {}
path = config_filename
if path is not None:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise ConfigNotFoundException(path=path)
cp = six.moves.configparser.RawConfigParser()
try:
cp.read([path])
except six.moves.configparser.ParsingError:
raise ClientException(
msg='Credentials file (%s) format is incorrect.' % path
)
except six.moves.configparser.Error:
raise ClientException(
msg='Cannot read credentials from (%s).' % path
)
else:
for section in cp.sections():
config[section] = {}
for option in cp.options(section):
config_value = cp.get(section, option)
if parse_subsections and config_value.startswith('\n'):
try:
config_value = _parse_nested(config_value)
except ValueError:
raise ClientException(
msg='Unable to parse ini file: %s.' % path
)
config[section][option] = config_value
return config
def load_config(config_filename):
parsed = raw_config_parse(config_filename)
return parsed
| 34.232877 | 76 | 0.612645 | 284 | 2,499 | 5.31338 | 0.447183 | 0.051027 | 0.039761 | 0.021206 | 0.076872 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005214 | 0.309324 | 2,499 | 72 | 77 | 34.708333 | 0.869061 | 0.236094 | 0 | 0.145833 | 0 | 0 | 0.057022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
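A usage sketch: given a small credentials file, load_config returns a dict of sections, and an indented multi-line value would be parsed into a nested dict by _parse_nested. The file contents below are illustrative.

# Illustrative credentials file, e.g. saved at ~/.alibabacloud/credentials:
#
#   [default]
#   access_key_id = AKID
#   access_key_secret = SECRET
#
config = load_config('~/.alibabacloud/credentials')  # path is expanded internally
print(config['default']['access_key_id'])            # -> 'AKID'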
ac19d873d14871a60478d1d56639d010b237b612 | 1,223 | py | Python | examples/iris.py | Catastropha/ignis | 0fce3b4502666bf3257670c11e3a9c018e04baac | [
"MIT"
] | null | null | null | examples/iris.py | Catastropha/ignis | 0fce3b4502666bf3257670c11e3a9c018e04baac | [
"MIT"
] | null | null | null | examples/iris.py | Catastropha/ignis | 0fce3b4502666bf3257670c11e3a9c018e04baac | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.optim as optim
import pandas as pd
from ignis import fit
from ignis.loaders import create_loaders
from ignis.callbacks import EarlyStop, ModelCheckpoint
df = pd.read_csv('examples/iris.csv')
data = df.drop(columns=['Id', 'Species'])
labels = df['Species']
labels = pd.get_dummies(labels)
train_loader, validation_loader = create_loaders(
x=data.values,
y=labels.values,
validation_split=0.1,
)
class Model(nn.Module):
    def __init__(self):
super(Model, self).__init__()
self.fc1 = nn.Linear(4, 8)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(8, 3)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.sigmoid(self.fc2(x))
return x
model = Model()
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
callbacks = [
EarlyStop(monitor='train_loss', patience=3),
ModelCheckpoint(monitor='validation_loss', filepath='best_model.pt'),
]
fit(
train_loader=train_loader,
validation_loader=validation_loader,
model=model,
loss_fn=loss_fn,
optimizer=optimizer,
epochs=500,
callbacks=callbacks,
verbose=True,
)
| 22.648148 | 73 | 0.674571 | 168 | 1,223 | 4.761905 | 0.422619 | 0.03375 | 0.0825 | 0.0675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018311 | 0.196239 | 1,223 | 53 | 74 | 23.075472 | 0.795524 | 0 | 0 | 0 | 0 | 0 | 0.058054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.139535 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
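Once training finishes, the same model object can be queried directly; a minimal inference sketch (the sample measurements are illustrative):

import torch

model.eval()
with torch.no_grad():
    sample = torch.tensor([[5.1, 3.5, 1.4, 0.2]])  # sepal/petal measurements
    probs = model(sample)
    print('predicted class index:', probs.argmax(dim=1).item())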
ac1bd4cf186652fa83871cf049f13360eecad546 | 2,611 | py | Python | vvvvid_scraper.py | antoks1/VVVVID-Downloader | 872fae77ce5a620b5f40edbeaf0a816c7fc8b499 | [
"MIT"
] | 1 | 2021-03-17T09:41:40.000Z | 2021-03-17T09:41:40.000Z | vvvvid_scraper.py | KarlBlackheart/VVVVID-Downloader | 6aadd37c0da7a9837d269e9e25b2fa9e1ff0ef7e | [
"MIT"
] | null | null | null | vvvvid_scraper.py | KarlBlackheart/VVVVID-Downloader | 6aadd37c0da7a9837d269e9e25b2fa9e1ff0ef7e | [
"MIT"
] | null | null | null | '''
VVVVID Downloader - VVVVID Scraper Utility Functions
Author: CoffeeStraw
GitHub: https://github.com/CoffeeStraw/VVVVID-Downloader
'''
import re
from copy import deepcopy
def parse_url(url):
'''
Parse a given link to extract show_id and content name (url formatted)
'''
# Compatibility with old link format
url = url.replace("/#!show/", '/show/')
# Parsing URL
pattern = r"show/([0-9]+)/(.+?)/"
return re.search(pattern, url).groups()
def convert_text_to_url_format(text):
'''
Format a text correctly for the url concatenation
'''
text = re.sub(r'[^a-zA-Zàèéìòù\s\-\']', '', text)
text = text.replace("à","a")
text = re.sub("è|é", "e", text)
text = text.replace("ì","i")
text = text.replace("ò","o")
text = text.replace("ù","u")
text = re.sub(r'[\s\']+', '-', text)
return text.lower()
def get_content_infos(requests_obj, show_id):
'''
    Retrieves some information about the content to beautify the output,
    specifically the description and a well-formatted name
'''
infos_url = 'https://www.vvvvid.it/vvvvid/ondemand/' + show_id + '/info/'
json_file = requests_obj['session'].get(
infos_url,
headers=requests_obj['headers'],
params=requests_obj['payload']
).json()
return json_file['data']['title'], json_file['data']['description']
def get_seasons(requests_obj, url, show_id, url_name):
'''
    Returns a dictionary of seasons, keyed by season id, with each season's name and episodes
'''
    # Downloading episode information
json_file = requests_obj['session'].get(
"https://www.vvvvid.it/vvvvid/ondemand/" + show_id + "/seasons/",
headers=requests_obj['headers'],
params=requests_obj['payload']
).json()
# Extracting seasons from json
seasons = {}
for i, season in enumerate(json_file['data']):
seasons[str(json_file['data'][i]['season_id'])] = {
'name': json_file['data'][i]['name'],
'episodes': json_file['data'][i]['episodes']
}
# Check if the link is a link to a single episode.
# If it is, then return only a single season with episodes starting from the selected one
# IMPROVABLE? IT IS A DIRTY SOLUTION
pattern = url_name + "(.+)$"
additional_infos = re.findall(pattern, url)[0]
if additional_infos != "/":
stop = False
additional_infos = re.findall("/(.+)/(.+)/(.+)/", additional_infos)[0]
seasons_c = deepcopy(seasons)
for season_id, season in seasons_c.items():
if not stop and season_id == additional_infos[0]:
for j, episode in enumerate(season['episodes']):
if str(episode['video_id']) == str(additional_infos[1]):
stop = True
break
else:
del seasons[season_id]['episodes'][0]
else:
del seasons[season_id]
return seasons | 28.380435 | 90 | 0.676369 | 367 | 2,611 | 4.683924 | 0.340599 | 0.051193 | 0.041885 | 0.022688 | 0.162885 | 0.137289 | 0.103549 | 0.103549 | 0.061664 | 0 | 0 | 0.003172 | 0.15473 | 2,611 | 92 | 91 | 28.380435 | 0.775714 | 0.266182 | 0 | 0.192308 | 0 | 0 | 0.174169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
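Every function above receives a requests_obj dict; its expected shape can be inferred from the call sites. A construction sketch, where the header and payload values are placeholders:

import requests

# Shape inferred from the call sites above; concrete values are placeholders.
requests_obj = {
    'session': requests.Session(),
    'headers': {'User-Agent': 'Mozilla/5.0'},
    'payload': {'conn_id': 'PLACEHOLDER_TOKEN'},
}

show_id, url_name = parse_url('https://www.vvvvid.it/show/1234/some-show/')
title, description = get_content_infos(requests_obj, show_id)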
ac1f2a0870b94a6ab51d4e27da4e587d596b573f | 2,192 | py | Python | routes/challenges.py | xdfcfc0xa/THMC-Challenge-Server | ffd08bdc78cdbe99555abae9be07af1dbeeddf5d | [
"MIT"
] | 55 | 2016-09-06T20:46:36.000Z | 2022-03-28T01:29:17.000Z | routes/challenges.py | xdfcfc0xa/THMC-Challenge-Server | ffd08bdc78cdbe99555abae9be07af1dbeeddf5d | [
"MIT"
] | 17 | 2016-09-06T21:02:16.000Z | 2020-04-16T19:46:58.000Z | routes/challenges.py | xdfcfc0xa/THMC-Challenge-Server | ffd08bdc78cdbe99555abae9be07af1dbeeddf5d | [
"MIT"
] | 23 | 2016-09-07T17:12:23.000Z | 2022-01-17T14:03:06.000Z | from flask import Blueprint, g, request, render_template, flash, redirect, url_for
from utils import decorators, ratelimit
from data import challenge
import exceptions
challenges = Blueprint("challenges", __name__, template_folder="../templates/challenges")
@challenges.route('/challenges/')
@decorators.must_be_allowed_to("view challenges")
@decorators.competition_started_required
@decorators.confirmed_email_required
def index():
stages = challenge.get_stages()
challs = challenge.get_challenges()
solved = challenge.get_solved(g.team)
solves = challenge.get_solve_counts()
categories = challenge.get_categories()
first_stage = {chall.alias: True for chall in challs[stages[0].id]} if stages else None
return render_template("challenges.html", stages=stages, first_stage=first_stage, challenges=challs, solved=solved, categories=categories, solves=solves)
@challenges.route('/challenges/<challenge_id>/solves/')
@decorators.must_be_allowed_to("view challenge solves")
@decorators.must_be_allowed_to("view challenges")
@decorators.competition_started_required
@decorators.confirmed_email_required
def show_solves(challenge_id):
try:
chall = challenge.get_challenge(alias=challenge_id)
except exceptions.ValidationError as e:
flash(str(e))
return redirect(url_for(".index"))
solves = challenge.get_challenge_solves(chall)
return render_template("challenge_solves.html", challenge=chall, solves=solves)
@challenges.route('/submit/<challenge_id>/', methods=["POST"])
@decorators.must_be_allowed_to("solve challenges")
@decorators.must_be_allowed_to("view challenges")
@decorators.competition_running_required
@decorators.confirmed_email_required
@ratelimit.ratelimit(limit=10, per=120)
def submit(challenge_id):
try:
chall = challenge.get_challenge(challenge_id)
except exceptions.ValidationError as e:
flash(str(e))
return redirect(url_for(".index"))
flag = request.form["flag"]
try:
challenge.submit_flag(chall, g.user, g.team, flag)
flash("Success!")
except exceptions.ValidationError as e:
flash(str(e))
return redirect(url_for('.index'))
| 36.533333 | 157 | 0.757299 | 268 | 2,192 | 5.966418 | 0.283582 | 0.060038 | 0.050031 | 0.07192 | 0.445278 | 0.404628 | 0.404628 | 0.328956 | 0.328956 | 0.328956 | 0 | 0.003146 | 0.130018 | 2,192 | 59 | 158 | 37.152542 | 0.835343 | 0 | 0 | 0.387755 | 0 | 0 | 0.115876 | 0.046077 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0 | 0.081633 | 0 | 0.244898 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
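A sketch of mounting the blueprint on an application; the import path and template folder are assumptions.

# Hypothetical application setup fragment.
from flask import Flask
from routes.challenges import challenges

app = Flask(__name__, template_folder='templates')
app.register_blueprint(challenges)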
ac226e0ac0f48598e5309967aeb2d65971658fc1 | 524 | py | Python | modulestf/logger.py | tjunnone/modules.tf-lambda | d631fcc3dbea752e5ed3ba23ce59ab47b4dc28e7 | [
"MIT"
] | 312 | 2018-09-15T12:56:49.000Z | 2022-03-14T06:04:06.000Z | modulestf/logger.py | tjunnone/modules.tf-lambda | d631fcc3dbea752e5ed3ba23ce59ab47b4dc28e7 | [
"MIT"
] | 30 | 2018-09-15T17:41:29.000Z | 2021-09-30T02:08:10.000Z | modulestf/logger.py | tjunnone/modules.tf-lambda | d631fcc3dbea752e5ed3ba23ce59ab47b4dc28e7 | [
"MIT"
] | 48 | 2018-09-17T12:05:16.000Z | 2022-01-20T11:35:51.000Z | import logging
import sys
# Logging snippet was from https://gist.github.com/niranjv/fb95e716151642e8ca553b0e38dd152e
def setup_logging():
logger = logging.getLogger()
for h in logger.handlers:
logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
# use whatever format you want here
FORMAT = "[%(levelname)s]\t%(asctime)s.%(msecs)dZ\t%(name)s\t%(message)s\n"
h.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(h)
logger.setLevel(logging.INFO)
return logger
| 26.2 | 91 | 0.711832 | 68 | 524 | 5.470588 | 0.647059 | 0.010753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048055 | 0.166031 | 524 | 19 | 92 | 27.578947 | 0.803204 | 0.234733 | 0 | 0 | 0 | 0.083333 | 0.160804 | 0.160804 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
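Usage is a single call at cold start; later records inherit the stdout handler and format configured above.

logger = setup_logging()
logger.info('handler initialised')  # roughly: [INFO]\t<timestamp>Z\troot\thandler initialised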
ac245191bcbf09412032dd511eb73013a1842213 | 14,720 | py | Python | eval_sp2021/plots/create_plots.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | 13 | 2021-03-30T15:39:35.000Z | 2022-02-21T08:30:45.000Z | eval_sp2021/plots/create_plots.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | null | null | null | eval_sp2021/plots/create_plots.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | 4 | 2021-06-30T08:37:45.000Z | 2022-03-05T03:21:14.000Z | from typing import Dict
import pandas as pd
import glob
import os
import json
import numpy as np
import sri_plot_helper as sph
import matplotlib.ticker as ticker
import math
import argparse
figure_height = 26
font_size = 8
x_axis_label_offset_top = -0.027
# color configuration
color_reg = '#183646'
color_mlp = '#115c8e'
color_statdp_1 = '#b784a8'
color_statdp_2 = '#d7bbc7'
class DataReader:
"""
A helper class for reading log data.
"""
def __init__(self, logs_dir: str):
self.logs_dir = logs_dir
def read_data(self, experiment_label: str) -> Dict:
"""
Read the data for a given label.
"""
# keys: data_type, values: information for this data_type
data = {}
# filename pattern to cover
pattern = os.path.join(self.logs_dir, experiment_label + "_data.log")
for filename in glob.glob(pattern):
with open(filename, "r") as f:
for line in f:
if len(line) > 1: # skip empty lines
elem = json.loads(line)
# extract and remove context
mechanism = elem['ctx'][0]
del elem['ctx']
# determine data type
data_type = next(iter(elem))
if data_type not in data:
data[data_type] = []
# add context information
value = elem[data_type]
if isinstance(value, float) or isinstance(value, int):
row = {data_type: value}
else:
row = value
row['mechanism'] = mechanism
data[data_type].append(row)
# convert information to data frame
for data_type in data.keys():
df = pd.DataFrame(data[data_type])
if 'mechanism' in df.columns:
# improve naming
rename = {
**{f'SparseVectorTechnique{i}': f'SVT{i}' for i in range(1, 7)},
'Rappor': 'RAPPOR',
'OneTimeRappor': 'OneTimeRAPPOR',
'TruncatedGeometricMechanism': 'TruncatedGeometric'
}
for old, new in rename.items():
df['mechanism'] = df['mechanism'].replace(old, new)
n = df['mechanism'].value_counts().max()
if n == 1:
# set index
df = df.set_index('mechanism')
data[data_type] = df
return data
def add_old_flag(df):
"""
Mark all mechanisms which were originally evaluated in StatDP [1].
[1] Ding, Zeyu, Yuxin Wang, Guanhong Wang, Danfeng Zhang, and Daniel Kifer.
"Detecting Violations of Differential Privacy." In Proceedings of the 2018
ACM SIGSAC Conference on Computer and Communications Security - CCS ’18.
https://doi.org/10.1145/3243734.3243818.
"""
old_mechanisms = \
['NoisyHist1', 'NoisyHist2'] + \
[f'ReportNoisyMax{i}' for i in range(1, 5)] + \
[f'SVT{i}' for i in range(1, 7) if i != 2]
df['old'] = False
df.loc[old_mechanisms, 'old'] = True
df = df.sort_values(by=['old', 'mechanism'])
return df
def label_barh(ax, pos, val, color, to_text=lambda v: "{:.3f}".format(round(v, 3)), logindent=None, project=True):
"""
Add horizontal bar labels.
"""
# get axis information
x_min = ax.get_xlim()[0]
x_max = ax.get_xlim()[1]
ax_width = ax.get_position().bounds[2]
for (p, v) in zip(pos, val):
label_indent = 0.005 * (x_max - x_min) / ax_width
text = to_text(v)
if text == 'nan':
text = 'Error'
if math.isnan(v):
v = x_min
if project:
v = min(v, x_max)
v = max(v, x_min)
if x_min <= v <= x_max:
if logindent:
v *= logindent
else:
v += label_indent
ax.text(v, p - 0.03, text,
color=color,
fontsize=font_size,
horizontalalignment='left',
verticalalignment='center'
)
def get_powers():
"""
Get data for power evaluation.
"""
# parse tool results
reg = tool_reg_data['eps_lcb'].add_prefix('tool-reg-')
mlp = tool_mlp_data['eps_lcb'].add_prefix('tool-mlp-')
# parse StatDP results
statdp_1 = statdp_1_data['statdp_result']
statdp_1 = statdp_1[['eps_lcb', 'eps_preliminary']].add_prefix('statdp_1-')
statdp_2 = statdp_2_data['statdp_result']
statdp_2 = statdp_2[['eps_lcb', 'eps_preliminary']].add_prefix('statdp_2-')
ret = reg.join(mlp, how='outer').join(statdp_1, how='outer').join(statdp_2, how='outer')
ret = ret[['tool-reg-eps_lcb', 'tool-mlp-eps_lcb', 'statdp_1-eps_lcb', 'statdp_1-eps_preliminary',
'statdp_2-eps_lcb', 'statdp_2-eps_preliminary']]
ret = add_old_flag(ret)
return ret
def plot_powers(output_dir):
"""
Plot power evaluation.
"""
df = get_powers()
df_old = df[df['old']]
df_new = df[~ df['old']]
sph.configure_plots("IEEE", font_size)
fig, axes = sph.subplots(
2, 1,
figsize=(11, figure_height),
nice_grid='x',
gridspec_kw={'height_ratios': [len(df_old), len(df_new)]}
)
for df_ax, ax in zip([df_old, df_new], axes):
mechanisms = df_ax.index.values.tolist()
# set axis limits
ax.set_xlim(0, 0.6)
ind = np.arange(len(mechanisms))
width = 0.23
# plot data
y = -ind
x = df_ax['statdp_2-eps_lcb']
ax.barh(y, df_ax['statdp_2-eps_preliminary'], width * 0.98, label='StatDP claimed (repeated)', fill=False,
edgecolor=color_statdp_2, linewidth=0.3)
bar_statdp_2 = ax.barh(y, x, width, label='StatDP (repeated)', color=color_statdp_2)
label_barh(ax, y, x, color=color_statdp_2)
y = y + width
x = df_ax['statdp_1-eps_lcb']
ax.barh(y, df_ax['statdp_1-eps_preliminary'], width * 0.98, label='StatDP claimed', fill=False,
edgecolor=color_statdp_1, linewidth=0.3)
bar_statdp_1 = ax.barh(y, x, width, label='StatDP', color=color_statdp_1)
label_barh(ax, y, x, color=color_statdp_1)
y = y + width
x = df_ax['tool-mlp-eps_lcb']
bar_tool_mlp = ax.barh(y, x, width, label='DD-Search Neural Network', color=color_mlp)
label_barh(ax, y, x, color=color_mlp)
y = y + width
x = df_ax['tool-reg-eps_lcb']
bar_tool_reg = ax.barh(y, x, width, label='DD-Search Logistic Regression', color=color_reg)
label_barh(ax, y, x, color=color_reg)
# label correctly
ax.yaxis.set_ticks_position('none')
ax.set_yticks(-ind + 1.5*width)
ax.set_yticklabels(mechanisms)
# set label
axes[0].set_xlabel(r'$\xi$')
axes[0].xaxis.set_label_coords(1.07, x_axis_label_offset_top)
axes[1].set_xlabel(r'$\xi$')
axes[1].xaxis.set_label_coords(1.07, x_axis_label_offset_top * len(df_old) / len(df_new))
# fix layout
fig.tight_layout(w_pad=0)
# add legend (must be after fixing layout)
axes[1].legend((bar_tool_reg, bar_tool_mlp, bar_statdp_1, bar_statdp_2),
("DD-Search Logistic Regression", "DD-Search Neural Network", "StatDP-Fixed (1\\textsuperscript{st} run)", "StatDP-Fixed (2\\textsuperscript{nd} run)"), loc='upper right')
# save output
output_file = os.path.join(output_dir, 'eval-powers.pdf')
sph.savefig(output_file)
improvement_reg_1 = df['tool-reg-eps_lcb'] / df['statdp_1-eps_lcb']
improvement_reg_2 = df['tool-reg-eps_lcb'] / df['statdp_2-eps_lcb']
improvement_mlp_1 = df['tool-mlp-eps_lcb'] / df['statdp_1-eps_lcb']
improvement_mlp_2 = df['tool-mlp-eps_lcb'] / df['statdp_2-eps_lcb']
print("Max [Median] power factor (Logistic, run 1): {} [{}]".format(improvement_reg_1.max(),
improvement_reg_1.mean()))
print("Max [Median] power factor (Logistic, run 2): {} [{}]".format(improvement_reg_2.max(),
improvement_reg_2.mean()))
print("Max [Median] power factor (MLP, run 1): {} [{}]".format(improvement_mlp_1.max(),
improvement_mlp_1.mean()))
print("Max [Median] power factor (MLP, run 2): {} [{}]".format(improvement_mlp_2.max(),
improvement_mlp_2.mean()))
def get_runtimes():
reg = pd.concat([tool_reg_data['time_dd_search'], tool_reg_data['time_final_estimate_eps']], axis=1)
reg = reg.loc[:, ~reg.columns.duplicated()] # drop duplicate columns
reg = reg.add_prefix('tool-reg-')
mlp = pd.concat([tool_mlp_data['time_dd_search'], tool_mlp_data['time_final_estimate_eps']], axis=1)
mlp = mlp.loc[:, ~mlp.columns.duplicated()] # drop duplicate columns
mlp = mlp.add_prefix('tool-mlp-')
# parse StatDP results
statdp_1 = pd.concat([statdp_1_data[k] for k in statdp_1_data.keys() if k.endswith("_time")], axis=1)\
.add_suffix("_1")
statdp_2 = pd.concat([statdp_2_data[k] for k in statdp_2_data.keys() if k.endswith("_time")], axis=1)\
.add_suffix("_2")
ret = reg.join(mlp, how='outer').join(statdp_1, how='outer').join(statdp_2, how='outer')
ret = ret[['tool-reg-time_dd_search', 'tool-mlp-time_dd_search', 'tool-reg-time_final_estimate_eps',
'tool-mlp-time_final_estimate_eps', 'statdp_time_1', 'statdp_time_2']]
ret = add_old_flag(ret)
return ret
def time_to_str(t):
if math.isnan(t):
return 'Error'
seconds = t
if seconds < 60:
return "{:.0f}".format(round(seconds)) + "sec"
minutes = seconds / 60
if minutes < 60:
return "{:.0f}".format(round(minutes)) + "min"
hours = minutes / 60
return "{:.0f}".format(round(hours)) + "h"
@ticker.FuncFormatter
def time_formatter(x, pos):
return time_to_str(x)
def plot_runtimes(output_dir):
times = get_runtimes()
times_old = times[times['old']]
times_new = times[~ times['old']]
sph.configure_plots("IEEE", font_size)
fig, axes = sph.subplots(
2, 1,
figsize=(11, figure_height),
nice_grid='x',
gridspec_kw={'height_ratios': [len(times_old), len(times_new)]}
)
for times_ax, ax in zip([times_old, times_new], axes):
mechanisms = times_ax.index.values.tolist()
ax.set_xlim(1, times.max().max())
ind = np.arange(len(mechanisms))
width = 0.23
y = -ind
x = times_ax['statdp_time_2']
ax.barh(y, x, width, label='StatDP (repeated)', color=color_statdp_2)
label_barh(ax, y, x, color=color_statdp_2, to_text=time_to_str, logindent=1.05)
y = y + width
x = times_ax['statdp_time_1']
ax.barh(y, x, width, label='StatDP', color=color_statdp_1)
label_barh(ax, y, x, color=color_statdp_1, to_text=time_to_str, logindent=1.05)
time_tool_mlp = times_ax['tool-mlp-time_dd_search'] + times_ax['tool-mlp-time_final_estimate_eps']
y = y + width
x = time_tool_mlp
ax.barh(y, x, width, label='DD-Search MLP', color=color_mlp)
label_barh(ax, y, x, color=color_mlp, to_text=time_to_str, logindent=1.05)
time_tool_reg = times_ax['tool-reg-time_dd_search'] + times_ax['tool-reg-time_final_estimate_eps']
y = y + width
x = time_tool_reg
ax.barh(y, x, width, label='DD-Search Logistic', color=color_reg)
label_barh(ax, y, x, color=color_reg, to_text=time_to_str, logindent=1.05)
# label correctly
ax.yaxis.set_ticks_position('none')
ax.set_yticks(-ind + 1.5*width)
ax.set_yticklabels(mechanisms)
# set x axis as times
ax.set_xscale('log')
# ax.xaxis.set_major_formatter(time_formatter)
# set label
axes[0].set_xlabel('sec')
axes[0].xaxis.set_label_coords(0.98, x_axis_label_offset_top)
axes[1].set_xlabel('sec')
axes[1].xaxis.set_label_coords(0.98, x_axis_label_offset_top * len(times_old) / len(times_new))
# fix layout
fig.tight_layout()
# save output
output_file = os.path.join(output_dir, 'eval-runtimes.pdf')
sph.savefig(output_file)
speedup_reg_1 = times['statdp_time_1'] / (times['tool-reg-time_dd_search'] + times['tool-reg-time_final_estimate_eps'])
speedup_mlp_1 = times['statdp_time_1'] / (times['tool-mlp-time_dd_search'] + times['tool-mlp-time_final_estimate_eps'])
speedup_reg_2 = times['statdp_time_2'] / (times['tool-reg-time_dd_search'] + times['tool-reg-time_final_estimate_eps'])
speedup_mlp_2 = times['statdp_time_2'] / (times['tool-mlp-time_dd_search'] + times['tool-mlp-time_final_estimate_eps'])
print("Average speedup (Logistic, run 1):", speedup_reg_1.mean())
print("Average speedup (Logistic, run 2):", speedup_reg_2.mean())
print("Average speedup (MLP, run 1):", speedup_mlp_1.mean())
print("Average speedup (MLP, run 2):", speedup_mlp_2.mean())
def analyze_probe_times():
x_1 = statdp_1_data["statdp_time_one_probe"]
x_2 = statdp_2_data["statdp_time_one_probe"]
x = pd.concat([x_1, x_2])
x = x.groupby('mechanism').mean()
y = pd.concat([tool_reg_data["time_dd_search"], tool_reg_data["time_final_estimate_eps"]], axis=1)
y["time_tool"] = y.time_dd_search + y.time_final_estimate_eps
z = pd.concat([x, y], axis=1)
z["speedup"] = z.statdp_time_one_probe / z.time_tool
print(z[["time_tool", "statdp_time_one_probe", "speedup"]])
print("Average per-probe speedup:", z["speedup"].mean())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', required=True, help='the directory containing the input data logs')
parser.add_argument('--output-dir', required=True, help='the directory to be used for the created plots')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tool_reg_data = DataReader(args.data_dir).read_data("dd_search_reg")
tool_mlp_data = DataReader(args.data_dir).read_data("dd_search_mlp")
statdp_1_data = DataReader(args.data_dir).read_data("statdp_1")
statdp_2_data = DataReader(args.data_dir).read_data("statdp_2")
plot_powers(args.output_dir)
plot_runtimes(args.output_dir)
analyze_probe_times()
| 36.345679 | 190 | 0.60034 | 2,066 | 14,720 | 4.023717 | 0.168925 | 0.023578 | 0.017322 | 0.02887 | 0.51221 | 0.444124 | 0.370624 | 0.32792 | 0.26633 | 0.238903 | 0 | 0.023621 | 0.263723 | 14,720 | 404 | 191 | 36.435644 | 0.743403 | 0.075 | 0 | 0.140741 | 0 | 0 | 0.19001 | 0.05678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040741 | false | 0 | 0.037037 | 0.003704 | 0.114815 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
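The script is meant to be driven from the command line; a sketch of both entry points, with placeholder paths:

# Command line (placeholder paths):
#   python create_plots.py --data-dir logs/ --output-dir plots/
#
# The reader can also be used programmatically:
reader = DataReader('logs/')
data = reader.read_data('dd_search_reg')
print(data.keys())  # one DataFrame per data_type found in the *_data.log files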
ac251fe9e4702d162dc629ff393a339d4225cbd1 | 676 | py | Python | geotiff/process.py | h4k1m0u/scikit-image-scripts | 2197f23b904463b358421bc8a8bd85a3cb4cc2f1 | [
"MIT"
] | 40 | 2017-04-09T00:00:42.000Z | 2021-09-27T15:36:00.000Z | geotiff/process.py | h4k1m0u/scikit-image-scripts | 2197f23b904463b358421bc8a8bd85a3cb4cc2f1 | [
"MIT"
] | null | null | null | geotiff/process.py | h4k1m0u/scikit-image-scripts | 2197f23b904463b358421bc8a8bd85a3cb4cc2f1 | [
"MIT"
] | 15 | 2017-03-03T00:31:53.000Z | 2021-07-15T13:41:47.000Z | #!/usr/bin/env python
"""Process images opened with GDAL."""
import logging
from geotiff.io import IO
from sentinel_hub.constants import LOGFILE
# logging to file
logging.basicConfig(
filename=LOGFILE,
level=logging.DEBUG,
format='[LOG] %(asctime)s: %(message)s'
)
class Process:
"""Processing of images opened with GDAL."""
@staticmethod
def process(path_in):
"""Open/Process/Write the image in the given path.
Args:
path_in(str)
"""
arr_in = IO.read(path_in)
arr_out = arr_in
path_out = path_in
IO.write(arr_out, path_out)
logging.info('%s processed [Ok]' % path_in)
| 21.125 | 58 | 0.630178 | 91 | 676 | 4.549451 | 0.538462 | 0.072464 | 0.077295 | 0.096618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.254438 | 676 | 31 | 59 | 21.806452 | 0.821429 | 0.266272 | 0 | 0 | 0 | 0 | 0.103524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
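Since arr_out is currently just arr_in, the processor is a copy-through placeholder; invoking it is a one-liner (the path is illustrative):

Process.process('data/scene.tif')  # reads, (trivially) processes, and rewrites in place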
ac26107cbb9a809da7b55e9ed4941e8ca10d0cf6 | 1,319 | py | Python | leetcode/91_Decode_Ways.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | leetcode/91_Decode_Ways.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | leetcode/91_Decode_Ways.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | import unittest
# A message containing letters from A-Z is being encoded to numbers using the following mapping:
# 'A' -> 1
# 'B' -> 2
# Given a non-empty string containing only digits, determine the total number of ways to decode it.
s = "226"
output_value = 3
class funcTest(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.numDecodings(s), output_value)
class Solution:
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
if s == "":
return 0
        # we don't care about the exact letters, only the number of ways
        # dp stores the number of ways to decode each prefix of s
dp = [1] + n * [0]
        # each non-zero digit adds the ways of the previous prefix, plus we
        # consider the 2-digit combo ending at the current position
        # (letters map to 1-26, so combos beyond 2 digits can never occur)
for i in range(1, n + 1):
if s[i - 1] != "0": # 0 has no matching alphabet
dp[i] += dp[i - 1]
if i != 1 and "09" < s[i - 2:i] < "27":
dp[i] += dp[i - 2]
return dp[-1]
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra conditions for jupyter notebook
| 29.977273 | 101 | 0.573161 | 193 | 1,319 | 3.865285 | 0.528497 | 0.016086 | 0.048257 | 0.016086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031077 | 0.316907 | 1,319 | 43 | 102 | 30.674419 | 0.796892 | 0.440485 | 0 | 0 | 0 | 0 | 0.051724 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.095238 | false | 0 | 0.047619 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
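A worked trace of the DP on s = "226", step by step, confirming the expected output of 3:

# dp starts as [1, 0, 0, 0]
# i=1: s[0]='2' != '0'           -> dp[1] = dp[0] = 1              decodes: "B"
# i=2: s[1]='2' != '0'           -> dp[2] += dp[1]  (= 1)          "B B"
#      "09" < s[0:2]='22' < "27" -> dp[2] += dp[0] -> dp[2] = 2    "V"
# i=3: s[2]='6' != '0'           -> dp[3] += dp[2]  (= 2)          "... F"
#      "09" < s[1:3]='26' < "27" -> dp[3] += dp[1] -> dp[3] = 3    "B Z"
assert Solution().numDecodings("226") == 3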
ac2789666e72ceb9fbeb9dabfea66d4985154be5 | 558 | py | Python | python/python-algorithm-intervew/demo/m-demo-1.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | 1 | 2022-03-06T03:49:31.000Z | 2022-03-06T03:49:31.000Z | python/python-algorithm-intervew/demo/m-demo-1.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | python/python-algorithm-intervew/demo/m-demo-1.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | import collections
def solution(lottos, win_nums):
    # number of matching numbers: prize rank
ranking = {
6: 1,
5: 2,
4: 3,
3: 4,
2: 5,
1: 6,
0: 6
}
    # element counts of lottos and win_nums
l_count = collections.Counter(lottos)
w_count = collections.Counter(win_nums)
worst = len(lottos) - len(w_count - l_count)
best = worst + l_count[0]
return [ranking[best], ranking[worst]]
if __name__ == '__main__':
lottos = [44, 1, 0, 0, 31, 25]
win_nums = [31, 10, 45, 1, 6, 19]
solution(lottos, win_nums)
| 19.241379 | 48 | 0.539427 | 80 | 558 | 3.5375 | 0.4625 | 0.09894 | 0.120141 | 0.14841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.329749 | 558 | 28 | 49 | 19.928571 | 0.665775 | 0.053763 | 0 | 0 | 0 | 0 | 0.015238 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
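A worked trace of the sample input: zeros stand for erased (unknown) numbers, so the Counter subtraction counts only the guaranteed misses.

# lottos   = [44, 1, 0, 0, 31, 25]  -> Counter({0: 2, 44: 1, 1: 1, 31: 1, 25: 1})
# win_nums = [31, 10, 45, 1, 6, 19]
# w_count - l_count leaves {10, 45, 6, 19}: 4 numbers that can never match
# worst = 6 - 4 = 2 matches              -> ranking[2] = 5
# best  = worst + l_count[0] = 2 + 2 = 4 -> ranking[4] = 3
# so solution(lottos, win_nums) evaluates to [3, 5]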
ac29b73ecad50faf13ac93ac2b467f807cd2b65c | 1,322 | py | Python | ide/src/find_window_normal_search.py | Pfeifenjoy/compilerbau-WS17-18 | d04dddd49ba452aae4d6ec0e1408af8401c0edcc | [
"MIT"
] | null | null | null | ide/src/find_window_normal_search.py | Pfeifenjoy/compilerbau-WS17-18 | d04dddd49ba452aae4d6ec0e1408af8401c0edcc | [
"MIT"
] | null | null | null | ide/src/find_window_normal_search.py | Pfeifenjoy/compilerbau-WS17-18 | d04dddd49ba452aae4d6ec0e1408af8401c0edcc | [
"MIT"
] | 1 | 2018-02-06T21:52:04.000Z | 2018-02-06T21:52:04.000Z | from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QLineEdit
from find import Find
class FindWindowNormalSearch(QWidget):
def __init__(self, editor_tab_widget, parent=None):
super(FindWindowNormalSearch, self).__init__(parent)
self.editor_tab_widget = editor_tab_widget
layout = QGridLayout(self)
self.find_label = QLabel(self)
self.find_label.setText('Find:')
self.find_input = QLineEdit(self)
self.find_input.textChanged.connect(self.slot_text_changed)
self.find_input.returnPressed.connect(self.slot_return_pressed)
layout.addWidget(self.find_label, 0, 0)
layout.addWidget(self.find_input, 0, 1)
self.setLayout(layout)
def slot_text_changed(self):
text_to_find = self.find_input.text()
options = {
'regular_expression': False,
'case_sensitive': False,
'whole_word': False,
'wrap': True,
'forward_search': True,
'line': 0,
'index': 0,
'show': True,
'posix': False
}
Find.find(self.editor_tab_widget, text_to_find, options)
def slot_return_pressed(self):
Find.find_next(self.editor_tab_widget)
def focus_input(self):
self.find_input.setFocus()
| 30.045455 | 71 | 0.63767 | 153 | 1,322 | 5.215686 | 0.366013 | 0.100251 | 0.097744 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007179 | 0.262481 | 1,322 | 43 | 72 | 30.744186 | 0.811282 | 0 | 0 | 0 | 0 | 0 | 0.062784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.060606 | 0 | 0.212121 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac2bad3da2dfb55ac8d7aec2a8614b9fba397bb4 | 442 | py | Python | src/SurveyDataViewer/settings/linux_server.py | UCHIC/SurveyDataViewer | 6027ea075a5c11c7686304eb9dd169664cee5c58 | [
"BSD-3-Clause"
] | 10 | 2015-01-20T17:04:47.000Z | 2020-10-24T02:16:00.000Z | src/SurveyDataViewer/settings/linux_server.py | UCHIC/SurveyDataViewer | 6027ea075a5c11c7686304eb9dd169664cee5c58 | [
"BSD-3-Clause"
] | 65 | 2015-01-16T19:17:18.000Z | 2018-02-12T23:03:11.000Z | src/SurveyDataViewer/settings/linux_server.py | UCHIC/SurveyDataViewer | 6027ea075a5c11c7686304eb9dd169664cee5c58 | [
"BSD-3-Clause"
] | 2 | 2019-07-08T20:57:14.000Z | 2020-06-02T13:29:25.000Z | from SurveyDataViewer.settings.base import *
DEBUG = False
TEMPLATE_DEBUG = False
DEPLOYED = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
if "host" in data:
ALLOWED_HOSTS.append(data["host"])
if "host_alt" in data:
ALLOWED_HOSTS.append(data["host_alt"])
SITE_URL = 'surveys/'
STATIC_ROOT = data["static_root"]
STATIC_URL = SITE_URL + data["static_url"]
MEDIA_ROOT = data["media_root"]
MEDIA_URL = SITE_URL + data["media_url"]
| 22.1 | 44 | 0.723982 | 66 | 442 | 4.590909 | 0.424242 | 0.118812 | 0.085809 | 0.118812 | 0.211221 | 0.211221 | 0.211221 | 0 | 0 | 0 | 0 | 0.015666 | 0.133484 | 442 | 19 | 45 | 23.263158 | 0.775457 | 0 | 0 | 0 | 0 | 0 | 0.204082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac2c1ebedcb0277b5f468a76c9912861addb8ccd | 803 | py | Python | DeepLearning/LossFunctions/MultiCSE.py | ThisGame42/Deep-Learning-Models-4-Muscle-Bones-Segmentation | 1f34f0c5870b57994d652c0d77600ec7f25ec4a9 | [
"Apache-2.0"
] | 2 | 2021-03-15T10:22:55.000Z | 2021-06-03T15:44:01.000Z | DeepLearning/LossFunctions/MultiCSE.py | ThisGame42/Deep-Learning-Models-4-Muscle-Bones-Segmentation | 1f34f0c5870b57994d652c0d77600ec7f25ec4a9 | [
"Apache-2.0"
] | null | null | null | DeepLearning/LossFunctions/MultiCSE.py | ThisGame42/Deep-Learning-Models-4-Muscle-Bones-Segmentation | 1f34f0c5870b57994d652c0d77600ec7f25ec4a9 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:52:32.000Z | 2022-02-02T03:52:32.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from Utils.Evaluation import flatten
class MultiClassCSE(nn.Module):
def __init__(self, weights=None, num_classes=14):
super(MultiClassCSE, self).__init__()
self.num_classes = num_classes
# if weights is None:
# weights = torch.ones(num_classes) / num_classes
# assert torch.sum(weights) == 1.
# self.weights = weights
def forward(self, inputs, targets):
inputs = F.softmax(inputs, dim=1)
probs_f = flatten(inputs)
target_f = flatten(targets)
cse_loss = torch.zeros(self.num_classes)
for i in range(probs_f.size()[0]):
cse_loss[i] = F.binary_cross_entropy(probs_f[i], target_f[i])
return torch.mean(cse_loss)
| 28.678571 | 73 | 0.646326 | 110 | 803 | 4.5 | 0.445455 | 0.121212 | 0.052525 | 0.080808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008278 | 0.247821 | 803 | 27 | 74 | 29.740741 | 0.811258 | 0.156912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
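A shape-level usage sketch, assuming flatten() from Utils.Evaluation reshapes (batch, classes, ...) tensors to (classes, N) and that targets are one-hot along the class dimension:

import torch
import torch.nn.functional as F

criterion = MultiClassCSE(num_classes=14)
logits = torch.randn(2, 14, 8, 8)         # (batch, classes, H, W)
labels = torch.randint(0, 14, (2, 8, 8))  # integer class map
targets = F.one_hot(labels, num_classes=14).permute(0, 3, 1, 2).float()
loss = criterion(logits, targets)         # mean of the per-class BCE terms
print(loss.item())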
ac2d7c7d406a5ac2e573e93b6de05648d90c5d05 | 2,246 | py | Python | dynamodb-streams-lambda-filter/table_summary.py | MauriceBrg/snippets | 7fb3a5fa553fc72e4327eeac26521b63fc2dbcd5 | [
"Unlicense"
] | 2 | 2022-01-10T16:07:27.000Z | 2022-02-23T03:41:21.000Z | dynamodb-streams-lambda-filter/table_summary.py | MauriceBrg/snippets | 7fb3a5fa553fc72e4327eeac26521b63fc2dbcd5 | [
"Unlicense"
] | null | null | null | dynamodb-streams-lambda-filter/table_summary.py | MauriceBrg/snippets | 7fb3a5fa553fc72e4327eeac26521b63fc2dbcd5 | [
"Unlicense"
] | null | null | null | import time
from datetime import datetime
import boto3
from event_generator import USER_IDS, VIDEO_IDS, TABLE_NAME
QUERY_INTERVAL_IN_SECONDS = 15
ATTRIBUTES = ["views", "duration", "likes", "dislikes"]
TABLE = boto3.resource("dynamodb").Table(TABLE_NAME)
DYNAMODB = boto3.resource("dynamodb")
TABLE_CELL_WIDTH = 10
def get_summary(pk, key, list_of_ids):
response = DYNAMODB.batch_get_item(
RequestItems={
TABLE_NAME: {
"Keys": [
{"PK": f"{pk}#{id_}", "SK": "SUMMARY"} for id_ in list_of_ids
]
}
}
)
items = response["Responses"][TABLE_NAME]
clean_items = []
for item in items:
clean_item = {key: item[key]}
for attr in ATTRIBUTES:
clean_item[attr] = int(item.get(attr, 0))
clean_items.append(clean_item)
return sorted(clean_items, key=lambda x: x[key])
def print_table(list_of_rows, key):
def get_divider(num_cells, row_char="-", middle_char="+"):
single_cell = row_char * TABLE_CELL_WIDTH
cells = [single_cell for _ in range(num_cells)]
return "|" + middle_char.join(cells) + "|"
def get_row(values, middle_char="|"):
values = [str(value).ljust(TABLE_CELL_WIDTH)[:TABLE_CELL_WIDTH] for value in values]
return "|" + middle_char.join(values) + "|"
num_cells = len(ATTRIBUTES) + 1
print("")
print(get_divider(num_cells))
header = [key] + ATTRIBUTES
print(get_row(header))
print(get_divider(num_cells))
attrs = [key] + ATTRIBUTES
for row in list_of_rows:
cells = [row.get(item) for item in attrs]
print(get_row(cells))
print(get_divider(num_cells))
print(f"Fetched at {datetime.now().isoformat(timespec='seconds')}")
print("")
def format_delta(old, new):
if old < new:
return f"+ {new - old}"
    if old > new:
        return f"- {old - new}"
return "± 0"
def main():
while True:
print_table(get_summary("VIDEO", "videoId", VIDEO_IDS), "videoId")
print_table(get_summary("USER", "userId", USER_IDS), "userId")
print("x" * 56)
time.sleep(QUERY_INTERVAL_IN_SECONDS)
if __name__ == "__main__":
main()
| 25.235955 | 92 | 0.612199 | 291 | 2,246 | 4.460481 | 0.312715 | 0.03698 | 0.043143 | 0.05547 | 0.085516 | 0.032357 | 0.032357 | 0 | 0 | 0 | 0 | 0.007151 | 0.252894 | 2,246 | 88 | 93 | 25.522727 | 0.765793 | 0 | 0 | 0.080645 | 0 | 0 | 0.094835 | 0.020481 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.258065 | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
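format_delta is defined but unused above; a quick demonstration of the intended output:

print(format_delta(10, 13))  # "+ 3"
print(format_delta(13, 10))  # "- 3"
print(format_delta(10, 10))  # "± 0"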
ac2e298d76be29a6946644d2526f90fa069cc64f | 2,437 | py | Python | scripts/docstr2md.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 17 | 2017-12-08T10:21:18.000Z | 2022-01-13T09:29:43.000Z | scripts/docstr2md.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 61 | 2018-07-21T21:37:12.000Z | 2021-07-10T12:49:15.000Z | scripts/docstr2md.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 6 | 2019-06-06T18:29:31.000Z | 2021-08-20T13:32:17.000Z |
import importlib
module_name = "neworder"
md = "docs/api.md"
type_mapping = {
"<class 'pybind11_builtins.pybind11_type'>": "class",
"<class 'instancemethod'>": "instance method",
"<class 'wrapper_descriptor'>": "(ignore)",
"<class 'builtin_function_or_method'>": "function",
"<class 'module'>": "module",
"<class 'type'>": "class",
"<class 'property'>": "property"
}
def badge(t):
colour = {
"class": "darkgreen",
"property": "lightgreen",
"instance method": "orange",
"function": "red",
"module": "blue"
}
h = ""
return "" % (t, h, t, colour[t])
def format_overloads(lines):
for i, l in enumerate(lines):
if l[:2] == "1." or l[:2] == "2." or l[:2] == "3." or l[:2] == "4.":
lines[i] = "```python\n" + l[2:].replace("_neworder_core", "neworder") + "\n```"
return lines
def format_heading(l, a, t):
return "%s %s `%s`\n\n" % ("#"*l, badge(t), ".".join(a))
def format_docstr(m, t):
if not m.__doc__:
return "\n`__doc__` empty\n\n"
doc = m.__doc__
lines = format_overloads(doc.split("\n"))
for i,l in enumerate(lines):
lines[i] = l.lstrip()
if t in ["instance method", "function"]:
lines[0] = "```python\n" + lines[0].replace("_neworder_core", "neworder") + "\n```"
return "\n".join(lines) + "\n"
def recurse_attrs(m, parents, l, f):
attrs = [a for a in dir(m) if a[:2] != "__" or a == "__init__"]
#print(attrs)
#print("%s: parents=%s" % (m, ".".join(parents)))
for a in attrs:
if a in ["np", "numpy"]:
break
sm = getattr(m, a)
print(a, str(type(sm)))
t = type_mapping.get(str(type(sm)), None)
#t = str(type(sm))
if t is None: break
if t == "(ignore)": continue
if t != "instance method" and t != "function" or (t == "function" and l == 2):
f.write("---\n\n")
# if t == "module":
# l = 1
if hasattr(sm, "__name__"):
name = sm.__name__.replace("_neworder_core", "neworder")
else:
name = a
f.write(format_heading(l, [name], t))
f.write(format_docstr(sm, t))
if ("class" in t or "module" in t or "property" in t) and "itertools" not in t:
recurse_attrs(sm, parents + [name], l+1, f)
parents = parents[:-2]
module = importlib.import_module(module_name)
with open(md, "w") as f:
f.write("#  `neworder`\n")
recurse_attrs(module, ["neworder"], 2, f)
| 29.361446 | 87 | 0.573656 | 359 | 2,437 | 3.749304 | 0.259053 | 0.008915 | 0.008915 | 0.060178 | 0.114413 | 0.081724 | 0 | 0 | 0 | 0 | 0 | 0.010786 | 0.201067 | 2,437 | 82 | 88 | 29.719512 | 0.680534 | 0.042265 | 0 | 0.03125 | 0 | 0.015625 | 0.307692 | 0.036098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.03125 | 0.015625 | 0.1875 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac2e7b4a896462983a57713d0c8ede6a92617e94 | 800 | py | Python | minigest/docfisc/models/documento_commerciale.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | minigest/docfisc/models/documento_commerciale.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | 1 | 2021-09-22T19:10:20.000Z | 2021-09-22T19:10:20.000Z | minigest/docfisc/models/documento_commerciale.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | from decimal import Decimal
from django.db import models
class DocumentoCommerciale(models.Model):
aliquota_iva = models.DecimalField(
max_digits=19,
decimal_places=2,
)
corrispettivo = models.DecimalField(
max_digits=19,
decimal_places=2,
help_text="Il totale del corrispettivo compreso di IVA",
)
@property
def totale(self):
return self.corrispettivo
@property
def imposta(self):
imposta = (self.corrispettivo * self.aliquota_iva) / (100 + self.aliquota_iva)
return Decimal(round(imposta, 2))
@property
def imponibile(self):
imponibile = (self.corrispettivo * 100) / (100 + self.aliquota_iva)
return Decimal(round(imponibile, 2))
class Meta:
abstract = True
| 23.529412 | 86 | 0.65 | 88 | 800 | 5.806818 | 0.409091 | 0.086106 | 0.088063 | 0.105675 | 0.309198 | 0.309198 | 0.309198 | 0.168297 | 0 | 0 | 0 | 0.028765 | 0.26125 | 800 | 33 | 87 | 24.242424 | 0.835871 | 0 | 0 | 0.28 | 0 | 0 | 0.05375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0.04 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
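A worked example of the VAT split implemented by the properties above: with a gross corrispettivo of 122.00 at a 22% rate, the tax is 22.00 and the net base is 100.00.

from decimal import Decimal

corrispettivo = Decimal('122.00')
aliquota_iva = Decimal('22.00')
imposta = (corrispettivo * aliquota_iva) / (100 + aliquota_iva)  # -> 22
imponibile = (corrispettivo * 100) / (100 + aliquota_iva)        # -> 100
print(round(imposta, 2), round(imponibile, 2))                   # 22.00 100.00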
ac38b6ee92f6e8acb73aea206b7e6151faeaeab1 | 2,546 | py | Python | yaps/utils/jobqueue.py | indraniel/yaps | 084cb71c5b3e4d237085e4b56a30f370578f88fe | [
"BSD-2-Clause"
] | null | null | null | yaps/utils/jobqueue.py | indraniel/yaps | 084cb71c5b3e4d237085e4b56a30f370578f88fe | [
"BSD-2-Clause"
] | null | null | null | yaps/utils/jobqueue.py | indraniel/yaps | 084cb71c5b3e4d237085e4b56a30f370578f88fe | [
"BSD-2-Clause"
] | null | null | null | import os, sqlite3, time
# for future threading considerations...punting for now...
from dummy_thread import get_ident
from yaps.utils.scheduler import bsub
class DrmaaJobQueue(object):
__create = (
'CREATE TABLE IF NOT EXISTS queue '
'( id INTEGER PRIMARY KEY AUTOINCREMENT, jobId INTEGER )'
)
__count = 'SELECT COUNT(*) from queue'
__iterate = 'SELECT id, jobId FROM queue ORDER BY jobId'
__append = 'INSERT INTO QUEUE (jobId) VALUES (?)'
__write_lock = 'BEGIN IMMEDIATE'
__jobs = 'SELECT jobId FROM queue ORDER BY jobId'
__clear = 'DELETE FROM queue'
__vacuum = 'VACUUM'
def __init__(self, path, logger):
self.path = os.path.abspath(path)
self.log = logger
self._connection_cache = {}
with self._get_db_connection() as c:
c.execute(self.__create)
def __len__(self):
count = 0
with self._get_db_connection() as c:
count = c.execute(self.__count).next()[0]
return count
def __iter__(self):
with self._get_db_connection() as c:
for id, job_id in c.execute(self.__iterate):
yield job_id
def _get_db_connection(self):
id = get_ident()
if id not in self._connection_cache:
self._connection_cache[id] = sqlite3.Connection(self.path, timeout=60)
return self._connection_cache[id]
def append(self, job_id):
with self._get_db_connection() as c:
c.execute(self.__write_lock)
c.execute(self.__append, (job_id,))
c.commit() # unlock the database
def jobs(self):
with self._get_db_connection() as c:
cursor = c.execute(self.__jobs)
job_ids = [ row[0] for row in cursor.fetchall() ]
return job_ids
def clear(self):
self.log.info("Clearing out the LSF job DB")
with self._get_db_connection() as c:
c.execute(self.__write_lock)
c.execute(self.__clear)
c.execute(self.__vacuum)
c.commit() # unlock the database
def wait(self, timeout, log):
if len(self) > 0:
ids = [str(j) for j in self.jobs()]
log.info("See {} lsf jobs to wait for:\n\t{}".format(len(ids), "\n\t".join(ids)))
bsub.poll(ids, timeout=timeout, log=log)
self.clear()
time.sleep(30) # wait a few seconds for the file system to catch up
else:
print("There are no LSF jobs to wait for!")
| 33.946667 | 93 | 0.595837 | 338 | 2,546 | 4.230769 | 0.328402 | 0.05035 | 0.075524 | 0.054545 | 0.265734 | 0.243357 | 0.169231 | 0.151049 | 0.109091 | 0.109091 | 0 | 0.005609 | 0.299686 | 2,546 | 74 | 94 | 34.405405 | 0.796411 | 0.057738 | 0 | 0.163934 | 0 | 0 | 0.1533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.04918 | 0 | 0.377049 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
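A usage sketch of the queue (Python 2, matching the .next() and dummy_thread usage above); the DB path, job ids, and logger name are placeholders.

import logging

log = logging.getLogger('yaps')
queue = DrmaaJobQueue('/tmp/lsf_jobs.db', log)
queue.append(123456)
queue.append(123457)
print(len(queue), list(queue))     # 2 [123456, 123457]
queue.wait(timeout=3600, log=log)  # polls bsub until both jobs finish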
ac38ba63ce6ed82a3db14912fa4a776881514704 | 1,299 | py | Python | forecast_utils/api_arima.py | FernandoRoldan93/CC2-Airflow | 963c5f58a59b4d60a8551f2553a6ec6e232a5fbd | [
"Apache-2.0"
] | null | null | null | forecast_utils/api_arima.py | FernandoRoldan93/CC2-Airflow | 963c5f58a59b4d60a8551f2553a6ec6e232a5fbd | [
"Apache-2.0"
] | null | null | null | forecast_utils/api_arima.py | FernandoRoldan93/CC2-Airflow | 963c5f58a59b4d60a8551f2553a6ec6e232a5fbd | [
"Apache-2.0"
] | null | null | null | import json
from datetime import datetime
from flask import Flask, Response, jsonify
import pandas as pd
app = Flask(__name__)
import Forecast
@app.route("/arima/<string:intervalo>", methods=['GET'])
def get_prediccion_arima(intervalo):
    ## Check that the interval is valid; if it is not, return an error message
    if intervalo not in ['24','48','72']:
        return Response("Invalid request: the interval must be 24, 48 or 72 hours", status=400)
    ## Create a Forecast object and get the prediction; if the model has not been built before, it is built first and then queried
forecast = Forecast.Forecast()
fc_hum, fc_temp = forecast.predict_weather_ARIMA(intervalo)
    ## Build a list of hours, in one-hour steps, from the current time up to the requested interval
intervalo = int(intervalo)
horas = pd.date_range(datetime.now(), periods=intervalo, freq="H")
list_horas = []
for hora in horas:
list_horas.append(hora.strftime('%Y.%m.%d:%H.%M'))
    ## Build the response in JSON format
count = 0
result = []
while count < intervalo:
result.append({'hour':list_horas[count], 'temperature':fc_temp[count], 'humidity':fc_hum[count]})
count +=1
return Response(json.dumps(result, indent=4), 200, mimetype="application/json") | 39.363636 | 153 | 0.73826 | 206 | 1,299 | 4.57767 | 0.524272 | 0.034995 | 0.033934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019039 | 0.150885 | 1,299 | 33 | 154 | 39.363636 | 0.835902 | 0.290993 | 0 | 0 | 0 | 0 | 0.168675 | 0.027382 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac39a3ab63bc3bff7a0188e946c4320a0c6d251e | 1,173 | py | Python | ebextensions-validator/ebextensions_validator/main.py | aws-samples/aws-elastic-beanstalk-deployment-workflow | 2b8cdc2e2bf2a91b52e901fb881c01368df9154f | [
"Apache-2.0"
] | null | null | null | ebextensions-validator/ebextensions_validator/main.py | aws-samples/aws-elastic-beanstalk-deployment-workflow | 2b8cdc2e2bf2a91b52e901fb881c01368df9154f | [
"Apache-2.0"
] | null | null | null | ebextensions-validator/ebextensions_validator/main.py | aws-samples/aws-elastic-beanstalk-deployment-workflow | 2b8cdc2e2bf2a91b52e901fb881c01368df9154f | [
"Apache-2.0"
] | 1 | 2021-08-04T07:30:19.000Z | 2021-08-04T07:30:19.000Z | #!/usr/bin/env python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import sys

from . import validator


def main(args=None):
    if not args:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description="Validate a config file from the .ebextensions directory against an allowlist of dictionaries"
    )
    parser.add_argument("config_file", help="Config file from .ebextensions directory")
    parser.add_argument(
        "allowlist_file",
        help="File which defines an allowlist of dictionaries. Regex can be used",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Print information about not allowlisted configuration",
        action="store_true",
    )
    args = parser.parse_args(args)
    if args.verbose:
        logging.basicConfig(format="* %(message)s", level=logging.INFO)
    result = validator.validate(args.config_file, args.allowlist_file)
    if result is True:
        print("Configuration is in allowlist")
    elif result is False:
        print("Configuration NOT in allowlist")
| 30.076923 | 113 | 0.681159 | 143 | 1,173 | 5.524476 | 0.559441 | 0.050633 | 0.064557 | 0.060759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003275 | 0.219096 | 1,173 | 38 | 114 | 30.868421 | 0.85917 | 0.104859 | 0 | 0.068966 | 0 | 0 | 0.350525 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.172414 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
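A sketch of how the entry point above can be driven programmatically instead of from the CLI (the file names here are hypothetical placeholders; the package path follows the repo layout):

# Validate a sample .ebextensions config against an allowlist, with verbose output.
from ebextensions_validator.main import main

main(["01_options.config", "allowlist.yml", "--verbose"])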
ac3b5e87bba5a4b56517eeb618d283bbbc99469b | 482 | py | Python | src/flotilla/cli/utils.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 5 | 2016-01-01T15:50:21.000Z | 2018-11-27T17:38:15.000Z | src/flotilla/cli/utils.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 27 | 2015-12-17T07:49:56.000Z | 2018-07-13T15:06:33.000Z | src/flotilla/cli/utils.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 7 | 2015-12-01T22:04:24.000Z | 2021-11-28T13:21:35.000Z | import logging

from botocore.exceptions import ClientError

QUEUE_NOT_FOUND = 'AWS.SimpleQueueService.NonExistentQueue'

logger = logging.getLogger('flotilla')


def get_queue(sqs, queue_name):
    try:
        return sqs.get_queue_by_name(QueueName=queue_name)
    except ClientError as e:
        error_code = e.response['Error'].get('Code')
        if error_code != QUEUE_NOT_FOUND:
            raise e
        logger.info('Queue %s not found.', queue_name)
        return None
| 25.368421 | 59 | 0.695021 | 62 | 482 | 5.193548 | 0.548387 | 0.074534 | 0.080745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213693 | 482 | 18 | 60 | 26.777778 | 0.849604 | 0 | 0 | 0 | 0 | 0 | 0.155602 | 0.080913 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
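A small usage sketch for get_queue (hypothetical: it assumes boto3 is installed and AWS credentials/region are configured; the queue name is a placeholder):

# Look up an SQS queue by name, tolerating the not-found case.
import boto3

sqs = boto3.resource('sqs')
queue = get_queue(sqs, 'flotilla-messages')
if queue is None:
    print('Queue does not exist yet')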
ac3c2c77f1ffb1aa868f31078f385ef6e642aef9 | 7,613 | py | Python | tenable/io/plugins.py | allenmichael/pyTenable | 8372cfdf3ced99de50227f6fbb37d6db2b26291e | [
"MIT"
] | null | null | null | tenable/io/plugins.py | allenmichael/pyTenable | 8372cfdf3ced99de50227f6fbb37d6db2b26291e | [
"MIT"
] | 1 | 2021-08-18T17:26:30.000Z | 2021-08-18T17:26:30.000Z | tenable/io/plugins.py | allenmichael/pyTenable | 8372cfdf3ced99de50227f6fbb37d6db2b26291e | [
"MIT"
] | null | null | null | '''
plugins
=======

The following methods allow for interaction into the Tenable.io
:devportal:`plugins <plugins>` API endpoints.

Methods available on ``tio.plugins``:

.. rst-class:: hide-signature
.. autoclass:: PluginsAPI

    .. automethod:: families
    .. automethod:: family_details
    .. automethod:: list
    .. automethod:: plugin_details
'''
from datetime import date
from tenable.io.base import TIOEndpoint, TIOIterator


class PluginIterator(TIOIterator):
    '''
    The plugins iterator provides a scalable way to work through plugin result
    sets of any size.  The iterator will walk through each page of data,
    returning one record at a time.  If it reaches the end of a page of
    records, then it will request the next page of information and then continue
    to return records from the next page (and the next, and the next) until the
    counter reaches the total number of records that the API has reported.

    Attributes:
        count (int): The current number of records that have been returned
        page (list):
            The current page of data being walked through.  Pages will be
            cycled through as the iterator requests more information from the
            API.
        page_count (int): The number of records returned from the current page.
        total (int):
            The total number of records that exist for the current request.
        populate_maptable (bool):
            Informs the iterator whether to construct the plugin to family maps
            for injecting the plugin family data into each item.
    '''
    _maptable = None
    populate_maptable = False

    def _populate_family_cache(self):
        '''
        Generates the maptable to use to graft on the plugin family information
        to the plugins.  Effectively what we are doing is generating a dictionary
        of 2 subdictionaries.  Each one of these is a simple hash table allowing
        the iterator to resolve the name of the family by ID and the family ID
        by the plugin membership.  This information is currently lacking in the
        plugin listing output and was requested by a customer.

        .. note::
            This currently seems to add about 7-10 seconds before the first item
            is returned, as it seems to take this long to generate the data.  We
            can focus on reducing this time later on with the introduction of
            multi-threaded iterators and async API calls.
        '''
        self._maptable = {
            'plugins': dict(),
            'families': dict()
        }
        for family in self._api.plugins.families():
            self._maptable['families'][family['id']] = family['name']
        for fam_id in self._maptable['families'].keys():
            for plugin in self._api.plugins.family_details(fam_id)['plugins']:
                self._maptable['plugins'][plugin['id']] = fam_id

    def next(self):
        item = super(PluginIterator, self).next()
        # If the populate_maptable flag is set, then we will build the mappings.
        if not self._maptable and self.populate_maptable:
            self._populate_family_cache()
        # If the maptable exists, then graft the plugin family information
        # on to the item.
        if self._maptable:
            try:
                fid = self._maptable['plugins'][item['id']]
                item['family_id'] = fid
                item['family_name'] = self._maptable['families'][fid]
            except KeyError:
                self._log.warning("plugin id {} not found in plugin family".format(item['id']))
                item['family_id'] = None
                item['family_name'] = None
        return item


class PluginsAPI(TIOEndpoint):
    '''
    This will contain all methods related to plugins
    '''

    def families(self):
        '''
        List the available plugin families.

        :devportal:`plugins: families <plugins-families>`

        Returns:
            :obj:`list`:
                List of plugin family resource records.

        Examples:
            >>> for family in tio.plugins.families():
            ...     pprint(family)
        '''
        return self._api.get('plugins/families').json()['families']

    def family_details(self, family_id):
        '''
        Retrieve the details for a specific plugin family.

        :devportal:`plugins: family-details <plugins-family-details>`

        Args:
            family_id (int): The plugin family unique identifier.

        Returns:
            :obj:`dict`:
                Returns a dictionary stating the id, name, and plugins that are
                housed within the plugin family.

        Examples:
            >>> family = tio.plugins.family_details(1)
        '''
        return self._api.get('plugins/families/{}'.format(
            self._check('family_id', family_id, int)
        )).json()

    def plugin_details(self, plugin_id):
        '''
        Retrieve the details for a specific plugin.

        :devportal:`plugins: plugin-details <plugins-plugin-details>`

        Args:
            plugin_id (int): The plugin id for the requested plugin.

        Returns:
            :obj:`dict`:
                A dictionary stating the id, name, family, and any other
                relevant attributes associated to the plugin.

        Examples:
            >>> plugin = tio.plugins.plugin_details(19506)
            >>> pprint(plugin)
        '''
        return self._api.get('plugins/plugin/{}'.format(
            self._check('plugin_id', plugin_id, int))).json()

    def list(self, page=None, size=None, last_updated=None, num_pages=None):
        '''
        Get the listing of plugin details from Tenable.io.

        :devportal:`plugins: list <plugins-list>`

        Args:
            size (int, optional):
                The number of records to retrieve.  Default is 1000.
            page (int, optional):
                The starting page to retrieve.  Default is 0.
            last_updated (date, optional):
                A datetime.date object stating when the threshold for the last
                updated field can be for a plugin.
            num_pages (int, optional):
                The total number of pages to request before stopping the
                iterator.

        Returns:
            :obj:`PluginIterator`:
                An iterator that handles the page management of the requested
                records.

        Examples:
            Getting the listing of all plugins:

            >>> for plugin in tio.plugins.list():
            ...     pprint(plugin)

            Retrieving all of the plugins updated since 2019-01-01:

            >>> for plugin in tio.plugins.list(last_updated=date(2019, 1, 1)):
            ...     pprint(plugin)

            Informing the iterator to cache the plugin family data for injection
            into each item:

            >>> plugins = tio.plugins.list(last_updated=date(2019, 1, 1))
            >>> plugins.populate_maptable = True
            >>> for plugin in plugins:
            ...     pprint(plugin)
        '''
        return PluginIterator(self._api,
            _api_version=2,
            _size=self._check('size', size, int, default=1000),
            _page_num=self._check('page', page, int, default=1),
            _query={
                'last_updated': self._check('last_updated', last_updated, date,
                    default=date(1970, 1, 1)).strftime('%Y-%m-%d')
            },
            _pages_total=self._check('num_pages', num_pages, int),
            _path='plugins/plugin',
            _resource='plugin_details')
| 36.252381 | 95 | 0.599238 | 909 | 7,613 | 4.931793 | 0.261826 | 0.022083 | 0.020076 | 0.010707 | 0.106402 | 0.093241 | 0.032567 | 0.032567 | 0.015615 | 0 | 0 | 0.008992 | 0.313411 | 7,613 | 209 | 96 | 36.425837 | 0.84867 | 0.588467 | 0 | 0 | 0 | 0 | 0.128033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.039216 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
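A short usage sketch for the API above (the keys are hypothetical placeholders; it assumes the pyTenable package is installed and that `TenableIO` is the session entry point, as in its public docs):

# Iterate recently updated plugins with family data grafted onto each record.
from datetime import date
from tenable.io import TenableIO

tio = TenableIO('ACCESS_KEY', 'SECRET_KEY')
plugins = tio.plugins.list(last_updated=date(2019, 1, 1))
plugins.populate_maptable = True  # graft family_id/family_name onto each item
for plugin in plugins:
    print(plugin['id'], plugin.get('family_name'))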
ac3c89997e32303ba8a0853aca1b5f546f6d429c | 5,533 | py | Python | bot.py | sayanmedya/Confession-Bot | cbd8d8ff16a77d0ce17c1cc45780d055bc6a6c11 | [
"MTLL"
] | null | null | null | bot.py | sayanmedya/Confession-Bot | cbd8d8ff16a77d0ce17c1cc45780d055bc6a6c11 | [
"MTLL"
] | null | null | null | bot.py | sayanmedya/Confession-Bot | cbd8d8ff16a77d0ce17c1cc45780d055bc6a6c11 | [
"MTLL"
] | 4 | 2020-12-12T10:45:25.000Z | 2021-08-14T15:23:12.000Z | import os
import asyncio
import discord
import datetime
import requests
from discord.ext import commands
from dotenv import load_dotenv

load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
TENOR_API_KEY = os.getenv('TENOR_API_KEY')

bot = commands.Bot(command_prefix='?')
servers = {}
confession_channel = {
    # server_id : confession_channel_id
    718886828891176997: 771297583753855007,
    786247752635908116: 786247752635908119,
    414279195821080597: 782703953086775346,
    768834106611204096: 770160051414892567,
    784002698588061727: 785057888959332353
}


def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


def get_tenor_url(view_url):
    if view_url.lower().endswith('gif'):
        return view_url
    gif_id = view_url.split('-')[-1]
    url = f'https://api.tenor.com/v1/gifs?ids={gif_id}&key={TENOR_API_KEY}'
    res = requests.get(url)
    if res.status_code == 200:
        return res.json()['results'][0]['media'][0]['gif']['url']
    else:
        return None


def get_giphy_url(view_url):
    if view_url.lower().endswith('gif'):
        return view_url
    else:
        gif_id = view_url.split('-')[-1]
        return f'https://media.giphy.com/media/{gif_id}/giphy.gif'


def prepare_embed(msg):
    embedVar = discord.Embed(title='Anonymous Confession')
    embedVar.timestamp = datetime.datetime.utcnow()
    if msg.content:
        embedVar.description = msg.content
    if msg.embeds:
        data = msg.embeds[0]
        if data.type == 'image':
            embedVar.set_image(url=data.url)
            if data.url == msg.content:
                embedVar.description = None
        if data.type == 'gifv' and data.provider.name == 'Tenor':
            embedVar.set_image(url=get_tenor_url(data.url))
            if data.url == msg.content:
                embedVar.description = None
        if data.type == 'gifv' and data.provider.name == 'Giphy':
            embedVar.set_image(url=get_giphy_url(data.url))
            if data.url == msg.content:
                embedVar.description = None
    if msg.attachments:
        file = msg.attachments[0]
        if file.url.lower().endswith(('png', 'jpeg', 'jpg', 'gif', 'webp')):
            embedVar.set_image(url=file.url)
        else:
            embedVar.add_field(name='Attachment', value=f'[{file.filename}]({file.url})')
    return embedVar


@bot.event
async def on_ready():
    print(f'{bot.user} is connected to the following guilds:\n')
    for guild in bot.guilds:
        print(f'{guild.name} (id: {guild.id})\n')
    for guild in bot.guilds:
        servers[guild.id] = {}
        async for member in guild.fetch_members(limit=None):
            servers[guild.id][member.id] = True


async def check_if_delete(msg, confession, confirmation):
    def check(deleted_msg):
        return msg.id == deleted_msg.id
    try:
        await bot.wait_for('message_delete', timeout=120, check=check)
        await confession.delete()
        await confirmation.edit(content=f'✅ Confession with message id `{confession.id}` in {confession.channel.mention} has been deleted.')
    except asyncio.TimeoutError:
        return


@bot.command()
@commands.dm_only()
async def confess(ctx):
    mutual_servers = []
    for guild in bot.guilds:
        if ctx.author.id in servers[guild.id]:
            mutual_servers.append(guild)
    embedVar = discord.Embed(title='Server Select')
    embedVar.description = '**'
    i = 0
    for guild in mutual_servers:
        i = i + 1
        embedVar.description += str(i) + ' - ' + guild.name + '\n\n'
    embedVar.description += '**'
    embedVar.set_footer(text='You have 1 minute to select a server - send "cancel" to cancel')
    await ctx.send(embed=embedVar)

    def server_select(msg):
        return msg.channel == ctx.channel and msg.author == ctx.author and ((is_int(msg.content) and int(msg.content) <= i and int(msg.content) >= 1) or msg.content == 'cancel')
    try:
        msg = await bot.wait_for('message', timeout=60, check=server_select)
    except asyncio.TimeoutError:
        await ctx.send('⏳ Server selection timed out. Please start a new confession.')
        return
    if msg.content == 'cancel':
        await ctx.send('✅ Cancelled')
        return

    guild = mutual_servers[int(msg.content) - 1]
    confess_in = bot.get_channel(confession_channel[guild.id])
    embedVar = discord.Embed()
    embedVar.title = 'Confessions : ' + guild.name
    embedVar.description = f'Simply type your confession / send an image link / upload a file to post it anonymously in {confess_in.mention}.'
    embedVar.set_footer(text='You have 2 minutes to respond - type "cancel" to abort')
    await ctx.send(embed=embedVar)

    def check_confess(msg):
        return msg.channel == ctx.channel and msg.author == ctx.author
    try:
        msg = await bot.wait_for('message', timeout=120, check=check_confess)
    except asyncio.TimeoutError:
        await ctx.send('⏳ Your confession timed out. Please start a new confession.')
        return
    if msg.content == 'cancel':
        await ctx.send('✅ Cancelled')
        return

    confession = await confess_in.send(embed=prepare_embed(msg))
    confirmation = await ctx.send(f'✅ Your confession has been added to {confess_in.mention}!')
    asyncio.create_task(check_if_delete(msg, confession, confirmation))

    def check_edit(before, after):
        return msg.id == after.id
    edit_count = 0
    if msg.edited_at:
        await confession.edit(embed=prepare_embed(msg))
        edit_count += 1
        await confirmation.edit(content=f'✅ Confession with message id `{confession.id}` in {confess_in.mention} has been edited ({edit_count}).')
    while True:
        try:
            before, after = await bot.wait_for('message_edit', timeout=120, check=check_edit)
            await confession.edit(embed=prepare_embed(after))
            edit_count += 1
            await confirmation.edit(content=f'✅ Confession with message id `{confession.id}` in {confess_in.mention} has been edited ({edit_count}).')
        except asyncio.TimeoutError:
            return


bot.run(TOKEN)
| 30.234973 | 171 | 0.721851 | 807 | 5,533 | 4.850062 | 0.229244 | 0.030659 | 0.021461 | 0.029637 | 0.398569 | 0.353858 | 0.28743 | 0.268012 | 0.226622 | 0.226622 | 0 | 0.044285 | 0.14296 | 5,533 | 182 | 172 | 30.401099 | 0.779418 | 0.005964 | 0 | 0.292517 | 0 | 0.027211 | 0.211531 | 0.018007 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054422 | false | 0 | 0.047619 | 0.027211 | 0.22449 | 0.013605 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
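The URL helpers above are pure string manipulation, so their behavior can be checked offline (the share link below is a made-up example):

# The suffix after the last '-' in a Giphy share link is the media id.
assert get_giphy_url('https://giphy.com/gifs/funny-cat-abc123') == 'https://media.giphy.com/media/abc123/giphy.gif'
# Links that already end in .gif pass through unchanged.
assert get_giphy_url('https://media.giphy.com/media/abc123/giphy.gif').endswith('giphy.gif')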
ac3d63c660e4b2feef891a3c806fb6e86926252e | 2,434 | py | Python | msticpy/datamodel/entities/file_hash.py | kubajir/msticpy | 7b319b71b191b5f75dcf9afd87492523a74b5ad7 | [
"MIT"
] | 820 | 2019-05-16T07:24:34.000Z | 2022-03-31T09:18:10.000Z | msticpy/datamodel/entities/file_hash.py | kubajir/msticpy | 7b319b71b191b5f75dcf9afd87492523a74b5ad7 | [
"MIT"
] | 205 | 2019-06-24T19:24:19.000Z | 2022-03-30T23:13:46.000Z | msticpy/datamodel/entities/file_hash.py | kubajir/msticpy | 7b319b71b191b5f75dcf9afd87492523a74b5ad7 | [
"MIT"
] | 171 | 2019-06-23T13:53:12.000Z | 2022-03-29T18:22:46.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""FileHash Entity class."""
from typing import Any, Mapping

from ..._version import VERSION
from ...common.utility import export
from .entity import Entity
from .entity_enums import Algorithm

__version__ = VERSION
__author__ = "Ian Hellen"


# pylint: disable=invalid-name
@export
class FileHash(Entity):
    """
    File Hash class.

    Attributes
    ----------
    Algorithm : Algorithm
        FileHash Algorithm
    Value : str
        FileHash Value

    """

    ID_PROPERTIES = ["Value"]

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        self.Algorithm: Algorithm = Algorithm.Unknown
        self.Value: str = ""
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            self._create_from_event(src_event)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Algorithm}: {self.Value}"

    @property
    def name_str(self) -> str:
        """Return Entity Name."""
        return self.Value

    def _create_from_event(self, src_event):
        self.Algorithm = src_event["Algorithm"]
        self.Value = src_event["HashValue"]

    _entity_schema = {
        # The hash algorithm (type System.String)
        "Algorithm": "Algorithm",
        # Value (type System.String)
        "Value": None,
        "TimeGenerated": None,
        "StartTime": None,
        "EndTime": None,
    }
| 26.456522 | 76 | 0.559984 | 248 | 2,434 | 5.334677 | 0.366935 | 0.042328 | 0.039305 | 0.028723 | 0.176871 | 0.0839 | 0.055933 | 0 | 0 | 0 | 0 | 0 | 0.279376 | 2,434 | 91 | 77 | 26.747253 | 0.754276 | 0.428924 | 0 | 0.054054 | 0 | 0 | 0.096558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.135135 | 0 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
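A minimal construction sketch for the entity above (assumes msticpy is installed; the hash value is a truncated placeholder, and the src_event keys mirror what _create_from_event reads):

# Build a FileHash entity from event-style properties and print its description.
from msticpy.datamodel.entities import FileHash

fh = FileHash(src_event={"Algorithm": "SHA256", "HashValue": "e3b0c44298fc1c149afbf4c8996fb924"})
print(fh.description_str)  # "<Algorithm>: <value>" per description_str above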
ac4032d24a99fa9575dca1e83f85d24928f415c1 | 309 | py | Python | exercises/app.py | dana19-meet/y2s18-flask | 2f964cf3499fd6522ee44549eb6a0308db9c4dd3 | [
"MIT"
] | null | null | null | exercises/app.py | dana19-meet/y2s18-flask | 2f964cf3499fd6522ee44549eb6a0308db9c4dd3 | [
"MIT"
] | null | null | null | exercises/app.py | dana19-meet/y2s18-flask | 2f964cf3499fd6522ee44549eb6a0308db9c4dd3 | [
"MIT"
] | null | null | null | from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def home_page():
    dancers = ["batseva", "ohad neharin", "maddie"]
    return render_template(
        "index.html",
        dancers=dancers,
        likes_same_sport=True)


if __name__ == '__main__':
    app.run(debug=True) | 23.769231 | 51 | 0.644013 | 37 | 309 | 4.918919 | 0.72973 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.216828 | 309 | 13 | 52 | 23.769231 | 0.752066 | 0 | 0 | 0 | 0 | 0 | 0.141935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac40e7ae712e9e906aa332752299e401ade1380a | 1,087 | py | Python | src/Classes/MSDS400/Module 4/from_matrix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/Classes/MSDS400/Module 4/from_matrix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/Classes/MSDS400/Module 4/from_matrix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt


def show_graph_with_labels(adjacency_matrix, mylabels):
    rows, cols = np.where(adjacency_matrix == 1)
    edges = zip(rows.tolist(), cols.tolist())
    g = nx.Graph()
    g.add_edges_from(edges)
    nx.draw(g, node_size=500, labels=mylabels, with_labels=True)
    return g


A = np.matrix([
    [0, 1, 0, 1, 0, 0],
    [1, 0, 0, 1, 1, 1],
    [0, 0, 0, 0, 1, 1],
    [1, 1, 0, 0, 1, 0],
    [0, 1, 0, 1, 0, 1],
    [0, 1, 1, 1, 1, 0],
])

labels = {}
labels[0] = r'$a$'
labels[1] = r'$b$'
labels[2] = r'$c$'
labels[3] = r'$d$'
labels[4] = r'$e$'
labels[5] = r'$f$'

G = show_graph_with_labels(A, labels)
plt.show()

if __name__ == '__main__':
    print('Number of vertices {0}'.format(G.number_of_nodes()))
    print('Number of edges {0}'.format(G.number_of_edges()))
    print('Number of loops {0}'.format(G.number_of_selfloops()))
    paths = nx.all_simple_paths(G, source=0, target=2)
    longest = 0
    for p in paths:
        l = len(p)
        if longest < l:
            longest = l
    print('Longest Path {0}'.format(longest)) | 22.183673 | 65 | 0.599816 | 191 | 1,087 | 3.267016 | 0.356021 | 0.032051 | 0.033654 | 0.025641 | 0.134615 | 0.046474 | 0.022436 | 0 | 0 | 0 | 0 | 0.06085 | 0.198712 | 1,087 | 49 | 66 | 22.183673 | 0.655568 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.078947 | 0 | 0.131579 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
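Since the graph is built straight from the adjacency matrix, the basic counts can be cross-checked with plain NumPy (a sketch using the matrix A defined above):

# Row sums of a symmetric 0/1 adjacency matrix give vertex degrees,
# and by the handshake lemma they sum to twice the edge count.
degrees = np.asarray(A).sum(axis=1)
print(degrees)             # degrees of vertices a..f
print(degrees.sum() // 2)  # should equal G.number_of_edges()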
ac4219dad353465a54c72f84d97b1b9e0974b7fc | 1,402 | py | Python | mako/app.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
] | null | null | null | mako/app.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
] | 1 | 2021-06-02T04:22:46.000Z | 2021-06-02T04:22:46.000Z | mako/app.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
] | null | null | null | import asyncio
import logging

import yaml
from aioredis import create_redis_pool
from discord.ext.commands import Bot

from mako.gifs.database import GifsDatabase
from mako.gifs.cog import GifsReact
from mako.stats.cog import Stats
from mako.stats.counter import Counter
from mako.stats.xp import XPAggregator
from mako.stats.notifier import Notifier
from mako.reminder.cog import Reminder

logger = logging.getLogger(__name__)


async def start_bot(config):
    logger.info("Running the client")
    redis = await create_redis_pool(
        "redis://{}:{}".format(config["redis"]["host"], config["redis"]["port"]),
        encoding="utf-8",
    )
    gifs_database = GifsDatabase()

    bot = Bot(command_prefix="!", description="Bip Boop")
    bot.add_cog(GifsReact(bot, gifs_database))
    bot.add_cog(
        Stats(
            bot,
            Counter(redis),
            XPAggregator(redis, config["levels"]),
            Notifier(redis),
            config,
        )
    )
    bot.add_cog(Reminder(bot, redis, config))
    await bot.start(config["token"])


def run():
    with open("config/config.yml", "r") as stream:
        try:
            config = yaml.safe_load(stream)
            logging.basicConfig(level=config["logging_level"])
            asyncio.run(start_bot(config))
        except yaml.YAMLError:
            logger.exception("Failed to parse config")
            exit(1)
| 25.962963 | 81 | 0.651213 | 171 | 1,402 | 5.233918 | 0.415205 | 0.06257 | 0.058101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00186 | 0.233238 | 1,402 | 53 | 82 | 26.45283 | 0.830698 | 0 | 0 | 0 | 0 | 0 | 0.090585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.285714 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
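From the keys read above, config/config.yml would need to deserialize to roughly this shape (a hedged sketch; every value is a placeholder and the real project may expect more keys, e.g. inside "levels"):

# Equivalent dict the YAML must produce, per the lookups in start_bot() and run().
config = {
    "redis": {"host": "localhost", "port": 6379},
    "levels": {},  # passed to XPAggregator; exact schema is not shown in this file
    "token": "DISCORD-BOT-TOKEN",
    "logging_level": "INFO",
}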
ac43981504c3e1916c721d45ebfcb24c0bc24e32 | 18,188 | py | Python | plot_interaction.py | ada-shen/Interpret_quality | e58d8e24a44005bde1eadbf8ef34c715d02a19cf | [
"MIT"
] | 1 | 2022-02-07T15:24:44.000Z | 2022-02-07T15:24:44.000Z | plot_interaction.py | ada-shen/Interpret_quality | e58d8e24a44005bde1eadbf8ef34c715d02a19cf | [
"MIT"
] | 1 | 2021-12-18T05:02:02.000Z | 2022-02-08T08:45:57.000Z | plot_interaction.py | ada-shen/Interpret_quality | e58d8e24a44005bde1eadbf8ef34c715d02a19cf | [
"MIT"
] | 1 | 2022-02-08T08:44:52.000Z | 2022-02-08T08:44:52.000Z | import sys
import numpy as np
import argparse
import os

import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.patches as patches
from matplotlib import rcParams

from tools.final_util import get_folder_name_list, mkdir
from tools.final_util import NUM_POINTS, NUM_REGIONS, SHAPENET_INTER_SELECTED_SAMPLE, MODELNET_INTER_SELECTED_SAMPLE

plt.rc('font', family='Times New Roman')
config = {
    "mathtext.fontset": 'stix',
}
rcParams.update(config)

font_size = 33
model_names = ["pointnet", "pointnet2", "pointconv", "dgcnn", "gcnn", "gcnn_adv"]


def get_interaction_normal_adv_pose(args):
    print("\n#### get interaction ####")
    all_mean_inter_normal, all_abs_mean_inter_normal, all_mean_inter_adv, all_abs_mean_inter_adv = [], [], [], []
    for i in selected_sample_idx:
        name = folder_name_list[i]
        print("======= %s ========" % name)
        base_folder = args.exp_folder + "%s/" % name
        interaction_folder = base_folder + "interaction_seed%d/" % args.gen_pair_seed
        orders_mean_inter_normal, orders_abs_mean_inter_normal, orders_mean_inter_adv, orders_abs_mean_inter_adv = [], [], [], []
        for ratio in args.ratios:
            orders_interaction_normal = np.load(interaction_folder + "normal/ratio%d_%s_interaction.npy" % (
                int(ratio * 100), args.output_type))  # (num_pairs, num_context)
            orders_interaction_adv = np.load(interaction_folder + "%s_adv/ratio%d_%s_interaction.npy" % (
                args.mode, int(ratio * 100), args.output_type))  # (num_pairs, num_context)
            mean_inter_normal = orders_interaction_normal.mean()  # scalar
            abs_mean_inter_normal = np.abs(orders_interaction_normal.mean(axis=1)).mean()  # scalar
            mean_inter_adv = orders_interaction_adv.mean()  # scalar
            abs_mean_inter_adv = np.abs(orders_interaction_adv.mean(axis=1)).mean()  # scalar
            orders_mean_inter_normal.append(mean_inter_normal)
            orders_abs_mean_inter_normal.append(abs_mean_inter_normal)
            orders_mean_inter_adv.append(mean_inter_adv)
            orders_abs_mean_inter_adv.append(abs_mean_inter_adv)
        all_mean_inter_normal.append(orders_mean_inter_normal)
        all_abs_mean_inter_normal.append(orders_abs_mean_inter_normal)
        all_mean_inter_adv.append(orders_mean_inter_adv)
        all_abs_mean_inter_adv.append(orders_abs_mean_inter_adv)
    return np.array(all_mean_inter_normal), np.array(all_abs_mean_inter_normal), \
        np.array(all_mean_inter_adv), np.array(all_abs_mean_inter_adv)  # (num_pc, num_ratios)


def get_interaction_max_min_pose(args):
    print("\n#### get interaction ####")
    all_mean_inter, all_abs_mean_inter = [], []
    for i in selected_sample_idx:
        name = folder_name_list[i]
        print("======= %s ========" % name)
        base_folder = args.exp_folder + "%s/" % name
        interaction_folder = base_folder + "interaction_seed%d/" % args.gen_pair_seed
        single_region_folder = interaction_folder + "%s_adv_single_region/" % args.mode
        pose_mean_inter, pose_abs_mean_inter = [], []
        for region_folder_name in sorted(os.listdir(single_region_folder)):
            if not os.path.isdir(single_region_folder + region_folder_name):
                continue
            print("----- %s ------" % (region_folder_name))
            range_rank = int(region_folder_name[10:12])  # get range rank information from folder name, 1-based rank
            if range_rank != 1:
                continue
            region_folder = single_region_folder + region_folder_name + "/"
            orders_mean_inter_normal, orders_abs_mean_inter_normal = [], []
            for ratio in args.ratios:
                orders_interaction_normal = np.load(region_folder + "normal/ratio%d_%s_interaction.npy" % (
                    int(ratio * 100), args.output_type))  # (num_pairs, num_context) interaction of a single region and its neighbor
                mean_inter_normal = orders_interaction_normal.mean()  # scalar
                abs_mean_inter_normal = np.abs(orders_interaction_normal.mean(axis=1)).mean()  # scalar
                orders_mean_inter_normal.append(mean_inter_normal)
                orders_abs_mean_inter_normal.append(abs_mean_inter_normal)
            pose_mean_inter.append(orders_mean_inter_normal)
            pose_abs_mean_inter.append(orders_abs_mean_inter_normal)
        all_mean_inter.append(pose_mean_inter)
        all_abs_mean_inter.append(pose_abs_mean_inter)
    return np.array(all_mean_inter), np.array(all_abs_mean_inter)  # (num_pc, 1, num_ratios), interaction of the most sensitive region at normal pose


def ax_bar_plot(ax, orders, interaction, title=None):
    bar_width = 0.04
    ax.bar(orders, interaction, bar_width)
    ax.set_xlabel("order", fontsize=font_size, labelpad=0)
    ax.set_ylabel("interaction", fontsize=font_size, labelpad=0)
    x = np.array([0, 1.2])
    ax.set_xticks(x)
    ax.set_xticklabels(['0', 'n-2'])
    ax.tick_params(labelsize=font_size)
    if title is not None:
        ax.set_title(title)


def ax_bar_plot_double(ax, orders, interaction_normal, interaction_adv, title=None, labels=None, color2=None):
    bar_width = 0.035
    if title is not None:
        ax.set_title(title)
    if labels is not None:
        ax.bar(orders, interaction_normal, bar_width, label=labels[0])  # label="$I^{(m)}_{nor}$"
        if color2 is not None:
            ax.bar(orders + bar_width + 0.005, interaction_adv, bar_width, label=labels[1], color=color2)  # label="$I^{(m)}_{adv}$"
        else:
            ax.bar(orders + bar_width + 0.005, interaction_adv, bar_width, label=labels[1])  # label="$I^{(m)}_{adv}$"
        ax.legend()
    else:
        ax.bar(orders, interaction_normal, bar_width)
        if color2 is not None:
            ax.bar(orders + bar_width + 0.005, interaction_adv, bar_width, color=color2)
        else:
            ax.bar(orders + bar_width + 0.005, interaction_adv, bar_width)
    ax.set_xlabel("order", fontsize=font_size, labelpad=-25)
    ax.set_ylabel("interaction", fontsize=font_size, labelpad=0)
    x = np.array([0, 1.2])
    ax.set_xticks(x + bar_width / 2 + 0.0025)
    ax.set_xticklabels(['0', 'n-2'])
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    ax.tick_params(labelsize=font_size)


def plot_inter_single_region_vs_normal_avg(args):
    mean_inter_single_region, abs_mean_inter_single_region = get_interaction_max_min_pose(args)  # (num_pc, 1, num_ratios)
    mean_inter_normal, abs_mean_inter_normal, mean_inter_adv, abs_mean_inter_adv = get_interaction_normal_adv_pose(args)  # (num_pc, num_ratios)

    save_dir = "figures/interaction_final_%s/" % args.dataset
    mkdir(save_dir)
    np.save(save_dir + "%s_%s_mean_inter_single_region.npy" % (args.model, args.dataset), mean_inter_single_region)
    np.save(save_dir + "%s_%s_abs_mean_inter_single_region.npy" % (args.model, args.dataset), abs_mean_inter_single_region)
    np.save(save_dir + "%s_%s_mean_inter_normal.npy" % (args.model, args.dataset), mean_inter_normal)
    np.save(save_dir + "%s_%s_abs_mean_inter_normal.npy" % (args.model, args.dataset), abs_mean_inter_normal)
    np.save(save_dir + "%s_%s_mean_inter_adv.npy" % (args.model, args.dataset), mean_inter_adv)
    np.save(save_dir + "%s_%s_abs_mean_inter_adv.npy" % (args.model, args.dataset), abs_mean_inter_adv)
    print("shape: ", mean_inter_single_region.shape)

    orders = np.arange(0, 1.3, 0.1)
    fig = plt.figure(figsize=(5, 5), dpi=200)
    ax = fig.add_subplot(1, 1, 1)
    ax_bar_plot_double(ax, orders, np.abs(mean_inter_normal).mean(axis=0), np.abs(mean_inter_single_region[:, 0, :]).mean(axis=0), color2='y')
    fig.subplots_adjust(top=0.55, bottom=0.2, right=0.95, left=0.35)
    plt.savefig(
        save_dir + "single_region_top_range_compare_%s_%s_%s_seed%d_all_pc.png" % (
            args.model, args.mode, args.output_type, args.gen_pair_seed))
    plt.close()


def plot_inter_normal_adv_pose(args):
    mean_inter_normal, abs_mean_inter_normal, mean_inter_adv, abs_mean_inter_adv = get_interaction_normal_adv_pose(args)  # (num_pc, num_ratios)
    print(mean_inter_normal.shape)
    orders = np.arange(0, 1.3, 0.1)
    fig = plt.figure(figsize=(5, 5), dpi=200)
    ax = fig.add_subplot(1, 1, 1)
    # $\mathbb{E}_{X\in \mathcal{X}} |\mathbb{E}_{i,j} [I_{ij}^{(m)}]| $
    ax_bar_plot_double(ax, orders, np.abs(mean_inter_normal).mean(axis=0), np.abs(mean_inter_adv).mean(axis=0))
    plt.subplots_adjust(top=0.55, bottom=0.2, right=0.95, left=0.35)
    save_dir = "figures/interaction_final_%s/" % args.dataset
    mkdir(save_dir)
    plt.savefig(
        save_dir + "global_in_one_%s_%s_%s_seed%d_all_pc.png" % (args.model, args.mode, args.output_type, args.gen_pair_seed))
    plt.close()


def ax_bar_plot_double_for_all(ax, orders, interaction1, interaction2, title=None, color2=None,
                               show_legend=False, label=None):
    bar_width = 0.03
    if title is not None:
        ax.set_title(title, fontsize=font_size, y=1.1)
    if label is not None:
        ax.bar(orders, interaction1, bar_width, color="#4169E1", label=label[0])
        if color2 is not None:
            ax.bar(orders + bar_width + 0.006, interaction2, bar_width, color=color2, label=label[1])
        else:
            ax.bar(orders + bar_width + 0.006, interaction2, bar_width, label=label[1])
    else:
        ax.bar(orders, interaction1, bar_width, color="#4169E1")
        if color2 is not None:
            ax.bar(orders + bar_width + 0.006, interaction2, bar_width, color=color2)
        else:
            ax.bar(orders + bar_width + 0.006, interaction2, bar_width)
    # ax.set_xlabel("order m", fontsize=font_size, labelpad=-20)
    ax.set_ylabel("$I^{(m)}$", fontsize=font_size - 5, labelpad=-5)
    x = np.array([0, 1.2])
    ax.set_xticks(x + bar_width / 2 + 0.003)
    ax.set_xticklabels(['0', 'n-2'])
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    ax.tick_params(labelsize=font_size)
    if show_legend and label is not None:
        ax.legend(loc=7, bbox_to_anchor=(1.02, 1.2), borderaxespad=0., fancybox=False, frameon=False, mode="expand",
                  labelspacing=1, fontsize=font_size, handlelength=1, handletextpad=0.3)


def plot_inter_in_one():
    orders = np.arange(0, 1.3, 0.1)
    model_names_show = ["PointNet", "PointNet++", "PointConv", "DGCNN", "GCNN", "adv-GCNN"]
    data = {"modelnet10": {"normal": [], "adv": [], "single_region": []},
            "shapenet": {"normal": [], "adv": [], "single_region": []}
            }
    for dataset in ["modelnet10", "shapenet"]:
        for model_name in model_names:
            save_dir = "figures/interaction_final_%s/" % dataset
            mean_inter_normal = np.load(save_dir + "%s_%s_mean_inter_normal.npy" % (model_name, dataset))
            mean_inter_adv = np.load(save_dir + "%s_%s_mean_inter_adv.npy" % (model_name, dataset))
            mean_inter_single_region = np.load(save_dir + "%s_%s_mean_inter_single_region.npy" % (model_name, dataset))
            data[dataset]["normal"].append(np.abs(mean_inter_normal).mean(axis=0))
            data[dataset]["adv"].append(np.abs(mean_inter_adv).mean(axis=0))
            data[dataset]["single_region"].append(np.abs(mean_inter_single_region[:, 0, :]).mean(axis=0))

    fig = plt.figure(figsize=(30, 9), dpi=100)

    ax_dataset = fig.add_axes([0.002, 0, 0.102, 1])
    ax_dataset.spines['top'].set_visible(False)
    ax_dataset.spines['right'].set_visible(False)
    ax_dataset.spines['bottom'].set_visible(False)
    ax_dataset.spines['left'].set_visible(False)
    ax_dataset.set_axis_off()
    rect1 = patches.Rectangle(xy=(0.65, 0.73), width=0.4, height=0.23, color="#D8BFD8")
    rect2 = patches.Rectangle(xy=(0.65, 0.51), width=0.4, height=0.2, color="#D8BFD8")
    rect3 = patches.Rectangle(xy=(0.65, 0.23), width=0.4, height=0.23, color="#D8BFD8")
    rect4 = patches.Rectangle(xy=(0.65, 0.01), width=0.4, height=0.2, color="#D8BFD8")
    ax_dataset.add_patch(rect1)
    ax_dataset.add_patch(rect2)
    ax_dataset.add_patch(rect3)
    ax_dataset.add_patch(rect4)
    ax_dataset.text(x=0.76, y=0.735, s="ModelNet10", ha="left", va="bottom", fontsize=font_size - 5, rotation=90)
    ax_dataset.text(x=0.76, y=0.53, s="ShapeNet", ha="left", va="bottom", fontsize=font_size - 5, rotation=90)
    ax_dataset.text(x=0.76, y=0.235, s="ModelNet10", ha="left", va="bottom", fontsize=font_size - 5, rotation=90)
    ax_dataset.text(x=0.76, y=0.03, s="ShapeNet", ha="left", va="bottom", fontsize=font_size - 5, rotation=90)

    ax_legend1 = fig.add_axes([0.2, 0.95, 0.6, 0.05])
    ax_legend1.spines['top'].set_visible(False)
    ax_legend1.spines['right'].set_visible(False)
    ax_legend1.spines['bottom'].set_visible(False)
    ax_legend1.spines['left'].set_visible(False)
    ax_legend1.set_axis_off()
    legend1 = patches.Rectangle(xy=(0, 0), width=0.06, height=0.7, color="#4169E1")
    legend2 = patches.Rectangle(xy=(0.3, 0), width=0.06, height=0.7, color="#FF7F24")
    ax_legend1.add_patch(legend1)
    ax_legend1.add_patch(legend2)
    ax_legend1.text(x=0.08, y=0, s="normal samples", ha="left", va="bottom", fontsize=font_size)
    ax_legend1.text(x=0.38, y=0, s="adversarial samples (using rotations for attack, instead of perturbations)", ha="left", va="bottom", fontsize=font_size)

    ax_legend2 = fig.add_axes([0.2, 0.45, 0.6, 0.05])
    ax_legend2.spines['top'].set_visible(False)
    ax_legend2.spines['right'].set_visible(False)
    ax_legend2.spines['bottom'].set_visible(False)
    ax_legend2.spines['left'].set_visible(False)
    ax_legend2.set_axis_off()
    legend1 = patches.Rectangle(xy=(0, 0), width=0.06, height=0.7, color="#4169E1")
    legend2 = patches.Rectangle(xy=(0.3, 0), width=0.06, height=0.7, color="#A2CD5A")
    ax_legend2.add_patch(legend1)
    ax_legend2.add_patch(legend2)
    ax_legend2.text(x=0.08, y=0, s="among all regions", ha="left", va="bottom", fontsize=font_size)
    ax_legend2.text(x=0.38, y=0, s="among most rotation-sensitive regions", ha="left", va="bottom", fontsize=font_size)

    for i, model_name in enumerate(model_names_show):
        ax = fig.add_axes([0.16 + 0.145 * i, 0.75, 0.085, 0.125])
        ax_bar_plot_double_for_all(ax, orders, data["modelnet10"]["normal"][i], data["modelnet10"]["adv"][i], title=model_name, color2="#FF7F24")
    for i, model_name in enumerate(model_names_show):
        ax = fig.add_axes([0.16 + 0.145 * i, 0.55, 0.085, 0.125])
        ax_bar_plot_double_for_all(ax, orders, data["shapenet"]["normal"][i], data["shapenet"]["adv"][i], color2='#FF7F24')
    for i, model_name in enumerate(model_names_show):
        ax = fig.add_axes([0.16 + 0.145 * i, 0.26, 0.085, 0.125])
        ax_bar_plot_double_for_all(ax, orders, data["modelnet10"]["normal"][i], data["modelnet10"]["single_region"][i], title=model_name, color2="#A2CD5A")
    for i, model_name in enumerate(model_names_show):
        ax = fig.add_axes([0.16 + 0.145 * i, 0.06, 0.085, 0.125])
        ax_bar_plot_double_for_all(ax, orders, data["shapenet"]["normal"][i], data["shapenet"]["single_region"][i], color2='#A2CD5A')

    for i in range(6):
        fig.text(x=0.185 + 0.145 * i, y=0.695, s="order", ha="left", va="bottom", fontsize=font_size)
        fig.text(x=0.185 + 0.145 * i, y=0.495, s="order", ha="left", va="bottom", fontsize=font_size)
        fig.text(x=0.185 + 0.145 * i, y=0.205, s="order", ha="left", va="bottom", fontsize=font_size)
        fig.text(x=0.185 + 0.145 * i, y=0.005, s="order", ha="left", va="bottom", fontsize=font_size)
    fig.text(x=0.04, y=0.7, s="(a)", ha="left", va="bottom", fontsize=font_size + 5)
    fig.text(x=0.04, y=0.2, s="(b)", ha="left", va="bottom", fontsize=font_size + 5)

    save_dir = "figures_show/interaction_all/"
    mkdir(save_dir)
    plt.savefig(save_dir + "interaction_all.pdf")
    plt.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='gcnn_adv',
                        choices=['pointnet', 'pointnet2', 'pointconv', 'dgcnn', 'gcnn', 'gcnn_adv'])
    parser.add_argument('--dataset', type=str, default='shapenet', metavar='N', choices=['modelnet10', 'shapenet'])
    parser.add_argument("--ratios", default=[0., 0.04, 0.07, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.], type=list)
    parser.add_argument('--gen_pair_seed', type=int, default=1, help='seed used in gen_pair, only used for checking instability')
    parser.add_argument('--mode', type=str, default='rotate')
    parser.add_argument('--output_type', default='pred', type=str, choices=["gt", "pred"])
    parser.add_argument("--num_pairs_random", default=300, type=int)  # number of random pairs when gen_pair_type is random
    parser.add_argument("--num_save_context_max", default=100, type=int)  # max number of contexts for each I_ij
    parser.add_argument("--plot_mode", default="all", type=str,
                        choices=["all", "single_region_vs_normal_avg", "normal_vs_adv"])
    args = parser.parse_args()
    args.num_points = NUM_POINTS
    args.num_regions = NUM_REGIONS
    args.exp_folder = './checkpoints/exp_MODEL_%s_DATA_%s_POINTNUM_%d_REGIONNUM_%d_shapley_test/' % (
        args.model, args.dataset, args.num_points, args.num_regions)
    folder_name_list = get_folder_name_list(args)

    if args.dataset == "modelnet10":
        selected_sample_idx = MODELNET_INTER_SELECTED_SAMPLE
    else:
        selected_sample_idx = SHAPENET_INTER_SELECTED_SAMPLE

    if args.plot_mode == "normal_vs_adv":
        plot_inter_normal_adv_pose(args)
    elif args.plot_mode == "single_region_vs_normal_avg":
        plot_inter_single_region_vs_normal_avg(args)
    elif args.plot_mode == "all":
        plot_inter_in_one()
    else:
        raise Exception(f"plot_mode [{args.plot_mode}] not implemented")
| 51.670455 | 157 | 0.666593 | 2,749 | 18,188 | 4.122226 | 0.116042 | 0.072273 | 0.047653 | 0.031768 | 0.681521 | 0.63131 | 0.540858 | 0.510148 | 0.424197 | 0.36622 | 0 | 0.042855 | 0.186607 | 18,188 | 351 | 158 | 51.817664 | 0.723131 | 0.037497 | 0 | 0.287719 | 0 | 0 | 0.12735 | 0.043793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02807 | false | 0 | 0.038596 | 0 | 0.073684 | 0.024561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
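The two statistics collected per ratio above reduce an interaction array of shape (num_pairs, num_context) in different orders; a standalone toy sketch with synthetic data:

# Mean over everything vs. mean of |per-pair averages|, matching the code above.
import numpy as np

orders_interaction = np.random.randn(300, 100)                   # (num_pairs, num_context), synthetic
mean_inter = orders_interaction.mean()                           # average over pairs and contexts
abs_mean_inter = np.abs(orders_interaction.mean(axis=1)).mean()  # average of |per-pair context means|
print(mean_inter, abs_mean_inter)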
ac477619cb9224c8cde4f200c2887ee75d30f1fe | 4,082 | py | Python | packages/legacy/bundles/reactor_anu_spectra_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | 5 | 2019-10-14T01:06:57.000Z | 2021-02-02T16:33:06.000Z | packages/legacy/bundles/reactor_anu_spectra_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | packages/legacy/bundles/reactor_anu_spectra_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null |
from load import ROOT as R
import gna.constructors as C
import numpy as N
from collections import OrderedDict
from gna.bundle import *
from scipy.interpolate import interp1d


class reactor_anu_spectra_v01(TransformationBundleLegacy):
    short_names = dict(U5='U235', U8='U238', Pu9='Pu239', Pu1='Pu241')
    debug = False

    def __init__(self, *args, **kwargs):
        self.isotopes = kwargs['namespaces'] = [self.short_names.get(s, s) for s in kwargs['cfg'].isotopes]
        super(reactor_anu_spectra_v01, self).__init__(*args, **kwargs)
        self.load_data()

    def build(self):
        model_edges_t = C.Points(self.model_edges, ns=self.common_namespace)
        model_edges_t.points.setLabel('E0 (model)')
        self.objects['edges'] = model_edges_t
        self.shared.reactor_anu_edges = model_edges_t.single()

        self.corrections = None
        if self.cfg.get('corrections', None):
            self.corrections, = execute_bundles(cfg=self.cfg.corrections, shared=self.shared)

        newx = self.shared.points
        segments_t = None
        for isotope in self.isotopes:
            ns = self.common_namespace(isotope)
            spectrum_raw_t = C.Points(self.spectra[isotope], ns=self.common_namespace)
            spectrum_raw_t.points.setLabel('S0(E0):\n' + isotope)
            self.objects[('spectrum_raw', isotope)] = spectrum_raw_t

            if self.corrections:
                spectrum_t = R.Product(ns=self.common_namespace)
                spectrum_t.multiply(spectrum_raw_t)
                for corr in self.corrections.bundles.values():
                    spectrum_t.multiply(corr.outputs[isotope])
                spectrum_t.product.setLabel('S(E0):\n' + isotope)
            else:
                spectrum_t = spectrum_raw_t

            interp_expo_t = R.InterpExpoSorted(self.cfg.strategy['underflow'], self.cfg.strategy['overflow'], ns=self.common_namespace)
            interp_expo_t.interp.setLabel('S(E):\n' + isotope)
            if segments_t:
                interp_expo_t.interpolate(segments_t, model_edges_t, spectrum_t, newx)
            else:
                interp_expo_t.interpolate(model_edges_t, spectrum_t, newx)
            segments_t = interp_expo_t.segments

            """Store data"""
            self.objects[('spectrum', isotope)] = spectrum_t
            self.objects[('interp', isotope)] = interp_expo_t
            self.transformations_out[isotope] = interp_expo_t.interp
            self.outputs[isotope] = interp_expo_t.interp.interp

    def load_data(self):
        """Read raw input spectra"""
        self.spectra_raw = OrderedDict()
        dtype = [('enu', 'd'), ('yield', 'd')]
        if self.debug:
            print('Load files:')
        for ns in self.namespaces:
            data = self.load_file(self.cfg.filename, dtype, isotope=ns.name)
            self.spectra_raw[ns.name] = data

        """Read parametrization edges"""
        self.model_edges = N.ascontiguousarray(self.cfg.edges, dtype='d')
        if self.debug:
            print('Bin edges:', self.model_edges)

        """Compute the values of spectra on the parametrization"""
        self.spectra = OrderedDict()
        self.shared.reactor_anu_fcn = OrderedDict()
        fcns = self.shared.reactor_anu_fcn
        for name, (x, y) in self.spectra_raw.items():
            f = interp1d(x, N.log(y), bounds_error=True)
            # bind f per isotope; a plain `lambda e: N.exp(f(e))` would capture f
            # late and make every entry use the last isotope's interpolator
            fcns[name] = lambda e, f=f: N.exp(f(e))
            model = N.exp(f(self.model_edges))
            self.spectra[name] = model

    def define_variables(self):
        pass

    def load_file(self, filenames, dtype, **kwargs):
        for format in filenames:
            fname = format.format(**kwargs)
            try:
                data = N.loadtxt(fname, dtype, unpack=True)
            except:
                pass
            else:
                if self.debug:
                    print(kwargs, fname)
                    print(data)
                return data
        raise Exception('Failed to load file for ' + str(kwargs))
| 40.415842 | 135 | 0.598236 | 493 | 4,082 | 4.764706 | 0.275862 | 0.042571 | 0.037463 | 0.0447 | 0.116645 | 0.020434 | 0 | 0 | 0 | 0 | 0 | 0.008978 | 0.290544 | 4,082 | 100 | 136 | 40.82 | 0.802141 | 0.00539 | 0 | 0.098765 | 0 | 0 | 0.045604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0.024691 | 0.074074 | 0 | 0.185185 | 0.049383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
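The load_data step interpolates each spectrum in log space (fit log y linearly, then exponentiate), which keeps the interpolant positive for exponential-shaped spectra; a standalone sketch with a toy spectrum:

import numpy as np
from scipy.interpolate import interp1d

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.exp(-x)                           # toy spectrum, strictly positive
f = interp1d(x, np.log(y), bounds_error=True)
print(np.exp(f(2.5)))                    # ~exp(-2.5); exact here because log y is linear in x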
ac4b999bd13b9e7150cbab2606ab51e0c02f1c83 | 2,222 | py | Python | composer/lm_adlmidi/training/train.py | lucasnfe/music-bardo | 6ab3655c00e80cad55064a9ead4534a9511516b5 | [
"MIT"
] | 12 | 2020-10-30T19:45:05.000Z | 2022-03-25T07:43:50.000Z | composer/lm_adlmidi/training/train.py | lucasnfe/music-bardo | 6ab3655c00e80cad55064a9ead4534a9511516b5 | [
"MIT"
] | 1 | 2020-12-30T17:24:12.000Z | 2020-12-30T17:24:12.000Z | composer/lm_adlmidi/training/train.py | lucasnfe/music-bardo | 6ab3655c00e80cad55064a9ead4534a9511516b5 | [
"MIT"
] | 1 | 2022-02-02T13:53:26.000Z | 2022-02-02T13:53:26.000Z | import training.load_data
import training.schedulers
import training.checkpoint
import tensorflow as tf
def generative_loss(labels, logits):
return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def perplexity(labels, logits):
"""
Popular metric for evaluating language modelling architectures.
More info: http://cs224d.stanford.edu/lecture_notes/LectureNotes4.pdf
"""
cross_entropy = generative_loss(labels, logits)
return tf.keras.backend.mean(tf.keras.backend.exp(tf.keras.backend.mean(cross_entropy, axis=-1)))
def calc_steps(dataset_midi_files, seq_length, batch_size):
# Get a list of the txt files associated to the midi files
dataset_txt_files = training.load_data.midi2text_paths(dataset_midi_files)
# Read all files in the dataset directory
list_ds = tf.data.Dataset.from_tensor_slices(dataset_txt_files)
n_steps = 0
for filepath in list_ds:
text = tf.io.read_file(filepath)
words = tf.strings.split(text, sep=" ")
n_tokens = words.shape[-1]
n_chunks = n_tokens//(seq_length + 1)
n_steps += n_chunks//batch_size
return n_steps
def train_language_model(language_model, params, train_dataset, test_dataset, n_train_steps):
# Compile model with given optimizer and defined loss
# mirrored_strategy = tf.distribute.MirroredStrategy()
# with mirrored_strategy.scope():
lr_schedule = training.schedulers.GPTSchedule(learning_rate=params["lr"],
n_training_steps=n_train_steps * params["epochs"],
schedule=params["schedule"],
warmup=params["warmup"])
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
language_model.compile(optimizer, loss=generative_loss, metrics=[perplexity])
# Add checkpoint callback
weights_callback = training.checkpoint.SaveModelCallback(language_model, optimizer, params["check"])
history = language_model.fit(train_dataset, epochs=params["epochs"], validation_data=test_dataset, callbacks=[weights_callback])
return history
| 41.924528 | 132 | 0.711521 | 281 | 2,222 | 5.398577 | 0.451957 | 0.023072 | 0.027686 | 0.034278 | 0.051417 | 0.051417 | 0.051417 | 0 | 0 | 0 | 0 | 0.011186 | 0.19532 | 2,222 | 52 | 133 | 42.730769 | 0.837248 | 0.176418 | 0 | 0 | 0 | 0 | 0.018837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0.033333 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac4de92dc93fc74858f07c213ac9db5ed4dba97b | 14,104 | py | Python | papermerge/core/views/documents.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | papermerge/core/views/documents.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | papermerge/core/views/documents.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | import os
import json
import logging
from django.shortcuts import redirect
from django.urls import reverse
from django.http import (
HttpResponse,
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.conf import settings
from django import views
from django.contrib.auth.decorators import login_required
from pmworker.storage import (
upload_document_to_s3,
download,
download_hocr,
copy2doc_url
)
from pmworker.pdfinfo import get_pagecount
from pmworker.endpoint import Endpoint
from pmworker.step import Step
from pmworker.shortcuts import extract_img
from papermerge.core.lib.hocr import Hocr
from papermerge.core.models import (
Folder, Document, BaseTreeNode, Access
)
from papermerge.core.utils import (
get_tenant_name
)
from papermerge.core.storage import (
is_storage_left
)
logger = logging.getLogger(__name__)
def copy_to_clipboard(request, node_ids):
"""
It would be nice to have something like
request.clipboard.add(node_ids) though... but
this implementation will be post poned for later.
"""
tenant_name = get_tenant_name()
clipboard_id = "{}.{}.clipboard.node_ids".format(
tenant_name,
request.user.id
)
request.session[clipboard_id] = node_ids
def reset_clipboard(request):
tenant_name = get_tenant_name()
clipboard_id = "{}.{}.clipboard.node_ids".format(
tenant_name,
request.user.id
)
request.session[clipboard_id] = []
def get_clipboard(request):
tenant_name = get_tenant_name()
clipboard_id = "{}.{}.clipboard.node_ids".format(
tenant_name,
request.user.id
)
if request.session.get(clipboard_id, False):
return request.session[clipboard_id]
return []
def get_from_clipboard(request):
"""
It would be nice to have something like
request.clipboard though... but
this implementation will be post poned for later.
"""
tenant_name = get_tenant_name()
clipboard_id = "{}.{}.clipboard.node_ids".format(
tenant_name,
request.user.id
)
return request.session.get(clipboard_id, [])
def index(request):
return redirect('boss:core_basetreenode_changelist')
@login_required
def cut_node(request):
if request.method == 'GET':
return redirect('boss:core_basetreenode_changelist')
node_ids = request.POST.getlist('node_ids[]', False)
parent_id = request.POST.get('parent_id', False)
copy_to_clipboard(request, node_ids)
if parent_id:
return redirect(
reverse(
'boss:core_basetreenode_changelist_obj', args=(parent_id,)
)
)
return redirect('boss:core_basetreenode_changelist')
@login_required
def clipboard(request):
if request.method == 'GET':
clipboard = get_clipboard(request)
return HttpResponse(
json.dumps({'clipboard': clipboard}),
content_type="application/json",
)
return HttpResponse(
json.dumps({'clipboard': []}),
content_type="application/json",
)
@login_required
def paste_node(request):
if request.method == 'GET':
return redirect('boss:core_basetreenode_changelist')
parent_id = request.POST.get('parent_id', False)
if parent_id:
parent = BaseTreeNode.objects.filter(id=parent_id).first()
else:
parent = None
node_ids = get_from_clipboard(request)
# iterate through all node ids and change their
# parent to new one (parent_id)
for node in BaseTreeNode.objects.filter(id__in=node_ids):
node.refresh_from_db()
if parent:
parent.refresh_from_db()
Document.objects.move_node(node, parent)
reset_clipboard(request)
if parent_id:
return redirect(
reverse(
'boss:core_basetreenode_changelist_obj', args=(parent_id,)
)
)
return redirect('boss:core_basetreenode_changelist')
@login_required
def delete_node(request):
"""
Delete selected nodes.
Mandatory parameters node_ids[] and title:
"""
if request.method == 'GET':
return redirect('boss:core_basetreenode_changelist')
node_ids = request.POST.getlist('node_ids[]', False)
parent_id = request.POST.get('parent_id', False)
BaseTreeNode.objects.filter(id__in=node_ids).delete()
if parent_id:
return redirect(
reverse(
'boss:core_basetreenode_changelist_obj', args=(parent_id,)
)
)
else:
return redirect('boss:core_basetreenode_changelist')
@login_required
def rename_node(request, redirect_to):
"""
Renames a node (changes its title field).
Mandatory parameters node_id and title.
redirect_to = (change | list)
change = will redirect to changeform of given doc
list = will redirect to list view of given parent_id
"""
if request.method == 'GET':
return redirect('boss:core_basetreenode_changelist')
node_id = request.POST.get('node_id', False)
title = request.POST.get('title', False)
if not (node_id and title):
logger.info(
"Invalid params for rename_node: node_id=%s title=%s",
node_id,
title
)
return redirect('boss:core_basetreenode_changelist')
node = BaseTreeNode.objects.get(id=node_id)
if not node:
return redirect('boss:core_basetreenode_changelist')
node.title = title
node.save()
# Node can be renamed in two places:
# 1. In changeform view
# 2. In changelist view
# In case 1. redirect_to == 'change' in other case
# redirect_to == 'list'
if redirect_to == 'change':
return redirect(
reverse(
'boss:core_basetreenode_change', args=(node_id,)
)
)
# means redirect_to == 'list' i.e this rename was
# called from changelist view.
if node.parent_id:
return redirect(
reverse(
'boss:core_basetreenode_changelist_obj', args=(node.parent_id,)
)
)
else:
return redirect('boss:core_basetreenode_changelist')
@login_required
def create_folder(request):
"""
Creates a new folder.
Mandatory parameters parent_id and title:
* If either parent_id or title are missing - does nothing,
just redirects to root folder.
* If parent_id < 0 => creates a folder with parent root.
* If parent_id >= 0 => creates a folder with given parent id.
"""
if request.method == 'GET':
return redirect('boss:core_basetreenode_changelist')
parent_id = request.POST.get('parent_id', False)
title = request.POST.get('title', False)
if not (parent_id and title):
logger.info(
"Invalid params for create_folder: parent=%s title=%s",
parent_id,
title
)
return redirect('boss:core_basetreenode_changelist')
if int(parent_id) < 0:
parent_folder = None
else:
parent_folder = Folder.objects.filter(id=parent_id).first()
# if not existing parent_id was given, redirect to root
if not parent_folder:
return redirect('boss:core_basetreenode_changelist')
Folder.objects.create(
title=title,
parent=parent_folder,
user=request.user
)
# must redirect to parent of created folder
if int(parent_id) == -1:
return redirect('boss:core_basetreenode_changelist')
return redirect(
reverse(
'boss:core_basetreenode_changelist_obj', args=(parent_id,)
)
)
class DocumentsUpload(views.View):
def post(self, request):
files = request.FILES.getlist('file')
if not files:
logger.warning(
"POST request.FILES is empty. Forgot adding file?"
)
if len(files) > 1:
logger.warning(
"More then one files per ajax? how come?"
)
return HttpResponse(
json.dumps({}),
content_type="application/json",
status_code=400
)
f = files[0]
logger.debug("upload for f=%s user=%s", f, request.user)
if not is_storage_left(f.temporary_file_path()):
logger.warning("Storage is full for user=%s.", request.user)
msg = "Cannot upload file {}. Storage is full.".format(f.name)
return HttpResponse(
json.dumps({'error': msg}),
status=400,
content_type="application/json"
)
user = request.user
size = os.path.getsize(f.temporary_file_path())
parent_id = request.POST.get('parent', "-1")
if parent_id and "-1" in parent_id:
parent_id = None
lang = request.POST.get('language')
notes = request.POST.get('notes')
page_count = get_pagecount(f.temporary_file_path())
logger.info("creating document {}".format(f.name))
doc = Document.create_document(
user=user,
title=f.name,
size=size,
lang=lang,
file_name=f.name,
parent_id=parent_id,
notes=notes,
page_count=page_count
)
logger.debug("uploading to {}".format(doc.doc_ep.url()))
copy2doc_url(
src_file_path=f.temporary_file_path(),
doc_url=doc.doc_ep.url()
)
if settings.S3:
upload_document_to_s3(
doc.doc_ep
)
if settings.OCR:
Document.ocr_async(
document=doc,
page_count=page_count,
lang=lang
)
        # upload only one file at a time.
# after each upload return a json object with
# following fields:
#
# - title
# - preview_url
# - doc_id
# - action_url -> needed for renaming/deleting selected item
#
# with that info a new thumbnail will be created.
action_url = reverse(
'boss:core_basetreenode_change', args=(doc.id,)
)
preview_url = reverse(
'core:preview', args=(doc.id, 200, 1)
)
result = {
'title': doc.title,
'doc_id': doc.id,
'action_url': action_url,
'preview_url': preview_url
}
logger.info("and response is!")
return HttpResponse(
json.dumps(result),
content_type="application/json"
)
@login_required
def usersettings(request, option, value):
if option == 'documents_view':
user_settings = request.user.preferences
if value in ('list', 'grid'):
user_settings['views__documents_view'] = value
return HttpResponseRedirect(
request.META.get('HTTP_REFERER')
)
@login_required
def hocr(request, id, step=None, page="1"):
logger.debug(f"hocr for doc_id={id}, step={step}, page={page}")
try:
doc = Document.objects.get(id=id)
except Document.DoesNotExist:
raise Http404("Document does not exists")
doc_ep = doc.doc_ep
if request.user.has_perm(Access.PERM_READ, doc):
if not doc_ep.exists():
download(doc_ep)
page_count = get_pagecount(doc_ep.url())
        page = int(page)
        if page > page_count or page < 1:
            raise Http404("Page does not exist")
page_ep = doc.page_eps[page]
logger.debug(f"Extract words from {page_ep.hocr_url()}")
if not page_ep.hocr_exists():
# check if HOCR data exists on S3
if settings.S3 and page_ep.hocr_exists(ep=Endpoint.S3):
                # OK, we should be able to download it.
download_hocr(page_ep)
else:
# normal scenario, HOCR is not yet ready
raise Http404("HOCR data not yet ready.")
# At this point local HOCR data should be available.
hocr = Hocr(
hocr_file_path=page_ep.hocr_url()
)
return HttpResponse(
json.dumps({
'hocr': hocr.good_json_words(),
'hocr_meta': hocr.get_meta()
}),
content_type="application/json",
)
return HttpResponseForbidden()
@login_required
def preview(request, id, step=None, page="1"):
try:
doc = Document.objects.get(id=id)
except Document.DoesNotExist:
raise Http404("Document does not exists")
if request.user.has_perm(Access.PERM_READ, doc):
doc_ep = doc.doc_ep
if not doc_ep.exists():
download(doc_ep)
page_ep = doc.get_page_ep(
page_num=page,
step=Step(step),
)
if not page_ep.img_exists():
extract_img(page_ep)
        with open(page_ep.img_url(), "rb") as f:
            return HttpResponse(f.read(), content_type="image/jpeg")
return redirect('core:index')
@login_required
def document_download(request, id):
try:
doc = Document.objects.get(id=id)
except Document.DoesNotExist:
raise Http404("Document does not exists")
if doc.user.username == request.user.username:
try:
file_handle = open(doc.doc_ep.url(), "rb")
except OSError:
logger.error(
"Cannot open local version of %s" % doc.doc_ep.url()
)
            return redirect('boss:core_basetreenode_changelist_obj', id)
resp = HttpResponse(
file_handle.read(),
content_type="application/pdf"
)
disposition = "attachment; filename=%s" % doc.title
resp['Content-Disposition'] = disposition
file_handle.close()
return resp
    return redirect('boss:core_basetreenode_changelist_obj', id)
| 26.511278 | 79 | 0.606211 | 1,653 | 14,104 | 4.981851 | 0.168179 | 0.036916 | 0.058288 | 0.080146 | 0.435458 | 0.395628 | 0.348512 | 0.323254 | 0.295082 | 0.258166 | 0 | 0.005029 | 0.295094 | 14,104 | 531 | 80 | 26.561205 | 0.823275 | 0.114436 | 0 | 0.348066 | 0 | 0 | 0.156478 | 0.077023 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044199 | false | 0 | 0.049724 | 0.002762 | 0.198895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac4edd7e2d605cd03db4fe627367ad1ef5e0d794 | 11,699 | py | Python | libs/yowsup/yowsup/yowsup/layers/coder/tokendictionary.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | ["MIT"] | 22 | 2017-07-14T20:01:17.000Z | 2022-03-08T14:22:39.000Z | libs/yowsup/yowsup/yowsup/layers/coder/tokendictionary.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | ["MIT"] | 6 | 2017-07-14T21:03:50.000Z | 2021-06-10T19:08:32.000Z | libs/yowsup/yowsup/yowsup/layers/coder/tokendictionary.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | ["MIT"] | 13 | 2017-07-14T20:13:14.000Z | 2020-11-12T08:06:05.000Z |
class TokenDictionary:
def __init__(self):
self.dictionary = [
'',
'',
'',
'account',
'ack',
'action',
'active',
'add',
'after',
'all',
'allow',
'apple',
'audio',
'auth',
'author',
'available',
'bad-protocol',
'bad-request',
'before',
'bits',
'body',
'broadcast',
'cancel',
'category',
'challenge',
'chat',
'clean',
'code',
'composing',
'config',
'contacts',
'count',
'create',
'creation',
'debug',
'default',
'delete',
'delivery',
'delta',
'deny',
'digest',
'dirty',
'duplicate',
'elapsed',
'enable',
'encoding',
'encrypt',
'error',
'event',
'expiration',
'expired',
'fail',
'failure',
'false',
'favorites',
'feature',
'features',
'feature-not-implemented',
'field',
'file',
'filehash',
'first',
'free',
'from',
'g.us',
'gcm',
'get',
'google',
'group',
'groups',
'groups_v2',
'http://etherx.jabber.org/streams',
'http://jabber.org/protocol/chatstates',
'ib',
'id',
'image',
'img',
'index',
'internal-server-error',
'ip',
'iq',
'item-not-found',
'item',
'jabber:iq:last',
'jabber:iq:privacy',
'jabber:x:event',
'jid',
'kind',
'last',
'leave',
'list',
'max',
'mechanism',
'media',
'message_acks',
'message',
'method',
'microsoft',
'mimetype',
'missing',
'modify',
'msg',
'mute',
'name',
'nokia',
'none',
'not-acceptable',
'not-allowed',
'not-authorized',
'notification',
'notify',
'off',
'offline',
'order',
'owner',
'owning',
'p_o',
'p_t',
'paid',
'participant',
'participants',
'participating',
'paused',
'picture',
'pin',
'ping',
'pkmsg',
'platform',
'port',
'presence',
'preview',
'probe',
'prop',
'props',
'qcount',
'query',
'raw',
'read',
'readreceipts',
'reason',
'receipt',
'relay',
'remote-server-timeout',
'remove',
'request',
'required',
'resource-constraint',
'resource',
'response',
'result',
'retry',
'rim',
's_o',
's_t',
's.us',
's.whatsapp.net',
'seconds',
'server-error',
'server',
'service-unavailable',
'set',
'show',
'silent',
'size',
'skmsg',
'stat',
'state',
'status',
'stream:error',
'stream:features',
'subject',
'subscribe',
'success',
'sync',
't',
'text',
'timeout',
'timestamp',
'tizen',
'to',
'true',
'type',
'unavailable',
'unsubscribe',
'upgrade',
'uri',
'url',
'urn:ietf:params:xml:ns:xmpp-sasl',
'urn:ietf:params:xml:ns:xmpp-stanzas',
'urn:ietf:params:xml:ns:xmpp-streams',
'urn:xmpp:ping',
'urn:xmpp:whatsapp:account',
'urn:xmpp:whatsapp:dirty',
'urn:xmpp:whatsapp:mms',
'urn:xmpp:whatsapp:push',
'urn:xmpp:whatsapp',
'user',
'user-not-found',
'v',
'value',
'version',
'voip',
'w:g',
'w:p:r',
'w:p',
'w:profile:picture',
'w',
'wait',
'WAUTH-2',
'xmlns:stream',
'xmlns',
'1',
'chatstate',
'crypto',
'phash',
'enc',
'class',
'off_cnt',
'w:g2',
'promote',
'demote',
'creator',
'background',
'backoff',
'chunked',
'context',
'full',
'in',
'interactive',
'out',
'registration',
'sid',
'urn:xmpp:whatsapp:sync',
'flt',
's16',
'u8',
]
self.secondaryDictionary = [
'adpcm',
'amrnb',
'amrwb',
'mp3',
'pcm',
'qcelp',
'wma',
'h263',
'h264',
'jpeg',
'mpeg4',
'wmv',
'audio/3gpp',
'audio/aac',
'audio/amr',
'audio/mp4',
'audio/mpeg',
'audio/ogg',
'audio/qcelp',
'audio/wav',
'audio/webm',
'audio/x-caf',
'audio/x-ms-wma',
'image/gif',
'image/jpeg',
'image/png',
'video/3gpp',
'video/avi',
'video/mp4',
'video/mpeg',
'video/quicktime',
'video/x-flv',
'video/x-ms-asf',
'302',
'400',
'401',
'402',
'403',
'404',
'405',
'406',
'407',
'409',
'410',
'500',
'501',
'503',
'504',
'abitrate',
'acodec',
'app_uptime',
'asampfmt',
'asampfreq',
'clear',
'conflict',
'conn_no_nna',
'cost',
'currency',
'duration',
'extend',
'fps',
'g_notify',
'g_sound',
'gone',
'google_play',
'hash',
'height',
'invalid',
'jid-malformed',
'latitude',
'lc',
'lg',
'live',
'location',
'log',
'longitude',
'max_groups',
'max_participants',
'max_subject',
'mode',
'napi_version',
'normalize',
'orighash',
'origin',
'passive',
'password',
'played',
'policy-violation',
'pop_mean_time',
'pop_plus_minus',
'price',
'pricing',
'redeem',
'Replaced by new connection',
'resume',
'signature',
'sound',
'source',
'system-shutdown',
'username',
'vbitrate',
'vcard',
'vcodec',
'video',
'width',
'xml-not-well-formed',
'checkmarks',
'image_max_edge',
'image_max_kbytes',
'image_quality',
'ka',
'ka_grow',
'ka_shrink',
'newmedia',
'library',
'caption',
'forward',
'c0',
'c1',
'c2',
'c3',
'clock_skew',
'cts',
'k0',
'k1',
'login_rtt',
'm_id',
'nna_msg_rtt',
'nna_no_off_count',
'nna_offline_ratio',
'nna_push_rtt',
'no_nna_con_count',
'off_msg_rtt',
'on_msg_rtt',
'stat_name',
'sts',
'suspect_conn',
'lists',
'self',
'qr',
'web',
'w:b',
'recipient',
'w:stats',
'forbidden',
'max_list_recipients',
'en-AU',
'en-GB',
'es-MX',
'pt-PT',
'zh-Hans',
'zh-Hant',
'relayelection',
'relaylatency',
'interruption',
'Bell.caf',
'Boing.caf',
'Glass.caf',
'Harp.caf',
'TimePassing.caf',
'Tri-tone.caf',
'Xylophone.caf',
'aurora.m4r',
'bamboo.m4r',
'chord.m4r',
'circles.m4r',
'complete.m4r',
'hello.m4r',
'input.m4r',
'keys.m4r',
'note.m4r',
'popcorn.m4r',
'pulse.m4r',
'synth.m4r',
'Apex.m4r',
'Beacon.m4r',
'Bulletin.m4r',
'By The Seaside.m4r',
'Chimes.m4r',
'Circuit.m4r',
'Constellation.m4r',
'Cosmic.m4r',
'Crystals.m4r',
'Hillside.m4r',
'Illuminate.m4r',
'Night Owl.m4r',
'Opening.m4r',
'Playtime.m4r',
'Presto.m4r',
'Radar.m4r',
'Radiate.m4r',
'Ripples.m4r',
'Sencha.m4r',
'Signal.m4r',
'Silk.m4r',
'Slow Rise.m4r',
'Stargaze.m4r',
'Summit.m4r',
'Twinkle.m4r',
'Uplift.m4r',
'Waves.m4r',
'eligible',
'planned',
'current',
'future',
'disable',
'expire',
'start',
'stop',
'accuracy',
'speed',
'bearing',
'recording',
'key',
'identity',
'w:gp2',
'admin',
'locked',
'unlocked',
'new',
'battery',
'archive',
'adm',
'plaintext_size',
'plaintext_disabled',
'plaintext_reenable_threshold',
'compressed_size',
'delivered',
'everyone',
'transport',
'mspes',
'e2e_groups',
'e2e_images',
'encr_media',
'encrypt_v2',
'encrypt_image',
'encrypt_sends_push',
'force_long_connect',
'audio_opus',
'video_max_edge',
'call-id',
'call',
'preaccept',
'accept',
'offer',
'reject',
'busy',
'te',
'terminate',
'begin',
'end',
'opus',
'rtt',
'token',
'priority',
'p2p',
'rate',
'amr',
'ptt',
'srtp',
'os',
'browser',
'encrypt_group_gen2'
]
    def getToken(self, index, secondary=False):
targetDict = self.dictionary
if secondary:
targetDict = self.secondaryDictionary
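        # Indices above 236 (the last slot in the primary table) fall through
        # to the secondary dictionary; 237 is subtracted to rebase them to a
        # zero-based secondary index.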
elif index > 236 and index < (236 + len(self.secondaryDictionary)):
targetDict = self.secondaryDictionary
index = index - 237
if index < 0 or index > len(targetDict) - 1:
return None
return targetDict[index]
def getIndex(self, token):
if token in self.dictionary:
return (self.dictionary.index(token), False)
elif token in self.secondaryDictionary:
return (self.secondaryDictionary.index(token), True)
return None
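# Hypothetical round-trip sketch (the token 'message' comes from the primary
# table above):
#   td = TokenDictionary()
#   index, secondary = td.getIndex('message')   # secondary == False here
#   assert td.getToken(index, secondary) == 'message'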
| 22.115312 | 75 | 0.35892 | 826 | 11,699 | 4.993947 | 0.623487 | 0.011879 | 0.021818 | 0.011636 | 0.016 | 0.016 | 0 | 0 | 0 | 0 | 0 | 0.021216 | 0.49235 | 11,699 | 528 | 76 | 22.157197 | 0.673346 | 0 | 0 | 0.013436 | 0 | 0 | 0.332507 | 0.026327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005758 | false | 0.005758 | 0 | 0 | 0.017274 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac4fb5fe3522c748057420a65d31946e928e5bc9 | 3,476 | py | Python | tests/helper_commons.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null | tests/helper_commons.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null | tests/helper_commons.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null |
from dataclasses import dataclass
from itertools import repeat
from typing import Union, List, Tuple
import numpy as np
import numpy.testing as np_test
import torch
import simple_learning as sl
np.random.seed(42)
A_TOLERANCE = 1e-6
R_TOLERANCE = 1e-6
@dataclass
class Constructor:
functions: Union[List, Tuple]
arguments: Union[List, Tuple]
def evaluate_function_with_pytorch(simple_learning_function, torch_function, constructor):
"""Apply a simple_learning_function and a torch_function to the same numpy array based Tensors
built with constructor, compare the results of both and the gradients of the leaf tensors.
Args:
simple_learning_function: Callable to evaluate the from the simple_learning library.
torch_function: Callable to evaluate from the pytorch library.
constructor: Pair of iterables ((func1, func2), (args_to_func1, args_to_func2)) to build
the numpy arrays used as parameters to both functions.
            The arrays are initialized as func1(*args_to_func1), func2(*args_to_func2). The
            result of each func will be used as a parameter to both the simple_learning and
            pytorch functions.
In the case that only one constructor function is provided, but multiple
argument iterables, the function will be broadcasted to all other argument iterables:
Given: ((func1,), (args_to_func1, more_args_to_func1))
It's equivalent to: func1(*args_to_func1), func1(*more_args_to_func1).
Raises:
AssertionError if the simple_learning and pytorch functions results differ by more than the
set absolute or relative tolerances.
"""
constructor = Constructor(*constructor)
if not isinstance(constructor.functions, (list, tuple)):
constructor.functions = (constructor.functions, )
    # If the number of functions doesn't match the number of given argument
    # iterables, broadcast the first given function to all of them.
n_functions = len(constructor.functions)
n_arguments = len(constructor.arguments)
if n_functions < n_arguments:
constructor.functions = repeat(constructor.functions[0], n_arguments)
args_arrays = [func(*args) for (func, args) in zip(constructor.functions, constructor.arguments)]
# apply simple_learning function to Tensors
sl_args = [sl.Tensor(arg.copy()) for arg in args_arrays]
sl_result = simple_learning_function(*sl_args)
sl_result.backward(np.ones_like(sl_result.data))
# apply the same function to pytorch's Tensors
pt_args = [torch.tensor(arg.copy().astype('float32'), requires_grad=True) for arg in args_arrays]
pt_result = torch_function(*pt_args)
pt_result.backward(torch.ones_like(pt_result))
# check if the forward pass is correct
obtained_result = sl_result.data
expected_result = pt_result.detach().numpy()
np_test.assert_allclose(obtained_result, expected_result, R_TOLERANCE, A_TOLERANCE)
    # check if the backward pass is correct (the arguments' gradients)
for sl_a, pt_a in zip(sl_args, pt_args):
if sl_a.grad is None and pt_a.grad is None:
continue # if neither of the Tensors required grad, skip them
obtained_grad = sl_a.grad
expected_grad = pt_a.grad.numpy()
np_test.assert_allclose(obtained_grad, expected_grad, R_TOLERANCE, A_TOLERANCE)
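# Hypothetical usage sketch (assumes sl.Tensor and torch.Tensor both overload
# the + operator; shapes are arbitrary):
#   evaluate_function_with_pytorch(
#       lambda a, b: a + b,                      # simple_learning side
#       lambda a, b: a + b,                      # pytorch side
#       ((np.random.rand,), ((3, 4), (3, 4))),   # one func broadcast to both args
#   )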
| 40.418605 | 107 | 0.714614 | 479 | 3,476 | 5.002088 | 0.292276 | 0.052588 | 0.027546 | 0.020033 | 0.086811 | 0.027546 | 0 | 0 | 0 | 0 | 0 | 0.008843 | 0.219217 | 3,476 | 85 | 108 | 40.894118 | 0.873987 | 0.465478 | 0 | 0 | 0 | 0 | 0.003944 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.026316 | false | 0 | 0.184211 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac506abbf25aa8526a668caf375a867bfb855572 | 1,467 | py | Python | dizoo/classic_control/cartpole/config/cartpole_sqn_config.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | ["Apache-2.0"] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | dizoo/classic_control/cartpole/config/cartpole_sqn_config.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | ["Apache-2.0"] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | dizoo/classic_control/cartpole/config/cartpole_sqn_config.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | ["Apache-2.0"] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z |
from easydict import EasyDict
update_per_collect = 8
cartpole_sqn_config = dict(
env=dict(
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=195,
),
policy=dict(
cuda=False,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[64, 64],
# Whether to use dueling head.
dueling=True,
),
learn=dict(
multi_gpu=False,
update_per_collect=update_per_collect,
batch_size=64,
learning_rate_q=0.001,
learning_rate_alpha=0.001,
alpha=0.2,
target_entropy=0.2,
),
collect=dict(
n_sample=update_per_collect * 2,
nstep=1,
),
other=dict(
eps=dict(
type='exp',
start=1.,
end=0.8,
decay=2000,
), replay_buffer=dict(replay_buffer_size=10000, )
),
)
)
cartpole_sqn_config = EasyDict(cartpole_sqn_config)
main_config = cartpole_sqn_config
cartpole_sqn_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='sqn'),
)
cartpole_sqn_create_config = EasyDict(cartpole_sqn_create_config)
create_config = cartpole_sqn_create_config
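if __name__ == "__main__":
    # Hypothetical entry point following the usual DI-engine pattern; the
    # serial_pipeline import path is an assumption about the library version.
    from ding.entry import serial_pipeline
    serial_pipeline((main_config, create_config), seed=0)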
| 26.196429 | 74 | 0.572597 | 169 | 1,467 | 4.633136 | 0.449704 | 0.112388 | 0.081737 | 0.117497 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041965 | 0.334015 | 1,467 | 55 | 75 | 26.672727 | 0.759468 | 0.019087 | 0 | 0.153846 | 0 | 0 | 0.045929 | 0.033403 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac51086493dea87cea7befdd7771d3cb8a3ba7c2 | 834 | py | Python | distributed/http/routing.py | willirath/distributed | 67fe8208a0a6edc18e02a4c5080d81fb11eab338 | ["BSD-3-Clause"] | 1 | 2020-08-11T16:09:14.000Z | 2020-08-11T16:09:14.000Z | distributed/http/routing.py | willirath/distributed | 67fe8208a0a6edc18e02a4c5080d81fb11eab338 | ["BSD-3-Clause"] | 2 | 2021-05-11T16:00:55.000Z | 2021-08-23T20:45:22.000Z | distributed/http/routing.py | willirath/distributed | 67fe8208a0a6edc18e02a4c5080d81fb11eab338 | ["BSD-3-Clause"] | 1 | 2020-06-19T11:38:14.000Z | 2020-06-19T11:38:14.000Z |
from tornado import web
import tornado.httputil
class RoutingApplication(web.Application):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.applications = []
def find_handler(self, request: tornado.httputil.HTTPServerRequest, **kwargs):
handler = super().find_handler(request, **kwargs)
if handler and not issubclass(handler.handler_class, web.ErrorHandler):
return handler
else:
for app in self.applications:
handler = app.find_handler(request, **kwargs) or handler
if handler and not issubclass(handler.handler_class, web.ErrorHandler):
break
return handler
def add_application(self, application: web.Application):
self.applications.append(application)
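# Hypothetical usage sketch (handler classes are assumptions):
#   main = RoutingApplication([(r"/", MainHandler)])
#   main.add_application(web.Application([(r"/metrics", MetricsHandler)]))
#   # Requests that MainHandler's routes cannot serve fall through to the
#   # registered sub-applications instead of ending at an ErrorHandler.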
| 36.26087 | 87 | 0.654676 | 87 | 834 | 6.114943 | 0.37931 | 0.090226 | 0.067669 | 0.090226 | 0.221805 | 0.221805 | 0.221805 | 0.221805 | 0.221805 | 0.221805 | 0 | 0 | 0.252998 | 834 | 22 | 88 | 37.909091 | 0.853933 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac528de5deaa35c654fcd1a01342f8e99f82e318 | 22,081 | py | Python | rqt_joint_trajectory_controller/src/rqt_joint_trajectory_controller/joint_trajectory_controller.py | StratomInc/ros2_controllers | e9e7ea89772442cfaf41896fa215df003fc66a59 | ["Apache-2.0"] | null | null | null | rqt_joint_trajectory_controller/src/rqt_joint_trajectory_controller/joint_trajectory_controller.py | StratomInc/ros2_controllers | e9e7ea89772442cfaf41896fa215df003fc66a59 | ["Apache-2.0"] | null | null | null | rqt_joint_trajectory_controller/src/rqt_joint_trajectory_controller/joint_trajectory_controller.py | StratomInc/ros2_controllers | e9e7ea89772442cfaf41896fa215df003fc66a59 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2014, PAL Robotics S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PAL Robotics S.L. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from ament_index_python import get_resource
import rclpy
from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSDurabilityPolicy, QoSLivelinessPolicy, qos_profile_sensor_data
from rclpy.duration import Duration
from rcl_interfaces.srv import GetParameters
from rqt_gui_py.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtCore import QTimer, Signal
from python_qt_binding.QtWidgets import QWidget, QFormLayout
from control_msgs.msg import JointTrajectoryControllerState
from controller_manager.controller_manager_services import list_controllers
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from .double_editor import DoubleEditor
from .joint_limits_urdf import get_joint_limits
from .update_combo import update_combo
# TODO:
# - Better UI support for continuous joints (see DoubleEditor TODO)
# - Can we load controller joints faster?, it's currently pretty slow
# - If URDF is reloaded, allow to reset the whole plugin?
# - Allow to configure:
# - URDF location
# - Command publishing and state update frequency
# - Controller manager and jtc monitor frequency
# - Min trajectory duration
# - Fail gracefully when the URDF or some other requisite is not set
# - Could users confuse the enable/disable button with controller start/stop
# (in a controller manager sense)?
# - Better decoupling between model and view
# - Tab order is wrong. For the record, this did not work:
# QWidget.setTabOrder(self._widget.controller_group,
# self._widget.joint_group)
# QWidget.setTabOrder(self._widget.joint_group,
# self._widget.speed_scaling_group)
# NOTE:
# Controller enable/disable icons are in the public domain, and available here:
# freestockphotos.biz/photos.php?c=all&o=popular&s=0&lic=all&a=all&set=powocab_u2
class JointTrajectoryController(Plugin):
"""
Graphical frontend for a C{JointTrajectoryController}.
There are two modes for interacting with a controller:
1. B{Monitor mode} Joint displays are updated with the state reported
by the controller. This is a read-only mode and does I{not} send
control commands. Every time a new controller is selected, it starts
in monitor mode for safety reasons.
2. B{Control mode} Joint displays update the control command that is
       sent to the controller. Commands are sent periodically even if the
       displays are not being updated by the user.
To control the aggressiveness of the motions, the maximum speed of the
sent commands can be scaled down using the C{Speed scaling control}
This plugin is able to detect and keep track of all active controller
managers, as well as the JointTrajectoryControllers that are I{running}
in each controller manager.
For a controller to be compatible with this plugin, it must comply with
the following requisites:
- The controller type contains the C{JointTrajectoryController}
substring, e.g., C{position_controllers/JointTrajectoryController}
- The controller exposes the C{command} and C{state} topics in its
ROS interface.
Additionally, there must be a URDF loaded with a valid joint limit
specification, namely position (when applicable) and velocity limits.
A reference implementation of the C{JointTrajectoryController} is
available in the C{joint_trajectory_controller} ROS package.
"""
_cmd_pub_freq = 10.0 # Hz
_widget_update_freq = 30.0 # Hz
_ctrlrs_update_freq = 1 # Hz
_min_traj_dur = 5.0 / _cmd_pub_freq # Minimum trajectory duration
jointStateChanged = Signal([dict])
_state_sub = None
def __init__(self, context):
super(JointTrajectoryController, self).__init__(context)
self.setObjectName('JointTrajectoryController')
# Initialize members
self._jtc_name = [] # Name of selected joint trajectory controller
self._cm_ns = [] # Namespace of the selected controller manager
self._joint_pos = {} # name->pos map for joints of selected controller
self._joint_names = [] # Ordered list of selected controller joints
self._jtc_joints_info = {} # Lazily evaluated as needed
self._robot_joint_limits = {} # Lazily evaluated on first use
self._node = context.node
self._widget = QWidget()
_, package_path = get_resource('packages', 'rqt_joint_trajectory_controller')
ui_file = os.path.join(package_path, 'share', 'rqt_joint_trajectory_controller', 'resource', 'joint_trajectory_controller.ui')
loadUi(ui_file, self._widget)
self._widget.setObjectName('JointTrajectoryControllerUi')
# Setup speed scaler
speed_scaling = DoubleEditor(1.0, 100.0)
speed_scaling.spin_box.setSuffix('%')
speed_scaling.spin_box.setValue(50.0)
speed_scaling.spin_box.setDecimals(0)
speed_scaling.setEnabled(False)
self._widget.speed_scaling_layout.addWidget(speed_scaling)
self._speed_scaling_widget = speed_scaling
speed_scaling.valueChanged.connect(self._on_speed_scaling_change)
self._on_speed_scaling_change(speed_scaling.value())
# Show _widget.windowTitle on left-top of each plugin (when
# it's set in _widget). This is useful when you open multiple
# plugins at once. Also if you open multiple instances of your
# plugin at once, these lines add number to make it easy to
# tell from pane to pane.
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() +
(' (%d)' % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
# Timer for sending commands to active controller
self._update_cmd_timer = QTimer(self)
self._update_cmd_timer.setInterval(1000.0 / self._cmd_pub_freq)
self._update_cmd_timer.timeout.connect(self._update_cmd_cb)
# Timer for updating the joint widgets from the controller state
self._update_act_pos_timer = QTimer(self)
self._update_act_pos_timer.setInterval(1000.0 /
self._widget_update_freq)
self._update_act_pos_timer.timeout.connect(self._update_joint_widgets)
# Timer for controller manager updates
self._list_cm = ['/controller_manager']
self._update_cm_list_timer = QTimer(self)
self._update_cm_list_timer.setInterval(1000.0 /
self._ctrlrs_update_freq)
self._update_cm_list_timer.timeout.connect(self._update_cm_list)
self._update_cm_list_timer.start()
# Timer for running controller updates
self._update_jtc_list_timer = QTimer(self)
self._update_jtc_list_timer.setInterval(1000.0 /
self._ctrlrs_update_freq)
self._update_jtc_list_timer.timeout.connect(self._update_jtc_list)
self._update_jtc_list_timer.start()
# Signal connections
w = self._widget
w.enable_button.toggled.connect(self._on_jtc_enabled)
w.jtc_combo.currentIndexChanged[str].connect(self._on_jtc_change)
w.cm_combo.currentIndexChanged[str].connect(self._on_cm_change)
self._cmd_pub = None # Controller command publisher
self._state_sub = None # Controller state subscriber
self._state_sub = self._node.create_subscription(
JointTrajectoryControllerState,
'/state',
self._state_cb,
10)
self._list_controllers = None
def shutdown_plugin(self):
self._update_cmd_timer.stop()
self._update_act_pos_timer.stop()
self._update_cm_list_timer.stop()
self._update_jtc_list_timer.stop()
self._unregister_state_sub()
self._unregister_cmd_pub()
def save_settings(self, plugin_settings, instance_settings):
instance_settings.set_value('cm_ns', self._cm_ns)
instance_settings.set_value('jtc_name', self._jtc_name)
def restore_settings(self, plugin_settings, instance_settings):
# Restore last session's controller_manager, if present
self._update_cm_list()
cm_ns = instance_settings.value('cm_ns')
cm_combo = self._widget.cm_combo
cm_list = [cm_combo.itemText(i) for i in range(cm_combo.count())]
try:
idx = cm_list.index(cm_ns)
cm_combo.setCurrentIndex(idx)
# Resore last session's controller, if running
self._update_jtc_list()
jtc_name = instance_settings.value('jtc_name')
jtc_combo = self._widget.jtc_combo
jtc_list = [jtc_combo.itemText(i) for i in range(jtc_combo.count())]
try:
idx = jtc_list.index(jtc_name)
jtc_combo.setCurrentIndex(idx)
except (ValueError):
pass
except (ValueError):
pass
# def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure
# This will enable a setting button (gear icon) in each dock widget
# title bar
# Usually used to open a modal configuration dialog
def _update_cm_list(self):
update_combo(self._widget.cm_combo, self._list_cm)
def _update_jtc_list(self):
# Clear controller list if no controller information is available
if not self._list_controllers:
self._widget.jtc_combo.clear()
return
# List of running controllers with a valid joint limits specification
# for _all_ their joints
running_jtc = self._running_jtc_info()
if running_jtc and not self._robot_joint_limits:
self._robot_joint_limits = get_joint_limits(n=self._node) # Lazy evaluation
valid_jtc = []
for jtc_info in running_jtc:
has_limits = all(name in self._robot_joint_limits
for name in self._jtc_joint_names(jtc_name=jtc_info.name))
if has_limits:
                valid_jtc.append(jtc_info)
valid_jtc_names = [data.name for data in valid_jtc]
# Update widget
update_combo(self._widget.jtc_combo, sorted(valid_jtc_names))
def _on_speed_scaling_change(self, val):
self._speed_scale = val / self._speed_scaling_widget.slider.maximum()
def _on_joint_state_change(self, actual_pos):
#assert(len(actual_pos) == len(self._joint_pos))
for name in actual_pos.keys():
try:
self._joint_pos[name]['position'] = actual_pos[name]
except (KeyError):
pass
def _on_cm_change(self, cm_ns):
self._cm_ns = cm_ns
if cm_ns:
self._list_controllers = list_controllers(self._node, cm_ns).controller
# NOTE: Clear below is important, as different controller managers
# might have controllers with the same name but different
# configurations. Clearing forces controller re-discovery
self._widget.jtc_combo.clear()
self._update_jtc_list()
else:
self._list_controllers = None
def _on_jtc_change(self, jtc_name):
self._unload_jtc()
self._jtc_name = jtc_name
if self._jtc_name:
self._load_jtc()
def _on_jtc_enabled(self, val):
# Don't allow enabling if there are no controllers selected
if not self._jtc_name:
self._widget.enable_button.setChecked(False)
return
# Enable/disable joint displays
for joint_widget in self._joint_widgets():
joint_widget.setEnabled(val)
# Enable/disable speed scaling
self._speed_scaling_widget.setEnabled(val)
if val:
# Widgets send desired position commands to controller
self._update_act_pos_timer.stop()
self._update_cmd_timer.start()
else:
# Controller updates widgets with actual position
self._update_cmd_timer.stop()
self._update_act_pos_timer.start()
def _load_jtc(self):
# Initialize joint data corresponding to selected controller
running_jtc = self._running_jtc_info()
self._joint_names = next(self._jtc_joint_names(x.name) for x in running_jtc
if x.name == self._jtc_name)
for name in self._joint_names:
self._joint_pos[name] = {}
# Update joint display
try:
layout = self._widget.joint_group.layout()
for name in self._joint_names:
limits = self._robot_joint_limits[name]
joint_widget = DoubleEditor(limits['min_position'],
limits['max_position'])
layout.addRow(name, joint_widget)
# NOTE: Using partial instead of a lambda because lambdas
# "will not evaluate/look up the argument values before it is
# effectively called, breaking situations like using a loop
# variable inside it"
from functools import partial
par = partial(self._update_single_cmd_cb, name=name)
joint_widget.valueChanged.connect(par)
        except Exception:
# TODO: Can we do better than swallow the exception?
from sys import exc_info
print('Unexpected error:', exc_info()[0])
# Enter monitor mode (sending commands disabled)
self._on_jtc_enabled(False)
# Setup ROS interfaces
jtc_ns = self._resolve_controller_ns(self._cm_ns, self._jtc_name)
state_topic = '/state'
cmd_topic = jtc_ns + '/joint_trajectory'
# self._state_sub = self._node.create_subscription(
# JointTrajectoryControllerState,
# '/state',
# self._state_cb,
# 10)
# self._state_sub
# print("state sub set up")
self._cmd_pub = self._node.create_publisher(JointTrajectory, cmd_topic, 1)
# Start updating the joint positions
self.jointStateChanged.connect(self._on_joint_state_change)
def _unload_jtc(self):
# Stop updating the joint positions
try:
self.jointStateChanged.disconnect(self._on_joint_state_change)
        except Exception:
pass
# Reset ROS interfaces
#self._unregister_state_sub()
self._unregister_cmd_pub()
# Clear joint widgets
# NOTE: Implementation is a workaround for:
# https://bugreports.qt-project.org/browse/QTBUG-15990 :(
layout = self._widget.joint_group.layout()
if layout is not None:
while layout.count():
layout.takeAt(0).widget().deleteLater()
# Delete existing layout by reparenting to temporary
QWidget().setLayout(layout)
self._widget.joint_group.setLayout(QFormLayout())
# Reset joint data
self._joint_names = []
self._joint_pos = {}
# Enforce monitor mode (sending commands disabled)
self._widget.enable_button.setChecked(False)
def _running_jtc_info(self):
controller_list = self._list_controllers
jtc_list = [c for c in controller_list if 'JointTrajectoryController' in c.type]
        running_jtc_list = [c for c in jtc_list if c.state == 'active']
return running_jtc_list
def _unregister_cmd_pub(self):
if self._cmd_pub is not None:
self._node.destroy_publisher(self._cmd_pub)
            self._cmd_pub = None
def _unregister_state_sub(self):
if self._state_sub is not None:
self._node.destroy_subscription(self._state_sub)
self._state_sub = None
def _state_cb(self, msg):
actual_pos = {}
for i in range(len(msg.joint_names)):
joint_name = msg.joint_names[i]
joint_pos = msg.actual.positions[i]
actual_pos[joint_name] = joint_pos
self.jointStateChanged.emit(actual_pos)
def _update_single_cmd_cb(self, val, name):
self._joint_pos[name]['command'] = val
def _update_cmd_cb(self):
dur = []
traj = JointTrajectory()
traj.joint_names = self._joint_names
point = JointTrajectoryPoint()
for name in traj.joint_names:
pos = self._joint_pos[name]['position']
cmd = pos
try:
cmd = self._joint_pos[name]['command']
except (KeyError):
pass
max_vel = self._robot_joint_limits[name]['max_velocity']
dur.append(max(abs(cmd - pos) / max_vel, self._min_traj_dur))
point.positions.append(cmd)
point.time_from_start = Duration(seconds=(max(dur) / self._speed_scale)).to_msg()
traj.points.append(point)
self._cmd_pub.publish(traj)
def _update_joint_widgets(self):
rclpy.spin_once(self._node)
joint_widgets = self._joint_widgets()
for id in range(len(joint_widgets)):
joint_name = self._joint_names[id]
try:
joint_pos = self._joint_pos[joint_name]['position']
joint_widgets[id].setValue(joint_pos)
except (KeyError):
pass # Can happen when first connected to controller
def _joint_widgets(self): # TODO: Cache instead of compute every time?
widgets = []
layout = self._widget.joint_group.layout()
for row_id in range(layout.rowCount()):
widgets.append(layout.itemAt(row_id,
QFormLayout.FieldRole).widget())
return widgets
def _jtc_joint_names(self, jtc_name):
# NOTE: We assume that there is at least one hardware interface that
# claims resources (there should be), and the resource list is fetched
# from the first available interface
if jtc_name not in self._jtc_joints_info:
self._jtc_joints_info[jtc_name] = call_get_parameters(node=self._node, node_name=jtc_name, parameter_names=['joints']).values[0].string_array_value
return self._jtc_joints_info[jtc_name]
def _resolve_controller_ns(self, cm_ns, controller_name):
"""
Resolve a controller's namespace from that of the controller manager.
Controllers are assumed to live one level above the controller
manager, e.g.
>>> _resolve_controller_ns('/path/to/controller_manager', 'foo')
'/path/to/foo'
In the particular case in which the controller manager is not
namespaced, the controller is assumed to live in the root namespace
>>> _resolve_controller_ns('/', 'foo')
'/foo'
>>> _resolve_controller_ns('', 'foo')
'/foo'
@param cm_ns Controller manager namespace (can be an empty string)
@type cm_ns str
@param controller_name Controller name (non-empty string)
@type controller_name str
@return Controller namespace
@rtype str
"""
        assert controller_name
ns = cm_ns.rsplit('/', 1)[0]
if ns != '/':
ns += '/'
ns += controller_name
return ns
# call_get_parameters taken from ros2cli
# there does not appear to be a way yet to easily get params hosted by another node
# https://github.com/ros2/ros2cli/blob/c00dec0a72c049d3a4a8a80f1324ea24dc8373c6/ros2param/ros2param/api/__init__.py#L122
def call_get_parameters(*, node, node_name, parameter_names):
# create client
client = node.create_client(
GetParameters,
'{node_name}/get_parameters'.format_map(locals()))
# call as soon as ready
ready = client.wait_for_service(timeout_sec=5.0)
if not ready:
raise RuntimeError('Wait for service timed out')
request = GetParameters.Request()
request.names = parameter_names
future = client.call_async(request)
rclpy.spin_until_future_complete(node, future)
# handle response
response = future.result()
if response is None:
e = future.exception()
raise RuntimeError(
'Exception while calling service of node '
"'{args.node_name}': {e}".format_map(locals()))
return response
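# Hypothetical usage sketch (node handle and controller name are assumptions):
#   joints = call_get_parameters(
#       node=my_node,
#       node_name='/joint_trajectory_controller',
#       parameter_names=['joints'],
#   ).values[0].string_array_value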
| 41.119181 | 157 | 0.665731 | 2,770 | 22,081 | 5.051986 | 0.229964 | 0.022152 | 0.007074 | 0.009718 | 0.189796 | 0.115335 | 0.054666 | 0.046091 | 0.036587 | 0.036587 | 0 | 0.005946 | 0.26122 | 22,081 | 536 | 158 | 41.195896 | 0.851897 | 0.362484 | 0 | 0.188153 | 0 | 0 | 0.035868 | 0.014245 | 0 | 0 | 0 | 0.003731 | 0.003484 | 1 | 0.083624 | false | 0.020906 | 0.062718 | 0 | 0.195122 | 0.003484 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac529fb06621a45b098423759b1de3c0c84e3074 | 819 | py | Python | LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/246_Strobogrammatic_Number.py | Sycamore-City-passerby/ML | 605cfc70bdda2c99e5f1c16b25812b59c98a72ad | ["MIT"] | null | null | null | LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/246_Strobogrammatic_Number.py | Sycamore-City-passerby/ML | 605cfc70bdda2c99e5f1c16b25812b59c98a72ad | ["MIT"] | null | null | null | LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/246_Strobogrammatic_Number.py | Sycamore-City-passerby/ML | 605cfc70bdda2c99e5f1c16b25812b59c98a72ad | ["MIT"] | null | null | null |
class Solution:
lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
n = len(num)
for i in range(int((n+1) / 2)):
if num[n-1-i] not in self.lookup or num[i] != self.lookup[num[n-1-i]]:
return False
return True
if __name__ == '__main__':
print(Solution().isStrobogrammatic("69"))
"""
Time Complexity = O(N)
Space Complexity = O(1)
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is represented as a string.
Example:
Input: "69"
Output: true
"""
| 24.088235 | 114 | 0.537241 | 110 | 819 | 3.927273 | 0.581818 | 0.013889 | 0.023148 | 0.027778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039855 | 0.326007 | 819 | 33 | 115 | 24.818182 | 0.742754 | 0.032967 | 0 | 0 | 0 | 0 | 0.049875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac52f8692788d0f5172b16f2b0ebe9b3b76b5eca | 2,148 | py | Python | haproxyspoa/spoa_payloads.py | krrg/haproxy-python-spoa | f8a3c4dcea1c0451683dbc89c035009911b234e2 | ["Apache-2.0"] | 4 | 2021-04-06T01:46:58.000Z | 2022-01-10T12:38:29.000Z | haproxyspoa/spoa_payloads.py | krrg/haproxy-python-spoa | f8a3c4dcea1c0451683dbc89c035009911b234e2 | ["Apache-2.0"] | null | null | null | haproxyspoa/spoa_payloads.py | krrg/haproxy-python-spoa | f8a3c4dcea1c0451683dbc89c035009911b234e2 | ["Apache-2.0"] | null | null | null |
from time import sleep
from collections import defaultdict
from typing import Dict
from haproxyspoa.spoa_data_types import parse_string, parse_typed_data, write_string, write_typed_autodetect
def parse_list_of_messages(payload: io.BytesIO) -> dict:
messages = {}
while payload.tell() != len(payload.getbuffer()):
message_name = parse_string(payload)
num_args = int.from_bytes(payload.read(1), byteorder='little', signed=False)
arguments = defaultdict(list)
for _ in range(num_args):
key, value = parse_key_value_pair(payload)
arguments[key].append(value)
# For convenience in the handlers, flatten arguments
# that have only one value mapping to the same key.
for argkey in arguments.keys():
if len(arguments[argkey]) == 1:
arguments[argkey] = arguments[argkey][0]
messages[message_name] = arguments
# Hide the default dict implementation
for k in messages.keys():
messages[k] = dict(messages[k])
return messages
def parse_key_value_pair(payload: io.BytesIO):
key = parse_string(payload)
value = parse_typed_data(payload)
return key, value
class Action:
SET_VAR = 1
UNSET_VAR = 2
def __init__(self, _type: int, args: int):
        self.type = _type  # no trailing comma: that would store a one-element tuple
self.args = args
def write_list_of_actions(actions: list) -> bytes:
buffer = io.BytesIO()
for action in actions:
_type = bytes([action.type])
num_args = bytes([len(action.args)])
buffer.write(_type)
buffer.write(num_args)
for arg in action.args:
buffer.write(write_typed_autodetect(arg))
return buffer.getvalue()
def parse_kv_list(payload: io.BytesIO):
kv_list = {}
while payload.tell() != len(payload.getbuffer()):
key = parse_string(payload)
value = parse_typed_data(payload)
kv_list[key] = value
return kv_list
def write_kv_list(kv: Dict[str, bytes]) -> bytes:
buffer = io.BytesIO()
for k, v in kv.items():
buffer.write(write_string(k))
buffer.write(v)
return buffer.getvalue()
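# Hypothetical round-trip sketch (argument values and the symmetry of the
# typed-data helpers are assumptions):
#   frame = write_kv_list({"version": write_typed_autodetect("2.0")})
#   parse_kv_list(io.BytesIO(frame))   # -> {"version": "2.0"}
#   write_list_of_actions([Action(Action.SET_VAR, ["scope", "name", "value"])])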
| 26.195122 | 108 | 0.650372 | 278 | 2,148 | 4.827338 | 0.298561 | 0.033532 | 0.031297 | 0.028316 | 0.19225 | 0.122206 | 0.070045 | 0.070045 | 0.070045 | 0 | 0 | 0.003096 | 0.248138 | 2,148 | 81 | 109 | 26.518519 | 0.827864 | 0.064246 | 0 | 0.188679 | 0 | 0 | 0.002991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.075472 | 0 | 0.339623 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5308b9f366d273fd9d3a91a0c4bfcd1cab82dc | 8,145 | py | Python | oss-internship-2020/libuv/generator/wrapper_generator.py | oshogbo/sandboxed-api | 8e82b900f4d873219b3abfa2fd06ecbd416edefd | ["Apache-2.0"] | 1 | 2022-02-10T10:38:30.000Z | 2022-02-10T10:38:30.000Z | oss-internship-2020/libuv/generator/wrapper_generator.py | oshogbo/sandboxed-api | 8e82b900f4d873219b3abfa2fd06ecbd416edefd | ["Apache-2.0"] | null | null | null | oss-internship-2020/libuv/generator/wrapper_generator.py | oshogbo/sandboxed-api | 8e82b900f4d873219b3abfa2fd06ecbd416edefd | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script generating a wrapper API for LibUV.
Note: This script is highly specific to LibUV's source code and does not
generalize to any other library.
"""
import os
import re
import sys
from typing import List
def get_var_type(string: str) -> str:
"""Gets the type from an argument variable.
Args:
string: Input variable declaration
Returns:
The type of the argument variable as a string, e.g. "int x" -> "int".
"""
var = string.strip()
# Unnamed variable
if var in ("void", "...") or var[-1] == "*":
return var
return " ".join(var.split(" ")[:-1]).strip()
def get_var_name(string: str) -> str:
"""Gets the name from an argument variable.
Args:
string: Input variable declaration
Returns:
The name of the arguments variable as a string, e.g. "int x" -> "x".
"""
var = string.strip()
# Not an actual variable
if var in ("void", "..."):
return ""
# Unnamed variable, use an arbitrary name
if var[-1] == "*":
return var.split("_")[1]
return var.split(" ")[-1].strip()
def fix_method_type(string: str) -> str:
"""Fixes the method type.
Args:
string: A parameter type declaration
Returns:
A fixed up string replacing pointers to concrete types with pointers to
void, e.g. "const int*" -> "const void*".
"""
method_type = string.strip()
# Const pointer
if "*" in method_type and "const" in method_type:
return "const void*"
# Regular pointer
if "*" in method_type:
return "void*"
# Not a pointer
return method_type
def fix_argument(string: str) -> str:
"""Fixes an argument.
Args:
string: An argument type to fix
Returns:
The fixed up argument as a string, e.g. "const int* x" -> "const void* x".
"""
arg_type = get_var_type(string)
arg_name = get_var_name(string)
# Array argument, becomes a pointer
if "[" in arg_name:
arg_type += "*"
arg_name = arg_name.split("[")[0] + arg_name.split("]")[-1]
# Pointer (in LibUV, types endind in "_cb" or "_func" are pointers)
if "*" in arg_type or "_cb" in arg_type or "_func" in arg_type:
if "const" in arg_type:
return "const void* " + arg_name
return "void* " + arg_name
# Not a pointer
return arg_type + " " + arg_name
def fix_call_argument(string: str) -> str:
"""Fixes an argument in a call the orignal method.
Args:
string: A method call argument
Returns:
The fixed call argument, e.g. "const int* x" ->
"reinterpret_cast<const int*>(x)".
"""
arg_type = get_var_type(string)
arg_name = get_var_name(string)
# Array argument, becomes a pointer
if "[" in arg_name:
arg_type += "*"
arg_name = arg_name.split("[")[0] + arg_name.split("]")[-1]
# Pointer (in LibUV, types endind in "_cb" or "_func" are pointers)
if "*" in arg_type or "_cb" in arg_type or "_func" in arg_type:
return "reinterpret_cast<" + arg_type + ">(" + arg_name + ")"
# Not a pointer
return arg_name
def read_file(filename: str) -> str:
"""Returns contents of filename as a string.
Args:
filename: The name of the file to read
Returns:
The contents of the file as a string.
"""
file = open(filename, "r")
return str(file.read())
def clean_file(text: str) -> str:
"""Prepares the file for parsing.
In particular, removes comments and macros from text
Additionally, moves pointer asterisks next to its type
Args:
text: The contents of the text file to prepare
Returns:
The cleaned up file contents.
"""
result = text
result = re.sub(r"//.*?\n", "", result, flags=re.S)
result = re.sub(r"/\*.*?\*/", "", result, flags=re.S)
result = re.sub(r"#.*?\n", "", result, flags=re.S)
result = result.replace(" *", "* ")
return result
def get_signatures(text: str) -> str:
"""Gets the signatures of all the methods in the header.
Note: This method only works on a certain version of LibUV's header.
Args:
text: The contents of the header file
Returns:
The extracted method signatures.
"""
signatures = [x.split(";")[0].strip() for x in text.split("UV_EXTERN")[1:]]
method_types = [
" ".join(s.split("(")[0].split(" ")[:-1]).strip() for s in signatures
]
names = [s.split("(")[0].split(" ")[-1].strip() for s in signatures]
arguments = [s.split("(")[1][:-1] for s in signatures]
arguments_lists = [[x.strip() for x in a.split(",")] for a in arguments]
return zip(method_types, names, arguments_lists)
def append_method(method_type: str, name: str, arguments_list: List[str],
header: List[str], source: List[str]) -> None:
"""Writes the method to the header and the source list of lines.
Args:
method_type: The return type of the method as a string
name: The name of the method
    arguments_list: A list of method arguments
header: A list that receives method wrapper declarations
source: A list that receives the declarations of the method wrappers
"""
header.append(
fix_method_type(method_type) + " sapi_" + name + "(" +
", ".join(map(fix_argument, arguments_list)) + ");")
source.append(
fix_method_type(method_type) + " sapi_" + name + "(" +
", ".join(map(fix_argument, arguments_list)) + ") {\n" + " return " +
name + "(" + ", ".join(map(fix_call_argument, arguments_list)) + ");\n" +
"}")
def append_text(text: str, file: List[str]) -> None:
"""Writes text to file list of lines.
Useful for additional methods, includes, extern "C"...
Args:
text: The text to append to the file
file: A list receiving file lines
"""
file.append(text)
def generate_wrapper() -> None:
"""Generates the wrapper."""
header_file = open(sys.argv[2], "w")
source_file = open(sys.argv[3], "w")
text = read_file(sys.argv[1])
text = clean_file(text)
signatures = get_signatures(text)
header = []
source = []
append_text("#include <uv.h>", header)
append_text("#include <cstddef>", header)
append_text("extern \"C\" {", header)
append_text("#include \"" + os.path.abspath(header_file.name) + "\"", source)
for (method_type, name, arguments_list) in signatures:
# These wrapper methods are manually added at the end
if name in ("uv_once", "uv_loop_configure"):
continue
append_method(method_type, name, arguments_list, header, source)
  # Add sapi_uv_once (uv_once uses a different kind of callback)
append_text("void sapi_uv_once(void* guard, void (*callback)(void));", header)
append_text(
"void sapi_uv_once(void* guard, void (*callback)(void)) {\n" +
" return uv_once(reinterpret_cast<uv_once_t*>(guard)," + "callback);\n" +
"}", source)
# Add sapi_uv_loop_configure (uv_loop_configure is variadic)
append_text(
"int sapi_uv_loop_configure(void* loop, uv_loop_option option)" + ";",
header)
append_text(
"int sapi_uv_loop_configure(void* loop, uv_loop_option option)" +
" {\n return uv_loop_configure(" +
"reinterpret_cast<uv_loop_t*>(loop), option);\n" + "}", source)
# Add sapi_uv_loop_configure_int (uv_loop_configure is variadic)
append_text(
"int sapi_uv_loop_configure_int(void* loop, " +
"uv_loop_option option, int ap);", header)
append_text(
"int sapi_uv_loop_configure_int(void* loop, " +
"uv_loop_option option, int ap) {\n" + " return uv_loop_configure(" +
"reinterpret_cast<uv_loop_t*>(loop), option, ap);\n}", source)
append_text("} // extern \"C\"\n", header)
header_file.write("\n\n".join(header))
source_file.write("\n\n".join(source))
generate_wrapper()
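# Hypothetical invocation (file paths are assumptions):
#   python3 wrapper_generator.py uv.h uv_wrapper.h uv_wrapper.cc
# argv[1] is the LibUV header to parse; argv[2] and argv[3] receive the
# generated wrapper header and source, respectively.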
| 27.424242 | 80 | 0.654635 | 1,190 | 8,145 | 4.335294 | 0.184874 | 0.019771 | 0.031983 | 0.022097 | 0.380306 | 0.317116 | 0.307812 | 0.272533 | 0.257414 | 0.257414 | 0 | 0.004338 | 0.207489 | 8,145 | 296 | 81 | 27.516892 | 0.794888 | 0.401105 | 0 | 0.20354 | 0 | 0 | 0.203688 | 0.057247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097345 | false | 0 | 0.035398 | 0 | 0.274336 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac552fb0646204935fd1ce71331ff3543fcef411 | 3,764 | py | Python | fomo_social_harvester/scraper/twitter.py | dgnsrekt/fomo-social-harvester | accae1c91741ded911da53050331c39bf461c2e7 | ["MIT"] | null | null | null | fomo_social_harvester/scraper/twitter.py | dgnsrekt/fomo-social-harvester | accae1c91741ded911da53050331c39bf461c2e7 | ["MIT"] | null | null | null | fomo_social_harvester/scraper/twitter.py | dgnsrekt/fomo-social-harvester | accae1c91741ded911da53050331c39bf461c2e7 | ["MIT"] | null | null | null |
from time import sleep
import logging
from .base import fetch_page
class TwitterParsingError(Exception):
pass
def parse_tweets(element):
tweet_selector = 'li.ProfileNav-item.ProfileNav-item--tweets.is-active > a > span.ProfileNav-value'
try:
return int(element.find(tweet_selector)[0].element.values()[1])
except IndexError:
print('TW', end='', flush=True)
return 0
def parse_following(element):
following_selector = 'li.ProfileNav-item.ProfileNav-item--following > a > span.ProfileNav-value'
try:
return int(element.find(following_selector)[0].element.values()[1])
except IndexError:
print('FG', end='', flush=True)
return 0
def parse_followers(element):
followers_selector = 'li.ProfileNav-item.ProfileNav-item--followers > a > span.ProfileNav-value'
try:
return int(element.find(followers_selector)[0].element.values()[1])
except IndexError:
print('FR', end='', flush=True)
return 0
def parse_likes(element):
likes_selector = 'li.ProfileNav-item.ProfileNav-item--favorites > a > span.ProfileNav-value'
try:
return int(element.find(likes_selector)[0].element.values()[1])
except IndexError:
print('L', end='', flush=True)
return 0
def parse_twitter_count(row):
sleep(.1)
name = row.get('name')
twitter_link = row.get('link')
user = twitter_link.split('/')[-1]
twitter_header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': f'https://twitter.com/{user}',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',
'X-Twitter-Active-User': 'yes',
'X-Requested-With': 'XMLHttpRequest'
}
html = fetch_page(twitter_link, header=twitter_header)
# html = fetch_page(twitter_link)
selector_one = '#page-container > div.ProfileCanopy.ProfileCanopy--withNav.ProfileCanopy--large.js-variableHeightTopBar > div > div.ProfileCanopy-navBar.u-boxShadow > div > div > div.Grid-cell.u-size2of3.u-lg-size3of4 > div > div > ul'
selector_two = '#page-container > div.ProfileCanopy.ProfileCanopy--withNav.js-variableHeightTopBar > div > div.ProfileCanopy-navBar.u-boxShadow > div > div > div.Grid-cell.u-size2of3.u-lg-size3of4 > div > div > ul'
if html:
if html.url == 'https://twitter.com/account/suspended':
print(name, 'suspended')
return {'name': name, 'tweets': None, 'following': None,
'followers': None, 'likes': None}
element_one = html.find(selector_one)
element_two = html.find(selector_two)
if len(element_one) > 0:
element = element_one
elif len(element_two) > 0:
element = element_two
else:
print(html, name, 'Element Not Found')
raise TwitterParsingError(f'Element Not found {name}')
# try:
element = element[0]
# except IndexError:
# print(name, element, end='', flush=True)
# return {'name': name, 'tweets': None, 'following': None,
# 'followers': None, 'likes': None}
#
# else:
tweets = parse_tweets(element)
following = parse_following(element)
followers = parse_followers(element)
likes = parse_likes(element)
print('.', end='', flush=True)
return {'name': name, 'tweets': tweets, 'following': following,
'followers': followers, 'likes': likes}
print()
print(f'Oops! Either "{user}" does not exist or is private.')
return {'name': name, 'tweets': 0, 'following': 0,
'followers': 0, 'likes': 0}
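# Hypothetical usage sketch (the handle is an assumption):
#   parse_twitter_count({'name': 'Bitcoin', 'link': 'https://twitter.com/bitcoin'})
#   # -> {'name': 'Bitcoin', 'tweets': ..., 'following': ...,
#   #     'followers': ..., 'likes': ...}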
| 35.509434 | 239 | 0.626196 | 452 | 3,764 | 5.128319 | 0.283186 | 0.048318 | 0.031061 | 0.046592 | 0.483175 | 0.462468 | 0.354616 | 0.289042 | 0.213115 | 0.138913 | 0 | 0.018333 | 0.231934 | 3,764 | 105 | 240 | 35.847619 | 0.783466 | 0.056589 | 0 | 0.162162 | 0 | 0.054054 | 0.348391 | 0.16629 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0.013514 | 0.040541 | 0 | 0.27027 | 0.121622 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5555b80bbaaceba1a504fcdefa87d3a0f08f81 | 9,371 | py | Python | plugin/view.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | ["MIT"] | 12 | 2018-01-24T20:58:10.000Z | 2021-12-21T15:02:10.000Z | plugin/view.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | ["MIT"] | 4 | 2018-01-13T14:39:45.000Z | 2020-11-25T00:05:27.000Z | plugin/view.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | ["MIT"] | 2 | 2018-10-23T17:13:44.000Z | 2019-05-12T04:10:17.000Z |
#!/usr/bin/env python3
'''
plugin/view.py
View manager class.
Manages and organizes views. The main purpose of this class is to help
determine which views/files belong to the same project. Views in the same
project may all share a single ycmd server backend.
'''
import logging
import threading
from ..lib.subl.view import (
View,
get_view_id,
)
from ..lib.util.lock import lock_guard
logger = logging.getLogger('sublime-ycmd.' + __name__)
try:
import sublime
except ImportError:
from ..lib.subl.dummy import sublime
class SublimeYcmdViewManager(object):
'''
Singleton helper class. Manages wrappers around sublime view instances.
The wrapper class `View` is used around `sublime.View` to cache certain
calculations, and to store view-specific variables/state.
Although this abstraction isn't strictly necessary, it can save expensive
operations like file path calculation and ycmd event notification.
All APIs are thread-safe.
'''
def __init__(self):
# maps view IDs to `View` instances
self._views = {}
self._lock = threading.RLock()
self.reset()
@lock_guard()
def reset(self):
if self._views:
view_ids = list(self._views.keys())
for view_id in view_ids:
self._unregister_view(view_id)
logger.info('all views have been unregistered')
# active views:
self._views = {}
def get_wrapped_view(self, view):
'''
Returns an instance of `View` corresponding to `view`. If one does
not exist, it will be created, if possible.
If the view is provided as an ID (int), then the lookup is performed
as normal, but a `KeyError` will be raised if it does not exist.
If the view is an instance of `sublime.View`, then the lookup is again
performed as usual, but will be created if it does not exist.
Finally, if the view is an instance of `View`, it is returned as-is.
'''
if not isinstance(view, (int, sublime.View, View)):
raise TypeError('view must be a View: %r' % (view))
if isinstance(view, View):
return view
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
if view_id not in self._views:
# create a wrapped view, if possible
if not isinstance(view, sublime.View):
# not possible... view given with just its id
logger.warning(
'view has not been registered, id: %r', view_id,
)
raise KeyError(view,)
# else, we have a usable view for the wrapper
logger.debug(
'view has not been registered, registering it: %r', view,
)
self._register_view(view, view_id)
assert view_id in self._views, \
'[internal] view id has not been registered: %r' % (view_id)
wrapped_view = self._views[view_id] # type: View
return wrapped_view
@lock_guard()
def has_notified_ready_to_parse(self, view, server):
'''
Returns true if the given `view` has been parsed by the `server`. This
must be done at least once to ensure that the ycmd server has a list
of identifiers to offer in completion results.
        This works by storing a view-specific set of the servers that the
        view has been uploaded to. If this set is missing the given server,
        this method will return false. In that case, the notification should
        probably be sent.
'''
view = self.get_wrapped_view(view)
if not view:
logger.error('unknown view type: %r', view)
raise TypeError('view must be a View: %r' % (view))
init_notified_server_set(view)
return has_notified_server(view, server)
@lock_guard()
def set_notified_ready_to_parse(self, view, server, has_notified=True):
'''
Updates the variable that indicates that the given `view` has been
parsed by the `server`.
        This works by adding the server to a view-specific set of servers
        that the view has been uploaded to. The same set can then be
        checked in `has_notified_ready_to_parse`.
'''
view = self.get_wrapped_view(view)
if not view:
logger.error('unknown view type: %r', view)
raise TypeError('view must be a View: %r' % (view))
init_notified_server_set(view)
if has_notified:
add_notified_server(view, server)
else:
remove_notified_server(view, server)
def _register_view(self, view, view_id=None):
if not isinstance(view, sublime.View):
raise TypeError('view must be a sublime.View: %r' % (view))
if view_id is None:
view_id = get_view_id(view)
if not isinstance(view_id, int):
raise TypeError('view id must be an int: %r' % (view))
logger.debug('registering view with id: %r, %r', view_id, view)
view = View(view)
with self._lock:
self._views[view_id] = view
return view_id
def _unregister_view(self, view):
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
if view_id not in self._views:
logger.debug(
'view was never registered, ignoring id: %s', view_id,
)
return False
del self._views[view_id]
return True
@lock_guard()
def get_views(self):
'''
Returns a shallow-copy of the map of managed `View` instances.
'''
return self._views.copy()
def __contains__(self, view):
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
return view_id in self._views
@lock_guard()
def __getitem__(self, view):
return self.get_wrapped_view(view)
@lock_guard()
def __len__(self):
return len(self._views)
def __bool__(self):
''' Returns `True`, so an instance is always truthy. '''
return True
NOTIFIED_SERVERS_KEY = 'notified_servers'
def init_notified_server_set(view, key=NOTIFIED_SERVERS_KEY):
'''
Initializes the set of notified servers for a given `view` if it has not
already been initialized.
This does nothing if it has been initialized already.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.debug('view has not been sent to any server, creating metadata')
view[key] = set()
def get_server_key(server):
'''
Returns a unique key for `server` to use as an id for it.
'''
server_key = str(server)
return server_key
def has_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
Checks if a given `server` is in the notified server set for a `view`.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
return server_key in notified_servers
def add_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
Adds `server` to the notified server set for `view`.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
notified_servers.add(server_key)
def remove_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
    Removes `server` from the notified server set for `view`.
If the server is not in the notified server set, this does nothing.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
notified_servers.discard(server_key)
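# A minimal sketch of the notified-server helpers above, assuming a plain
# dict as a stand-in for a `View` instance (this trips the "does not appear
# valid" warning but exercises the same code path). Server objects only need
# a stable str() representation, per `get_server_key`.
def _demo_notified_servers():
    fake_view = {}
    init_notified_server_set(fake_view)
    add_notified_server(fake_view, 'server-a')
    assert has_notified_server(fake_view, 'server-a')
    remove_notified_server(fake_view, 'server-a')
    assert not has_notified_server(fake_view, 'server-a')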
| 32.765734 | 79 | 0.619998 | 1,275 | 9,371 | 4.416471 | 0.183529 | 0.04049 | 0.039247 | 0.026993 | 0.484106 | 0.421062 | 0.400107 | 0.34683 | 0.322856 | 0.31078 | 0 | 0.000151 | 0.294953 | 9,371 | 285 | 80 | 32.880702 | 0.852127 | 0.27297 | 0 | 0.484076 | 0 | 0 | 0.167726 | 0 | 0 | 0 | 0 | 0 | 0.025478 | 1 | 0.10828 | false | 0 | 0.044586 | 0.012739 | 0.242038 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac55fc20d998d88d80ebd6ae13f5e7669f2ee2b3 | 5,952 | py | Python | lmp/script/train_tknzr.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | lmp/script/train_tknzr.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | lmp/script/train_tknzr.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | r"""Use this script to train tokenizer on a dataset.
This script is usually run before training language model.
See Also
--------
:doc:`lmp.dset </dset/index>`
All available datasets.
:doc:`lmp.script.sample_dset </script/sample_dset>`
Get a glimpse on all available datasets.
:doc:`lmp.script.tknz_txt </script/tknz_txt>`
Use pre-trained tokenizer to perform tokenization on given text.
:doc:`lmp.tknzr </tknzr/index>`
All available tokenizers.
Examples
--------
The following example script trains a whitespace tokenizer :py:class:`lmp.tknzr.WsTknzr` on the Wiki-Text-2 dataset
:py:class:`lmp.dset.WikiText2Dset` with ``train`` version.
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace \
--dset_name wiki-text-2 \
--exp_name my_tknzr_exp \
--max_vocab 10 \
--min_count 2 \
--ver train
The training result will be saved at path ``project_root/exp/my_tknzr_exp`` and can be reused by other scripts.
One can increase ``--max_vocab`` to allow tokenizer to include more tokens into its vocabulary:
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace \
--dset_name wiki-text-2 \
--exp_name my_tknzr_exp \
--max_vocab 10000 \
--min_count 2 \
--ver train
Set ``--max_vocab`` to ``-1`` to include all tokens in :py:class:`lmp.dset.WikiText2Dset` into tokenizer's vocabulary:
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace \
--dset_name wiki-text-2 \
--exp_name my_tknzr_exp \
--max_vocab -1 \
--min_count 2 \
--ver train
Tokens with low occurrence counts may indicate typos, named entities (people, locations, organizations, etc.) or random
character combinations (emojis, glyphs, etc.). Sometimes one does not want to include tokens with low occurrence
counts. Use ``--min_count`` to filter out tokens whose occurrence counts are lower than ``--min_count``.
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace \
--dset_name wiki-text-2 \
--exp_name my_tknzr_exp \
--max_vocab 10000 \
--min_count 5 \
--ver train
Sometimes cases do not matter, sometimes they do matter. For example:
I ate an apple.
Apple is a fruit.
Apple is a company.
The words `apple` and `Apple` in the first two sentences mean the edible fruit regardless of being upper case `Apple`
or lower case `apple`. But in the third sentence the word `Apple` means the smartphone company and can only be upper
case (since it represents the name of an entity). Thus when processing text one must decide whether to treat upper and
lower case forms as the same token or as different tokens. In this script one can use ``--is_uncased`` to treat upper
case the same as lower case.
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace \
--dset_name wiki-text-2 \
--exp_name my_tknzr_exp \
--is_uncased \
--max_vocab 10000 \
--min_count 5 \
--ver train
You can use ``-h`` or ``--help`` options to get a list of available tokenizers.
.. code-block:: shell
python -m lmp.script.train_tknzr -h
You can use ``-h`` or ``--help`` options on a specific tokenizer to get a list of supported CLI arguments.
.. code-block:: shell
python -m lmp.script.train_tknzr whitespace -h
"""
import argparse
import gc
import sys
from typing import List
import lmp.dset
import lmp.tknzr
import lmp.util.cfg
import lmp.util.dset
import lmp.util.rand
import lmp.util.tknzr
def parse_args(argv: List[str]) -> argparse.Namespace:
"""Parse CLI arguments.
Parameters
----------
argv: list[str]
List of CLI arguments.
See Also
--------
sys.argv
Python CLI arguments interface.
Returns
-------
argparse.Namespace
Parsed CLI arguments.
"""
# Create parser.
parser = argparse.ArgumentParser('python -m lmp.script.train_tknzr', description='Train tokenizer.')
# Use tokenizer name to create subparser for all tokenizers.
subparsers = parser.add_subparsers(dest='tknzr_name', required=True)
for tknzr_name, tknzr_type in lmp.tknzr.TKNZR_OPTS.items():
tknzr_subparser = subparsers.add_parser(
tknzr_name,
description=f'Training `lmp.tknzr.{tknzr_type.__name__}` tokenizer.',
)
# Required arguments.
group = tknzr_subparser.add_argument_group('tokenizer training arguments')
group.add_argument(
'--dset_name',
choices=lmp.dset.DSET_OPTS.keys(),
help='Name of the dataset which will be used to train tokenizer.',
required=True,
type=str,
)
group.add_argument(
'--exp_name',
help='Name of the tokenizer training experiment.',
required=True,
type=str,
)
group.add_argument(
'--ver',
help='Version of the dataset.',
required=True,
type=str,
)
# Optional arguments.
group.add_argument(
'--seed',
default=42,
help='Random seed. Default is ``42``.',
type=int,
)
# Add tokenizer specific arguments.
tknzr_type.add_CLI_args(parser=tknzr_subparser)
return parser.parse_args(argv)
def main(argv: List[str]) -> None:
"""Script entry point.
Parameters
----------
argv: list[str]
List of CLI arguments.
Returns
-------
None
"""
# Parse CLI arguments.
args = parse_args(argv=argv)
# Save training configuration.
lmp.util.cfg.save(args=args, exp_name=args.exp_name)
# Set random seed for reproducibility.
lmp.util.rand.set_seed(seed=args.seed)
# Get dataset instance with specified version.
dset = lmp.util.dset.load(**args.__dict__)
# Get new tokenizer instance.
tknzr = lmp.util.tknzr.create(**args.__dict__)
# Build tokenizer's vocabulary.
tknzr.build_vocab(batch_txt=dset)
# Save training result.
lmp.util.tknzr.save(exp_name=args.exp_name, tknzr=tknzr)
# Free memory. This is only needed for unit tests.
del args
del dset
del tknzr
gc.collect()
if __name__ == '__main__':
main(argv=sys.argv[1:])
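# A sketch of invoking the script programmatically, mirroring the first shell
# example in the module docstring; the experiment name is an arbitrary
# placeholder and results are written under ``project_root/exp/``.
def _demo_run():
    main([
        'whitespace',
        '--dset_name', 'wiki-text-2',
        '--exp_name', 'my_tknzr_exp',
        '--max_vocab', '10',
        '--min_count', '2',
        '--ver', 'train',
    ])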
| 26.810811 | 119 | 0.68834 | 856 | 5,952 | 4.66472 | 0.28271 | 0.022539 | 0.020035 | 0.032056 | 0.281993 | 0.228149 | 0.20561 | 0.176559 | 0.147258 | 0.13724 | 0 | 0.007749 | 0.197749 | 5,952 | 221 | 120 | 26.932127 | 0.828482 | 0.669019 | 0 | 0.163934 | 0 | 0 | 0.176067 | 0.028993 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.163934 | 0 | 0.213115 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac55fcd58209e1e0911d97a23f245a53b11a02aa | 6,427 | py | Python | Exercise_1b/k-means.py | lukaszbinden/jmcs-pattern-recognition | 8776016b231e9ae961d0b86826d32e9f66dcdeb8 | [
"MIT"
] | null | null | null | Exercise_1b/k-means.py | lukaszbinden/jmcs-pattern-recognition | 8776016b231e9ae961d0b86826d32e9f66dcdeb8 | [
"MIT"
] | null | null | null | Exercise_1b/k-means.py | lukaszbinden/jmcs-pattern-recognition | 8776016b231e9ae961d0b86826d32e9f66dcdeb8 | [
"MIT"
] | 1 | 2018-04-06T12:35:31.000Z | 2018-04-06T12:35:31.000Z | import sys
import csv
from datetime import datetime
import random
import numpy as np
import scipy.spatial
import math
from itertools import combinations
# CONSTS
MAX_ITERATIONS = 15
TYPE_FIXED_NUMBER_OF_ITERATIONS = 99
TYPE_RANDOM_CHOICE = 100
METHOD_C_INDEX = 500
METHOD_DUNN_INDEX = 501
# CONFIGURATION OF PROGRAM
TERMINATION_CRITERIA = TYPE_FIXED_NUMBER_OF_ITERATIONS
ALGORITHM_INITIAL_CLUSTERS = TYPE_RANDOM_CHOICE
def load_data(filename):
with open(filename, 'r') as f:
reader = csv.reader(f)
data = list(reader)
matrix = np.array(data, dtype = int)
# separate labels from samples
samples = matrix[:,1:]
labels = matrix[:,0]
return labels, samples
def print_indent(text, indent, indent_char='\t'):
print('{indent}{text}'.format(indent=indent*indent_char, text=text))
sys.stdout.flush()
def k_means(train_set, k):
    """
    Runs k-means clustering on `train_set`.
    :param train_set: 2D numpy array of samples (one row per observation)
    :param k: number of clusters
    :return: clustering [C_1,...,C_k], each entry holding the indices of the samples in that cluster
    """
    assert(k > 0)
k_cluster_centers = choose_cluster_centers(train_set, k, ALGORITHM_INITIAL_CLUSTERS)
k_clusters = {}
termination_dict = {}
while True:
dist = scipy.spatial.distance.cdist(train_set, k_cluster_centers) # uses euclidean
# for each xi, assign it to nearest center
cluster_ids = np.argmin(dist, axis=1)
for i in range(0, k): # for each cluster
xi_indices = np.where(cluster_ids == i)[0]
cluster_i = train_set[xi_indices]
k_clusters[i] = xi_indices # cluster_i
# recompute cluster center
k_cluster_centers[i] = np.mean(np.array(cluster_i), axis=0)
if terminate(termination_dict, TERMINATION_CRITERIA):
break
assert(len(k_clusters) == k)
result = []
for i in k_clusters:
result.append(k_clusters[i])
return result
def terminate(termination_dict, criteria):
if criteria == TYPE_FIXED_NUMBER_OF_ITERATIONS:
if 'cnt' not in termination_dict:
termination_dict['cnt'] = 0
termination_dict['cnt'] = termination_dict['cnt'] + 1
if termination_dict['cnt'] >= MAX_ITERATIONS:
return True
return False
def validate(train_set, clusters, k, validation_dict, method):
if method == METHOD_C_INDEX:
gamma = 0
alpha = 0
distances = []
pdist_square = get_pdist_square(train_set, validation_dict)
        # iterate over all unordered sample pairs (i, j) with i < j
        for i in range(0, len(train_set) - 1):
            for j in range(i + 1, len(train_set)):
                distances.append(pdist_square[i][j])
if in_same_cluster(clusters, i, j):
gamma = gamma + pdist_square[i][j]
alpha = alpha + 1
distances = np.array(distances)
idx = np.argpartition(distances, alpha)
min_dist = sum(distances[idx[:alpha]])
idx = np.argpartition(distances, -alpha)
max_dist = sum(distances[idx[-alpha:]])
c_index = (gamma - min_dist) / (max_dist - min_dist)
print_indent('C-Index for k={k_val}: {c_val}'.format(k_val=k, c_val=c_index), indent=1)
elif method == METHOD_DUNN_INDEX:
pdist_square = get_pdist_square(train_set, validation_dict)
inter_cluster_distances = []
for pair in combinations(clusters, 2): # all possible pairs of clusters
cluster_i = pair[0]
cluster_j = pair[1]
inter_cluster_distances.append(dunn_cluster_distance(cluster_i, cluster_j, pdist_square))
diameters = []
for cluster in clusters:
diameters.append(dunn_cluster_diameter(pdist_square, cluster))
delta_max = max(diameters)
dunn_index = min(inter_cluster_distances) / delta_max
print_indent('Dunn-Index for k={k_val}: {d_val}'.format(k_val=k, d_val=dunn_index), indent=1)
else:
print("invalid method specified.")
def in_same_cluster(clusters, i, j):
for xi_indices in clusters:
if i in xi_indices and j in xi_indices:
return True
return False
def get_pdist_square(train_set, validation_dict):
if 'pdist_square_key' not in validation_dict:
pdist = scipy.spatial.distance.pdist(train_set)
pdist_square = scipy.spatial.distance.squareform(pdist)
validation_dict['pdist_square_key'] = pdist_square
else:
pdist_square = validation_dict['pdist_square_key']
return pdist_square
def dunn_cluster_distance(cluster1, cluster2, pdist_square):
min_distance = math.inf
for i in cluster1:
for j in cluster2:
dist = pdist_square[i][j]
if dist < min_distance:
min_distance = dist
assert(min_distance != math.inf)
return min_distance
def dunn_cluster_diameter(pdist_square, cluster):
diameter = 0
for pair in combinations(cluster, 2): # all possible pairs x,y e C
dist = pdist_square[pair[0]][pair[1]]
if dist > diameter:
diameter = dist
return diameter
def choose_cluster_centers(train_set, k, algorithm):
    if algorithm == TYPE_RANDOM_CHOICE:
        # random choice of k distinct elements of train_set
        indices = random.sample(range(len(train_set)), k)
        centers = train_set[indices]
    else:
        # fail loudly instead of hitting an unbound `centers` below
        raise ValueError('no such initialization algorithm defined')
    assert(len(centers) == k)
    return centers
def main():
print("exercise_1b -->")
_, train_imgs = load_data("../data/MNIST/train_med.csv")
print("\ttraining set size..: ", len(train_imgs))
start_total = datetime.now()
validation_dict = {}
for k in [5, 7, 9, 10, 12, 15]: # [3]
start_k = datetime.now()
clusters = k_means(train_imgs, k)
end_k = datetime.now()
print_indent('Runtime for k={k_key}: {duration}'.format(k_key=k, duration=end_k-start_k), indent=1)
start_k = datetime.now()
validate(train_imgs, clusters, k, validation_dict, METHOD_C_INDEX)
end_k = datetime.now()
print_indent('Runtime: {duration}'.format(k_key=k, duration=end_k-start_k), indent=1)
start_k = datetime.now()
validate(train_imgs, clusters, k, validation_dict, METHOD_DUNN_INDEX)
end_k = datetime.now()
print_indent('Runtime: {duration}'.format(k_key=k, duration=end_k-start_k), indent=1)
end = datetime.now()
print_indent('Total runtime: {duration}'.format(duration=end-start_total), indent=1)
print("exercise_1b <--")
if __name__ == "__main__":
main()
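# A self-contained sketch of `k_means` on synthetic data; the two Gaussian
# blobs and their parameters are arbitrary.
def _demo_k_means():
    rng = np.random.RandomState(0)
    blob_a = rng.normal(loc=0.0, scale=1.0, size=(50, 2))
    blob_b = rng.normal(loc=10.0, scale=1.0, size=(50, 2))
    data = np.vstack([blob_a, blob_b])
    clusters = k_means(data, k=2)
    for i, idxs in enumerate(clusters):
        print_indent('cluster {}: {} samples'.format(i, len(idxs)), indent=1)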
| 32.135 | 107 | 0.649759 | 864 | 6,427 | 4.579861 | 0.200231 | 0.055598 | 0.018196 | 0.022239 | 0.295426 | 0.192823 | 0.144807 | 0.108163 | 0.108163 | 0.084407 | 0 | 0.012564 | 0.244593 | 6,427 | 199 | 108 | 32.296482 | 0.802472 | 0.047612 | 0 | 0.114094 | 0 | 0 | 0.061536 | 0.004431 | 0 | 0 | 0 | 0 | 0.026846 | 1 | 0.073826 | false | 0 | 0.053691 | 0 | 0.194631 | 0.087248 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac573639f73ed7a2e8b77594d86d36186b61768a | 11,831 | py | Python | solveV2.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | solveV2.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | solveV2.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | """
==================== VERSION 2 ====================
Problems:
- Still not doable to trace every step taken
- The code is ok, but my understanding of propositional logic isn't enough to code the logic without errors
- The logic is hardcoded and not flexible enough
- Still too many workarounds
To take to the next version:
- Improved parser
- New Premise and Token classes
- Adding steps in a procedural form
"""
from tokens import Token
from typing import Union
import debug
from parse import Parser
from ruler import Ruler
import input
import re
import util
from rules_core.negation import negate
from output import NaturalDeductionTree, Step, StepType
# TODO: Add docstrings to all methods!!
CONCLUDE = ":-"
def init():
for statement in input.statements:
debug.log(f"Current statement: '{statement}'")
solver = Solver(statement)
solver.start()
if solver.solved:
debug.log(f"Found solution!\n", debug.SUCCESS)
else:
debug.log(f"Solution not found!\n", debug.ERROR)
solver.nd.close()
class Premise:
def __init__(self, premise: any, raw=None):
self._premise = util.cleanup(premise)
self._parse(raw)
    @staticmethod
    def create(premise: any):
if isinstance(premise, Premise):
return premise
elif isinstance(premise, list):
return Premise.from_raw(premise)
elif isinstance(premise, Token):
return Premise.from_raw(premise.get_raw())
else:
return Premise(premise)
def get(self):
return self._premise
def set(self, premise: str):
self._premise = premise
self._parse()
def duplicate(self, premise: str):
return Premise(premise)
def _parse(self, raw=None):
parser = Parser(self._premise, raw)
self.raw = parser.raw
self.literals = parser.literals
self.tokens = parser.tokens
    @staticmethod
    def from_raw(raw: list):
        """Returns a premise created from a raw (nested token list) state"""
def search(a):
if isinstance(a, list):
for i, b in enumerate(a):
# Prevent weird slicing of string
if not isinstance(b, str):
a[i:i+1] = search(b)
elif isinstance(a, Token):
a = a.get_raw()
elif isinstance(a, Premise):
a = a.raw
return a
raw = search(raw)
string = util.raw_to_str(raw)
return Premise(string, raw)
def __eq__(self, other: any):
if isinstance(other, str):
return self._premise == other
elif isinstance(other, list):
return str(self.raw) == str(other)
elif isinstance(other, Premise):
return str(self.raw) == str(other.raw)
elif isinstance(other, Token):
return str(self) == str(other)
def __ne__(self, other: any):
if isinstance(other, str):
return self._premise != other
elif isinstance(other, list):
return str(self.raw) != str(other)
elif isinstance(other, Premise):
return self._premise != other._premise
def __repr__(self):
return f"{self._premise}"
class Layer:
def __init__(self, proved: list[Premise], assumption: Premise, target: Premise):
self.assumption = assumption
self.target = target
# In the layer itself the assumption is considered proved, but we know that might not be the case
# So we have to keep track of it
self.proved = proved
if assumption:
self.proved.append(assumption)
def __repr__(self):
return f"Assumption = {self.assumption}, proved = {self.proved}"
class Solver:
def __init__(self, statement: str):
self.solved = False
self.nd = NaturalDeductionTree(statement)
self.statement = statement
a = statement.split(CONCLUDE)
if len(a) < 2:
raise Exception("Statement does include a conclusion")
elif len(a) > 2:
raise Exception("Statement includes multiple conclusions")
debug.log(f"Parsing '{statement}'")
self.premises = [Premise(x) for x in a[0].split(",")]
self.conclusion = Premise(a[1])
for premise in self.premises:
self.nd.add(Step(premise, StepType.P))
debug.log(f"Raw representation of conclusion {self.conclusion.raw}")
debug.log(f"With tokens = {self.conclusion.tokens}")
if not self.conclusion.get():
debug.log("No conclusion is found", debug.ERROR)
raise Exception("A conclusion must be provided")
else:
debug.log("Found valid premise(s) and conclusion", debug.SUCCESS)
self.ruler = Ruler()
self.stack: list[Layer] = []
self.level = 0
self.stack.append(Layer(self.premises, [], self.conclusion))
def start(self):
debug.log("Starting solver")
result = self.prove(self.conclusion)
if not result:
self.nd.add(Step("", StepType.CT))
return False
self.solved = result
return result
def prove(self, target: Premise, caller: StepType = None):
if not isinstance(target, Premise):
target = Premise.create(target)
debug.log(f"Trying to prove {target}")
token = target.tokens.get_main_operator()
# If the target to prove has an operator in it we need to somehow prove that the
# introduction rule of that operator is applicable in the current state.
# If not then the target is a contradiction
if token:
result = self.ruler.introduce(token.operator)(self, target.tokens, token)
if result:
# return self.resolve(target)
if target == self.stack[self.level].target:
return self.resolve(target)
else:
return True
return self.reject(target)
else:
debug.log(f"No operator found so target must be a literal")
neg = negate(target)
if self.level == 0 and "!" in target.get():
debug.log("Trying to prove a negation")
self.assume(neg, target)
self.remove_prove(neg)
found = self.prove(target)
if found:
return self.resolve(target)
return self.reject(target)
# Checking if the target is already proved or the target is a contradiction
for premise in self.stack[self.level].proved:
if premise == target:
debug.log(f"{target} is already proved!")
if target == self.stack[self.level].target:
self.resolve(target)
return True
if premise == neg:
# Prove target with rule of called?
if not caller:
self.add_prove(target, True)
debug.log(f"Can't prove {target} because it contradicts with {premise}")
return False
# Get all the premises where the literal is used
valids: list[Premise] = []
for premise in self.stack[-1].proved:
t = target.get().replace("!","")
for literal in premise.literals:
if re.match(r"!?"+t, literal):
l = Premise.create(literal)
valids.append((l, premise))
break
# If there are no valids the premise can't be proved
if len(valids) == 0:
debug.log(f"No valid premises were found trying to prove {target}")
return False
debug.log(f"Valid premises containing literal {target} = {valids}")
for t, premise in valids:
found = self.extract(premise, t)
if found:
# Check if the extraction succeeded, but not yet found the right target
if target in self.stack[self.level].proved:
self.resolve(target)
continue
# Else remove the prove because the target was not found and we need to continue
# Maybe this crashes at some point because a next prove on the same level needs the premise
else:
debug.log(f"(see below) because {target} was not found, but something else did. semi fix? (from 'solve.py:192')", debug.WARNING)
self.remove_prove(premise)
return self.prove(self.stack[self.level].target)
def extract(self, premise: Premise, target: Premise):
debug.log(f"Trying to extract {target} from {premise}")
token = premise.tokens.get_main_operator()
# If there is no operator in the premise the premise is a literal
if not token:
debug.log(f"No operator found so premise must be a literal. You should not use 'Solver.extract' for comparing a literal with a literal", debug.WARNING)
return premise == target
return self.ruler.apply(token.operator)(self, target, premise.tokens, token)
def resolve(self, premise: Premise):
debug.log(f"{premise} is true!")
if self.level == 0:
self.solved = True
return True
self.nd.add(Step("", StepType.CA))
# Make all the assumptions made at the previous layer true or somethign idk
# Maybe only for all introduction rules
previous = self.stack.pop()
self.level -= 1
debug.log(f"Layer popped. New layer at level {self.level}: {self.stack[self.level]}")
return True
def reject(self, premise: Premise):
if not isinstance(premise, Premise):
premise = Premise.from_raw(premise)
self.nd.add(Step("", StepType.CT))
if self.level != 0:
self.nd.add(Step("", StepType.CA))
if self.level == 0:
self.solved = False
return False
previous = self.stack.pop()
debug.log(f"{previous.assumption} is false!")
self.level -= 1
self.stack[self.level].proved.remove(previous.assumption)
neg = negate(previous.assumption)
self.add_prove(neg, False)
if premise in self.stack[self.level].proved:
self.stack[self.level].proved.remove(premise)
self.nd.add(Step(neg, StepType.EN))
debug.log(f"Layer popped. New layer at level {self.level}: {self.stack[self.level]}")
return False
def assume(self, token: Token, target: Token):
premise = Premise.create(token)
pt = Premise.create(target)
self.nd.add(Step("", StepType.OA))
self.nd.add(Step(premise, StepType.A))
self.stack.append(Layer(self.stack[self.level].proved, premise, pt))
self.level += 1
debug.log(f"New layer created at level {self.level}: {self.stack[self.level]}")
return premise
def add_prove(self, p, add_as_assumption):
premise = Premise.create(p)
if add_as_assumption:
self.nd.add(Step(premise, StepType.A))
self.stack[self.level].proved.append(premise)
def remove_prove(self, premise):
if isinstance(premise, Token):
premise = Premise.from_raw(premise)
if premise in self.stack[self.level].proved:
debug.log(f"Removing {premise} from the current layer")
            self.stack[self.level].proved.remove(premise)
| 35.743202 | 163 | 0.577297 | 1,431 | 11,831 | 4.728162 | 0.181691 | 0.034585 | 0.027934 | 0.039905 | 0.234555 | 0.173219 | 0.124298 | 0.086905 | 0.076559 | 0.059415 | 0 | 0.002375 | 0.323726 | 11,831 | 331 | 164 | 35.743202 | 0.84327 | 0.127969 | 0 | 0.25 | 0 | 0.016949 | 0.124526 | 0.013415 | 0 | 0 | 0 | 0.003021 | 0 | 1 | 0.097458 | false | 0 | 0.04661 | 0.016949 | 0.305085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5759f5434d9273247c6b3d21531df27ef3b03f | 9,245 | py | Python | examples/speech_to_text/criterions/ctc_multi_loss.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 2 | 2021-09-14T06:42:08.000Z | 2021-11-09T21:15:18.000Z | examples/speech_to_text/criterions/ctc_multi_loss.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | null | null | null | examples/speech_to_text/criterions/ctc_multi_loss.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 3 | 2021-09-06T10:18:39.000Z | 2021-12-29T10:52:51.000Z | import math
from argparse import Namespace
import torch
import torch.nn.functional as F
from torch import nn
from fairseq import utils, metrics
from fairseq.criterions import register_criterion, LegacyFairseqCriterion, FairseqCriterion
from fairseq.criterions.ctc import CtcCriterion
class FakeEncoderModel(nn.Module):
    """Wraps a precomputed encoder output so the CTC criterion can score it like a standalone model."""
def __init__(self, encoder, net_out, target):
super().__init__()
self.net_out = net_out
self.target = target
if hasattr(encoder, "output_batch_first"):
self.output_batch_first = encoder.output_batch_first
def forward(self, **unused):
return self.net_out
def get_targets(self, *unused):
return self.target
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
encoder_out = net_output["ctc_out"]
if torch.is_tensor(encoder_out):
logits = encoder_out.float()
if log_probs:
probs = F.log_softmax(logits, dim=-1)
else:
probs = F.softmax(logits, dim=-1)
if hasattr(self, "output_batch_first"):
probs.batch_first = self.output_batch_first
return probs
raise NotImplementedError
class FakeDecoderModel(nn.Module):
    """Wraps a precomputed decoder output so the underlying criterion can score it like a standalone model."""
def __init__(self, model, net_out, target):
super().__init__()
self.model = model
self.net_out = net_out
self.target = target
def forward(self, **unused):
return self.net_out
def get_normalized_probs(self, net_output, log_probs, sample=None):
return self.model.get_normalized_probs(net_output, log_probs, sample=sample)
def get_targets(self, *unused):
return self.target
@property
def decoder(self):
return self.model.decoder
class BaseCTCLoss(CtcCriterion):
def __init__(self, args, task):
super(FairseqCriterion, self).__init__(task)
self.args = args
self.blank_idx = task.source_dictionary.index("<ctc_blank>")
self.pad_idx = task.source_dictionary.pad()
self.eos_idx = task.source_dictionary.eos()
self.post_process = self.args.ctc_post_process
if self.args.wer_args is not None:
(
self.args.wer_kenlm_model,
self.args.wer_lexicon,
self.args.wer_lm_weight,
self.args.wer_word_score,
) = eval(self.args.wer_args)
if self.args.wer_kenlm_model is not None:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = "ctc"
dec_args.kenlm_model = self.args.wer_kenlm_model
dec_args.lexicon = self.args.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = self.args.wer_lm_weight
dec_args.word_score = self.args.wer_word_score
dec_args.unk_weight = -math.inf
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = self.args.zero_infinity
self.sentence_avg = self.args.sentence_avg
@register_criterion("ctc_multi_loss")
class CTCMultiLoss(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
assert task.source_dictionary is not None
self.ctc_criterion = BaseCTCLoss(args, task)
self.real_criterion = CTCMultiLoss.build_real_criterion(args, task)
self.ctc_weight = args.ctc_weight
@staticmethod
def build_real_criterion(args, task):
saved_criterion = args.criterion
args.criterion = args.underlying_criterion
assert saved_criterion != args.underlying_criterion
underlying_criterion = task.build_criterion(args)
args.criterion = saved_criterion
return underlying_criterion
@staticmethod
def add_args(parser):
parser.add_argument('--ctc-encoder-layer', default=6, type=int, metavar='LAYER_NUM',
help='The encoder layer whose feature are used to compute the CTC loss')
parser.add_argument('--ctc-weight', default=1.0, type=float, metavar='W',
help='The relative weight to assign to the CTC loss')
parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True,
help='underlying criterion to use for the model output loss')
parser.add_argument('--zero-infinity', default=True, type=bool, metavar='ZERO_INF',
help='zero inf loss when source length <= target length')
parser.add_argument('--ctc-post-process', default='letter', metavar='POST_PROC',
help='how to post process predictions into words. can be letter, wordpiece, BPE symbols, etc. \
See fairseq.data.data_utils.post_process() for full list of options')
parser.add_argument('--wer-kenlm-model', default=None, metavar='WER_KENLM',
help='if this is provided, use kenlm to compute wer (along with other wer_* args)')
parser.add_argument('--wer-lexicon', default=None, metavar='WER_LEX',
help='lexicon to use with wer_kenlm_model')
parser.add_argument('--wer-lm-weight', default=2.0, metavar='WER_LM_W',
help='lm weight to use with wer_kenlm_model')
parser.add_argument('--wer-word-score', default=1.0, metavar='WER_WORD_SCORE',
help='lm word score to use with wer_kenlm_model')
parser.add_argument('--wer-args', default=None, metavar='WER_ARGS',
help='DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)')
def forward(self, model, sample, reduce=True):
decoder_out, encoder_out = model(**sample["net_input"])
encoder_fake_model = FakeEncoderModel(model.encoder, encoder_out, sample["transcript"])
decoder_fake_model = FakeDecoderModel(model, decoder_out, sample["target"])
encoder_sample = {
"net_input": {
"src_lengths": encoder_out["ctc_lengths"]
},
"target": sample["transcript"],
"target_lengths": sample["transcript_lengths"]-1,
"ntokens": sum(sample["transcript_lengths"]).item(),
"id": sample["id"]
}
ctc_loss, ctc_sample_size, ctc_logging_output = self.ctc_criterion(
encoder_fake_model, encoder_sample, reduce=reduce)
real_loss, _, real_logging_output = self.real_criterion(
decoder_fake_model, sample, reduce=reduce)
loss = self.ctc_weight * ctc_loss + real_loss
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"real_loss": real_logging_output['loss'],
"ctc_loss": ctc_logging_output['loss'],
"ntokens": real_logging_output['ntokens'],
"nsentences": real_logging_output['nsentences'],
"sample_size": real_logging_output['sample_size'],
}
if 'nll_loss' in real_logging_output:
logging_output['nll_loss'] = real_logging_output['nll_loss']
return loss, ctc_sample_size, logging_output
@staticmethod
def logging_outputs_can_be_summed():
return True
@staticmethod
def reduce_metrics(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
real_loss_sum = utils.item(sum(log.get('real_loss', 0) for log in logging_outputs))
ctc_loss_sum = utils.item(sum(log.get('ctc_loss', 0) for log in logging_outputs))
if logging_outputs and 'nll_loss' in logging_outputs[0]:
nll_loss_sum = utils.item(sum(log.get('nll_loss', 0) for log in logging_outputs))
else:
nll_loss_sum = loss_sum - ctc_loss_sum # NLL computed on the real loss, not on the auxiliary CTC
ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('nll_loss', nll_loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
metrics.log_scalar('real_loss', real_loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('ctc_loss', ctc_loss_sum / sample_size / math.log(2), sample_size, round=3)
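# An illustrative (untested) training invocation with this criterion; only
# flags defined in `add_args` above are shown and the values are arbitrary:
#
#   fairseq-train <data-dir> \
#       --criterion ctc_multi_loss \
#       --underlying-criterion label_smoothed_cross_entropy \
#       --ctc-weight 0.5 \
#       --ctc-encoder-layer 6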
| 46.225 | 124 | 0.634613 | 1,148 | 9,245 | 4.842334 | 0.1777 | 0.024465 | 0.021767 | 0.01619 | 0.269833 | 0.203454 | 0.157762 | 0.120345 | 0.083288 | 0.083288 | 0 | 0.005602 | 0.266306 | 9,245 | 199 | 125 | 46.457286 | 0.813947 | 0.019037 | 0 | 0.149701 | 0 | 0 | 0.127328 | 0.002483 | 0 | 0 | 0 | 0 | 0.011976 | 1 | 0.095808 | false | 0 | 0.053892 | 0.041916 | 0.233533 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5772fac5251b3c2b75808345f2d4fe1f9819c3 | 2,758 | py | Python | src/elchempy/experiments/_moved_dataloaders/files_func_collector.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | 3 | 2022-01-04T09:06:15.000Z | 2022-03-05T08:24:01.000Z | src/elchempy/experiments/_moved_dataloaders/files_func_collector.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | null | null | null | src/elchempy/experiments/_moved_dataloaders/files_func_collector.py | MyPyDavid/ECpy | b74842b64eca86d2181067fdb22bfa8fa4b2c8bb | [
"MIT"
] | 1 | 2022-03-05T12:17:49.000Z | 2022-03-05T12:17:49.000Z | """ collects all function calls on list of files with options for multi or single core processing"""
import os
from pathlib import Path
from typing import List, Collection, Dict
# from functools import partial
from itertools import repeat
import concurrent.futures
from multiprocessing import Pool
import logging
logger = logging.getLogger(__name__)
# Local imports
# from elchempy.indexer.filename_parser import FilePathParser
# from elchempy.indexer.EC_filepath_parser import ElchemPathParser
# 3rd party
import pandas as pd
#%%
def wrapper_func(*args):
# print(f'args: {arg}\nkwargs: {kwargs}')
# **kwargs
func, file, kwargs = args
try:
result = func(file, **kwargs)
return result
except Exception as exc:
logger.info(f"Error in multiprocess wrapper {exc}")
# result = None
def run_func_on_files(func, files, multi_run=False, **kwargs) -> Dict:
collection = []
if multi_run:
collection = make_collection_multi(func, files, **kwargs)
else:
collection = make_collection_serial(func, files, **kwargs)
# breakpoint()
collect_dict = {str(i): i for i in collection}
if not collect_dict:
logger.warning(f"Collection len={len(collection)}, collect_dict is empty.")
    return collect_dict
def make_collection_multi(func: callable, files: Collection, **kwargs) -> List:
collection = []
    # leave two cores free, but never request fewer than one worker
    with Pool(max(1, os.cpu_count() - 2)) as pool:
try:
# results = pool.map(EC_classifier_multi_core.EC_PAR_file_check, self.par_files_run)
collection = pool.starmap(
wrapper_func, zip(repeat(func), files, repeat(kwargs))
)
except Exception as ex:
# print('FileHelper module not found:',e)
logger.error(f"make_collection_multi multiprocessing error: {ex}")
raise ex from ex
# results = pool.map(PAR_file_parser, self.par_files_run)
return collection
def make_collection_serial(func: callable, files: Collection, **kwargs) -> List:
ecpp_collection = []
for file in files:
try:
logger.debug(f"{__name__} calling {func} on\n{file}.")
ecpp = func(file, **kwargs)
ecpp_collection.append(ecpp)
except Exception as ex:
_err = {"PAR_file": file, "error": ex, "kwargs": kwargs}
logger.warning(
f"{__name__} make_collection unexpected error for calling {func} on\n{file}.\n{ex}"
)
raise ex from ex
return ecpp_collection
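# A minimal sketch of `run_func_on_files` with a trivial callable; real use
# passes parser constructors such as ElchemPathParser (see the commented
# imports above). Note multi_run=True requires a picklable, module-level
# callable.
def _demo_run_func():
    def file_size(path, **kwargs):
        return Path(path).stat().st_size
    files = list(Path('.').glob('*.py'))
    return run_func_on_files(file_size, files, multi_run=False)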
| 29.031579 | 115 | 0.638869 | 339 | 2,758 | 5.029499 | 0.339233 | 0.049267 | 0.039883 | 0.030499 | 0.102053 | 0.063343 | 0 | 0 | 0 | 0 | 0 | 0.001484 | 0.26686 | 2,758 | 94 | 116 | 29.340426 | 0.841741 | 0.217549 | 0 | 0.232143 | 0 | 0.017857 | 0.129093 | 0.020112 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac57f60d91ce8019034203ea74fe206ac16c3c3e | 9,435 | py | Python | preprocessing.py | IEtoI/autoProt | 4b7d606332a7379ff128e3d30d0611b4c47f9e64 | [
"MIT"
] | null | null | null | preprocessing.py | IEtoI/autoProt | 4b7d606332a7379ff128e3d30d0611b4c47f9e64 | [
"MIT"
] | null | null | null | preprocessing.py | IEtoI/autoProt | 4b7d606332a7379ff128e3d30d0611b4c47f9e64 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 09:26:07 2019
@author: Wignand
DataProcessing
:function cleaning: for first processing of dataframe ratio cols
"""
import numpy as np
import pandas as pd
from importlib import resources
import re
from autoprot.decorators import report
def read_csv(file, sep='\t'):
return pd.read_csv(file, sep=sep)
def to_csv(df, file, sep='\t', index=False):
df.to_csv(file, sep=sep, index=index)
@report
def cleaning(df, file="proteinGroups"):
"""
removes contaminant, reverse and identified by site only entries
@file:: which file is provided:
proteinGroups; Phospho (STY); evidence;
modificationSpecificPeptides
"""
columns = df.columns
if file == "proteinGroups":
if ("Potential contaminant" not in columns) or\
("Reverse" not in columns) or\
("Only identified by site" not in columns):
print("Is this data already cleaned?\nMandatory columns for cleaning not present in data!")
print("Returning provided dataframe!")
return df
df = df[(df['Potential contaminant'].isnull()) &
(df['Reverse'].isnull()) &
(df['Only identified by site'].isnull())]
df.drop(['Potential contaminant',"Reverse", 'Only identified by site'], axis=1, inplace=True)
elif (file == "Phospho (STY)") or (file == "evidence") or (file == "modificationSpecificPeptides"):
if ("Potential contaminant" not in columns) or\
("Reverse" not in columns):
print("Is this data already cleaned?\nMandatory columns for cleaning not present in data!")
print("Returning provided dataframe!")
return df
df = df[(df['Potential contaminant'].isnull()) &
(df['Reverse'].isnull())]
df.drop(['Potential contaminant',"Reverse"], axis=1, inplace=True)
return df
def log(df, cols, base=2, invert=None):
"""
performs log transformation. Returns dataframe with additional log columns
@params
::cols: cols which are transformed
::base: base of log, default=2, alternative: 10
::invert: vector corresponding to columns telling which to invert
"""
if base == 2:
for c in cols:
df[f"log2_{c}"] = np.log2(df[c])
elif base==10:
for c in cols:
df[f"log10_{c}"] = np.log10(df[c])
else:
print("This base is not implemented!")
if invert is not None:
lcols = df.filter(regex="^log").columns
df[lcols] = df[lcols] * invert
return df
def locProts(df, thresh=.75):
    """
    removes entries with localization probability below threshold
    @params
    @df :: dataframe to be filtered
    @thresh :: threshold of localization probability
    """
    if "Localization prob" not in df.columns:
        # warn and return the dataframe unchanged rather than a bare boolean
        print("This dataframe has no 'Localization prob' column!")
        return df
print(f"{df.shape[0]} entries in dataframe.")
df = df[df["Localization prob"]>=thresh]
print(f"{df.shape[0]} entries in dataframe with localization prob >= {thresh*100}%.")
return df
@report
def removeNonQuant(df, cols):
"""
removes entries without quantitative data
@params
@df :: dataframe to be filtered
@cols :: cols to be evaluated for missingness
"""
df = df[~(df[cols].isnull().all(1))]
return df
def expandSiteTable(df, cols):
"""
function that expands the phosphosite table Sites -> peptides
x, a__1, a__2, a__3
->
x, a, 1
x, a, 2
x, a, 3
@params
@df :: dataframe to be expanded (important that an "id" column is provided)
@cols :: cols which are going to be expanded (format: Ratio.*___.)
"""
print(f"{df.shape[0]} phosphosites in dataframe.")
dfs = []
expected = df.shape[0]*3
#columns to melt
melt = cols
melt_set = list(set([i[:-4] for i in melt]))
#Due to MaxQuant column names we might have to drop some columns
check = [i in df.columns for i in melt_set]
if False not in check:
df.drop(melt_set, axis=1, inplace=True)
    if True in check and False in check:
        print("Some of the provided column prefixes are missing from the dataframe!")
        raise ValueError("The columns you provided are not suitable!")
for i in melt_set:
cs = list(df.filter(regex=i+'___').columns )+ ["id"]
dfs.append(pd.melt(df[cs], id_vars='id'))
temp = df.copy(deep=True)
temp = temp.drop(melt, axis=1)
for idx,df in enumerate(dfs):
x = df["variable"].iloc[0].split('___')[0]
if idx==0:
t = df.copy(deep=True)
t.columns = ["id", "Multiplicity", x]
t["Multiplicity"] = t["Multiplicity"].apply(lambda x: x.split('___')[1])
else:
df.columns = ["id", "Multiplicity", x]
df = df.drop(["id", "Multiplicity"], axis=1)
t = t.join(df,rsuffix=idx)
temp = temp.merge(t,on='id', how='left')
if temp.shape[0] != expected:
print("The expansion of site table is probably not correct!!! Check it! Maybe you provided wrong columns?")
temp = temp[~(temp[melt_set].isnull().all(1))]
print(f"{temp.shape[0]} phosphopeptides in dataframe after expansion.")
return temp
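# A toy sketch of the Sites -> peptides expansion above; the column names
# mimic the MaxQuant ``...___1/2/3`` multiplicity scheme but the values are
# made up.
def _demo_expandSiteTable():
    toy = pd.DataFrame({
        "id": [0, 1],
        "Ratio A___1": [1.0, 2.0],
        "Ratio A___2": [1.5, np.nan],
        "Ratio A___3": [np.nan, np.nan],
    })
    cols = ["Ratio A___1", "Ratio A___2", "Ratio A___3"]
    return expandSiteTable(toy, cols)  # only rows with values survive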
@report
def filterVv(df, groups,n=2, vv=True):
"""
....function that filters dataframe for minimum number of valid values
....@params
df :: dataframe to be filtered - copy is returned
groups :: the experimental groups. Each group is filtered for at least n vv
n :: minimum amount of valid values
vv :: True for minimum amount of valid values; False for maximum amount of missing values
...."""
if vv == True:
idxs = [set(df[(len(group)-df[group].isnull().sum(1)) >= n].index) for\
group in groups]
else:
idxs = [set(df[df[group].isnull().sum(1) <= n].index) for\
group in groups]
#take intersection of idxs
idx = set.intersection(*idxs)
df = df.loc[idx]
return df
def GoAnnot(prots, gos, onlyProts=False):
"""
    function that finds kinases based on GO annotation in a
list of gene names. If there are multiple gene names separated by semicolons
only the first entry will be used.
:@Prots: List of Gene names
:@go: List of go terms
Notes:
Homo sapiens.gene_info and gene2go files
are needed for annotation
In case of multiple gene names per line (e.g. AKT1;PKB)
only the first name will be extracted.
"""
with resources.open_text("autoprot.data","Homo_sapiens.gene_info") as d:
geneInfo = pd.read_csv(d, sep='\t')
with resources.open_text("autoprot.data","gene2go_alt") as d:
gene2go = pd.read_csv(d, sep='\t')
prots = pd.DataFrame(pd.Series([str(i).upper().split(';')[0] for i in prots]), columns=["Gene names"])
prots = prots.merge(geneInfo[["Symbol", "GeneID"]], left_on="Gene names", right_on="Symbol", how='inner')
prots = prots.merge(gene2go[["GeneID", "GO_ID", "GO_term"]], on="GeneID", how='inner')
if onlyProts == True:
for idx, go in enumerate(gos):
if idx == 0:
redProts = prots["Symbol"][prots["GO_term"].str.contains(go)]
else:
redProts = redProts.append(prots["Symbol"][prots["GO_term"].str.contains(go)])
return redProts.drop_duplicates()
else:
for idx, go in enumerate(gos):
if idx == 0:
redProts = prots[prots["GO_term"]==go]
else:
redProts = redProts.append(prots[prots["GO_term"]==go])
return redProts.drop_duplicates()
def motifAnnot(df, motif, col=None):
"""
Function that searches for phosphorylation motif in the provided dataframe.
If not specified "Sequence window" column is searched. Phosphorylated central residue
has to indicated with S/T, arbitrary amino acids with x.
Examples:
- RxRxxS/T
- PxS/TP
- RxRxxS/TxSxxR
:@df: dataframe
:@motif: str; motif to be searched for
:@col: str; alternative column to be searched in if Sequence window is not desired
"""
#make some assertions that the column is indeed the proper MQ output
    #(might want to customize the possibilities later)
def findMotif(x,col, motif, motlen):
seq = x[col]
if ";" in seq:
seqs = seq.split(';')
else: seqs = [seq]
for seq in seqs:
pos = 0
pos2 = re.finditer(motif,seq)
if pos2:
for p in pos2:
pos = p.end()
if pos == np.floor(motlen/2+1):
return 1
return 0
if col is None:
col = "Sequence window"
assert(col in df.columns)
assert(len(df[col].iloc[0]) % 2 == 1)
search = motif.replace('x', '.').replace('S/T', '(S|T)').upper()
i = search.index("(S|T)")
before = search[:i]
after = search[i+5:]
search = f"(?<={before})(S|T)(?={after})"
motlen = len(df[col].iloc[0])
df[motif] = df.apply(findMotif, col=col, motif=search, motlen=motlen, axis=1)
return df
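# A toy sketch of `motifAnnot`; the 15-residue sequence windows are made up,
# with the phosphosite at the centre as MaxQuant reports it.
def _demo_motifAnnot():
    toy = pd.DataFrame({"Sequence window": [
        "AARARAASAAAAAAA",  # central S preceded by RxRxx -> motif hit (1)
        "AAAAAAASAAAAAAA",  # no upstream arginines -> no hit (0)
    ]})
    return motifAnnot(toy, "RxRxxS/T")  # adds a 0/1 column named "RxRxxS/T"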
| 34.944444 | 116 | 0.585056 | 1,240 | 9,435 | 4.412903 | 0.262097 | 0.009503 | 0.006579 | 0.013889 | 0.23538 | 0.195358 | 0.133955 | 0.133955 | 0.11038 | 0.11038 | 0 | 0.011905 | 0.287758 | 9,435 | 269 | 117 | 35.074349 | 0.802381 | 0.263911 | 0 | 0.238411 | 0 | 0 | 0.215226 | 0.012375 | 0 | 0 | 0 | 0 | 0.013245 | 1 | 0.072848 | false | 0 | 0.033113 | 0.006623 | 0.205298 | 0.07947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac580c61ae831f3262439ecc753bf0847ba056f2 | 9,151 | py | Python | utils/aws_cognito_ftns.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | [
"MIT"
] | 1 | 2020-11-25T06:56:43.000Z | 2020-11-25T06:56:43.000Z | utils/aws_cognito_ftns.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | [
"MIT"
] | null | null | null | utils/aws_cognito_ftns.py | arup-group/london-pop-synth | 38e56230d440d49ddb2e2841d46a5cbaab260c35 | [
"MIT"
] | null | null | null | import sys
import boto3
import re
from uuid import UUID
import pandas as pd
from datetime import date, timedelta
from tabulate import tabulate
def is_email_address(string):
return re.match(r"[^@]+@[^@]+\.[^@]+", string)
def is_uuid(uuid_to_test, version=4):
try:
uuid_obj = UUID(uuid_to_test, version=version)
except ValueError:
return False
return str(uuid_obj) == uuid_to_test
def find_filter_method(string):
if is_email_address(string):
return 'email'
elif is_uuid(string, version=4):
return 'uuid'
def get_cognito_id(user_cognito_data):
for record in user_cognito_data['Attributes']:
if record['Name'] == "sub":
return record['Value']
def get_cognito_user(user_list, requested_user):
"""
:param user_list: result of get_cognito_user_list
:param by: 'email' or 'uuid'
:return:
"""
user_list_dict = build_cognito_user_dict(user_list, by=find_filter_method(requested_user))
try:
user_data = user_list_dict[requested_user]
return user_data
except KeyError:
print("User not found. Exiting")
sys.exit(1)
def get_cognito_users_dataframe(user_list, requested_users_list):
_df = None
for user in requested_users_list:
user_cognito_data = get_cognito_user(user_list, user)
if _df is None:
_df = user_data_to_dataframe(user_cognito_data)
else:
_df = _df.append(user_data_to_dataframe(user_cognito_data))
return _df
def build_cognito_user_dict(user_list, by):
"""
:param user_list: result of get_cognito_user_list
:param by: 'email' or 'uuid'
:return:
"""
if by == 'email':
user_list_dict = {}
for user in user_list:
user_list_dict[user['Username']] = user
return user_list_dict
elif by == 'uuid':
user_list_dict = {}
for user in user_list:
for attribute in user['Attributes']:
if attribute['Name'] == 'sub':
user_list_dict[attribute['Value']] = user
break
return user_list_dict
else:
raise NotImplementedError
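# A self-contained sketch (no AWS calls) of the lookup helpers above; the
# record mimics the shape returned by cognito-idp list_users.
def _demo_user_dict():
    fake_users = [{
        "Username": "ada@example.com",
        "Attributes": [{"Name": "sub",
                        "Value": "12345678-1234-4123-8123-123456789abc"}],
    }]
    by_email = build_cognito_user_dict(fake_users, by="email")
    by_uuid = build_cognito_user_dict(fake_users, by="uuid")
    assert get_cognito_id(fake_users[0]) in by_uuid
    return by_email, by_uuid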
def get_cognito_user_list(region_name,pool_name):
client = boto3.client('cognito-idp',region_name=region_name)
pool = get_pool_id(region_name,pool_name)
if not pool:
print("No participant User Pool found. Speak to one of the Rorys")
print("Exiting!")
sys.exit(1)
response = client.list_users(UserPoolId=pool)
user_list = response.get("Users")
page_token = response.get("PaginationToken")
while page_token:
response = client.list_users(
UserPoolId=pool,
PaginationToken=page_token
)
user_list.extend(response.get("Users"))
page_token = response.get("PaginationToken")
return user_list
def get_pool_id(region_name,pool_name):
client = boto3.client('cognito-idp',region_name=region_name)
cognito_details = client.list_user_pools(MaxResults=60)
for user_pool in cognito_details['UserPools']:
if user_pool['Name'] == pool_name:
user_pool_id = user_pool['Id']
return user_pool_id
def get_office_user_list(region_name,pool_name):
user_list = get_cognito_user_list(region_name,pool_name)
office_user_list = {}
for user in user_list:
for att in user['Attributes']:
if att['Name'] == "sub":
cog_id = att['Value']
for att in user['Attributes']:
if att['Name'] == "custom:arup_office":
if att['Value'] not in office_user_list:
office_user_list[att['Value']] = []
office_user_list[att['Value']].append(cog_id)
    # dict views are not indexable in Python 3, so iterate over items directly
    output = []
    for office, members in office_user_list.items():
        output.append({"office": office, "members": members})
    return output
def get_study_stats(region_name, user_stats, pool_name):
    user_list = get_cognito_user_list(region_name, pool_name)
    office_user_list = get_office_user_list(region_name, pool_name)
    user_count = len(user_list)
    cog_ids = []
    users_data = []
    offices = []
    planners = 0
    for user in user_list:
        cog_id = user['Attributes'][0]['Value']
        user_data = {"user": user['Username'], "signup": user['UserCreateDate']}
        for att in user['Attributes']:
            if att['Name'] == "custom:arup_office":
                offices.append({"office": att['Value']})
                user_data["office"] = att['Value']
            if att['Name'] == "custom:is_transport_planner" and att['Value'] == "true":
                planners = planners + 1
        users_data.append(user_data)
    global_new_user_count = 0
    # Attach each user's stats record to the office that contains them.
    for user in user_stats:
        for office in office_user_list:
            if user['user'] in office['members']:
                if "data" not in office:
                    office['data'] = []
                office['data'].append(user)
    for office in office_user_list:
        if "data" in office:
            record_count = 0
            trip_count = 0
            for record in office['data']:
                trip_count = trip_count + record['trip_count']
                record_count = record_count + record['total_records']
            office.pop('data')
        else:
            trip_count = 0
            record_count = 0
        office['trip_count'] = trip_count
        office['record_count'] = record_count
        yesterday = (date.today() - timedelta(1)).timetuple()
        if "new_users_24hr" not in office:
            office['new_users_24hr'] = 0
        for user in user_list:
            creation_date = user['UserCreateDate'].timetuple()
            if creation_date > yesterday:
                for record in user['Attributes']:
                    if record['Name'] == "sub":
                        cog_id = record['Value']
                if cog_id in office['members']:
                    office['new_users_24hr'] = office['new_users_24hr'] + 1
                    global_new_user_count = global_new_user_count + 1
    for office in office_user_list:
        office['User'] = len(office["members"])
        office.pop("members")
    for office in office_user_list:
        if office['office'] == "-1":
            office['office'] = "Unknown office (intrigue)"
    top_office = sorted(office_user_list, key=lambda k: k['new_users_24hr'], reverse=True)
    growth = int(float(global_new_user_count) / len(user_list) * 100.0)
    # The original called .format() on print()'s return value (None), which
    # raises AttributeError; format the string itself instead.
    print("{} new users since yesterday".format(global_new_user_count))
    summary_stats_df = pd.DataFrame(office_user_list)
    summary_stats_df['New users'] = summary_stats_df['new_users_24hr']
    summary_stats_df['Points'] = summary_stats_df['record_count']
    summary_stats_df['Trips'] = summary_stats_df['trip_count']
    output = summary_stats_df.drop(columns=["new_users_24hr", "record_count", "trip_count"])
    output = output[['office', "Trips", "New users"]]
    output = output.sort_values("Trips", ascending=False)
    overall_stats = "```" + tabulate(output, tablefmt="simple", headers="keys", showindex=False) + "```"
    return user_count, global_new_user_count, growth, top_office, overall_stats
def find_new_users_since_yesterday(user_list):
    yesterday = (date.today() - timedelta(1)).timetuple()
    new_user_count = 0
    offices = []
    for user in user_list:
        creation_date = user['UserCreateDate'].timetuple()
        if creation_date > yesterday:
            new_user_count = new_user_count + 1
            for att in user['Attributes']:
                if att['Name'] == "custom:arup_office":
                    offices.append(att['Value'])
    return new_user_count, offices
def find_percentage_of_verified_users(region_name, pool_name):
    # this is a dummy method to test the integration with AWS Cognito
    # email_verified is an attribute that should exist in all user pools
    user_list = get_cognito_user_list(region_name, pool_name)
    user_count = len(user_list)
    verified_user_count = 0
    for user in user_list:
        for att in user['Attributes']:
            if att['Name'] == "email_verified":
                if att['Value'] == "true":
                    verified_user_count += 1
    # The original inverted the ratio (user_count / verified_user_count);
    # the share of verified users is verified over total.
    verified_user_percentage = (verified_user_count / user_count) * 100
    return user_count, verified_user_percentage
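# Worked example of the corrected ratio: 40 verified users out of 50 gives
# (40 / 50) * 100 == 80.0, whereas the inverted form would have reported
# 125.0 (and divides by zero when nobody is verified).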
def user_data_to_dataframe(user_cognito_data):
    flat_user_cognito_data = {}
    for key, value in user_cognito_data.items():
        if isinstance(value, list):
            for attribute in value:
                flat_user_cognito_data[attribute['Name']] = attribute['Value']
        else:
            flat_user_cognito_data[key] = value
    return pd.DataFrame(flat_user_cognito_data, index=[0]) | 29.424437 | 103 | 0.615124 | 1,146 | 9,151 | 4.60733 | 0.153578 | 0.080303 | 0.039773 | 0.030682 | 0.38447 | 0.327652 | 0.273674 | 0.214583 | 0.180871 | 0.174053 | 0 | 0.007575 | 0.278658 | 9,151 | 311 | 104 | 29.424437 | 0.792304 | 0.033548 | 0 | 0.267327 | 0 | 0 | 0.111743 | 0.003069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069307 | false | 0 | 0.034653 | 0.00495 | 0.188119 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac58b9f60073df0cc75a535978a98a62f2ac5f20 | 8,715 | py | Python | testproject/model_meta/tests/tests.py | samuelblattner/django-cassandra-engine | e2d0c1edc884d5ebd24aeacf156501b02033ec6f | [
"BSD-2-Clause"
] | 1 | 2019-10-08T13:55:36.000Z | 2019-10-08T13:55:36.000Z | testproject/model_meta/tests/tests.py | hsamfm/django-cassandra-engine | f3ad96a00c8d91be9703ee4e4b1b45d4f93cb012 | [
"BSD-2-Clause"
] | null | null | null | testproject/model_meta/tests/tests.py | hsamfm/django-cassandra-engine | f3ad96a00c8d91be9703ee4e4b1b45d4f93cb012 | [
"BSD-2-Clause"
] | 2 | 2019-10-23T15:37:48.000Z | 2020-11-10T14:55:15.000Z | from unittest import skipIf
import django
from django.apps import apps
from django.contrib.contenttypes.fields import GenericForeignKey
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import Field
from django.db.models.options import IMMUTABLE_WARNING
from django.test import SimpleTestCase
from cassandra.cqlengine import columns as cassandra_columns
from model_meta.models import CassandraThing
from model_meta.results import TEST_RESULTS
class OptionsBaseTests(SimpleTestCase):

    def _map_related_query_names(self, res):
        return tuple((o.name, m) for o, m in res)

    def _map_names(self, res):
        return tuple((f.name, m) for f, m in res)

    def _model(self, current_model, field):
        model = field.model._meta.concrete_model
        return None if model == current_model else model

    def _details(self, current_model, relation):
        direct = isinstance(relation, (Field, GenericForeignKey))
        model = relation.model._meta.concrete_model
        if model == current_model:
            model = None
        field = relation if direct else relation.field
        return relation, model, direct, bool(field.many_to_many)  # many_to_many can be None
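# As an assumed (not fixture-derived) example: for a plain column declared on
# CassandraThing itself, _details() would return roughly
#
#   (field, None, True, False)   # relation, model, direct, many-to-many
#
# since the concrete model matches, the relation is direct, and a simple
# column is not many-to-many.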
class GetFieldsTests(OptionsBaseTests):

    def test_get_fields_is_immutable(self):
        msg = IMMUTABLE_WARNING % "get_fields()"
        for _ in range(2):
            # Run the check twice to ensure both the non-cached and cached
            # results are immutable.
            fields = CassandraThing._meta.get_fields()
            with self.assertRaisesMessage(AttributeError, msg):
                fields += ["errors"]
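# Sketch of the behaviour under test: get_fields() hands back an immutable
# sequence, so in-place extension fails loudly:
#
#   fields = CassandraThing._meta.get_fields()
#   fields += ["errors"]   # raises AttributeError(IMMUTABLE_WARNING % ...)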
class LabelTests(OptionsBaseTests):

    def test_label(self):
        for model, expected_result in TEST_RESULTS['labels'].items():
            self.assertEqual(model._meta.label, expected_result)

    def test_label_lower(self):
        for model, expected_result in TEST_RESULTS['lower_labels'].items():
            self.assertEqual(model._meta.label_lower, expected_result)
class DataTests(OptionsBaseTests):

    def test_fields(self):
        for model, expected_result in TEST_RESULTS['fields'].items():
            fields = model._meta.fields
            self.assertEqual([f.attname for f in fields], expected_result)

    def test_local_fields(self):
        def is_data_field(f):
            return isinstance(f, Field) and not f.many_to_many

        for model, expected_result in TEST_RESULTS['local_fields'].items():
            fields = model._meta.local_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertEqual(f.model, model)
                self.assertTrue(is_data_field(f))

    def test_local_concrete_fields(self):
        for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
            fields = model._meta.local_concrete_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertIsNotNone(f.column)
class M2MTests(OptionsBaseTests):

    def test_many_to_many(self):
        for model, expected_result in TEST_RESULTS['many_to_many'].items():
            fields = model._meta.many_to_many
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertTrue(f.many_to_many and f.is_relation)

    def test_many_to_many_with_model(self):
        for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
            models = [self._model(model, field) for field in model._meta.many_to_many]
            self.assertEqual(models, expected_result)
class RelatedObjectsTests(OptionsBaseTests):

    def key_name(self, r):
        return r[0]

    def test_related_objects(self):
        result_key = 'get_all_related_objects_with_model'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields()
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(
                sorted(self._map_related_query_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name),
            )

    def test_related_objects_local(self):
        result_key = 'get_all_related_objects_with_model_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_parents=False)
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(
                sorted(self._map_related_query_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name),
            )

    def test_related_objects_include_hidden(self):
        result_key = 'get_all_related_objects_with_model_hidden'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_hidden=True)
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )

    def test_related_objects_include_hidden_local_only(self):
        result_key = 'get_all_related_objects_with_model_hidden_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_hidden=True,
                                                    include_parents=False)
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
@skipIf(django.VERSION[1] < 10, "For Django>1.10 only")
class PrivateFieldsTests(OptionsBaseTests):

    def test_private_fields(self):
        for model, expected_names in TEST_RESULTS['private_fields'].items():
            objects = model._meta.private_fields
            self.assertEqual(sorted([f.name for f in objects]),
                             sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):

    def test_get_data_field(self):
        field_info = self._details(
            CassandraThing, CassandraThing._meta.get_field('data_abstract'))
        self.assertEqual(field_info[1:], (None, False, False))
        self.assertIsInstance(field_info[0], cassandra_columns.Text)

    def test_get_fields_only_searches_forward_on_apps_not_ready(self):
        opts = CassandraThing._meta
        # If the apps registry is not ready, get_field() searches over only
        # forward fields.
        opts.apps.models_ready = False
        try:
            # 'data_abstract' is a forward field, and therefore will be found
            self.assertTrue(opts.get_field('data_abstract'))
            msg = (
                "CassandraThing has no field named 'relating_baseperson'. The app "
                "cache isn't ready yet, so if this is an auto-created related "
                "field, it won't be available yet."
            )
            # 'relating_baseperson' is a reverse field, and will raise an exception
            with self.assertRaisesMessage(FieldDoesNotExist, msg):
                opts.get_field('relating_baseperson')
        finally:
            opts.apps.models_ready = True
class RelationTreeTests(SimpleTestCase):
    all_models = (CassandraThing,)

    def setUp(self):
        apps.clear_cache()

    def test_clear_cache_clears_relation_tree(self):
        # The apps.clear_cache() in setUp() should have deleted all trees.
        # Exclude abstract models that are not included in the Apps registry
        # and have no cache.
        all_models_with_cache = (m for m in self.all_models if not m._meta.abstract)
        for m in all_models_with_cache:
            self.assertNotIn('_relation_tree', m._meta.__dict__)

    def test_first_relation_tree_access_populates_all(self):
        # CassandraThing does not have any relations, so relation_tree
        # should be empty
        self.assertEqual(len(CassandraThing._meta._relation_tree), 0)
class ParentListTests(SimpleTestCase):

    def test_get_parent_list(self):
        self.assertEqual(CassandraThing._meta.get_parent_list(), [])
| 39.434389 | 92 | 0.657028 | 1,068 | 8,715 | 5.10206 | 0.168539 | 0.023124 | 0.035236 | 0.020554 | 0.404845 | 0.377317 | 0.365939 | 0.343182 | 0.320609 | 0.281703 | 0 | 0.001864 | 0.261388 | 8,715 | 220 | 93 | 39.613636 | 0.844648 | 0.061847 | 0 | 0.214724 | 0 | 0 | 0.064192 | 0.027931 | 0 | 0 | 0 | 0 | 0.147239 | 1 | 0.153374 | false | 0 | 0.067485 | 0.02454 | 0.325153 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5a4ea0231731608c9745e1fcfac8b855776907 | 682 | py | Python | tests/test_naive_edit_distance.py | ThomasShaffer/SeqPy | d1d400d1dc64ac536da8ff7f84ffd33dfbbce1ed | [
"MIT"
] | null | null | null | tests/test_naive_edit_distance.py | ThomasShaffer/SeqPy | d1d400d1dc64ac536da8ff7f84ffd33dfbbce1ed | [
"MIT"
] | null | null | null | tests/test_naive_edit_distance.py | ThomasShaffer/SeqPy | d1d400d1dc64ac536da8ff7f84ffd33dfbbce1ed | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import unittest
from dna import *


class test_naive_edit_distance(unittest.TestCase):

    def test_empty_string(self):
        sequence_one = dna('AGTG')
        sequence_two = dna('')
        self.assertEqual(len(sequence_one),
                         sequence_one.edit_distance_naive(sequence_two))

    def test_incorrect_input(self):
        sequence_one = dna('AGTG')
        # The original called self.assertRaises(Exception) bare, which
        # asserts nothing; used as a context manager it actually exercises
        # the invalid input (assuming dna() rejects '!!gU').
        with self.assertRaises(Exception):
            dna('!!gU')

    def test_correct_one(self):
        sequence_one = dna('AGTGC')
        sequence_two = dna('AGTGG')
        self.assertEqual(sequence_one.edit_distance_naive(sequence_two), 1)


if __name__ == '__main__':
    unittest.main()
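# For reference, a naive recursive edit distance of the kind the method name
# suggests; a hedged sketch, not SeqPy's actual implementation:
#
#   def edit_distance_naive(a, b):
#       if not a:
#           return len(b)
#       if not b:
#           return len(a)
#       cost = 0 if a[-1] == b[-1] else 1
#       return min(edit_distance_naive(a[:-1], b) + 1,
#                  edit_distance_naive(a, b[:-1]) + 1,
#                  edit_distance_naive(a[:-1], b[:-1]) + cost)
#
# This agrees with the tests above: distance to the empty string is the
# sequence length, and 'AGTGC' vs 'AGTGG' differ by one substitution.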
| 27.28 | 91 | 0.671554 | 84 | 682 | 5.071429 | 0.428571 | 0.15493 | 0.105634 | 0.126761 | 0.352113 | 0.352113 | 0.352113 | 0.169014 | 0 | 0 | 0 | 0.001866 | 0.214076 | 682 | 24 | 92 | 28.416667 | 0.79291 | 0.030792 | 0 | 0.117647 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5ce764cd192ba5e6212cffb4110b666799761f | 2,635 | py | Python | intake/tests/services/test_statistics.py | cforlando/intake | a5233d5c0f862f28ee265b9b4831405aabeec7e2 | [
"MIT"
] | 51 | 2016-07-20T02:26:57.000Z | 2021-07-07T14:45:06.000Z | intake/tests/services/test_statistics.py | cforlando/intake | a5233d5c0f862f28ee265b9b4831405aabeec7e2 | [
"MIT"
] | 1,091 | 2016-04-29T18:07:45.000Z | 2021-04-19T18:39:39.000Z | intake/tests/services/test_statistics.py | cforlando/intake | a5233d5c0f862f28ee265b9b4831405aabeec7e2 | [
"MIT"
] | 24 | 2016-06-14T18:10:43.000Z | 2021-11-14T20:26:39.000Z | import datetime
from django.test import TestCase
from intake.constants import PACIFIC_TIME
from intake.services import statistics
from intake import utils
from intake.tests.factories import FormSubmissionWithOrgsFactory
from user_accounts.models import Organization
from intake.tests.base_testcases import ALL_APPLICATION_FIXTURES
class TestGetOrgDataDict(TestCase):
    fixtures = ALL_APPLICATION_FIXTURES

    def test_returns_expected_data(self):
        results = statistics.get_org_data_dict()
        all_orgs = results.pop(0)
        dates = [week['date'] for week in all_orgs['weekly_totals']]
        for org_data in results:
            self.assertIn('total', org_data)
            self.assertIn('apps_this_week', org_data)
            self.assertIn('org', org_data)
            self.assertListEqual(
                dates, [week['date'] for week in org_data['weekly_totals']])
class TestMakeYearWeeks(TestCase):

    def test_expected_week(self):
        # 19th week of 2017
        same_week = [
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=14)),  # sunday
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=12)),  # friday
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=11)),  # thursday
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=8)),  # monday
        ]
        # 20th week of 2017
        next_week = [
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=15)),  # monday
            PACIFIC_TIME.localize(
                datetime.datetime(year=2017, month=5, day=21)),  # sunday
        ]
        for date in same_week:
            result = statistics.as_year_week(date)
            self.assertEqual(result, '2017-19-1')
        for date in next_week:
            result = statistics.as_year_week(date)
            self.assertEqual(result, '2017-20-1')
    def test_make_year_weeks_output(self):
        todays_date = utils.get_todays_date()
        weekday = todays_date.weekday()
        first_day_of_this_week = todays_date - datetime.timedelta(days=weekday)
        next_week = first_day_of_this_week + datetime.timedelta(days=7)
        last_year_week = statistics.as_year_week(first_day_of_this_week)
        too_far_year_week = statistics.as_year_week(next_week)
        year_weeks = statistics.make_year_weeks()
        expected_last_yw = year_weeks[-1]
        self.assertNotEqual(too_far_year_week, expected_last_yw)
        self.assertEqual(last_year_week, expected_last_yw)
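# A minimal sketch of what as_year_week plausibly does, inferred from the
# expected strings above (the real helper lives in intake.services.statistics;
# the constant '-1' suffix is an assumption, not taken from the source):
#
#   def as_year_week(dt):
#       year, week, _ = dt.isocalendar()
#       return '{}-{}-1'.format(year, week)
#
# ISO weeks run Monday through Sunday, which is why May 8-14 2017 all map to
# '2017-19-1' while May 15 starts '2017-20-1'.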
| 39.924242 | 79 | 0.659962 | 327 | 2,635 | 5.051988 | 0.281346 | 0.038741 | 0.069007 | 0.098063 | 0.394673 | 0.357143 | 0.272397 | 0.272397 | 0.272397 | 0.272397 | 0 | 0.035515 | 0.251992 | 2,635 | 65 | 80 | 40.538462 | 0.802638 | 0.029222 | 0 | 0.145455 | 0 | 0 | 0.029031 | 0 | 0 | 0 | 0 | 0 | 0.145455 | 1 | 0.054545 | false | 0 | 0.145455 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac5f39249c4e0349656d0a68d1ee4a13b79d9f0b | 7,015 | py | Python | src/oidcservice/oidc/provider_info_discovery.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | 1 | 2020-09-30T13:07:46.000Z | 2020-09-30T13:07:46.000Z | src/oidcservice/oidc/provider_info_discovery.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | null | null | null | src/oidcservice/oidc/provider_info_discovery.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | null | null | null | import logging
from oidcmsg import oidc
from oidcmsg.oauth2 import ResponseMessage
from oidcservice.oauth2 import provider_info_discovery
from oidcservice.exception import ConfigurationError
__author__ = 'Roland Hedberg'
logger = logging.getLogger(__name__)
PREFERENCE2PROVIDER = {
    # "require_signed_request_object": "request_object_algs_supported",
    "request_object_signing_alg": "request_object_signing_alg_values_supported",
    "request_object_encryption_alg":
        "request_object_encryption_alg_values_supported",
    "request_object_encryption_enc":
        "request_object_encryption_enc_values_supported",
    "userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
    "userinfo_encrypted_response_alg":
        "userinfo_encryption_alg_values_supported",
    "userinfo_encrypted_response_enc":
        "userinfo_encryption_enc_values_supported",
    "id_token_signed_response_alg": "id_token_signing_alg_values_supported",
    "id_token_encrypted_response_alg":
        "id_token_encryption_alg_values_supported",
    "id_token_encrypted_response_enc":
        "id_token_encryption_enc_values_supported",
    "default_acr_values": "acr_values_supported",
    "subject_type": "subject_types_supported",
    "token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
    "token_endpoint_auth_signing_alg":
        "token_endpoint_auth_signing_alg_values_supported",
    "response_types": "response_types_supported",
    'grant_types': 'grant_types_supported',
    'scope': 'scopes_supported'
}
PROVIDER2PREFERENCE = dict([(v, k) for k, v in PREFERENCE2PROVIDER.items()])
PROVIDER_DEFAULT = {
    "token_endpoint_auth_method": "client_secret_basic",
    "id_token_signed_response_alg": "RS256",
}
def add_redirect_uris(request_args, service=None, **kwargs):
    """
    Add redirect_uris to the request arguments.

    :param request_args: Incoming request arguments
    :param service: A link to the service
    :param kwargs: Possible extra keyword arguments
    :return: A possibly augmented set of request arguments.
    """
    _context = service.service_context
    if "redirect_uris" not in request_args:
        # callbacks is a dictionary with the callback types ('code',
        # 'implicit', 'form_post') as keys.
        try:
            _cbs = _context.callbacks
        except AttributeError:
            request_args['redirect_uris'] = _context.redirect_uris
        else:
            # Filter out local additions.
            _uris = [v for k, v in _cbs.items() if not k.startswith('__')]
            request_args['redirect_uris'] = _uris
    return request_args, {}
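# Illustrative sketch of the filtering above, with invented callback URIs:
#
#   _cbs = {'code': 'https://rp.example.org/authz_cb',
#           'implicit': 'https://rp.example.org/authz_im_cb',
#           '__private': 'local bookkeeping entry, filtered out'}
#
# yields request_args['redirect_uris'] ==
#   ['https://rp.example.org/authz_cb', 'https://rp.example.org/authz_im_cb'].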
class ProviderInfoDiscovery(provider_info_discovery.ProviderInfoDiscovery):
    msg_type = oidc.Message
    response_cls = oidc.ProviderConfigurationResponse
    error_msg = ResponseMessage

    def __init__(self, service_context, state_db, client_authn_factory=None,
                 conf=None):
        provider_info_discovery.ProviderInfoDiscovery.__init__(
            self, service_context, state_db,
            client_authn_factory=client_authn_factory, conf=conf)

    def update_service_context(self, resp, **kwargs):
        self._update_service_context(resp)
        self.match_preferences(resp, self.service_context.issuer)
        if 'pre_load_keys' in self.conf and self.conf['pre_load_keys']:
            _jwks = self.service_context.keyjar.export_jwks_as_json(
                issuer=resp['issuer'])
            logger.info(
                'Preloaded keys for {}: {}'.format(resp['issuer'], _jwks))
    def match_preferences(self, pcr=None, issuer=None):
        """
        Match the client's preferences against what the provider can do.

        This is to prepare for later client registration and/or what
        functionality the client actually will use.
        In the client configuration the client preferences are expressed.
        These are then compared with the Provider Configuration information.
        If the Provider has left some claims out, defaults specified in the
        standard will be used.

        :param pcr: Provider configuration response if available
        :param issuer: The issuer identifier
        """
        if not pcr:
            pcr = self.service_context.provider_info

        regreq = oidc.RegistrationRequest
        for _pref, _prov in PREFERENCE2PROVIDER.items():
            try:
                vals = self.service_context.client_preferences[_pref]
            except KeyError:
                continue
            try:
                _pvals = pcr[_prov]
            except KeyError:
                try:
                    # If the provider has not specified a value, fall back to
                    # what the standard says is mandatory, if anything.
                    _pvals = PROVIDER_DEFAULT[_pref]
                except KeyError:
                    logger.info(
                        'No info from provider on {} and no default'.format(
                            _pref))
                    _pvals = vals

            if isinstance(vals, str):
                if vals in _pvals:
                    self.service_context.behaviour[_pref] = vals
            else:
                try:
                    vtyp = regreq.c_param[_pref]
                except KeyError:
                    # Allow non-standard claims
                    if isinstance(vals, list):
                        self.service_context.behaviour[_pref] = [
                            v for v in vals if v in _pvals]
                    elif vals in _pvals:
                        self.service_context.behaviour[_pref] = vals
                else:
                    if isinstance(vtyp[0], list):
                        self.service_context.behaviour[_pref] = []
                        for val in vals:
                            if val in _pvals:
                                self.service_context.behaviour[_pref].append(
                                    val)
                    else:
                        for val in vals:
                            if val in _pvals:
                                self.service_context.behaviour[_pref] = val
                                break

            if _pref not in self.service_context.behaviour:
                raise ConfigurationError(
                    "OP couldn't match preference:%s" % _pref, pcr)

        for key, val in self.service_context.client_preferences.items():
            if key in self.service_context.behaviour:
                continue
            try:
                vtyp = regreq.c_param[key]
                if isinstance(vtyp[0], list):
                    pass
                elif isinstance(val, list) and not isinstance(val, str):
                    val = val[0]
            except KeyError:
                pass
            if key not in PREFERENCE2PROVIDER:
                self.service_context.behaviour[key] = val

        logger.debug(
            'service_context behaviour: {}'.format(
                self.service_context.behaviour))
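# Rough illustration of the matching, with invented values: given
# client_preferences = {'id_token_signed_response_alg': 'RS256'} and provider
# metadata where id_token_signing_alg_values_supported = ['RS256', 'ES256'],
# the loop above records behaviour['id_token_signed_response_alg'] = 'RS256'.
# If the provider only offered 'ES256', match_preferences would raise
# ConfigurationError instead.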
| 39.189944 | 80 | 0.617676 | 740 | 7,015 | 5.532432 | 0.275676 | 0.071812 | 0.074744 | 0.06595 | 0.22765 | 0.152907 | 0.094773 | 0.074255 | 0.074255 | 0.051295 | 0 | 0.002695 | 0.312331 | 7,015 | 178 | 81 | 39.410112 | 0.845978 | 0.145118 | 0 | 0.228346 | 0 | 0 | 0.210661 | 0.152759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031496 | false | 0.015748 | 0.03937 | 0 | 0.110236 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |