# === tests/test_api.py | bcyran/philipstv @ 6037724d | MIT | 12,595 bytes ===
from typing import Any, Type
import pytest
from philipstv import (
PhilipsTVAPI,
PhilipsTVAPIMalformedResponseError,
PhilipsTVAPIUnauthorizedError,
PhilipsTVError,
)
from philipstv.model import (
AllChannels,
AmbilightColor,
AmbilightLayer,
AmbilightMode,
AmbilightModeValue,
AmbilightPower,
AmbilightPowerValue,
AmbilightTopology,
Application,
ApplicationComponent,
ApplicationIntent,
Applications,
ApplicationShort,
Channel,
ChannelID,
ChannelList,
ChannelShort,
CurrentChannel,
CurrentVolume,
DeviceInfo,
InputKey,
InputKeyValue,
PairingAuthInfo,
PairingGrantPayload,
PairingRequestPayload,
PairingRequestResponse,
PairingResponse,
PowerState,
PowerStateValue,
SetChannel,
Volume,
)
from tests.fakes import FakePhilipsTV
DEVICE_INFO = DeviceInfo(
id="<device_id>",
device_name="<device_name>",
device_os="<device_os>",
app_id="<app_id>",
app_name="<app_name>",
type="<type>",
)
def test_host() -> None:
expected_host = "192.168.0.66"
fake_tv = FakePhilipsTV()
fake_tv.host = expected_host
result = PhilipsTVAPI(fake_tv).host
assert result == expected_host
def test_auth() -> None:
expected_auth = ("<id>", "<key>")
fake_tv = FakePhilipsTV()
api = PhilipsTVAPI(fake_tv)
api.auth = expected_auth
assert api.auth == expected_auth
assert fake_tv.auth == expected_auth
def test_pair_request() -> None:
fake_tv = FakePhilipsTV(
post_responses={
"6/pair/request": {
"error_id": "SUCCESS",
"error_text": "Authorization required",
"auth_key": "<key>",
"timestamp": 12345,
"timeout": 60,
}
}
)
result = PhilipsTVAPI(fake_tv).pair_request(
PairingRequestPayload(scope=["read", "write", "control"], device=DEVICE_INFO)
)
assert fake_tv.post_requests["6/pair/request"] == {
"scope": ["read", "write", "control"],
"device": DEVICE_INFO.dump(),
}
assert result == PairingRequestResponse(
error_id="SUCCESS",
error_text="Authorization required",
auth_key="<key>",
timestamp=12345,
timeout=60,
)
def test_pair_grant() -> None:
fake_tv = FakePhilipsTV(
post_responses={
"6/pair/grant": {
"error_id": "SUCCESS",
"error_text": "Pairing completed",
}
}
)
result = PhilipsTVAPI(fake_tv).pair_grant(
PairingGrantPayload(
auth=PairingAuthInfo(pin="<pin>", auth_timestamp=12345, auth_signature="<signature>"),
device=DEVICE_INFO,
)
)
assert fake_tv.post_requests["6/pair/grant"] == {
"auth": {
"pin": "<pin>",
"auth_timestamp": 12345,
"auth_signature": "<signature>",
},
"device": DEVICE_INFO.dump(),
}
assert result == PairingResponse(
error_id="SUCCESS",
error_text="Pairing completed",
)
def test_get_powerstate() -> None:
fake_tv = FakePhilipsTV(get_responses={"6/powerstate": {"powerstate": "On"}})
result = PhilipsTVAPI(fake_tv).get_powerstate()
assert result == PowerState(powerstate=PowerStateValue.ON)
def test_set_powerstate() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/powerstate": None})
PhilipsTVAPI(fake_tv).set_powerstate(PowerState(powerstate=PowerStateValue.STANDBY))
assert fake_tv.post_requests == {"6/powerstate": {"powerstate": "Standby"}}
def test_get_volume() -> None:
fake_tv = FakePhilipsTV(
get_responses={"6/audio/volume": {"muted": False, "current": 15, "min": 0, "max": 60}}
)
result = PhilipsTVAPI(fake_tv).get_volume()
assert result == CurrentVolume(current=15, muted=False, min=0, max=60)
def test_set_volume() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/audio/volume": None})
PhilipsTVAPI(fake_tv).set_volume(Volume(current=10))
assert fake_tv.post_requests == {"6/audio/volume": {"muted": False, "current": 10}}
def test_get_current_channel() -> None:
fake_tv = FakePhilipsTV(
get_responses={
"6/activities/tv": {
"channel": {"ccid": 35, "preset": "10", "name": "TVN HD"},
"channelList": {"id": "list", "version": "7"},
}
}
)
result = PhilipsTVAPI(fake_tv).get_current_channel()
assert result == CurrentChannel(
channel=ChannelShort(ccid=35, preset="10", name="TVN HD"),
channel_list=ChannelList(id="list", version="7"),
)
def test_set_current_channel() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/activities/tv": None})
PhilipsTVAPI(fake_tv).set_channel(SetChannel(channel=ChannelID(ccid=30)))
assert fake_tv.post_requests == {
"6/activities/tv": {"channel": {"ccid": 30}, "channelList": {"id": "allcab"}}
}
def test_get_all_channels() -> None:
fake_tv = FakePhilipsTV(
get_responses={
"6/channeldb/tv/channelLists/all": {
"version": 1,
"id": "all",
"listType": "MixedSources",
"medium": "mixed",
"operator": "OPER",
"installCountry": "Poland",
"channel": [
{
"ccid": 35,
"preset": "1",
"name": "TVPiS 1 HD",
"onid": 1537,
"tsid": 24,
"sid": 2403,
"serviceType": "audio_video",
"type": "DVB_C",
"logoVersion": 33,
}
],
},
}
)
result = PhilipsTVAPI(fake_tv).get_all_channels()
assert result == AllChannels(
version=1,
id="all",
list_type="MixedSources",
medium="mixed",
operator="OPER",
install_country="Poland",
channel=[
Channel(
ccid=35,
preset="1",
name="TVPiS 1 HD",
onid=1537,
tsid=24,
sid=2403,
service_type="audio_video",
type="DVB_C",
logo_version=33,
)
],
)
def test_input_key() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/input/key": None})
PhilipsTVAPI(fake_tv).input_key(InputKey(key=InputKeyValue.STANDBY))
assert fake_tv.post_requests == {"6/input/key": {"key": "Standby"}}
def test_get_ambilight_power() -> None:
fake_tv = FakePhilipsTV(get_responses={"6/ambilight/power": {"power": "On"}})
result = PhilipsTVAPI(fake_tv).get_ambilight_power()
assert result == AmbilightPower(power=AmbilightPowerValue.ON)
def test_set_ambilight_power() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/ambilight/power": None})
PhilipsTVAPI(fake_tv).set_ambilight_power(AmbilightPower(power=AmbilightPowerValue.OFF))
assert fake_tv.post_requests == {"6/ambilight/power": {"power": "Off"}}
def test_get_ambilight_topology() -> None:
fake_tv = FakePhilipsTV(
get_responses={
"6/ambilight/topology": {"layers": 1, "left": 3, "top": 7, "right": 3, "bottom": 0}
}
)
result = PhilipsTVAPI(fake_tv).get_ambilight_topology()
assert result == AmbilightTopology(layers=1, left=3, top=7, right=3, bottom=0)
def test_get_abilight_mode() -> None:
fake_tv = FakePhilipsTV(get_responses={"6/ambilight/mode": {"current": "internal"}})
result = PhilipsTVAPI(fake_tv).get_ambilight_mode()
assert result == AmbilightMode(current=AmbilightModeValue.INTERNAL)
def test_set_abilight_mode() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/ambilight/mode": None})
PhilipsTVAPI(fake_tv).set_ambilight_mode(AmbilightMode(current=AmbilightModeValue.MANUAL))
assert fake_tv.post_requests == {"6/ambilight/mode": {"current": "manual"}}
@pytest.mark.parametrize(
"method, endpoint",
[
pytest.param("get_ambilight_measured", "6/ambilight/measured", id="measured"),
pytest.param("get_ambilight_processed", "6/ambilight/processed", id="processed"),
pytest.param("get_ambilight_cached", "6/ambilight/cached", id="cached"),
],
)
def test_get_ambilight_colors(method: str, endpoint: str) -> None:
fake_tv = FakePhilipsTV(
get_responses={
endpoint: {
"layer1": {
"left": {"0": {"r": 255, "g": 0, "b": 0}},
"top": {"0": {"r": 0, "g": 255, "b": 0}, "1": {"r": 0, "g": 0, "b": 0}},
"right": {"0": {"r": 0, "g": 0, "b": 255}},
},
}
}
)
result = getattr(PhilipsTVAPI(fake_tv), method)()
assert result["layer1"] == AmbilightLayer(
left={"0": AmbilightColor(r=255, g=0, b=0)},
top={"0": AmbilightColor(r=0, g=255, b=0), "1": AmbilightColor(r=0, g=0, b=0)},
right={"0": AmbilightColor(r=0, g=0, b=255)},
)
def test_set_ambilight_cached() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/ambilight/cached": None})
PhilipsTVAPI(fake_tv).set_ambilight_cached(AmbilightColor(r=255, g=255, b=255))
assert fake_tv.post_requests == {"6/ambilight/cached": {"r": 255, "g": 255, "b": 255}}
def test_get_applications() -> None:
fake_tv = FakePhilipsTV(
get_responses={
"6/applications": {
"version": 0,
"applications": [
{
"intent": {
"component": {
"packageName": "org.droidtv.eum",
"className": "org.droidtv.eum.classname",
},
"action": "android.intent.action.MAIN",
},
"label": "Application",
"order": 0,
"id": "org.droidtv.eum.whatever",
"type": "app",
},
],
}
}
)
result = PhilipsTVAPI(fake_tv).get_applications()
assert result == Applications(
version=0,
applications=[
Application(
intent=ApplicationIntent(
component=ApplicationComponent(
package_name="org.droidtv.eum",
class_name="org.droidtv.eum.classname",
),
action="android.intent.action.MAIN",
),
label="Application",
order=0,
id="org.droidtv.eum.whatever",
type="app",
)
],
)
def test_launch_application() -> None:
fake_tv = FakePhilipsTV(post_responses={"6/activities/launch": None})
PhilipsTVAPI(fake_tv).launch_application(
ApplicationShort(
intent=ApplicationIntent(
component=ApplicationComponent(
package_name="org.droidtv.eum",
class_name="org.droidtv.eum.classname",
),
action="android.intent.action.MAIN",
),
)
)
assert fake_tv.post_requests == {
"6/activities/launch": {
"intent": {
"component": {
"packageName": "org.droidtv.eum",
"className": "org.droidtv.eum.classname",
},
"action": "android.intent.action.MAIN",
},
},
}
@pytest.mark.parametrize(
"response, expected_exception",
[
pytest.param(
PhilipsTVError("GET", "6/powerstate", 401),
PhilipsTVAPIUnauthorizedError,
id="unauthorized request error",
),
pytest.param(
{"foo": "bar"},
PhilipsTVAPIMalformedResponseError,
id="malformed response error",
),
pytest.param(
PhilipsTVError("GET", "6/powerstate", 404),
PhilipsTVError,
id="unhandled error",
),
],
)
def test_api_error(response: Any, expected_exception: Type[Exception]) -> None:
fake_tv = FakePhilipsTV(get_responses={"6/powerstate": response})
with pytest.raises(expected_exception):
PhilipsTVAPI(fake_tv).get_powerstate()
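# --- Illustrative sketch (an assumption, not part of the original file) ---
# tests/fakes.py is imported above but not shown. A minimal FakePhilipsTV
# consistent with how these tests use it might look like this: canned
# responses per path, recorded POST payloads, and stored exceptions re-raised
# on access (as test_api_error relies on). Treat every detail, including the
# get/post method names, as an assumption about the real fake.
class _FakePhilipsTVSketch:
    def __init__(self, get_responses=None, post_responses=None):
        self.host = None
        self.auth = None
        self.get_responses = get_responses or {}
        self.post_responses = post_responses or {}
        self.post_requests = {}  # records the last payload sent to each path

    def get(self, path):
        response = self.get_responses[path]
        if isinstance(response, Exception):
            raise response
        return response

    def post(self, path, payload=None):
        self.post_requests[path] = payload
        return self.post_responses[path]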
# === mvmm/single_view/opt_diagnostics.py | idc9/mvmm @ 64fce755 | MIT | 1,651 bytes | 1 star ===
import matplotlib.pyplot as plt
# from copy import deepcopy
import numpy as np
from mvmm.viz_utils import set_xaxis_int_ticks
def plot_opt_hist(loss_vals, init_loss_vals=None,
loss_name='loss value',
title='', step_vals=None, inches=10):
loss_val_diffs = np.diff(loss_vals)
# get signed log of loss val differences
log_lvd = np.array([np.nan] * len(loss_val_diffs))
log_lvd[loss_val_diffs > 0] = np.log10(loss_val_diffs[loss_val_diffs > 0])
log_lvd[loss_val_diffs < 0] = np.log10(-loss_val_diffs[loss_val_diffs < 0])
# if init_loss_vals is not None:
# n_plots = 3
# else:
# n_plots = 2
plt.figure(figsize=(2.1 * inches, inches))
# plot loss val history
plt.subplot(1, 2, 1)
plt.plot(loss_vals, marker='.')
plt.xlabel('step')
plt.ylabel(loss_name)
plt.title(title)
# final initializations
if init_loss_vals is not None:
for i, val in enumerate(init_loss_vals):
label = None
if i == 0:
label = 'init std = {:1.3f}'.format(np.std(init_loss_vals))
plt.axhline(val, lw=.5, alpha=.5, label=label)
plt.legend()
if step_vals is not None:
for s in step_vals:
plt.axvline(s - 1, color='gray')
set_xaxis_int_ticks()
    # plot loss val differences
plt.subplot(1, 2, 2)
plt.plot(log_lvd, marker='.')
plt.xlabel('step')
plt.ylabel('log10(diff-{})'.format(loss_name))
set_xaxis_int_ticks()
if step_vals is not None:
for s in step_vals:
plt.axvline(s - 1, color='gray')
set_xaxis_int_ticks()
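# Hedged usage sketch: run this module directly (with the mvmm package on the
# path) to render the diagnostics for a synthetic, exponentially decaying
# loss curve; the values below are illustrative.
if __name__ == "__main__":
    losses = np.exp(-np.linspace(0, 5, 50)) + 1e-3
    plot_opt_hist(losses, init_loss_vals=[1.2, 1.0, 0.9],
                  title="synthetic example")
    plt.show()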
# === shinyutils/matwrap.py | toddrme2178/shinyutils @ c9459f93 | MIT | 2,696 bytes ===
"""matwrap.py: wrapper around matplotlib."""
import json
import warnings
from pkg_resources import resource_filename
class MatWrap:
_rc_defaults_path = resource_filename("shinyutils", "data/mplcfg.json")
with open(_rc_defaults_path, "r") as f:
_rc_defaults = json.load(f)
_mpl = None
_plt = None
_sns = None
@classmethod
def configure(
cls,
context="paper",
style="ticks",
font="Latin Modern Roman",
latex_pkgs=None,
**rc_extra,
):
"""
Arguments:
context: seaborn context (paper/notebook/poster).
style: seaborn style (whitegrid, darkgrid, etc.)
font: latex font (passed directly to fontspec).
latex_pkgs: list of packages to load in pgf preamble.
rc_extra: matplotlib params (will overwrite defaults).
"""
rc = MatWrap._rc_defaults.copy()
rc["pgf.preamble"] = [r"\usepackage{fontspec}"]
rc["pgf.preamble"].append(rf"\setmainfont{{{font}}}")
rc["pgf.preamble"].append(rf"\setsansfont{{{font}}}")
if latex_pkgs is not None:
for pkg in reversed(latex_pkgs):
rc["pgf.preamble"].insert(0, rf"\usepackage{{{pkg}}}")
rc.update(rc_extra)
if cls._mpl is None:
import matplotlib
cls._mpl = matplotlib
cls._mpl.rcParams.update(rc)
import matplotlib.pyplot
import seaborn
cls._plt = matplotlib.pyplot
cls._sns = seaborn
else:
cls._mpl.rcParams.update(rc)
cls._sns.set(context, style, rc=rc)
def __new__(cls):
raise NotImplementedError(
"MatWrap does not provide instances. Use the class methods."
)
@classmethod
def _ensure_conf(cls):
if cls._mpl is None:
cls.configure()
@classmethod
def mpl(cls):
cls._ensure_conf()
return cls._mpl
@classmethod
def plt(cls):
cls._ensure_conf()
return cls._plt
@classmethod
def sns(cls):
cls._ensure_conf()
return cls._sns
@classmethod
def palette(cls):
return [
"#e41a1c",
"#6a3d9a",
"#d55e00",
"#34495e",
"#377eb8",
"#4daf4a",
"#95a5a6",
"#222222",
]
@staticmethod
def set_size_tight(fig, size):
warnings.warn(
"constrained_layout is enabled by default: don't use tight_layout",
DeprecationWarning,
)
fig.set_size_inches(*size)
fig.tight_layout(pad=0, w_pad=0, h_pad=0)
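# Hedged usage sketch: configure the backend once, then grab the lazily
# imported pyplot handle. Rendering settings come from the packaged
# mplcfg.json, so the effective output backend here is an assumption.
if __name__ == "__main__":
    MatWrap.configure(context="notebook", style="whitegrid")
    plt = MatWrap.plt()
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], color=MatWrap.palette()[0])
    fig.savefig("matwrap_demo.png")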
# === autotest/get_exes.py | MODFLOW-USGS/modflow6 @ 6cc0f91b | CC0-1.0 | 3,078 bytes | 102 stars ===
# Get executables
import os
import shutil
import pymake
from framework import running_on_CI
if running_on_CI():
print("running on CI environment")
os.environ["PYMAKE_DOUBLE"] = "1"
# paths to executables for previous versions of MODFLOW
ebindir = os.path.abspath(
os.path.join(os.path.expanduser("~"), ".local", "bin")
)
if not os.path.exists(ebindir):
os.makedirs(ebindir)
mfexe_pth = "temp/mfexes"
# use the line below to set the Fortran compiler via environment variables
# os.environ["FC"] = "gfortran"
# some flags to check for errors in the code
# add -Werror for compilation to terminate if errors are found
strict_flags = (
"-Wtabs -Wline-truncation -Wunused-label "
"-Wunused-variable -pedantic -std=f2008 "
"-Wcharacter-truncation"
)
def get_compiler_envvar(fc):
env_var = os.environ.get("FC")
if env_var is not None:
if env_var != fc:
fc = env_var
return fc
def create_dir(pth):
# create pth directory
print(f"creating... {os.path.abspath(pth)}")
os.makedirs(pth, exist_ok=True)
msg = "could not create... {}".format(os.path.abspath(pth))
assert os.path.exists(pth), msg
def rebuild_mf6_release():
pm = pymake.Pymake(verbose=True)
pm.target = "mf6"
pm.appdir = ebindir
download_pth = os.path.join("temp")
target_dict = pymake.usgs_program_data.get_target(pm.target)
pm.download_target(pm.target, download_path=download_pth, verify=False)
# Set MODFLOW 6 to compile develop version of the release
srcpth = os.path.join(
download_pth, target_dict["dirname"], target_dict["srcdir"]
)
fpth = os.path.join(srcpth, "Utilities", "version.f90")
with open(fpth) as f:
lines = f.read().splitlines()
assert len(lines) > 0, "could not update {}".format(srcpth)
    with open(fpth, "w") as f:
        for line in lines:
            tag = "IDEVELOPMODE = 0"
            if tag in line:
                line = line.replace(tag, "IDEVELOPMODE = 1")
            f.write("{}\n".format(line))
# reset compiler based on environmental variable, if defined
pm.fc = get_compiler_envvar(pm.fc)
# add strict flags if gfortran is being used
if pm.fc == "gfortran":
pm.fflags = strict_flags
# build the release version of MODFLOW 6
pm.build()
msg = "{} does not exist.".format(pm.target)
assert pm.returncode == 0, msg
# finalize the build
pm.finalize()
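# Illustrative helper (not used by the functions above): the version.f90
# patch applied inside rebuild_mf6_release, extracted as a standalone pure
# function for clarity.
def _patch_develop_mode(line):
    """Flip 'IDEVELOPMODE = 0' to '= 1', mirroring the loop above."""
    tag = "IDEVELOPMODE = 0"
    return line.replace(tag, "IDEVELOPMODE = 1") if tag in line else line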
def test_create_dirs():
pths = [os.path.join("..", "bin"), os.path.join("temp")]
for pth in pths:
create_dir(pth)
def test_getmfexes(verify=True):
pymake.getmfexes(mfexe_pth, verify=verify)
for target in os.listdir(mfexe_pth):
srcpth = os.path.join(mfexe_pth, target)
if os.path.isfile(srcpth):
dstpth = os.path.join(ebindir, target)
print("copying {} -> {}".format(srcpth, dstpth))
shutil.copy(srcpth, dstpth)
def test_rebuild_mf6_release():
rebuild_mf6_release()
if __name__ == "__main__":
test_create_dirs()
test_getmfexes(verify=False)
# === workflow/Strategy.py | YOCKOW/PythonGitHubActionsWorkflowRepresentation @ 1e8cb9fd | MIT | 949 bytes ===
from .Node import Node
from .BooleanNode import BooleanNode
from .MappingNode import MappingNode
from .Matrix import Matrix
from .NumberNode import IntegerNode
from typing import Any, Dict, List
class Strategy(MappingNode):
def __init__(self, info: Dict[str, Any]):
assert isinstance(info, dict)
converted: Dict[str, Node] = {}
for key, value in info.items():
if key == 'fail-fast':
assert isinstance(value, bool)
converted[key] = BooleanNode(value)
elif key == 'max-parallel':
assert isinstance(value, int)
converted[key] = IntegerNode(value)
elif key == 'matrix':
assert isinstance(value, dict)
converted[key] = Matrix(value)
else:
raise ValueError(f"Unexpected key named {key} for strategy.")
super().__init__(converted)
@property
def key_order(self) -> List[str]:
return [
'fail-fast',
'max-parallel',
'matrix',
]
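# Hedged usage sketch (run with `python -m workflow.Strategy` because of the
# relative imports above); the matrix payload shape is an assumption about
# what Matrix accepts, while the top-level keys come from __init__.
if __name__ == "__main__":
    strategy = Strategy({
        "fail-fast": False,
        "max-parallel": 4,
        "matrix": {"python-version": ["3.9", "3.10"]},
    })
    print(strategy.key_order)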
# === dbcron/validators.py | cloudspectatordevelopment/django-dbcron @ 53182ca8 | BSD-3-Clause | 4,216 bytes ===
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from django.utils.deconstruct import deconstructible
MONTH_MAP = {
'JAN': 1,
'FEB': 2,
'MAR': 3,
'APR': 4,
'MAY': 5,
'JUN': 6,
'JUL': 7,
'AUG': 8,
'SEP': 9,
'OCT': 10,
'NOV': 11,
'DEC': 12,
}
DAYS = {
'SUN': 0,
'MON': 1,
'TUE': 2,
'WED': 3,
'THU': 4,
'FRI': 5,
'SAT': 6,
}
@deconstructible
class BaseCrontabValidator:
code = 'invalid'
int_message = _("Enter a number between %d and %d.")
range_message = _("Please enter valid range from %d and %s.")
freq_message = _("Enter a valid positive number.")
special_strings = []
def __eq__(self, other):
return self.__class__ == other.__class__
def validate_int(self, value, index):
try:
value = int(value)
        except (TypeError, ValueError):  # int() raises ValueError on non-numeric strings
raise validators.ValidationError(
message=self.int_message % (self.int_min, self.int_max),
code=self.code
)
if not self.int_min <= value <= self.int_max:
raise validators.ValidationError(
message=self.int_message % (self.int_min, self.int_max),
code=self.code
)
def validate_range(self, value, index):
try:
min_, max_ = value.split('-')
except ValueError:
raise validators.ValidationError(
message=_("Bad range format"),
code=self.code
)
self.validate_int(min_, index)
self.validate_int(max_, index)
min_, max_ = int(min_), int(max_)
if min_ >= max_:
raise validators.ValidationError(
message=self.range_message % (self.int_min, self.int_max),
code=self.code
)
def validate_frequency(self, value, index):
        star, freq = value.split('/', 1)
        try:
            freq = int(freq)
        except (TypeError, ValueError):  # int() raises ValueError on non-numeric strings
raise validators.ValidationError(message=self.freq_message,
code=self.code)
if freq <= 0:
raise validators.ValidationError(message=self.freq_message,
code=self.code)
if star != '*':
raise validators.ValidationError(
message=_("The first character of frequency must be '*'."),
code=self.code
)
def __call__(self, value):
value = str(value)
values = value.split(',')
for i, value in enumerate(values):
if value.isdigit():
self.validate_int(value, i)
continue
elif value.startswith('-') and value[1:].isdigit():
raise validators.ValidationError(
message=self.range_message % (self.int_min, self.int_max),
code=self.code
)
elif value.startswith('*/'):
self.validate_frequency(value, i)
continue
elif '-' in value:
self.validate_range(value, i)
continue
elif value.upper() in self.special_strings:
continue
elif value in ['*', '?']:
continue
raise validators.ValidationError(
message=_("Enter a correct value."),
code=self.code
)
class SecondsValidator(BaseCrontabValidator):
int_min = 0
int_max = 59
class MinutesValidator(BaseCrontabValidator):
int_min = 0
int_max = 59
class HoursValidator(BaseCrontabValidator):
int_min = 0
int_max = 23
class DaysOfMonthValidator(BaseCrontabValidator):
int_min = 1
int_max = 31
special_strings = ['L']
class MonthValidator(BaseCrontabValidator):
int_min = 1
int_max = 12
month_list = MONTH_MAP
special_strings = list(month_list)
class DaysOfWeekValidator(BaseCrontabValidator):
int_min = 0
int_max = 6
day_list = DAYS
special_strings = ['L'] + list(day_list)
class YearsValidator(BaseCrontabValidator):
int_min = 1970
int_max = 2099
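# Hedged usage sketch (needs a configured Django settings module, since the
# error messages go through the translation machinery):
if __name__ == "__main__":
    MinutesValidator()("*/5,10-20")  # valid: passes silently
    try:
        HoursValidator()("25")  # 25 is outside 0-23
    except validators.ValidationError as exc:
        print(exc)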
# === micromagneticmodel/tests/test_rkky.py | ubermag/micromagneticmodel @ 91ad92d2 | BSD-3-Clause | 1,103 bytes | 5 stars ===
import re
import pytest
import numpy as np
import discretisedfield as df
import micromagneticmodel as mm
from .checks import check_term
class TestRKKY:
def setup(self):
self.valid_args = [(1, ['a', 'b']),
(-1, ['a', 'bc']),
(0, ['r1', 'r2'])]
self.invalid_args = [('a', ['a', 'b']),
(-1, 'a'),
(0, 0)]
def test_init_valid_args(self):
for sigma, subregions in self.valid_args:
term = mm.RKKY(sigma=sigma, subregions=subregions)
check_term(term)
assert hasattr(term, 'sigma')
assert hasattr(term, 'subregions')
assert term.name == 'rkky'
assert re.search(r'^RKKY\(sigma=.+\)$', repr(term))
def test_init_invalid_args(self):
for sigma, subregions in self.invalid_args:
with pytest.raises((TypeError, ValueError)):
                mm.RKKY(sigma=sigma, subregions=subregions)
        with pytest.raises(AttributeError):
            mm.RKKY(wrong=1)
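# Hedged illustration (run as a module because of the relative import above):
# a valid RKKY term matching the argument shapes the tests exercise.
if __name__ == "__main__":
    term = mm.RKKY(sigma=1, subregions=["r1", "r2"])
    print(repr(term))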
# === tests/test_commands_status.py | ar90n/kkt @ e772860b | Apache-2.0 | 1,048 bytes | 1 star ===
import pytest
import kkt
from kkt.exception import MetaDataNotFound
from kkt.commands.status import status, status_impl
@pytest.mark.parametrize(
"given, expected",
[
(
{"status": "complete", "failureMessage": None, "user": "user"},
"status: complete",
),
(
{"status": "running", "failureMessage": None, "user": "user"},
"status: running",
),
(
{"status": "complete", "failureMessage": "failed", "user": "user"},
"status: complete\nmessage: failed",
),
],
)
def test_status_impl(given, expected, kaggle_api):
api = kaggle_api(**given)
actual = status_impl(api, {"slug": "kkt"})
assert expected == actual
def test_commands_status(chshared_datadir, cli_runner, kaggle_api, monkeypatch):
api = kaggle_api("complete", None, "user")
monkeypatch.setattr("kkt.commands.kkt_command.get_kaggle_api", lambda: api)
ret = cli_runner.invoke(status, [])
assert "status: complete\n" == ret.output
# === examples/example_ros_anymal.py | TLasguignes/signal_scope @ ad2690df | BSD-3-Clause | 1,460 bytes | 2 stars ===
'''
A working example for signals from Anymal
Plots x,y,z in position and the yaw angle
'''
import numpy
import sys
sys.argv = ['test']  # some ROS imports inspect sys.argv at import time; stub it
import tf
def getYawDegrees(msg):
'''yaw degrees'''
quaternion = (
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
return msg.header.stamp, euler[2]*180.0/numpy.pi
def getPositionNorm(msg):
'''position magnitude'''
x = [msg.pose.pose.position.x, msg.pose.pose.position.y, msg.pose.pose.position.z]
return msg.header.stamp, numpy.linalg.norm(x)
def getVelocity(msg):
'''velocity in m/sec'''
vel = [msg.twist.twist.linear.x, msg.twist.twist.linear.y, msg.twist.twist.linear.z]
return msg.header.stamp, numpy.linalg.norm(vel)
# addPlot, addSignal and addSignalFunction are injected into this script's
# namespace by signal_scope when it executes the file; they are not imports.
addPlot(timeWindow=20, yLimits=[-10, 10])
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.x)
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.y)
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.z)
addSignalFunction('/state_estimator/anymal_state', getPositionNorm)
addPlot(timeWindow=20, yLimits=[-180, 180])
addSignalFunction('/state_estimator/anymal_state', getYawDegrees)
addPlot(timeWindow=20, yLimits=[-2, 2])
addSignalFunction('/state_estimator/anymal_state', getVelocity)
# === src/face_detect.py | train255/Silent-Face-Anti-Spoofing @ e2137cde | Apache-2.0 | 1,366 bytes ===
# -*- coding: utf-8 -*-
# @Time : 20-6-9 上午10:20
# @Author : zhuying
# @Company : Minivision
# @File : face_detect.py
# @Software : PyCharm
import cv2
import math
import numpy as np
class FaceModel:
def __init__(self):
caffemodel = "./resources/detection_model/Widerface-RetinaFace.caffemodel"
deploy = "./resources/detection_model/deploy.prototxt"
self.detector = cv2.dnn.readNetFromCaffe(deploy, caffemodel)
        # kept for reference; get_bbox below takes the top-scoring detection
        # rather than thresholding on this value
        self.detector_confidence = 0.6
def get_bbox(self, img):
height, width = img.shape[0], img.shape[1]
aspect_ratio = width / height
if img.shape[1] * img.shape[0] >= 192 * 192:
img = cv2.resize(img,
(int(192 * math.sqrt(aspect_ratio)),
int(192 / math.sqrt(aspect_ratio))), interpolation=cv2.INTER_LINEAR)
blob = cv2.dnn.blobFromImage(img, 1, mean=(104, 117, 123))
self.detector.setInput(blob, 'data')
out = self.detector.forward('detection_out').squeeze()
max_conf_index = np.argmax(out[:, 2])
left, top, right, bottom = out[max_conf_index, 3]*width, out[max_conf_index, 4]*height, \
out[max_conf_index, 5]*width, out[max_conf_index, 6]*height
bbox = [int(left), int(top), int(right-left+1), int(bottom-top+1)]
        return bbox
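# Hedged usage sketch: "sample.jpg" is a placeholder, and the Caffe model
# files must exist under ./resources/detection_model/ as hard-coded above.
if __name__ == "__main__":
    model = FaceModel()
    image = cv2.imread("sample.jpg")
    x, y, w, h = model.get_bbox(image)
    print("face bbox:", x, y, w, h)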
# === ipvs.py | akamac/gnlpy @ 61abb4b6 | BSD-3-Clause | 21,645 bytes | 176 stars ===
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""IPVS module
This module exists as a pure-python replacement for ipvsadm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import socket
import struct
import gnlpy.netlink as netlink
# IPVS forwarding methods
IPVS_MASQUERADING = 0
IPVS_LOCAL = 1
IPVS_TUNNELING = 2
IPVS_ROUTING = 3
IPVS_METHODS = set([
IPVS_MASQUERADING,
IPVS_LOCAL,
IPVS_TUNNELING,
IPVS_ROUTING
])
# Virtual Service flags
IPVS_SVC_F_ONEPACKET = 0x0004
# These are attr_list_types which are nestable. The command attribute list
# is ultimately referenced by the messages which are passed down to the
# kernel via netlink. These structures must match the type and ordering
# that the kernel expects.
IpvsStatsAttrList = netlink.create_attr_list_type(
'IpvsStatsAttrList',
('CONNS', netlink.U32Type),
('INPKTS', netlink.U32Type),
('OUTPKTS', netlink.U32Type),
('INBYTES', netlink.U64Type),
('OUTBYTES', netlink.U64Type),
('CPS', netlink.U32Type),
('INPPS', netlink.U32Type),
('OUTPPS', netlink.U32Type),
('INBPS', netlink.U32Type),
('OUTBPS', netlink.U32Type),
)
IpvsStatsAttrList64 = netlink.create_attr_list_type(
'IpvsStatsAttrList64',
('CONNS', netlink.U64Type),
('INPKTS', netlink.U64Type),
('OUTPKTS', netlink.U64Type),
('INBYTES', netlink.U64Type),
('OUTBYTES', netlink.U64Type),
('CPS', netlink.U64Type),
('INPPS', netlink.U64Type),
('OUTPPS', netlink.U64Type),
('INBPS', netlink.U64Type),
('OUTBPS', netlink.U64Type),
)
IpvsServiceAttrList = netlink.create_attr_list_type(
'IpvsServiceAttrList',
('AF', netlink.U16Type),
('PROTOCOL', netlink.U16Type),
('ADDR', netlink.BinaryType),
('PORT', netlink.Net16Type),
('FWMARK', netlink.U32Type),
('SCHED_NAME', netlink.NulStringType),
('FLAGS', netlink.BinaryType),
('TIMEOUT', netlink.U32Type),
('NETMASK', netlink.U32Type),
('STATS', IpvsStatsAttrList),
('PE_NAME', netlink.NulStringType),
('STATS64', IpvsStatsAttrList64),
)
IpvsDestAttrList = netlink.create_attr_list_type(
'IpvsDestAttrList',
('ADDR', netlink.BinaryType),
('PORT', netlink.Net16Type),
('FWD_METHOD', netlink.U32Type),
('WEIGHT', netlink.I32Type),
('U_THRESH', netlink.U32Type),
('L_THRESH', netlink.U32Type),
('ACTIVE_CONNS', netlink.U32Type),
('INACT_CONNS', netlink.U32Type),
('PERSIST_CONNS', netlink.U32Type),
('STATS', IpvsStatsAttrList),
('ADDR_FAMILY', netlink.U16Type),
('STATS64', IpvsStatsAttrList64),
)
IpvsDaemonAttrList = netlink.create_attr_list_type(
'IpvsDaemonAttrList',
('STATE', netlink.U32Type),
('MCAST_IFN', netlink.NulStringType),
('SYNC_ID', netlink.U32Type),
)
IpvsInfoAttrList = netlink.create_attr_list_type(
'IpvsInfoAttrList',
('VERSION', netlink.U32Type),
('CONN_TAB_SIZE', netlink.U32Type),
)
IpvsCmdAttrList = netlink.create_attr_list_type(
'IpvsCmdAttrList',
('SERVICE', IpvsServiceAttrList),
('DEST', IpvsDestAttrList),
('DAEMON', IpvsDaemonAttrList),
('TIMEOUT_TCP', netlink.U32Type),
('TIMEOUT_TCP_FIN', netlink.U32Type),
('TIMEOUT_UDP', netlink.U32Type),
)
IpvsMessage = netlink.create_genl_message_type(
'IpvsMessage', 'IPVS',
('NEW_SERVICE', IpvsCmdAttrList),
('SET_SERVICE', IpvsCmdAttrList),
('DEL_SERVICE', IpvsCmdAttrList),
('GET_SERVICE', IpvsCmdAttrList),
('NEW_DEST', IpvsCmdAttrList),
('SET_DEST', IpvsCmdAttrList),
('DEL_DEST', IpvsCmdAttrList),
('GET_DEST', IpvsCmdAttrList),
('NEW_DAEMON', IpvsCmdAttrList),
('DEL_DAEMON', IpvsCmdAttrList),
('GET_DAEMON', IpvsCmdAttrList),
('SET_CONFIG', IpvsCmdAttrList),
('GET_CONFIG', IpvsCmdAttrList),
('SET_INFO', IpvsCmdAttrList),
('GET_INFO', IpvsCmdAttrList),
('ZERO', IpvsCmdAttrList),
('FLUSH', IpvsCmdAttrList),
required_modules=['ip_vs'],
)
def verbose(f):
def g(self, *args, **kwargs):
if self.verbose:
s_args = [repr(a) for a in args]
s_args.extend(['{0}={1}'.format(k, repr(v))
for k, v in six.iteritems(kwargs)])
print('{0}({1})'.format(f.__name__, ', '.join(s_args)))
return f(self, *args, **kwargs)
return g
def _validate_ip(ip):
try:
socket.inet_pton(_to_af(ip), ip)
return True
except socket.error:
return False
def _to_af(ip):
return socket.AF_INET6 if ':' in ip else socket.AF_INET
def _to_af_union(ip):
af = _to_af(ip)
return af, socket.inet_pton(af, ip).ljust(16, b'\0')
def _from_af_union(af, addr):
n = 4 if af == socket.AF_INET else 16
return socket.inet_ntop(af, addr[:n])
def _to_proto_num(proto):
if proto is None:
return None
if proto.lower() == 'tcp':
return socket.IPPROTO_TCP
elif proto.lower() == 'udp':
return socket.IPPROTO_UDP
else:
assert False, 'unknown proto %s' % proto
def _from_proto_num(n):
if n is None:
return None
if n == socket.IPPROTO_TCP:
return 'tcp'
elif n == socket.IPPROTO_UDP:
return 'udp'
else:
assert False, 'unknown proto num %d' % n
class Dest(object):
"""Describes a real server to be load balanced to."""
def __init__(self, d={}, validate=False):
self.ip_ = d.get('ip', None)
self.weight_ = d.get('weight', None)
self.port_ = d.get('port', None)
self.fwd_method_ = d.get('fwd_method', IPVS_TUNNELING)
        self.counters_ = d.get('counters', {})
        if validate:
            self.validate()
def __repr__(self):
return 'Dest(d=dict(ip="%s", weight=%d))' % (self.ip(), self.weight())
def counters(self):
return self.counters_
def ip(self):
return self.ip_
def weight(self):
return self.weight_
def port(self):
return self.port_
def fwd_method(self):
return self.fwd_method_
def validate(self):
assert _validate_ip(self.ip_)
assert isinstance(self.weight_, int)
assert self.weight_ >= -1
assert self.fwd_method_ in IPVS_METHODS
def to_dict(self):
return {
'ip': self.ip_,
'weight': self.weight_,
}
def to_attr_list(self):
af, addr = _to_af_union(self.ip_)
return IpvsDestAttrList(addr_family=af,
addr=addr,
port=self.port_,
fwd_method=self.fwd_method_)
def __eq__(self, other):
return isinstance(other, Dest) and self.to_dict() == other.to_dict()
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def from_attr_list(lst, default_af=None):
return Dest(
d={
'ip': _from_af_union(lst.get('addr_family', default_af),
lst.get('addr')),
'weight': lst.get('weight'),
'port': lst.get('port'),
'fwd_method': lst.get('fwd_method'),
'counters': {
'active_conns': lst.get('active_conns'),
'inact_conns': lst.get('inact_conns'),
'persist_conns': lst.get('persist_conns'),
'conns': lst.get('stats').get('conns'),
'inpkts': lst.get('stats').get('inpkts'),
'outpkts': lst.get('stats').get('outpkts'),
'inbytes': lst.get('stats').get('inbytes'),
'outbytes': lst.get('stats').get('outbytes'),
'cps': lst.get('stats').get('cps'),
'inpps': lst.get('stats').get('inpps'),
'outpps': lst.get('stats').get('outpps'),
'inbps': lst.get('stats').get('inbps'),
'outbps': lst.get('stats').get('outbps')
}
},
validate=True,
)
class Service(object):
"""Describes a load balanced service.
"""
def __init__(self, d={}, validate=False):
self.proto_ = d.get('proto', None)
self.vip_ = d.get('vip', None)
self.port_ = d.get('port', None)
self.sched_ = d.get('sched', None)
self.fwmark_ = d.get('fwmark', None)
default_af = None
if self.vip_:
default_af = _to_af(self.vip_)
self.af_ = d.get('af', default_af)
self.counters_ = d.get('counters', {})
if validate:
self.validate()
def __repr__(self):
if self.fwmark_ is not None:
return 'Service(d=dict(fwmark=%d, sched="%s", af="%s"))' % (
self.fwmark(), self.sched(), self.af())
return 'Service(d=dict(proto="%s", vip="%s", port=%d, sched="%s"))' % (
self.proto(), self.vip(), self.port(), self.sched())
def counters(self):
return self.counters_
def af(self):
return self.af_
def fwmark(self):
return self.fwmark_
def proto(self):
return self.proto_
def proto_num(self):
return _to_proto_num(self.proto_)
def port(self):
return self.port_
def vip(self):
return self.vip_
def sched(self):
return self.sched_
def validate(self):
assert self.af_ in [socket.AF_INET, socket.AF_INET6]
if self.vip_ or self.port_ or self.proto_:
assert self.proto_.lower() in ['tcp', 'udp']
assert _validate_ip(self.vip_)
assert isinstance(self.port_, int)
assert self.port_ > 0 and self.port_ < (2 ** 16)
assert self.fwmark_ is None
else:
assert isinstance(self.fwmark_, int)
assert self.proto_ is None
assert self.port_ is None
assert self.vip_ is None
assert self.fwmark_ > 0 and self.fwmark_ < (2 ** 32)
def to_dict(self):
self.validate()
if self.fwmark_ is None:
return {
'proto': self.proto_,
'vip': self.vip_,
'port': self.port_,
'sched': self.sched_,
'af': self.af_
}
else:
return {
'fwmark': self.fwmark_,
'sched': self.sched_,
'af': self.af_
}
def to_attr_list(self):
if self.fwmark_ is None:
af, addr = _to_af_union(self.vip_)
netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
proto = self.proto_num()
return IpvsServiceAttrList(af=af, addr=addr, protocol=proto,
netmask=netmask, port=self.port_,
sched_name=self.sched_,
flags=struct.pack(str('=II'), 0, 0))
else:
netmask = ((1 << 32) - 1)
return IpvsServiceAttrList(fwmark=self.fwmark_, af=self.af_,
netmask=netmask, sched_name=self.sched_,
flags=struct.pack(str('=II'), 0, 0))
def __eq__(self, other):
return isinstance(other, Service) and self.to_dict() == other.to_dict()
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def from_attr_list(lst):
if lst.get('addr', None) is not None:
d = dict(
vip=_from_af_union(lst.get('af'), lst.get('addr')),
proto=_from_proto_num(lst.get('protocol')),
port=lst.get('port'),
sched=lst.get('sched_name'),
af=lst.get('af'),
counters={
'conns': lst.get('stats').get('conns'),
'inpkts': lst.get('stats').get('inpkts'),
'outpkts': lst.get('stats').get('outpkts'),
'inbytes': lst.get('stats').get('inbytes'),
'outbytes': lst.get('stats').get('outbytes'),
'cps': lst.get('stats').get('cps'),
'inpps': lst.get('stats').get('inpps'),
'outpps': lst.get('stats').get('outpps'),
'inbps': lst.get('stats').get('inbps'),
'outbps': lst.get('stats').get('outbps')
}
)
else:
d = dict(
fwmark=lst.get('fwmark'),
sched=lst.get('sched_name'),
af=lst.get('af'),
)
return Service(d=d, validate=True)
class Pool(object):
"""A tuple of a service and an array of dests for that service
"""
def __init__(self, d={}, validate=False):
self.service_ = Service(d.get('service', {}), validate)
self.dests_ = [Dest(x, validate) for x in d.get('dests', [])]
def service(self):
return self.service_
def dests(self):
return self.dests_
def validate(self):
self.service_.validate()
for dest in self.dests_:
dest.validate()
def to_dict(self):
self.validate()
return {
'service': self.service_.to_dict(),
'dests': [d.to_dict() for d in self.dests_],
}
@staticmethod
def from_args(service=None, dests=[]):
assert isinstance(service, Service)
assert isinstance(dests, list)
p = Pool()
p.service_ = service
p.dests_ = dests
return p
@staticmethod
def load_pools_from_json_list(lst):
return [Pool(i, True) for i in lst]
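# Example of the dict shape Pool(d=..., validate=True) and
# load_pools_from_json_list consume (addresses are placeholders):
#
#   {"service": {"proto": "tcp", "vip": "10.0.0.1", "port": 80,
#                "sched": "rr"},
#    "dests": [{"ip": "192.168.1.10", "weight": 1}]}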
class IpvsClient(object):
"""A python client to use instead of shelling out to ipvsadm
"""
def __init__(self, verbose=False):
self.verbose = verbose
self.nlsock = netlink.NetlinkSocket(verbose=verbose)
def __modify_service(self, method, vip, port, protocol, ops, **svc_kwargs):
if ops:
assert protocol == socket.IPPROTO_UDP
af, addr = _to_af_union(vip)
netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
flags = 0
if ops:
flags |= IPVS_SVC_F_ONEPACKET
out_msg = IpvsMessage(
method, flags=netlink.MessageFlags.ACK_REQUEST,
attr_list=IpvsCmdAttrList(
service=IpvsServiceAttrList(
af=af,
port=port,
protocol=protocol,
addr=addr,
netmask=netmask,
flags=struct.pack(str('=II'), flags, flags),
**svc_kwargs
)
)
)
self.nlsock.execute(out_msg)
@verbose
def add_service(self, vip, port, protocol=socket.IPPROTO_TCP,
sched_name='rr', ops=False):
self.__modify_service('new_service', vip, port, protocol, ops,
sched_name=sched_name, timeout=0)
@verbose
def del_service(self, vip, port, protocol=socket.IPPROTO_TCP):
self.__modify_service('del_service', vip, port, protocol, False)
def __modify_fwm_service(self, method, fwmark, af, **svc_kwargs):
netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
out_msg = IpvsMessage(
method, flags=netlink.MessageFlags.ACK_REQUEST,
attr_list=IpvsCmdAttrList(
service=IpvsServiceAttrList(
fwmark=fwmark,
flags=struct.pack(str('=II'), 0, 0),
af=af,
netmask=netmask,
**svc_kwargs
)
)
)
self.nlsock.execute(out_msg)
@verbose
def add_fwm_service(self, fwmark, sched_name='rr', af=socket.AF_INET):
self.__modify_fwm_service('new_service', fwmark,
sched_name=sched_name, timeout=0, af=af)
@verbose
def del_fwm_service(self, fwmark, af=socket.AF_INET):
self.__modify_fwm_service('del_service', fwmark, af=af)
def __modify_dest(self, method, vip, port, rip, rport=None,
protocol=socket.IPPROTO_TCP, **dest_kwargs):
vaf, vaddr = _to_af_union(vip)
raf, raddr = _to_af_union(rip)
rport = rport or port
out_msg = IpvsMessage(
method, flags=netlink.MessageFlags.ACK_REQUEST,
attr_list=IpvsCmdAttrList(
service=IpvsServiceAttrList(
af=vaf,
port=port,
protocol=protocol,
addr=vaddr,
),
dest=IpvsDestAttrList(
addr_family=raf,
addr=raddr,
port=rport,
**dest_kwargs
),
),
)
self.nlsock.execute(out_msg)
@verbose
def add_dest(self, vip, port, rip, rport=None,
protocol=socket.IPPROTO_TCP, weight=1, method=IPVS_TUNNELING):
self.__modify_dest('new_dest', vip, port, rip, rport,
protocol=protocol, weight=weight,
fwd_method=method, l_thresh=0, u_thresh=0)
@verbose
def update_dest(self, vip, port, rip, rport=None,
protocol=socket.IPPROTO_TCP, weight=None,
method=IPVS_TUNNELING):
self.__modify_dest('set_dest', vip, port, rip, rport, protocol,
weight=weight, l_thresh=0, u_thresh=0,
fwd_method=method)
@verbose
def del_dest(self, vip, port, rip, rport=None,
protocol=socket.IPPROTO_TCP):
self.__modify_dest('del_dest', vip, port, rip, rport, protocol)
def __modify_fwm_dest(self, method, fwmark, rip, vaf, port,
**dest_kwargs):
raf, raddr = _to_af_union(rip)
out_msg = IpvsMessage(
method, flags=netlink.MessageFlags.ACK_REQUEST,
attr_list=IpvsCmdAttrList(
service=IpvsServiceAttrList(
fwmark=fwmark,
af=vaf,
),
dest=IpvsDestAttrList(
addr_family=raf,
addr=raddr,
port=port,
**dest_kwargs
),
),
)
self.nlsock.execute(out_msg)
@verbose
def add_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, port=0, weight=1):
self.__modify_fwm_dest('new_dest', fwmark, rip, weight=weight,
port=port, vaf=vaf, l_thresh=0, u_thresh=0,
fwd_method=2)
@verbose
def update_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, weight=None,
port=0):
self.__modify_fwm_dest('set_dest', fwmark, rip, weight=weight,
vaf=vaf, port=port, l_thresh=0, u_thresh=0,
fwd_method=2)
@verbose
def del_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, port=0):
self.__modify_fwm_dest('del_dest', fwmark, rip, vaf=vaf, port=port)
def flush(self):
out_msg = IpvsMessage('flush', flags=netlink.MessageFlags.ACK_REQUEST)
self.nlsock.execute(out_msg)
def get_pools(self):
"""
Get all the pools configured
"""
pools = []
req = IpvsMessage(
'get_service', flags=netlink.MessageFlags.MATCH_ROOT_REQUEST)
for msg in self.nlsock.query(req):
svc_lst = msg.get_attr_list().get('service')
service = Service.from_attr_list(svc_lst)
dests = self.get_dests(svc_lst)
pools.append(Pool.from_args(
service=service,
dests=dests
))
return pools
def get_pool(self, svc_lst):
s = self.get_service(svc_lst)
if s is None:
return None
dests = self.get_dests(s.to_attr_list())
return Pool.from_args(service=s, dests=dests)
def get_service(self, svc_lst):
out_msg = IpvsMessage(
'get_service', flags=netlink.MessageFlags.REQUEST,
attr_list=IpvsCmdAttrList(service=svc_lst))
try:
res = self.nlsock.query(out_msg)
svc_lst = res[0].get_attr_list().get('service')
return Service.from_attr_list(svc_lst)
except RuntimeError:
# If the query failed because the service is not present
# simply return None
return None
def get_dests(self, svc_lst):
assert isinstance(svc_lst, IpvsServiceAttrList)
dests = []
out_msg = IpvsMessage(
'get_dest', flags=netlink.MessageFlags.MATCH_ROOT_REQUEST,
attr_list=IpvsCmdAttrList(service=svc_lst)
)
try:
for dst_msg in self.nlsock.query(out_msg):
dst_lst = dst_msg.get_attr_list().get('dest')
dests.append(Dest.from_attr_list(dst_lst, svc_lst.get('af')))
return dests
except RuntimeError:
# Typically happens if the service is not defined
return None
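# Hedged usage sketch: requires root and the ip_vs kernel module; the VIP and
# real-server addresses are placeholders.
if __name__ == '__main__':
    client = IpvsClient()
    client.add_service('10.0.0.1', 80, protocol=socket.IPPROTO_TCP,
                       sched_name='wrr')
    client.add_dest('10.0.0.1', 80, '192.168.1.10', weight=10)
    for pool in client.get_pools():
        print(pool.service(), pool.dests())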
# === src/architectures/gqn.py | ae-foster/cresp @ 02158423 | MIT | 1,814 bytes | 5 stars ===
import torch
from torch import nn
from torch.nn import functional as F
class GQN_Pool(nn.Module):
def __init__(self, representation_dim, covariate_dim, net=None, **kwargs):
assert representation_dim == 256
assert (isinstance(net, nn.Identity) or (net is None))
super(GQN_Pool, self).__init__()
self.conv1 = nn.Conv2d(3, 256, kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(256, 256, kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(128, 256, kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(256 + covariate_dim, 256, kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(256 + covariate_dim, 128, kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(256, 256, kernel_size=1, stride=1)
self.pool = nn.AvgPool2d(16)
def forward(self, x, xi=None):
# Residual connection
skip_in = F.relu(self.conv1(x))
skip_out = F.relu(self.conv2(skip_in))
r = F.relu(self.conv3(skip_in))
r = F.relu(self.conv4(r)) + skip_out
if xi is not None:
            # covariate_dim must match xi's feature size here; use
            # covariate_dim=0 (and xi=None) to take the else branch instead
# Broadcast / upsample
xi = xi.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 16, 16)
            # Concatenate the broadcast covariates onto the feature map
skip_in = torch.cat((r, xi), dim=1)
else:
skip_in = r
skip_out = F.relu(self.conv5(skip_in))
r = F.relu(self.conv6(skip_in))
r = F.relu(self.conv7(r)) + skip_out
r = F.relu(self.conv8(r))
# Pool
r = self.pool(r)
r = r.squeeze(-1).squeeze(-1)
return r
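# Minimal smoke test (a sketch; the 64x64 input size is an assumption, chosen
# so the feature map reaching the covariate broadcast and AvgPool2d(16) above
# is 16x16):
if __name__ == "__main__":
    model = GQN_Pool(representation_dim=256, covariate_dim=4)
    images = torch.randn(2, 3, 64, 64)  # batch of two RGB images
    covariates = torch.randn(2, 4)      # one covariate vector per image
    print(model(images, covariates).shape)  # expected: torch.Size([2, 256])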
| 36.28 | 92 | 0.597574 | 274 | 1,814 | 3.828467 | 0.273723 | 0.06101 | 0.068637 | 0.047664 | 0.387035 | 0.312679 | 0.197331 | 0.125834 | 0.125834 | 0 | 0 | 0.081694 | 0.271224 | 1,814 | 49 | 93 | 37.020408 | 0.7118 | 0.072767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0 | 0.088235 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
642d803688df88f4bfdeaaef4cc078fe36396c9e | 622 | py | Python | test/example/weather.py | lawsn/crawlingpy | d8737fb4964bf2492fbd91557cd6a152dd80889a | [
"Apache-2.0"
] | null | null | null | test/example/weather.py | lawsn/crawlingpy | d8737fb4964bf2492fbd91557cd6a152dd80889a | [
"Apache-2.0"
] | null | null | null | test/example/weather.py | lawsn/crawlingpy | d8737fb4964bf2492fbd91557cd6a152dd80889a | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
url = 'https://weather.com/weather/tenday/l/'
# location slug appended to the ten-day forecast URL
param = 'b757e0078b0b135DetailsSummary--tempValue--RcZzi0973ea8930d24ef111c7b8457939f4e2046fc8bbe48119f17'
response = requests.get(url + param)
print(response.text)
soup = BeautifulSoup(response.text, 'html.parser')
dateTime = soup.select('h2.DetailsSummary--daypartName--1Mebr')
weather = soup.select('span.DetailsSummary--lowTempValue--1DlJK')
print(weather)
dateTimeLen = len(dateTime)
weatherLen = len(weather)
print(weatherLen)
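# A working version of the commented-out loop below (a sketch; it assumes both
# selectors matched and returned lists of equal length):
for day, temp in zip(dateTime, weather):
    print(day.text, temp.text)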
# for i in range(0, dateTimeLen):
# # print(dateTime[i].text)
# print(weather[i]) | 25.916667 | 106 | 0.766881 | 68 | 622 | 7.014706 | 0.573529 | 0.050314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.089286 | 0.099678 | 622 | 24 | 107 | 25.916667 | 0.7625 | 0.131833 | 0 | 0 | 0 | 0 | 0.412313 | 0.322761 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
642dcacaee4a8f97f5492f6645961bdd1c1df4eb | 2,857 | py | Python | CreateImages.py | AlJ95/VanPixel-CNN-approach | ce49af871d31211263ea0a14840ed3456b024e23 | [
"CC0-1.0"
] | null | null | null | CreateImages.py | AlJ95/VanPixel-CNN-approach | ce49af871d31211263ea0a14840ed3456b024e23 | [
"CC0-1.0"
] | null | null | null | CreateImages.py | AlJ95/VanPixel-CNN-approach | ce49af871d31211263ea0a14840ed3456b024e23 | [
"CC0-1.0"
] | null | null | null | import random as r
import cv2
import numpy as np
from skimage.util import random_noise
def create_figs(image, image_pre_result, no_of_objects=1):
for i in range(no_of_objects):
        # randint needs integer bounds, so use floor division
        width = r.randint(0, image.shape[0] // 2)
        height = r.randint(0, image.shape[1] // 2)
xpos = r.randint(0, image.shape[1])
ypos = r.randint(0, image.shape[0])
group_number = i + 1
        # cv2 drawing functions expect a plain scalar/tuple for the color
        rgb = (r.random(), r.random(), r.random(), 1)
randInt = r.randint(0, 1)
if randInt == 0:
cv2.ellipse(image, (xpos, ypos), (width, height), 0,
0, 360, rgb, -1)
cv2.ellipse(image_pre_result, (xpos, ypos),
(width, height), 0, 0, 360,
(group_number + 1) * 25, -1)
else:
cv2.rectangle(image, (xpos, ypos),
(xpos + width, ypos + height), rgb, -1)
cv2.rectangle(image_pre_result, (xpos, ypos),
(xpos + width, ypos + height),
(group_number + 1) * 25, -1)
return image, image_pre_result
def produce_image(no_of_images, sp_prob=0.05, dim=(1080, 1920, 4),
grad="Laplacian", kernel_size=3):
image = np.ones(dim)
image_pre_result = np.full(image.shape[0:2], 1)
image, image_pre_result = create_figs(image, image_pre_result,
no_of_images)
if grad == "Laplacian":
gradient = cv2.Laplacian(image.astype('float64'), cv2.CV_64F,
ksize=kernel_size)[:, :, 0:3].clip(0, 1)
elif grad == "Scharr":
dx = cv2.Scharr(image.astype('float64'),
cv2.CV_64F, dx=1, dy=0)[:, :, 0:3]
dy = cv2.Scharr(image.astype('float64'),
cv2.CV_64F, dx=0, dy=1)[:, :, 0:3]
gradient = np.where(np.abs(dx) >= np.abs(dy),
np.abs(dx), np.abs(dy)).clip(0, 1)
else:
dx = cv2.Sobel(image.astype('float64'), cv2.CV_64F,
dx=1, dy=0, ksize=kernel_size)[:, :, 0:3]
dy = cv2.Sobel(image.astype('float64'), cv2.CV_64F,
dx=0, dy=1, ksize=kernel_size)[:, :, 0:3]
gradient = np.where(np.abs(dx) >= np.abs(dy), np.abs(dx),
np.abs(dy)).clip(0, 1)
gradientRGB = np.amax(gradient, axis=2)
if sp_prob > r.random():
image = random_noise(image, mode='s&p', amount=0.05)
gradient = cv2.Laplacian(image.astype('float64'),
cv2.CV_64F, ksize=11)[:, :, 0:3]
gradient = np.abs(gradient)
gradient /= np.max(gradient)
gradientRGB = np.where(abs(gradientRGB) < 0.1, 0, 1)
return image, gradient, gradientRGB
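# Example invocation (a minimal sketch; the "Sobel" gradient and the shape
# printout are illustrative choices, not the only supported usage):
if __name__ == "__main__":
    img, grad, grad_rgb = produce_image(no_of_images=3, grad="Sobel")
    print(img.shape, grad.shape, grad_rgb.shape)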
| 40.239437 | 74 | 0.500525 | 375 | 2,857 | 3.701333 | 0.221333 | 0.032421 | 0.070605 | 0.090778 | 0.51585 | 0.442363 | 0.345821 | 0.311239 | 0.263689 | 0.239193 | 0 | 0.069617 | 0.351418 | 2,857 | 70 | 75 | 40.814286 | 0.679439 | 0 | 0 | 0.068966 | 0 | 0 | 0.024758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
642dd59683d2234e13b5d5f7591dbab7c437923f | 3,697 | py | Python | development/testing/_modules/auxiliary_scalability.py | boryana-ilieva/respy | c3f600eefbb54d4f34ae2cc1e52d4d32c03ba0dc | [
"MIT"
] | null | null | null | development/testing/_modules/auxiliary_scalability.py | boryana-ilieva/respy | c3f600eefbb54d4f34ae2cc1e52d4d32c03ba0dc | [
"MIT"
] | null | null | null | development/testing/_modules/auxiliary_scalability.py | boryana-ilieva/respy | c3f600eefbb54d4f34ae2cc1e52d4d32c03ba0dc | [
"MIT"
] | 1 | 2020-10-31T19:40:22.000Z | 2020-10-31T19:40:22.000Z | from datetime import datetime
import datetime as dt
import shlex
import os
from auxiliary_shared import strfdelta
from config import SPEC_DIR
import respy
def get_actual_evaluations():
with open('est.respy.info', 'r') as infile:
for line in infile.readlines():
list_ = shlex.split(line)
if not list_:
continue
if not len(list_) == 4:
continue
if list_[2] == 'Evaluations':
return int(list_[3])
raise AssertionError
def run(spec_dict, fname, grid_slaves):
""" Run an estimation task that allows to get a sense of the scalability
of the code.
"""
dirname = fname.replace('.ini', '')
os.mkdir(dirname)
os.chdir(dirname)
respy_obj = respy.RespyCls(SPEC_DIR + fname)
respy_obj.unlock()
respy_obj.set_attr('is_debug', False)
respy_obj.set_attr('file_est', '../data.respy.dat')
for key_ in spec_dict.keys():
respy_obj.set_attr(key_, spec_dict[key_])
respy_obj.lock()
maxfun = respy_obj.get_attr('maxfun')
min_slave = min(grid_slaves)
# Simulate the baseline dataset, which is used regardless of the number
# of slaves.
respy.simulate(respy_obj)
respy_obj.write_out()
# Iterate over the grid of requested slaves.
for num_slaves in grid_slaves:
dirname = '{:}'.format(num_slaves)
os.mkdir(dirname)
os.chdir(dirname)
respy_obj.unlock()
respy_obj.set_attr('num_procs', num_slaves + 1)
if num_slaves > 1:
respy_obj.set_attr('is_parallel', True)
else:
respy_obj.set_attr('is_parallel', False)
respy_obj.lock()
respy_obj.write_out()
start_time = datetime.now()
respy.estimate(respy_obj)
finish_time = datetime.now()
if num_slaves == min_slave:
duration_baseline = finish_time - start_time
num_evals = get_actual_evaluations()
os.chdir('../')
record_information(start_time, finish_time, num_slaves, maxfun,
duration_baseline, num_evals, min_slave)
os.chdir('../')
def record_information(start_time, finish_time, num_slaves, maxfun,
duration_baseline, num_evals, min_slave):
""" Record the information on execution time, which involves a lot of
formatting of different data types.
"""
fmt = '{:>15} {:>25} {:>25} {:>15} {:>15} {:>15}\n'
if not os.path.exists('scalability.respy.info'):
with open('scalability.respy.info', 'a') as out_file:
out_file.write('\n Benchmarking a maximum of ' + str(
maxfun) + ' evaluations\n\n')
out_file.write(
fmt.format(*['Slaves', 'Start', 'Stop', 'Duration',
'Benchmark', 'Evaluations']))
out_file.write('\n')
start_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
finish_str = finish_time.strftime("%Y-%m-%d %H:%M:%S")
duration_time = finish_time - start_time
duration_actual_str = strfdelta(duration_time, "{H:02}:{M:02}:{S:02}")
duration_linear_str = '---'
if not num_slaves == min_slave:
duration_linear_secs = duration_baseline.total_seconds() / (
num_slaves / max(min_slave, 1))
duration_linear = dt.timedelta(seconds=duration_linear_secs)
duration_linear_str = strfdelta(duration_linear, "{H:02}:{M:02}:{S:02}")
with open('scalability.respy.info', 'a') as out_file:
line = [num_slaves, start_str, finish_str, duration_actual_str,
duration_linear_str, num_evals]
out_file.write(fmt.format(*line))
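# Example of the table written to scalability.respy.info (values are
# illustrative only):
#
#     Slaves   Start                 Stop                  Duration   Benchmark   Evaluations
#          2   2016-09-04 10:00:00   2016-09-04 10:05:12   00:05:12   00:04:30    50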
| 30.056911 | 80 | 0.614823 | 478 | 3,697 | 4.508368 | 0.290795 | 0.059397 | 0.030626 | 0.041763 | 0.260789 | 0.217169 | 0.185615 | 0.162413 | 0.112297 | 0.07703 | 0 | 0.011038 | 0.264809 | 3,697 | 122 | 81 | 30.303279 | 0.781825 | 0.083311 | 0 | 0.2 | 0 | 0 | 0.116279 | 0.019678 | 0 | 0 | 0 | 0 | 0.0125 | 1 | 0.0375 | false | 0 | 0.0875 | 0 | 0.1375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6430502b11619ea1bdfdd8e1744178dab3548462 | 2,006 | py | Python | src/app/database/model/Revisions.py | william920429/repair_system | 27b5f4612a48a326943d23c2acce326070134228 | [
"MIT"
] | 1 | 2021-05-18T17:28:09.000Z | 2021-05-18T17:28:09.000Z | src/app/database/model/Revisions.py | SiriusKoan/repair_system | 6524e7a8d5954b6e16b34b9d88db9d455ef3dd1d | [
"MIT"
] | 3 | 2021-05-02T06:00:47.000Z | 2021-05-05T11:42:58.000Z | src/app/database/model/Revisions.py | SiriusKoan/repair_system | 6524e7a8d5954b6e16b34b9d88db9d455ef3dd1d | [
"MIT"
] | 3 | 2021-04-15T13:03:36.000Z | 2021-05-06T03:13:59.000Z | import datetime
from .common import db, timeformat
class Revisions(db.Model):
"""
After the admin views the reports, they will make a revision record.
The table is connected to `Records`, `Users` and `Statuses`.
id: PK.
record_id: `id` in `Records` table.
user_id: `id` in `Users` table.
status_id: `id` in `Statuses` table.
insert_time: Revision time. The value will be automatically added.
description: If the admins fail to find a status for the situation, the field can be used.
"""
__tablename__ = "revisions"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
record_id = db.Column(db.ForeignKey("records.id"), nullable=False)
user_id = db.Column(db.ForeignKey("users.id"), nullable=False)
status_id = db.Column(db.ForeignKey("statuses.id"), nullable=False)
insert_time = db.Column(db.TIMESTAMP, nullable=False, index=True)
description = db.Column(db.String(255), nullable=False)
def __init__(self, id, record_id, user_id, status_id, insert_time, description):
self.id = id
self.record_id = record_id
self.user_id = user_id
self.status_id = status_id
self.insert_time = datetime.datetime.strptime(insert_time, timeformat)
self.description = description
def __repr__(self):
return (
"Revisions(id={id},record_id={record_id},user_id={user_id},status_id={status_id},insert_time='{mytime}',description='{description}')"
.format(mytime=self.insert_time.strftime(timeformat), **self.__dict__)
)
@classmethod
def new(cls, record_id, user_id, status_id, description, insert_time=None):
if insert_time is None:
insert_time = datetime.datetime.now().strftime(timeformat)
return cls(
id=None,
record_id=record_id,
user_id=user_id,
status_id=status_id,
insert_time=insert_time,
description=description[:255]
)
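# A minimal usage sketch (hedged: committing through db.session assumes a
# Flask-SQLAlchemy application context, which this module does not set up):
#
#     rev = Revisions.new(record_id=1, user_id=2, status_id=3,
#                         description="Replaced the broken part")
#     db.session.add(rev)
#     db.session.commit()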
| 37.849057 | 145 | 0.664008 | 264 | 2,006 | 4.82197 | 0.295455 | 0.094266 | 0.043991 | 0.037706 | 0.180676 | 0.119403 | 0.084839 | 0.084839 | 0.084839 | 0.084839 | 0 | 0.003859 | 0.224826 | 2,006 | 52 | 146 | 38.576923 | 0.814791 | 0.1999 | 0 | 0 | 0 | 0.029412 | 0.108264 | 0.083921 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.058824 | 0.029412 | 0.441176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6432e2af24456be3ab041cf5afca3a2790d37a5b | 3,093 | py | Python | libs/senseAll.py | WyattSP/Remote-Air-Quality-Station | 908555591f6f9f183c68df01c6569171019e2246 | [
"MIT"
] | null | null | null | libs/senseAll.py | WyattSP/Remote-Air-Quality-Station | 908555591f6f9f183c68df01c6569171019e2246 | [
"MIT"
] | null | null | null | libs/senseAll.py | WyattSP/Remote-Air-Quality-Station | 908555591f6f9f183c68df01c6569171019e2246 | [
"MIT"
] | null | null | null | from sds011sensor import *
import aqi
import time
from datetime import datetime
import argparse
import os
import csv
import timeit
from sense_hat import SenseHat
#Initate arg parser
my_parser = argparse.ArgumentParser(description='Input Parameters')
#Create args
my_parser.add_argument('-a',
                       type = int,
                       default = 3,
                       help = 'samples to average over (default 3)')
my_parser.add_argument('-r',
                       type = int,
                       default = 1,
                       help = 'air quality runs, 2s between samples (default 1)')
#Execute the parse_args()
args = my_parser.parse_args()
input_a = args.a #average
input_r = args.r #runs
#Create sensor instance
sensor = SDS011("/dev/ttyUSB0")
#Initate sense HAT
sense = SenseHat()
#Define LEDs
def start_program():
sense.clear()
sense.show_letter(str('S'))
time.sleep(3)
sense.clear()
return
def exit_program():
sense.clear()
sense.show_letter(str('X'))
time.sleep(3)
sense.clear()
return
B = (0, 0, 225)  # blue
O = (0, 0, 0)    # off; used by the LED signal pattern in collecting()
def collecting():
sense.clear()
signal = [
O, O, O, O, O, O, O, O,
O, O, O, O, O, O, O, O,
O, O, O, O, O, O, O, O,
O, O, O, B, B, O, O, O,
O, O, O, B, B, O, O, O,
O, O, O, O, O, O, O, O,
O, O, O, O, O, O, O, O,
O, O, O, O, O, O, O, O,
]
sense.set_pixels(signal)
time.sleep(1)
sense.clear()
return
def log_senser_time(start_time,end_time):
with open("sds_log.txt", 'a') as file:
t = end_time-start_time
file.write(f"Runtime {t} s on {datetime.now()}\n")
#data collection
def air_data(n = 3, runs = 1):
start_program()
sensor.sleep(sleep=False)
start = timeit.default_timer()
time.sleep(10)
os.chdir('/home/pi/AQ/sensor/')
with open("aq_log_%s.txt" % name, "w") as csvfile:
savefile = csv.writer(csvfile,delimiter=',')
try:
for i in range(runs):
pm_2_5 = 0
pm_10 = 0
for j in range(n):
x = sensor.query()
pm_2_5 = pm_2_5 + x[0]
pm_10 = pm_10 + x[1]
time.sleep(2)
pm_2_5 = round(pm_2_5/n, 1)
pm_10 = round(pm_10/n, 1)
aqi_2_5 = aqi.to_iaqi(aqi.POLLUTANT_PM25, str(pm_2_5))
aqi_10 = aqi.to_iaqi(aqi.POLLUTANT_PM10, str(pm_10))
temp = sense.get_temperature()
hum = sense.get_humidity()
press = sense.get_pressure()
outp = (datetime.now(),pm_2_5,aqi_2_5,pm_10,aqi_10,hum, temp, press)
savefile.writerow(outp)
print(outp)
except KeyboardInterrupt:
exit_program()
print(" Sampling Terminated")
pass
sensor.sleep(sleep=True)
end = timeit.default_timer()
log_senser_time(start,end)
return(print(" Sampling Complete"))
if __name__ == "__main__":
print("runtime estimated %s" % (input_r*input_a+30))
time.sleep(1)
air_data(input_a,input_r)
print("Results saved to aq_log")
exit_program()
| 24.164063 | 84 | 0.558358 | 451 | 3,093 | 3.649667 | 0.312639 | 0.069259 | 0.09842 | 0.123937 | 0.138518 | 0.113001 | 0.081409 | 0.038882 | 0.038882 | 0.038882 | 0 | 0.033412 | 0.312965 | 3,093 | 127 | 85 | 24.354331 | 0.741176 | 0.041707 | 0 | 0.263158 | 0 | 0 | 0.093063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.010526 | 0.094737 | 0 | 0.178947 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643504529c3cf23bc6fd859398bf2d84dc5211e8 | 4,373 | py | Python | validation/cta-1dc/make.py | QRemy/gammapy-benchmarks | 7f6170e88284958056fbdf468fb890787a13f153 | [
"BSD-3-Clause"
] | null | null | null | validation/cta-1dc/make.py | QRemy/gammapy-benchmarks | 7f6170e88284958056fbdf468fb890787a13f153 | [
"BSD-3-Clause"
] | null | null | null | validation/cta-1dc/make.py | QRemy/gammapy-benchmarks | 7f6170e88284958056fbdf468fb890787a13f153 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Run Gammapy validation: CTA 1DC"""
import logging
import yaml
import matplotlib.pyplot as plt
from gammapy.analysis import Analysis, AnalysisConfig
def target_config3d(config_file, target_config_file, tag):
"""Create analyis configuration for out source."""
targets_config_ = yaml.safe_load(open(target_config_file))
targets_config = {}
for conf in targets_config_: # define tag as key
targets_config[conf["tag"]] = conf
config = AnalysisConfig.from_yaml(config_file)
config_dict = config.settings
config_dict["observations"]["filters"][0]["frame"] = targets_config[tag]["frame"]
config_dict["observations"]["filters"][0]["lon"] = targets_config[tag]["lon"]
config_dict["observations"]["filters"][0]["lat"] = targets_config[tag]["lat"]
config_dict["observations"]["filters"][0]["radius"] = targets_config[tag]["radius"]
config_dict["observations"]["filters"][0]["border"] = targets_config[tag]["radius"]
config_dict["datasets"]["geom"]["skydir"] = [
float(targets_config[tag]["lon"].strip(" deg")),
float(targets_config[tag]["lat"].strip(" deg")),
]
config_dict["datasets"]["geom"]["axes"][0]["lo_bnd"] = targets_config[tag]["emin"]
config_dict["datasets"]["geom"]["axes"][0]["hi_bnd"] = targets_config[tag]["emax"]
config_dict["datasets"]["geom"]["axes"][0]["nbin"] = targets_config[tag]["nbin"]
config_dict["datasets"]["geom"]["axes"][0]["nbin"] = targets_config[tag]["nbin"]
config_dict["flux-points"]["fp_binning"]["lo_bnd"] = targets_config[tag]["emin"]
config_dict["flux-points"]["fp_binning"]["hi_bnd"] = targets_config[tag]["emax"]
config_dict["flux-points"]["fp_binning"]["nbin"] = targets_config[tag]["nbin"]
config_dict["flux-points"]["fp_binning"]["lo_bnd"] = targets_config[tag]["emin"]
config_dict["flux-points"]["fp_binning"]["hi_bnd"] = targets_config[tag]["emax"]
config_dict["flux-points"]["fp_binning"]["nbin"] = targets_config[tag]["nbin"]
config_dict["fit"]["fit_range"]["min"] = str(targets_config[tag]["emin"]) + " TeV"
config_dict["fit"]["fit_range"]["max"] = str(targets_config[tag]["emax"]) + " TeV"
config.update_settings(config=config_dict)
return config
def run_3d(name):
"""Run 3D analysis for one source."""
logging.info(f"run3d: {name}")
mode = "3d"
config_file = f"config{mode}.yaml"
target_config_file = f"targets.yaml"
model_file = f"model{mode}_{name}.yaml"
outdir = f"results/{name}"
config = target_config3d(config_file, target_config_file, name)
analysis = Analysis(config)
analysis.get_observations()
conf = config.settings["observations"]["filters"][0]
nb, lon, lat, rad = (
len(analysis.observations.ids),
conf["lon"],
conf["lat"],
conf["radius"],
)
logging.info(f"{nb} observations found in {rad} around {lon}, {lat} ")
analysis.get_datasets()
analysis.set_model(filename=model_file)
logging.info(analysis.model)
analysis.run_fit()
logging.info(analysis.fit_result.parameters.to_table())
analysis.fit_result.parameters.to_table().write(
f"{outdir}/{name}_{mode}_bestfit.dat", format="ascii", overwrite=True
)
analysis.get_flux_points(source=f"{name}")
analysis.flux_points.write(f"{outdir}/{name}_{mode}_fluxpoints.fits")
plt.figure(figsize=(5, 5))
analysis.datasets["stacked"].counts.sum_over_axes().plot(add_cbar=True)
plt.savefig(f"{outdir}/{name}_{mode}_counts.png", bbox_inches="tight")
plt.figure(figsize=(5, 5))
analysis.datasets["stacked"].plot_residuals(
method="diff/sqrt(model)", vmin=-0.5, vmax=0.5
)
plt.savefig(f"{outdir}/{name}_{mode}_residuals.png", bbox_inches="tight")
plt.figure(figsize=(8, 5))
ax_sed, ax_residuals = analysis.flux_points.peek()
plt.savefig(f"{outdir}/{name}_{mode}_fluxpoints.png", bbox_inches="tight")
def main():
targets = "all"
if targets == "all":
targets = ["cas_a", "hess_j1702"]
else:
targets = targets.split(",")
for target in targets:
run_3d(target)
if __name__ == "__main__":
main()
| 37.376068 | 87 | 0.661788 | 572 | 4,373 | 4.835664 | 0.246504 | 0.108098 | 0.109906 | 0.041215 | 0.488431 | 0.404555 | 0.322849 | 0.281634 | 0.245119 | 0.245119 | 0 | 0.00912 | 0.147496 | 4,373 | 116 | 88 | 37.698276 | 0.732833 | 0.034759 | 0 | 0.174419 | 0 | 0 | 0.232691 | 0.055675 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034884 | false | 0 | 0.046512 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643ebb9e00e5a46b9e13bfbe41b37466798f580a | 2,994 | py | Python | layers/dilated_convolution.py | giovgiac/neptune | a3cc256d3f3d833be2b8654131726ab48482f5e1 | [
"MIT"
] | 4 | 2021-03-06T10:00:43.000Z | 2022-02-21T16:09:48.000Z | layers/dilated_convolution.py | giovgiac/neptune | a3cc256d3f3d833be2b8654131726ab48482f5e1 | [
"MIT"
] | null | null | null | layers/dilated_convolution.py | giovgiac/neptune | a3cc256d3f3d833be2b8654131726ab48482f5e1 | [
"MIT"
] | 1 | 2020-12-21T14:50:26.000Z | 2020-12-21T14:50:26.000Z | # dilated_convolution.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from layers.normalization import InstanceNormalization
from typing import Tuple, Union
import tensorflow as tf
class DilatedConv2D(tf.keras.layers.Layer):
def __init__(self, filters: int, kernel_size: Union[int, Tuple[int, int]], padding: str,
activation_fn: type(tf.keras.layers.Layer) = tf.keras.layers.ELU, name=None) -> None:
super(DilatedConv2D, self).__init__(trainable=True, name=name)
# Save parameters to class.
self.activation_fn = activation_fn
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
# Create dilated convolution sublayers.
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, dilation_rate=1, padding=padding,
use_bias=False)
self.norm_1 = InstanceNormalization(axis=-1)
if self.activation_fn:
self.actv_1 = activation_fn()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, dilation_rate=2, padding=padding,
use_bias=False)
self.norm_2 = InstanceNormalization(axis=-1)
if self.activation_fn:
self.actv_2 = activation_fn()
self.conv_3 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, dilation_rate=4, padding=padding,
use_bias=False)
self.norm_3 = InstanceNormalization(axis=-1)
if self.activation_fn:
self.actv_3 = activation_fn()
self.conv_4 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, dilation_rate=8, padding=padding,
use_bias=False)
self.norm_4 = InstanceNormalization(axis=-1)
if self.activation_fn:
self.actv_4 = activation_fn()
self.conv_f = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,
activity_regularizer=tf.keras.regularizers.l1(0.1))
self.concat = tf.keras.layers.Concatenate()
@tf.function
def call(self, inputs, **kwargs) -> tf.Tensor:
c1 = self.conv_1(inputs)
c1 = self.norm_1(c1)
if self.activation_fn:
c1 = self.actv_1(c1)
c2 = self.conv_2(inputs)
c2 = self.norm_2(c2)
if self.activation_fn:
c2 = self.actv_2(c2)
c3 = self.conv_3(inputs)
c3 = self.norm_3(c3)
if self.activation_fn:
c3 = self.actv_3(c3)
c4 = self.conv_4(inputs)
c4 = self.norm_4(c4)
if self.activation_fn:
c4 = self.actv_4(c4)
# Concatenate all blocks and return final convolution.
ct = self.concat([c1, c2, c3, c4])
return self.conv_f(ct)
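# Minimal smoke test (a sketch; the 32x32 input is arbitrary, since every
# sublayer here is built with the 'same' padding passed in and therefore
# preserves the spatial size):
if __name__ == "__main__":
    layer = DilatedConv2D(filters=16, kernel_size=3, padding="same")
    x = tf.random.normal((2, 32, 32, 8))
    print(layer(x).shape)  # expected: (2, 32, 32, 16)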
| 38.384615 | 120 | 0.618236 | 374 | 2,994 | 4.719251 | 0.224599 | 0.101983 | 0.066289 | 0.081586 | 0.372238 | 0.372238 | 0.372238 | 0.295184 | 0.295184 | 0.177337 | 0 | 0.030942 | 0.287575 | 2,994 | 77 | 121 | 38.883117 | 0.796531 | 0.046426 | 0 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.105263 | 0 | 0.175439 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643f3606fb03079a2aebe0691fe7ccf424370f28 | 1,605 | py | Python | controller/models/models.py | zhouzhibo-byte/odoo14 | 8b9df536e09d56d036421cd12fe141116a545dd8 | [
"Unlicense"
] | null | null | null | controller/models/models.py | zhouzhibo-byte/odoo14 | 8b9df536e09d56d036421cd12fe141116a545dd8 | [
"Unlicense"
] | null | null | null | controller/models/models.py | zhouzhibo-byte/odoo14 | 8b9df536e09d56d036421cd12fe141116a545dd8 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import models, fields,api
class LibraryBook(models.Model):
_name = 'library.book1'
_description = 'Library Book'
_order = 'date_release desc, name'
name = fields.Char('Title', required=True)
short_name = fields.Char('Short Title', required=True)
date_release = fields.Date('Release Date')
author_ids = fields.Many2many('res.partner', string='Authors')
    state = fields.Selection([('txrub', 'Fill in onboarding form'),
                              ('itwh', 'IT maintenance'),
                              ('wyqr', 'Clerk confirmation')], 'State', default='txrub', index=True, copy=False,
                              track_visibility='onchange')
def get_portal_url(self):
portal_link = "%s/?db=%s" % (
self.env['ir.config_parameter'].sudo().get_param('web.base.url'), self.env.cr.dbname)
return portal_link
def preview_invoice(self):
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': self.get_portal_url(),
}
@api.onchange('state')
def get_t(self):
        # log the portal URL when the state changes, for debugging
        print(self.get_portal_url())
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': self.get_portal_url(),
}
def get_stock_file(self):
return {'type': 'ir.actions.act_url',
'url': '/controllers/books',
'target': 'self', }
#
# def book_return(self):
# self.ensure_one()
# self.write({
# 'state': 'returned'
# }) | 31.470588 | 95 | 0.530841 | 173 | 1,605 | 4.751445 | 0.491329 | 0.043796 | 0.058394 | 0.069343 | 0.1618 | 0.1618 | 0.131387 | 0.131387 | 0.131387 | 0.131387 | 0 | 0.012635 | 0.309657 | 1,605 | 51 | 96 | 31.470588 | 0.729242 | 0.07352 | 0 | 0.216216 | 0 | 0 | 0.212838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.027027 | 0.027027 | 0.486486 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643f658432e2b5800e1eef8b53a2ef7b6a24acc9 | 472 | py | Python | 1.py | aayushgupta97/advent_of_code_2020 | dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c | [
"MIT"
] | null | null | null | 1.py | aayushgupta97/advent_of_code_2020 | dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c | [
"MIT"
] | null | null | null | 1.py | aayushgupta97/advent_of_code_2020 | dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c | [
"MIT"
] | null | null | null | with open("data/input_1.txt", 'r') as f:
input_data = f.readlines()
data = [int(i.replace("\n", "")) for i in input_data]
print(len(data))  # sanity check: number of entries read
def part_1():
for i in data:
for j in data:
if i + j == 2020:
print(i * j)
def part_2():
for i in data:
for j in data:
for k in data:
if i + j + k == 2020:
print(i*j*k)
exit()
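# Brute-force search over all pairs/triples: part_1 is O(n^2) and part_2 is
# O(n^3), which is fine for a small puzzle input.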
part_2() | 19.666667 | 53 | 0.423729 | 71 | 472 | 2.732394 | 0.366197 | 0.154639 | 0.092784 | 0.103093 | 0.278351 | 0.206186 | 0.206186 | 0.206186 | 0 | 0 | 0 | 0.045627 | 0.442797 | 472 | 24 | 54 | 19.666667 | 0.692015 | 0 | 0 | 0.235294 | 0 | 0 | 0.040169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.117647 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643f6db20affdc124c5bd15106d9960bdbf837f9 | 661 | py | Python | Pdf to Audio Book Maker.py | farhan1503001/Beginners-Python-Projects | 67c2cfc426cad6dcbb148252b9845aeaef78cb57 | [
"MIT"
] | null | null | null | Pdf to Audio Book Maker.py | farhan1503001/Beginners-Python-Projects | 67c2cfc426cad6dcbb148252b9845aeaef78cb57 | [
"MIT"
] | null | null | null | Pdf to Audio Book Maker.py | farhan1503001/Beginners-Python-Projects | 67c2cfc426cad6dcbb148252b9845aeaef78cb57 | [
"MIT"
] | null | null | null | import pyttsx3
import PyPDF2
from tkinter.filedialog import *
#Ask for book file
book_file=askopenfilename()
pdfreader=PyPDF2.PdfFileReader(book_file)#Initialize Reading the pdf file
num_pages=pdfreader.numPages#Finds the number of pages in that book
#Initialize the text to speech engine once, before the loop
text_to_speech = pyttsx3.init()
#Now iterate through each and every page and queue it for audio
for page in range(num_pages):
    book_page = pdfreader.getPage(page)
    #Extract the text from the book's page
    text = book_page.extractText()
    #Queue the page text; runAndWait() below speaks the whole queue
    text_to_speech.say(text)
text_to_speech.runAndWait() | 38.882353 | 74 | 0.765507 | 99 | 661 | 4.989899 | 0.484848 | 0.048583 | 0.097166 | 0.072874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007326 | 0.173979 | 661 | 17 | 75 | 38.882353 | 0.897436 | 0.400908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
643fa9180d34b45a3bfb22d963256b7d5985659e | 2,857 | py | Python | cose/messages/macmessage.py | mpzaborski/pycose | 1c63b272f551a69e411063937a070c1c0479f8e0 | [
"BSD-3-Clause"
] | null | null | null | cose/messages/macmessage.py | mpzaborski/pycose | 1c63b272f551a69e411063937a070c1c0479f8e0 | [
"BSD-3-Clause"
] | null | null | null | cose/messages/macmessage.py | mpzaborski/pycose | 1c63b272f551a69e411063937a070c1c0479f8e0 | [
"BSD-3-Clause"
] | null | null | null | """
MACed Message with Recipients
COSE_Mac = [
Headers,
payload : bstr / nil,
tag : bstr,
recipients :[+COSE_recipient]
]
"""
from typing import Optional, List
import cbor2
from cose import CoseMessage
from cose.messages import cosemessage, maccommon
from cose.attributes.algorithms import CoseAlgorithms
from cose.keys.symmetric import SymmetricKey
from cose.messages.recipient import CoseRecipient, RcptParams
@cosemessage.CoseMessage.record_cbor_tag(97)
class MacMessage(maccommon.MacCommon):
context = "MAC"
cbor_tag = 97
@classmethod
def from_cose_obj(cls, cose_obj) -> 'MacMessage':
msg = super().from_cose_obj(cose_obj)
msg.auth_tag = cose_obj.pop(0)
try:
msg.recipients = [CoseRecipient.from_recipient_obj(r) for r in cose_obj.pop(0)]
except (IndexError, ValueError):
msg.recipients = None
return msg
def __init__(self,
phdr: dict = None,
uhdr: dict = None,
payload: bytes = b'',
external_aad: bytes = b'',
recipients: Optional[List[CoseRecipient]] = None):
if phdr is None:
phdr = {}
if uhdr is None:
uhdr = {}
super().__init__(phdr, uhdr, payload, external_aad)
if recipients is None:
self.recipients = []
else:
self.recipients = recipients
def encode(self,
key: SymmetricKey,
alg: Optional[CoseAlgorithms] = None,
mac_params: Optional[List[RcptParams]] = None,
tagged: bool = True,
mac: bool = True) -> bytes:
""" Encodes and protects the COSE_Mac message. """
# encode/encrypt the base fields
if mac:
message = [self.encode_phdr(), self.encode_uhdr(), self.payload, self.compute_tag(alg=alg, key=key)]
else:
message = [self.encode_phdr(), self.encode_uhdr(), self.payload]
if mac_params is None:
mac_params = []
if len(self.recipients) == len(mac_params):
if len(mac_params) > 0:
message.append(CoseRecipient.recursive_encode(self.recipients, mac_params))
else:
raise ValueError("List with cryptographic parameters should have the same length as the recipient list.")
if tagged:
message = cbor2.dumps(cbor2.CBORTag(self.cbor_tag, message), default=self._special_cbor_encoder)
else:
message = cbor2.dumps(message, default=self._special_cbor_encoder)
return message
def __repr__(self) -> str:
return \
f'<COSE_Mac0: [{self._phdr}, {self._uhdr}, {CoseMessage._truncate(self._payload)}, ' \
f'{CoseMessage._truncate(self.auth_tag)}, {self.recipients}]>'
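# A minimal usage sketch (hedged: the exact SymmetricKey constructor arguments
# and algorithm identifier below may differ between pycose versions):
#
#     key = SymmetricKey(k=16 * b'\x01')
#     msg = MacMessage(phdr={}, uhdr={}, payload=b'hello')
#     encoded = msg.encode(key=key, alg=CoseAlgorithms.HMAC_256_64)
#     decoded = CoseMessage.decode(encoded)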
| 31.054348 | 117 | 0.60413 | 318 | 2,857 | 5.248428 | 0.31761 | 0.033553 | 0.019173 | 0.013182 | 0.098262 | 0.098262 | 0.055123 | 0.055123 | 0.055123 | 0 | 0 | 0.005958 | 0.295065 | 2,857 | 91 | 118 | 31.395604 | 0.822741 | 0.076304 | 0 | 0.065574 | 0 | 0.016393 | 0.090667 | 0.029714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.114754 | 0.016393 | 0.278689 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff218524b954b48397bd6cee491e5e897272d086 | 8,083 | py | Python | V1/oer_glidervsCTD_plot.py | NOAA-PMEL/EcoFOCI_OculusGlider | 5655c0e173432768706416932c94a089a3e7993f | [
"Unlicense"
] | 2 | 2018-04-12T19:49:05.000Z | 2020-10-01T11:46:48.000Z | V1/oer_glidervsCTD_plot.py | NOAA-PMEL/EcoFOCI_OculusGlider | 5655c0e173432768706416932c94a089a3e7993f | [
"Unlicense"
] | null | null | null | V1/oer_glidervsCTD_plot.py | NOAA-PMEL/EcoFOCI_OculusGlider | 5655c0e173432768706416932c94a089a3e7993f | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""
Background:
--------
oer_glidervsCTD_plot.py
Purpose:
--------
Plot archived NetCDF glider profile data against a NetCDF CTD cast.
History:
--------
"""
import argparse, os
import numpy as np
import calc.aanderaa_corrO2_sal as optode_O2_corr
from plots.profile_plot import CTDProfilePlot
import io_utils.EcoFOCI_netCDF_read as eFOCI_ncread
# Visual Stack
import matplotlib as mpl
import matplotlib.pyplot as plt
def castdirection(depth):
"""determin index of upcast and downcast"""
downcast = [0,np.argmax(np.diff(depth)<0)+1]
upcast = [np.argmax(np.diff(depth)<0),len(depth)]
return (downcast,upcast)
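# e.g. for depth = [0, 5, 10, 7, 3]: downcast -> [0, 3], upcast -> [2, 5]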
"""-------------------------------- Main -----------------------------------------------"""
parser = argparse.ArgumentParser(description='Plot archived NetCDF glider data against NetCDF CTD cast')
parser.add_argument('glider_file', metavar='glider_file', type=str,
help='complete path to netcdf glider file')
parser.add_argument('ctd_file', metavar='glider_file', type=str,
help='complete path to netcdf ctd file')
args = parser.parse_args()
#######################
#
# Data Ingest and Processing
filein = args.glider_file
diveNum = filein.split('/')[-1].split('.nc')[0]
df = eFOCI_ncread.EcoFOCI_netCDF(file_name=filein)
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic()
data_time = df.epochtime2date()
df.close()
### Dive Data
pressure = ncdata['ctd_pressure']
SBE_Temperature = ncdata['temperature']
SBE_Salinity = ncdata['salinity']
Wetlabs_CDOM = ncdata['wlbb2fl_sig470nm_adjusted']
Wetlabs_CHL = ncdata['wlbb2fl_sig695nm_adjusted']
Wetlabs_NTU = ncdata['wlbb2fl_sig700nm_adjusted']
Aand_Temp = ncdata['eng_aa4330_Temp']
#Aand_O2_corr = ncdata['aanderaa4330_dissolved_oxygen']
#Aand_O2_corr = ncdata['eng_aa4330_O2']
Aand_O2_corr = optode_O2_corr.O2_dep_comp(oxygen_conc=ncdata['eng_aa4330_O2'],
depth=ncdata['depth']/100)
Aand_O2_corr = optode_O2_corr.O2_sal_comp(oxygen_conc=Aand_O2_corr,
salinity=SBE_Salinity,
temperature=SBE_Temperature)
Aand_DO = optode_O2_corr.O2_molar2umkg(oxygen_conc=Aand_O2_corr,
salinity=SBE_Salinity,
temperature=SBE_Temperature,
pressure=pressure)
Aand_O2_corr = Aand_DO
Aand_DO_Sat = ncdata['eng_aa4330_AirSat']
Aand_DO_Sat_calc = optode_O2_corr.O2PercentSat(oxygen_conc=Aand_O2_corr,
salinity=SBE_Salinity,
temperature=SBE_Temperature,
pressure=pressure)
PAR_satu = ncdata['eng_satu_PARuV'] * 1000
PAR_satd = ncdata['eng_satd_PARuV'] * 1000
lat = ncdata['log_gps_lat'][0]
lon = ncdata['log_gps_lon'][0]
"""---------"""
filein = args.ctd_file
df = eFOCI_ncread.EcoFOCI_netCDF(file_name=filein)
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic()
#ctd_data_time = df.epochtime2date()
df.close()
### CTD Data
CTD_pressure = ncdata['P_1'][0,:,0,0]
CTD_Temperature = ncdata['T_28'][0,:,0,0]
CTD_Salinity = ncdata['S_41'][0,:,0,0]
CTD_Wetlabs_CHL = ncdata['F_903'][0,:,0,0]
CTD_O2_corr = ncdata['O_65'][0,:,0,0]
CTD_DO_Sat = ncdata['OST_62'][0,:,0,0]
#######################
#
# Plots
GliderPlot = CTDProfilePlot()
downInd,upInd = castdirection(pressure)
if not os.path.exists('images/dive' + diveNum ):
os.makedirs('images/dive' + diveNum)
########## CTD
### temperature
(plt, fig) = GliderPlot.plot1plot_CTD(epic_key=['T_28','T_28u','T2_35','T2_35u','temperature'],
xdata=[SBE_Temperature[downInd[0]:downInd[1]],SBE_Temperature[upInd[0]:upInd[1]],
Aand_Temp[downInd[0]:downInd[1]],Aand_Temp[upInd[0]:upInd[1]],CTD_Temperature],
              ydata=[pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],
                     pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],CTD_pressure],
xlabel='Temperature (C)',
updown=['d','u','d','u',''],
maxdepth=np.max(pressure))
ptitle = GliderPlot.add_title(cruiseid='',
fileid=filein.split('/')[-1],
castid=diveNum,
castdate=data_time[0],
lat=lat,
lon=lon)
t = fig.suptitle(ptitle)
t.set_y(1.06)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
plt.savefig('images/dive'+diveNum+'/dive'+diveNum+'_temperature.png', bbox_inches='tight', dpi = (300))
plt.close()
###salinity
(plt, fig) = GliderPlot.plot1plot_CTD(epic_key=['S_41','S_41u','salinity'],
xdata=[SBE_Salinity[downInd[0]:downInd[1]],SBE_Salinity[upInd[0]:upInd[1]],CTD_Salinity],
ydata=[pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],CTD_pressure],
xlabel='Salinity (PSU)',
updown=['d','u',''],
maxdepth=np.max(pressure))
ptitle = GliderPlot.add_title(cruiseid='',
fileid=filein.split('/')[-1],
castid=diveNum,
castdate=data_time[0],
lat=lat,
lon=lon)
t = fig.suptitle(ptitle)
t.set_y(1.06)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
plt.savefig('images/dive'+diveNum+'/dive'+diveNum+'_salinity.png', bbox_inches='tight', dpi = (300))
plt.close()
##### optode
### conc
(plt, fig) = GliderPlot.plot1plot_CTD(epic_key=['O_65','O_65u','oxy_conc'],
xdata=[Aand_O2_corr[downInd[0]:downInd[1]],Aand_O2_corr[upInd[0]:upInd[1]],CTD_O2_corr],
ydata=[pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],CTD_pressure],
xlabel='Oxygen Conc (umole/kg)',
updown=['d','u',''],
maxdepth=np.max(pressure))
ptitle = GliderPlot.add_title(cruiseid='',
fileid=filein.split('/')[-1],
castid=diveNum,
castdate=data_time[0],
lat=lat,
lon=lon)
t = fig.suptitle(ptitle)
t.set_y(1.06)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
plt.savefig('images/dive'+diveNum+'/dive'+diveNum+'_O2Conc.png', bbox_inches='tight', dpi = (300))
plt.close()
### PSat
(plt, fig) = GliderPlot.plot1plot_CTD(epic_key=['OST_62','OST_62u','oxy_sat'],
xdata=[Aand_DO_Sat[downInd[0]:downInd[1]],Aand_DO_Sat[upInd[0]:upInd[1]],CTD_DO_Sat],
ydata=[pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],CTD_pressure],
xlabel='Oxygen PSat (%)',
updown=['d','u',''],
maxdepth=np.max(pressure))
ptitle = GliderPlot.add_title(cruiseid='',
fileid=filein.split('/')[-1],
castid=diveNum,
castdate=data_time[0],
lat=lat,
lon=lon)
t = fig.suptitle(ptitle)
t.set_y(1.06)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
plt.savefig('images/dive'+diveNum+'/dive'+diveNum+'_O2Psat.png', bbox_inches='tight', dpi = (300))
plt.close()
########## WetLabs
###chl
(plt, fig) = GliderPlot.plot1plot_CTD(epic_key=['Chl_933','Chl_933u','chlor'],
xdata=[Wetlabs_CHL[downInd[0]:downInd[1]],Wetlabs_CHL[upInd[0]:upInd[1]],CTD_Wetlabs_CHL],
ydata=[pressure[downInd[0]:downInd[1]],pressure[upInd[0]:upInd[1]],CTD_pressure],
xlabel='Chl (ug/l)',
updown=['d','u',''],
maxdepth=np.max(pressure))
ptitle = GliderPlot.add_title(cruiseid='',
fileid=filein.split('/')[-1],
castid=diveNum,
castdate=data_time[0],
lat=lat,
lon=lon)
t = fig.suptitle(ptitle)
t.set_y(1.06)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
plt.savefig('images/dive'+diveNum+'/dive'+diveNum+'_chl.png', bbox_inches='tight', dpi = (300))
plt.close()
| 32.857724 | 107 | 0.601633 | 1,022 | 8,083 | 4.536204 | 0.199609 | 0.022002 | 0.035591 | 0.037964 | 0.591242 | 0.553926 | 0.531924 | 0.483822 | 0.44931 | 0.44931 | 0 | 0.038498 | 0.222318 | 8,083 | 245 | 108 | 32.991837 | 0.699014 | 0.047755 | 0 | 0.538462 | 0 | 0 | 0.105023 | 0.010047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00641 | false | 0 | 0.044872 | 0 | 0.057692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff2233bb02f187b2c1f54b2c72ddfd6822179595 | 1,551 | py | Python | scripts/operator/delete_operators_from_quay.py | snyk-schmidtty/kubernetes-monitor | 58811f1fea6673561806811abfa70292ca6a4f07 | [
"Apache-2.0"
] | null | null | null | scripts/operator/delete_operators_from_quay.py | snyk-schmidtty/kubernetes-monitor | 58811f1fea6673561806811abfa70292ca6a4f07 | [
"Apache-2.0"
] | 5 | 2021-02-27T03:16:34.000Z | 2021-02-27T03:17:58.000Z | scripts/operator/delete_operators_from_quay.py | snyk-schmidtty/kubernetes-monitor | 58811f1fea6673561806811abfa70292ca6a4f07 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3
"""Delete all Operators from the Quay Snyk Operator repository
Args:
username (str): Quay username
password (str): Quay password
"""
from requests import get, delete
from json import loads
from get_quay_token import getQuayToken
from typing import List, Dict
from sys import argv
Package = Dict[str, object]
def getOperators(url: str) -> List[Package]:
resp = get(url)
return loads(resp.text)
def getVersions(packages: List[Package]) -> List[str]:
def extract_version(package):
return package["release"]
return list(map(extract_version, packages))
def deleteOperator(quay_token: str, url: str) -> None:
response = delete(url, headers={
"Authorization": quay_token,
"Content-Type": "application/json",
"Accept": "application/json"
})
print(response.text)
if __name__ == '__main__':
_, username, password = argv
quay_token = getQuayToken(
"https://quay.io/cnr/api/v1/users/login",
username,
password
)
operators = getOperators(
"https://quay.io/cnr/api/v1/packages/" + username + "/" + "snyk-operator"
)
versions = getVersions(operators)
if len(versions) == 0:
print("No Operators to delete")
else:
print("Deleting the following Operators:", ', '.join(versions))
for version in versions:
deleteOperator(
quay_token,
"https://quay.io/cnr/api/v1/packages/" + username +
"/" + "snyk-operator/" + version + "/helm"
)
| 24.234375 | 81 | 0.63314 | 177 | 1,551 | 5.451977 | 0.418079 | 0.046632 | 0.034197 | 0.043523 | 0.117098 | 0.117098 | 0.097409 | 0.097409 | 0.097409 | 0.097409 | 0 | 0.004237 | 0.239201 | 1,551 | 63 | 82 | 24.619048 | 0.813559 | 0.098646 | 0 | 0 | 0 | 0 | 0.200575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.04878 | 0.121951 | 0.02439 | 0.292683 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff266552067c871f264f181ad65f6b4259ee89bd | 11,031 | py | Python | pull_requests_and_issues.py | andreasscherbaum/github_issue_exporter | 95e01b7dd90250393b4550502445ab6c0f821e7a | [
"Apache-2.0"
] | null | null | null | pull_requests_and_issues.py | andreasscherbaum/github_issue_exporter | 95e01b7dd90250393b4550502445ab6c0f821e7a | [
"Apache-2.0"
] | null | null | null | pull_requests_and_issues.py | andreasscherbaum/github_issue_exporter | 95e01b7dd90250393b4550502445ab6c0f821e7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Download GitHub Issued and PR as CSV
#
# written by Andreas 'ads' Scherbaum <andreas@scherbaum.la>
#
# version: 0.5 2016-08-27
# initial version
# 1.0 2016-09-04
# update help
# add options
# remove hardcoded organization and project
import re
import os
import sys
import logging
import tempfile
import atexit
import shutil
import argparse
_urllib_version = False
try:
import urllib2
_urllib_version = 2
except ImportError:
import urllib3
_urllib_version = 3
try:
import httplib
except ImportError:
import http.client as httplib
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import gzip
import zlib
from subprocess import Popen
try: from urlparse import urljoin # Python2
except ImportError: from urllib.parse import urljoin # Python3
import json
# start with 'info', can be overridden by '-q' later on
logging.basicConfig(level = logging.INFO,
format = '%(levelname)s: %(message)s')
# download_url()
#
# download a specific url, handle compression
#
# parameter:
# - url
# return:
# - content of the link
def download_url(url):
global _urllib_version
if (_urllib_version == 2):
rq = urllib2.Request(url)
rq.add_header('Accept-encoding', 'gzip')
try:
rs = urllib2.urlopen(rq)
except urllib2.HTTPError as e:
if (e.code == 400):
logging.error('HTTPError = ' + str(e.code) + ' (Bad Request)')
elif (e.code == 401):
logging.error('HTTPError = ' + str(e.code) + ' (Unauthorized)')
elif (e.code == 403):
logging.error('HTTPError = ' + str(e.code) + ' (Forbidden)')
elif (e.code == 404):
logging.error('HTTPError = ' + str(e.code) + ' (URL not found)')
elif (e.code == 408):
logging.error('HTTPError = ' + str(e.code) + ' (Request Timeout)')
elif (e.code == 418):
logging.error('HTTPError = ' + str(e.code) + " (I'm a teapot)")
elif (e.code == 500):
logging.error('HTTPError = ' + str(e.code) + ' (Internal Server Error)')
elif (e.code == 502):
logging.error('HTTPError = ' + str(e.code) + ' (Bad Gateway)')
elif (e.code == 503):
logging.error('HTTPError = ' + str(e.code) + ' (Service Unavailable)')
elif (e.code == 504):
logging.error('HTTPError = ' + str(e.code) + ' (Gateway Timeout)')
else:
logging.error('HTTPError = ' + str(e.code))
sys.exit(1)
except urllib2.URLError as e:
logging.error('URLError = ' + str(e.reason))
sys.exit(1)
except httplib.HTTPException as e:
logging.error('HTTPException')
sys.exit(1)
except Exception:
logging.error('generic exception')
sys.exit(1)
if rs.info().get('Content-Encoding') == 'gzip':
b = StringIO(rs.read())
f = gzip.GzipFile(fileobj = b)
data = f.read()
else:
data = rs.read()
elif (_urllib_version == 3):
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("httplib").setLevel(logging.WARNING)
user_agent = {'user-agent': 'GPDB buildclient', 'accept-encoding': 'gzip, deflate'}
#http = urllib3.PoolManager(maxsize = 3, retries = 2, headers = user_agent)
http = urllib3.PoolManager(maxsize = 3, headers = user_agent)
try:
rs = http.urlopen('GET', url, redirect = True)
except urllib3.exceptions.MaxRetryError as e:
logging.error("Too many retries")
sys.exit(1)
        except urllib3.exceptions.HTTPError as e:
            logging.error('HTTPError = ' + str(e))
sys.exit(1)
except httplib.HTTPException as e:
logging.error('HTTPException')
sys.exit(1)
except urllib3.exceptions.ConnectTimeoutError as e:
logging.error("Timeout")
sys.exit(1)
except Exception:
logging.error('generic exception')
sys.exit(1)
if (rs.status != 200):
if (rs.status == 400):
logging.error("HTTPError = 400 (Bad Request)")
elif (rs.status == 401):
logging.error("HTTPError = 401 (Unauthorized)")
elif (rs.status == 403):
logging.error("HTTPError = 403 (Forbidden)")
elif (rs.status == 404):
logging.error("HTTPError = 404 (URL not found)")
elif (rs.status == 408):
logging.error("HTTPError = 408 (Request Timeout)")
elif (rs.status == 418):
logging.error("HTTPError = 418 (I'm a teapot)")
elif (rs.status == 500):
logging.error("HTTPError = 500 (Internal Server Error)")
elif (rs.status == 502):
logging.error("HTTPError = 502 (Bad Gateway)")
elif (rs.status == 503):
logging.error("HTTPError = 503 (Service Unavailable)")
elif (rs.status == 504):
logging.error("HTTPError = 504 (Gateway Timeout)")
else:
logging.error("HTTPError = " + str(rs.status) + "")
sys.exit(1)
if (len(rs.data.decode()) == 0):
logging.error("failed to download URL")
sys.exit(1)
data = rs.data.decode()
else:
logging.error("unknown urllib version!")
sys.exit(1)
logging.debug("fetched " + human_size(len(data)))
return data
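# Typical use (a sketch; the real call site is in the main program below):
#   data = download_url('https://api.github.com/repos/<org>/<project>/issues')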
# from: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
# human_size()
#
# format number into human readable output
#
# parameters:
# - number
# return:
# - string with formatted number
def human_size(size_bytes):
"""
format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
"""
if (size_bytes == 1):
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if (num < 1024.0):
break
num /= 1024.0
if (precision == 0):
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix)
# write_output()
#
# write output file
#
# parameter:
# - filename
# - flag if Pull Requests (true) or Issues (false) should be printed
# - list with PR/Issues
# return:
# none
def write_output(file, is_pr, issues):
f = open(file, 'w')
f.write("ID\tTitle\tCreated\tURL\tState\n")
for item_outer in issues:
for item in item_outer:
if (is_pr is False and 'pull_request' in item):
continue
if (is_pr is True and 'pull_request' not in item):
continue
f.write(str(item['number']) + "\t")
            f.write(str(item['title']) + "\t")
f.write(str(item['created_at']).replace('T', ' ').replace('Z', '') + "\t")
# replace the API link with the WWW link
url = item['url'].replace('https://api.github.com/repos/', 'https://www.github.com/')
f.write(str(url) + "\t")
f.write(str(item['state']) + "\t")
f.write("\n")
f.close()
# print_help()
#
# print the help
#
# parameter:
# - parser
# return:
# none
def print_help(parser):
parser.print_help()
#######################################################################
# Main program
parser = argparse.ArgumentParser(description = 'GitHub Issues and PR exporter',
epilog = '',
usage = '%(prog)s [options] <GitHub organization name> <GitHub project name>',
add_help = False)
parser.add_argument('--help', default = False, dest = 'help', action = 'store_true', help = 'show this help')
parser.add_argument('--state', default = 'open', dest = 'state', help = 'Issue state (open, closed, all - Default: open)', choices = ['open', 'closed', 'all'])
parser.add_argument('--sort', default = 'created', dest = 'sort', help = 'Sort order (created, updated, comments - Default: created)', choices = ['created', 'updated', 'comments'])
parser.add_argument('-v', '--verbose', default = False, dest = 'verbose', action = 'store_true', help = 'be more verbose')
parser.add_argument('-q', '--quiet', default = False, dest = 'quiet', action = 'store_true', help = 'run quietly')
# parse parameters
parsed = parser.parse_known_args()
args = parsed[0]
remaining_args = parsed[1]
if (args.help is True):
print_help(parser)
sys.exit(0)
if (args.verbose is True and args.quiet is True):
print_help(parser)
print("")
print("Error: --verbose and --quiet can't be set at the same time")
sys.exit(1)
if (args.verbose is True):
logging.getLogger().setLevel(logging.DEBUG)
if (args.quiet is True):
logging.getLogger().setLevel(logging.ERROR)
if (len(remaining_args) < 2):
print_help(parser)
print("")
print("")
print('"GitHub organization name" and "GitHub project name" must be specified!')
print("")
sys.exit(1)
github_organization = remaining_args[0]
github_project = remaining_args[1]
logging.debug("Organization: " + github_organization)
logging.debug(" Project: " + github_project)
issues_json_all = []
# fetch all pages, until a page with no JSON data is returned
page = 0
while True:
page += 1
logging.debug("Page: " + str(page))
issues_url = 'https://api.github.com/repos/' + github_organization + '/' + github_project + '/issues?state=' + args.state + '&sort=' + args.sort + '&filter=all&page=' + str(page)
issues_data = download_url(issues_url)
if (len(issues_data) < 20):
# GitHub returns an empty JSON field if there is no more data available
logging.info("fetched " + str(page - 1) + " pages with data")
break
issues_json = json.loads(issues_data)
issues_json_all.append(issues_json)
#print(issues_json)
base_output_name = 'GitHub_' + github_organization + '_' + github_project + '_'
issues_output_name = base_output_name + 'Issues.csv'
pr_output_name = base_output_name + 'PR.csv'
#logging.debug("Base filename: " + base_output_name)
write_output(issues_output_name, False, issues_json_all)
write_output(pr_output_name, True, issues_json_all)
logging.info("Issues: " + issues_output_name)
logging.info(" PR: " + pr_output_name)
| 31.161017 | 182 | 0.581996 | 1,336 | 11,031 | 4.723054 | 0.247006 | 0.062758 | 0.073217 | 0.045642 | 0.19683 | 0.128526 | 0.080032 | 0.052932 | 0.041838 | 0.041838 | 0 | 0.024762 | 0.275134 | 11,031 | 353 | 183 | 31.249292 | 0.764382 | 0.142598 | 0 | 0.210046 | 0 | 0 | 0.192002 | 0.00344 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018265 | false | 0 | 0.105023 | 0 | 0.136986 | 0.050228 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff2e375a8f50378a13a348c074037b0b1fd28807 | 1,035 | py | Python | util/security/test/test_secret.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | util/security/test/test_secret.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | util/security/test/test_secret.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | import uuid
import pytest
from util.security.secret import convert_secret_key
@pytest.mark.parametrize(
"config_secret_key, expected_secret_key",
[
pytest.param("somesecretkey", b"somesecretkeysomesecretkeysomese", id="Some string"),
pytest.param("255", b"\xff" * 32, id="Some int that can be represented as a byte",),
pytest.param(
"256",
b"25625625625625625625625625625625",
id="Some int that can't be represented as a byte multiple (256 is 100 in hex -> 12 bits)",
),
pytest.param(
"123e4567-e89b-12d3-a456-426655440000",
uuid.UUID("123e4567-e89b-12d3-a456-426655440000").bytes * 2,
id="Some 16bit UUID",
),
],
)
def test_convert_secret_key(config_secret_key, expected_secret_key):
converted_secret_key = convert_secret_key(config_secret_key)
assert len(converted_secret_key) == 32
assert isinstance(converted_secret_key, bytes)
assert converted_secret_key == expected_secret_key
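# Run directly with pytest, e.g.:
#   pytest util/security/test/test_secret.py -v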
| 34.5 | 102 | 0.675362 | 128 | 1,035 | 5.25 | 0.429688 | 0.174107 | 0.107143 | 0.102679 | 0.40625 | 0.165179 | 0 | 0 | 0 | 0 | 0 | 0.133085 | 0.223188 | 1,035 | 29 | 103 | 35.689655 | 0.702736 | 0 | 0 | 0.16 | 0 | 0.04 | 0.337198 | 0.131401 | 0 | 0 | 0 | 0 | 0.12 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff31cc51ee4cb699318f8c7e3d04f5809d3e62ad | 3,395 | py | Python | socket/app.py | cheeseywhiz/cheeseywhiz | 51f6651ddbaeebd14d9ce77776bc4cf3a95511c4 | [
"MIT"
] | null | null | null | socket/app.py | cheeseywhiz/cheeseywhiz | 51f6651ddbaeebd14d9ce77776bc4cf3a95511c4 | [
"MIT"
] | null | null | null | socket/app.py | cheeseywhiz/cheeseywhiz | 51f6651ddbaeebd14d9ce77776bc4cf3a95511c4 | [
"MIT"
] | null | null | null | """A sample custom HTTP server."""
import functools
import html
import traceback
import collect
import server
server.Logger.name = __file__
HTML_TMPL = '''\
<html>
<head>
<link rel="stylesheet" type="text/css" href="/myStyle.css"/>
</head>
<body id="consolas">
%s</body>
</html>
'''
LINK_HOME = '<a href="/">Home</a>'
app = server.app.App('0.0.0.0', 8080)
app.resolver.update_from_files_json('app.json')
@app.register('/myStyle.css')
def my_style():
status_code, headers, content = app._return_file(
collect.Path('myStyle.css')
)
headers['Content-Type'] = 'text/css'
return status_code, headers, content
def insert_body(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
if isinstance(response, tuple):
status_code, headers, text = response
return status_code, headers, HTML_TMPL % text
else:
return HTML_TMPL % response
return wrapper
@app.register('/')
@insert_body
def index():
return '''\
<a href="/img.png"><img src="/img.png" width="250"/></a>
<form action="/" method="post">
<input id="consolas" type="text" name="url"><br/>
<input id="consolas" type="submit" value="Submit">
</form>
'''
@insert_body
def dir_landing_page(url_path, folder_path, recursive):
def contents():
yield folder_path.parent
yield folder_path
yield from folder_path
parts = []
for file in contents():
rel_path = file.relpath(folder_path)
new_url = url_path / rel_path
if recursive or file.is_file():
parts.append(f'''
<a href="{new_url}">{rel_path}</a>''')
inner = '<br/>'.join(parts)
return f'''\
<h1>{LINK_HOME}{url_path}</h1>
<p>{inner}
</p>
'''
for url_path, fs_path in app.resolver.dirs.items():
recursive = app.resolver.recursive(url_path)
def contents():
if recursive:
yield from fs_path.tree
else:
yield fs_path
for file in contents():
if not file.is_dir():
continue
rel_file = file.relpath(fs_path)
new_url = url_path / rel_file
app.register(new_url)(
functools.partial(dir_landing_page, new_url, file, recursive)
)
@app.register('/', 'post')
def index_post():
    url = server.app.ActiveRequest.body['url']
    new_url = collect.Path(url)
return 303, {'Location': str(new_url)}, ''
@app.register('/page')
def page():
return 307, {'Location': '/new'}, ''
@app.register('/new')
@insert_body
def new():
return f'''\
<p>
This is the new page. You may have been redirected.<br/>
{LINK_HOME}
</p>
'''
@app.register('/req', 'GET', 'POST')
def req_():
return (
200, {'Content-Type': 'text/plain'},
server.app.ActiveRequest.raw_request)
@app.register_exception(server.http.HTTPException)
def handle_http(error):
body = f'''\
<h1>{error.status_code} {error.reason}</h1>
<pre id="consolas">{html.escape(str(error.message))}</pre>
{LINK_HOME}
'''
return error.status_code, HTML_TMPL % body
@app.register_exception(Exception)
def handle_exc(error):
new_error = server.http.HTTPException(traceback.format_exc(), 500)
return handle_http(new_error)
print('ready')
if __name__ == '__main__':
app.run()
| 21.762821 | 73 | 0.613844 | 442 | 3,395 | 4.533937 | 0.300905 | 0.049401 | 0.033932 | 0.023952 | 0.01996 | 0.01996 | 0 | 0 | 0 | 0 | 0 | 0.01027 | 0.225626 | 3,395 | 155 | 74 | 21.903226 | 0.751997 | 0.008247 | 0 | 0.172414 | 0 | 0.008621 | 0.255876 | 0.04701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112069 | false | 0 | 0.043103 | 0.034483 | 0.258621 | 0.008621 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff32f68fb44dc05b704c9e1bf005ac75b155b258 | 1,512 | py | Python | services/web/server/src/simcore_service_webserver/projects/projects_tags_handlers.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/web/server/src/simcore_service_webserver/projects/projects_tags_handlers.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/web/server/src/simcore_service_webserver/projects/projects_tags_handlers.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """ Handlers for CRUD operations on /projects/{*}/tags/{*}
"""
import logging
from aiohttp import web
from .._meta import api_version_prefix as VTAG
from ..login.decorators import RQT_USERID_KEY, login_required
from ..security_decorators import permission_required
from .projects_db import APP_PROJECT_DBAPI, ProjectDBAPI
log = logging.getLogger(__name__)
routes = web.RouteTableDef()
@routes.put(f"/{VTAG}/projects/{{project_uuid}}/tags/{{tag_id}}")
@login_required
@permission_required("project.tag.*")
async def add_tag(request: web.Request):
user_id: int = request[RQT_USERID_KEY]
db: ProjectDBAPI = request.config_dict[APP_PROJECT_DBAPI]
try:
tag_id, study_uuid = (
request.match_info["tag_id"],
request.match_info["study_uuid"],
)
except KeyError as err:
raise web.HTTPBadRequest(reason=f"Invalid request parameter {err}") from err
return await db.add_tag(
project_uuid=study_uuid, user_id=user_id, tag_id=int(tag_id)
)
@routes.delete(f"/{VTAG}/projects/{{project_uuid}}/tags/{{tag_id}}")
@login_required
@permission_required("project.tag.*")
async def remove_tag(request: web.Request):
user_id: int = request[RQT_USERID_KEY]
db: ProjectDBAPI = request.config_dict[APP_PROJECT_DBAPI]
tag_id, study_uuid = (
request.match_info["tag_id"],
request.match_info["study_uuid"],
)
return await db.remove_tag(
project_uuid=study_uuid, user_id=user_id, tag_id=int(tag_id)
)
| 28 | 84 | 0.71164 | 207 | 1,512 | 4.89372 | 0.31401 | 0.049358 | 0.063179 | 0.039487 | 0.560711 | 0.560711 | 0.560711 | 0.560711 | 0.560711 | 0.560711 | 0 | 0 | 0.166667 | 1,512 | 53 | 85 | 28.528302 | 0.803968 | 0.035714 | 0 | 0.432432 | 0 | 0 | 0.129055 | 0.067633 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.162162 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff331d0933475a031554907cd3c4b819e0f29798 | 1,362 | py | Python | audioToJummbox/main.py | KeeyanGhoreshi/AudioToJummbox | aaf9a64e47367ff1e375916ffeab92355322bd99 | [
"MIT"
] | null | null | null | audioToJummbox/main.py | KeeyanGhoreshi/AudioToJummbox | aaf9a64e47367ff1e375916ffeab92355322bd99 | [
"MIT"
] | null | null | null | audioToJummbox/main.py | KeeyanGhoreshi/AudioToJummbox | aaf9a64e47367ff1e375916ffeab92355322bd99 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
import logging
from audioToJummbox import converter
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("infile", help="The audio file to convert")
parser.add_argument("--base", "-b", default="./resources/template.json", help="The file used as the base for the output. See readme.")
parser.add_argument("--output", "-o", default="./resources/data.json", help="The output jummbox json file")
parser.add_argument("--tempo", "-t", default=200, type=float, help="The tempo of the song the audio is going into, not the tempo of the audio!")
parser.add_argument(
"--smooth",
"-s",
type=int,
default=0,
help="Can be 1, 2, or 3. Max tempo for each is 300, 150, 100")
args = parser.parse_args()
args.output = (
"{}.json".format(os.path.basename(args.infile))
if not args.output
else args.output
)
return args
def main():
try:
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
args = parse_args()
converter.convert(args.infile, args.tempo, args.base, args.output, args.smooth)
print(args)
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
logging.exception(e)
sys.exit(1)
if __name__ == "__main__":
main() | 33.219512 | 148 | 0.637298 | 183 | 1,362 | 4.655738 | 0.448087 | 0.052817 | 0.099765 | 0.030516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017127 | 0.228341 | 1,362 | 41 | 149 | 33.219512 | 0.79353 | 0 | 0 | 0.054054 | 0 | 0 | 0.25752 | 0.033749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.135135 | 0 | 0.216216 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff34e2899bffacd28b1fe087f784f173acd09a05 | 18,680 | py | Python | src/menu_screen.py | dcripplinger/rotj | 4de46b0e39e3971dd23650a28010bd90b3f637d2 | [
"MIT"
] | 5 | 2019-11-03T21:27:17.000Z | 2021-12-08T04:38:50.000Z | src/menu_screen.py | dcripplinger/rotj | 4de46b0e39e3971dd23650a28010bd90b3f637d2 | [
"MIT"
] | 10 | 2018-01-06T03:56:17.000Z | 2022-02-17T22:04:39.000Z | src/menu_screen.py | dcripplinger/rotj | 4de46b0e39e3971dd23650a28010bd90b3f637d2 | [
"MIT"
] | 1 | 2018-01-06T03:21:51.000Z | 2018-01-06T03:21:51.000Z | # -*- coding: UTF-8 -*-
import time
import pygame
from pygame.locals import *
from constants import BLACK, GAME_WIDTH, HQ_LOCATIONS
from helpers import create_save_state, erase_save_state, is_half_second, load_save_states, copy_save_state
from text import create_prompt, MenuBox, MenuGrid, TextBox
MAIN_MENU = [
'GAME START',
'REGISTER HISTORY BOOK',
'ERASE HISTORY BOOK',
'COPY HISTORY BOOK',
]
class MenuScreen(object):
def __init__(self, screen, game):
self.screen = screen
self.game = game
self.select_sound = pygame.mixer.Sound('data/audio/select.wav')
self.screen_state = 'unstarted'
self.state = load_save_states()
self.start_prompt = None
self.start_menu = None
self.main_menu = None
self.speed_menu = None
self.register_prompt = None
self.register_menu = None
self.erase_menu = None
self.erase_prompt = None
self.confirm_erase_menu = None
self.name_menu = None
self.name_blurb = None
self.name_field = None
self.name_underline = None
self.current_name_char = None
self.copy_prompt = None
self.copy_menu = None
self.paste_menu = None
self.paste_prompt = None
self.controls = TextBox('~~~~~~~~~~X: select\n~~~~~~~~~~Z: cancel\n ~~~~~ARROWS: move\n~~~~~~ENTER: pause menu\nRIGHT~SHIFT: party status')
def load_main_menu(self):
if all(self.state): # if all three save slots are full
self.main_menu = MenuBox([MAIN_MENU[0], MAIN_MENU[2]])
elif any(self.state): # if any of the three save slots is populated
self.main_menu = MenuBox(MAIN_MENU)
else:
self.main_menu = MenuBox([MAIN_MENU[1],])
def load_start_menu(self):
self.start_menu = MenuBox(self.format_populated_save_slots())
self.start_prompt = create_prompt('Which history do you wish to continue?')
def load_copy_menu(self):
self.copy_menu = MenuBox(self.format_populated_save_slots())
self.copy_prompt = create_prompt('Which history book do you wish to copy?')
def load_paste_menu(self):
self.paste_menu = MenuBox(self.format_unpopulated_save_slots())
self.paste_prompt = create_prompt('Which book do you wish to copy to?')
def load_register_menu(self):
self.register_menu = MenuBox(self.format_unpopulated_save_slots())
self.register_prompt = create_prompt('Which history book do you wish to begin?')
def load_speed_menu(self):
self.speed_menu = MenuBox(['FAST', 'FAST', 'STILL FAST'])
def format_populated_save_slots(self):
return [u'{}~{:~<8}~L{:02}'.format(i+1, slot['name'], slot['level']) for i, slot in enumerate(self.state) if slot]
def format_unpopulated_save_slots(self):
return [u'{}~~~~~~'.format(i+1) for i, slot in enumerate(self.state) if not slot]
def load_erase_menu(self):
self.erase_menu = MenuBox(self.format_populated_save_slots())
self.erase_prompt = create_prompt('Which history book do you wish to erase?')
def load_confirm_erase_menu(self):
self.confirm_erase_menu = MenuBox(['YES', 'NO'])
self.erase_prompt = create_prompt('Are you sure?')
def load_name_menu(self):
self.name_menu = MenuGrid([ # each list in this list is a column in the menu grid
['0', 'A', 'I', 'Q', 'Y', 'a', 'i', 'q', 'y'],
['1', 'B', 'J', 'R', 'Z', 'b', 'j', 'r', 'z'],
['2', 'C', 'K', 'S', '-', 'c', 'k', 's'],
['3', 'D', 'L', 'T', ',', 'd', 'l', 't'],
['4', 'E', 'M', 'U', '.', 'e', 'm', 'u'],
['5', 'F', 'N', 'V', '/', 'f', 'n', 'v'],
['6', 'G', 'O', 'W', '?', 'g', 'o', 'w'],
['7', 'H', 'P', 'X', u'–', 'h', 'p', 'x'],
['8', '~', '"', "'"],
['9', '!', 'Back.', 'Fwd.', 'End.'],
])
self.name_blurb = TextBox('Please enter your name')
self.name_field = TextBox('~~~~~~~~')
self.name_underline = TextBox('--------')
self.current_name_char = 0
def draw(self):
self.screen.fill(BLACK)
self.screen.blit(self.controls.surface, (32, 112))
prompt_vert_pos = 160
mid_menu_vert_pos = 80
top_menu_vert_pos = 16
if self.screen_state == 'main':
self.screen.blit(self.main_menu.surface, ((GAME_WIDTH - self.main_menu.get_width())//2, top_menu_vert_pos))
elif self.screen_state in ['start', 'start_prompt']:
self.screen.blit(self.start_prompt.surface, ((GAME_WIDTH - self.start_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.start_menu.surface, ((GAME_WIDTH - self.start_menu.get_width())//2, top_menu_vert_pos))
elif self.screen_state == 'speed':
self.screen.blit(self.start_prompt.surface, ((GAME_WIDTH - self.start_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.start_menu.surface, ((GAME_WIDTH - self.start_menu.get_width())//2, top_menu_vert_pos))
self.screen.blit(self.speed_menu.surface, (GAME_WIDTH - self.speed_menu.get_width(), mid_menu_vert_pos))
elif self.screen_state == 'register':
self.screen.blit(self.register_prompt.surface, ((GAME_WIDTH - self.register_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.register_menu.surface, ((GAME_WIDTH - self.register_menu.get_width())//2, top_menu_vert_pos))
elif self.screen_state == 'erase':
self.screen.blit(self.erase_prompt.surface, ((GAME_WIDTH - self.erase_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.erase_menu.surface, ((GAME_WIDTH - self.erase_menu.get_width())//2, top_menu_vert_pos))
elif self.screen_state == 'confirm_erase':
self.screen.blit(self.erase_prompt.surface, ((GAME_WIDTH - self.erase_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.erase_menu.surface, ((GAME_WIDTH - self.erase_menu.get_width())//2, top_menu_vert_pos))
self.screen.blit(
self.confirm_erase_menu.surface,
((GAME_WIDTH - self.confirm_erase_menu.get_width())//2, mid_menu_vert_pos),
)
elif self.screen_state == 'name':
self.screen.blit(self.name_blurb.surface, ((GAME_WIDTH - self.name_blurb.width)//2, top_menu_vert_pos))
self.screen.blit(self.name_field.surface, ((GAME_WIDTH - self.name_field.width)//2, top_menu_vert_pos + 16))
self.screen.blit(self.name_underline.surface, ((GAME_WIDTH - self.name_underline.width)//2, top_menu_vert_pos + 24))
self.screen.blit(self.name_menu.surface, ((GAME_WIDTH - self.name_menu.get_width())//2, top_menu_vert_pos + 40))
elif self.screen_state in ['copy', 'copy_prompt']:
self.screen.blit(self.copy_prompt.surface, ((GAME_WIDTH - self.copy_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.copy_menu.surface, ((GAME_WIDTH - self.copy_menu.get_width())//2, top_menu_vert_pos))
elif self.screen_state == 'paste':
self.screen.blit(self.copy_menu.surface, ((GAME_WIDTH - self.copy_menu.get_width())//2, top_menu_vert_pos))
self.screen.blit(self.paste_prompt.surface, ((GAME_WIDTH - self.paste_prompt.width)//2, prompt_vert_pos))
self.screen.blit(self.paste_menu.surface, ((GAME_WIDTH - self.paste_menu.get_width())//2, mid_menu_vert_pos))
def update(self, dt):
if self.screen_state == 'unstarted':
pygame.mixer.music.load('data/audio/music/menu.wav')
pygame.mixer.music.play(-1)
self.screen_state = 'main'
self.load_main_menu()
self.main_menu.focus()
elif self.screen_state == 'main':
self.main_menu.update(dt)
elif self.screen_state == 'start':
self.start_menu.update(dt)
self.start_prompt.update(dt)
elif self.screen_state == 'start_prompt':
self.start_prompt.update(dt)
if not self.start_prompt.has_more_stuff_to_show():
self.screen_state = 'start'
self.start_menu.focus()
elif self.screen_state == 'speed':
self.speed_menu.update(dt)
self.start_prompt.update(dt)
elif self.screen_state == 'register':
self.register_menu.update(dt)
self.register_prompt.update(dt)
elif self.screen_state == 'erase':
self.erase_menu.update(dt)
self.erase_prompt.update(dt)
elif self.screen_state == 'confirm_erase':
self.confirm_erase_menu.update(dt)
self.erase_prompt.update(dt)
elif self.screen_state == 'name':
self.name_menu.update(dt)
underline = '--------'
if is_half_second():
self.name_underline = TextBox(underline)
else:
i = self.current_name_char
self.name_underline = TextBox(underline[:i] + '~' + underline[i+1:])
elif self.screen_state == 'copy':
self.copy_menu.update(dt)
self.copy_prompt.update(dt)
elif self.screen_state == 'copy_prompt':
self.copy_prompt.update(dt)
if not self.copy_prompt.has_more_stuff_to_show():
self.screen_state = 'copy'
self.copy_menu.focus()
elif self.screen_state == 'paste':
self.paste_menu.update(dt)
self.paste_prompt.update(dt)
def handle_input_main(self, pressed):
self.main_menu.handle_input(pressed)
if pressed[K_x]:
if self.main_menu.get_choice() == MAIN_MENU[0]:
self.screen_state = 'start'
self.load_start_menu()
self.start_menu.focus()
elif self.main_menu.get_choice() == MAIN_MENU[1]:
self.screen_state = 'register'
self.load_register_menu()
self.register_menu.focus()
elif self.main_menu.get_choice() == MAIN_MENU[2]:
self.screen_state = 'erase'
self.load_erase_menu()
self.erase_menu.focus()
elif self.main_menu.get_choice() == MAIN_MENU[3]:
self.screen_state = 'copy'
self.load_copy_menu()
self.copy_menu.focus()
self.main_menu = None
def handle_input_start(self, pressed):
self.start_menu.handle_input(pressed)
if pressed[K_x]:
slot = int(self.start_menu.get_choice()[0])
game_state = self.state[slot-1]
if game_state.get('corrupt'):
self.start_prompt = create_prompt('That history is corrupt and should be deleted. Which history do you wish to continue?')
self.screen_state = 'start_prompt'
self.start_menu.unfocus()
else:
self.screen_state = 'speed'
self.load_speed_menu()
self.speed_menu.focus()
self.start_menu.unfocus()
self.start_prompt.shutdown()
elif pressed[K_z]:
self.screen_state = 'main'
self.load_main_menu()
self.start_menu = None
self.start_prompt.shutdown()
self.start_prompt = None
self.main_menu.focus()
def handle_input_copy(self, pressed):
self.copy_menu.handle_input(pressed)
if pressed[K_x]:
slot = int(self.copy_menu.get_choice()[0])
game_state = self.state[slot-1]
if game_state.get('corrupt'):
self.copy_prompt = create_prompt('That history is corrupt and should be deleted. Which history book do you wish to copy?')
self.screen_state = 'copy_prompt'
self.copy_menu.unfocus()
else:
self.screen_state = 'paste'
self.load_paste_menu()
self.paste_menu.focus()
self.copy_menu.unfocus()
self.copy_prompt.shutdown()
elif pressed[K_z]:
self.screen_state = 'main'
self.load_main_menu()
self.copy_menu = None
self.copy_prompt.shutdown()
self.copy_prompt = None
self.main_menu.focus()
def handle_input_paste(self, pressed):
self.paste_menu.handle_input(pressed)
if pressed[K_x]:
self.paste_prompt.shutdown()
copy_save_state(self.copy_menu.get_choice()[0], self.paste_menu.get_choice()[0])
self.state = load_save_states()
self.screen_state = 'main'
self.load_main_menu()
self.main_menu.focus()
self.copy_menu = None
self.paste_menu = None
self.paste_prompt = None
elif pressed[K_z]:
self.screen_state = 'copy'
self.load_copy_menu()
self.paste_menu = None
self.paste_prompt.shutdown()
self.paste_prompt = None
self.copy_menu.focus()
def handle_input_speed(self, pressed):
self.speed_menu.handle_input(pressed)
if pressed[K_x]:
self.start_prompt.shutdown()
time.sleep(.5)
slot = int(self.start_menu.get_choice()[0])
self.game.game_state = self.state[slot-1]
self.game.unprocessed_beaten_path = [k for k, v in self.game.game_state['beaten_path'].items() if not v]
self.game.slot = slot
if self.game.game_state['level'] == 0:
self.game.set_screen_state('beginning')
else:
if self.game.args.map:
self.game.set_current_map(self.game.args.map, list(self.game.args.position), 'n')
else:
hq_map = u'{}_palace'.format(self.game.game_state['hq'])
self.game.mark_beaten_path(HQ_LOCATIONS[self.game.game_state['hq']])
self.game.set_current_map(hq_map, [17,15], 'n', followers='trail', continue_current_music=True)
elif pressed[K_z]:
self.screen_state = 'start'
self.start_menu.focus()
self.speed_menu = None
self.start_prompt.shutdown()
def handle_input_register(self, pressed):
self.register_menu.handle_input(pressed)
if pressed[K_x]:
self.screen_state = 'name'
self.load_name_menu()
self.register_prompt.shutdown()
self.register_prompt = None
self.name_menu.focus()
elif pressed[K_z]:
self.screen_state = 'main'
self.load_main_menu()
self.register_menu = None
self.register_prompt.shutdown()
self.register_prompt = None
self.main_menu.focus()
def handle_input_erase(self, pressed):
self.erase_menu.handle_input(pressed)
if pressed[K_x]:
self.screen_state = 'confirm_erase'
self.erase_prompt.shutdown()
self.load_confirm_erase_menu()
self.confirm_erase_menu.focus()
self.erase_menu.unfocus()
elif pressed[K_z]:
self.screen_state = 'main'
self.load_main_menu()
self.erase_menu = None
self.erase_prompt.shutdown()
self.erase_prompt = None
self.main_menu.focus()
def handle_input_confirm_erase(self, pressed):
self.confirm_erase_menu.handle_input(pressed)
if pressed[K_x] and self.confirm_erase_menu.get_choice() == 'YES':
erase_save_state(self.erase_menu.get_choice()[0])
self.state = load_save_states()
self.screen_state = 'main'
self.load_main_menu()
self.confirm_erase_menu = None
self.erase_menu = None
self.erase_prompt.shutdown()
self.erase_prompt = None
self.main_menu.focus()
elif pressed[K_z] or (pressed[K_x] and self.confirm_erase_menu.get_choice() == 'NO'):
self.screen_state = 'erase'
self.load_erase_menu()
self.confirm_erase_menu = None
self.erase_menu.focus()
self.erase_prompt.shutdown()
def handle_input_name(self, pressed):
self.name_menu.handle_input(pressed)
if pressed[K_x]:
new_char = self.name_menu.get_choice()
if new_char == 'Back.':
self.current_name_char = self.current_name_char-1 if self.current_name_char > 0 else 0
elif new_char == 'Fwd.':
self.current_name_char = self.current_name_char+1 if self.current_name_char < 7 else 7
elif new_char == 'End.':
create_save_state(self.register_menu.get_choice()[0], self.name_field.text)
self.state = load_save_states()
self.screen_state = 'main'
self.load_main_menu()
self.register_menu = None
self.current_name_char = None
self.name_blurb = None
self.name_field = None
self.name_underline = None
self.name_menu = None
self.main_menu.focus()
else:
text = self.name_field.text
i = self.current_name_char
self.name_field = TextBox(text[:i] + new_char + text[i+1:])
self.current_name_char = self.current_name_char+1 if self.current_name_char < 7 else 7
def handle_input(self, pressed):
if pressed[K_x]:
self.select_sound.play()
if self.screen_state == 'main':
self.handle_input_main(pressed)
elif self.screen_state == 'start':
self.handle_input_start(pressed)
elif self.screen_state == 'start_prompt':
self.start_prompt.handle_input(pressed)
elif self.screen_state == 'speed':
self.handle_input_speed(pressed)
elif self.screen_state == 'register':
self.handle_input_register(pressed)
elif self.screen_state == 'erase':
self.handle_input_erase(pressed)
elif self.screen_state == 'confirm_erase':
self.handle_input_confirm_erase(pressed)
elif self.screen_state == 'name':
self.handle_input_name(pressed)
elif self.screen_state == 'copy':
self.handle_input_copy(pressed)
elif self.screen_state == 'copy_prompt':
self.copy_prompt.handle_input(pressed)
elif self.screen_state == 'paste':
self.handle_input_paste(pressed)
| 45.672372 | 147 | 0.59652 | 2,401 | 18,680 | 4.36818 | 0.086214 | 0.078185 | 0.080092 | 0.052536 | 0.718154 | 0.599066 | 0.508009 | 0.455378 | 0.3914 | 0.31331 | 0 | 0.006773 | 0.280782 | 18,680 | 408 | 148 | 45.784314 | 0.7738 | 0.00803 | 0 | 0.494709 | 0 | 0.002646 | 0.066775 | 0.002483 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0 | 0.015873 | 0.005291 | 0.087302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff37239f95e4e3ea9734c1a1abe1ff10c0e7a235 | 1,033 | py | Python | apps/groups/urls.py | sgazda94/dj_schulx | c6d2086201b80bb007df34be6b623e3001bf7446 | [
"MIT"
] | null | null | null | apps/groups/urls.py | sgazda94/dj_schulx | c6d2086201b80bb007df34be6b623e3001bf7446 | [
"MIT"
] | 1 | 2022-03-28T22:19:01.000Z | 2022-03-28T22:19:01.000Z | apps/groups/urls.py | sgazda94/dj_schulx | c6d2086201b80bb007df34be6b623e3001bf7446 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = "groups"
urlpatterns = [
path("", views.GroupListView.as_view(), name="group-list"),
# group
path("<int:pk>", views.GroupDetailView.as_view(), name="group-detail"),
path("create/", views.GroupCreateView.as_view(), name="group-create"),
path("<int:pk>/update/", views.GroupUpdateView.as_view(), name="group-update"),
path("<int:pk>/delete/", views.GroupDeleteView.as_view(), name="group-delete"),
# lesson
path(
"<int:group_id>/lesson/<int:pk>",
views.LessonDetailView.as_view(),
name="lesson-detail",
),
path(
"<int:group_id>/lesson/create/",
views.LessonCreateView.as_view(),
name="lesson-create",
),
path(
"<int:group_id>/lesson/<int:pk>/update/",
views.LessonUpdateView.as_view(),
name="lesson-update",
),
path(
"<int:group_id>/lesson/<int:pk>/start",
views.LessonStartView.as_view(),
name="lesson-start",
),
]
| 28.694444 | 83 | 0.604066 | 120 | 1,033 | 5.083333 | 0.266667 | 0.088525 | 0.147541 | 0.122951 | 0.155738 | 0.122951 | 0.122951 | 0 | 0 | 0 | 0 | 0 | 0.206196 | 1,033 | 35 | 84 | 29.514286 | 0.743902 | 0.011617 | 0 | 0.266667 | 0 | 0 | 0.289784 | 0.130648 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff380efa06c35f4afffb0d74a5bad4060b622804 | 5,792 | py | Python | baseline2D.py | lruthotto/NeuralOC | 3f37c7349527fb1f5890077ebe987f2f77486a54 | [
"MIT"
] | 13 | 2021-05-12T11:52:53.000Z | 2022-03-30T14:50:28.000Z | baseline2D.py | lruthotto/NeuralOC | 3f37c7349527fb1f5890077ebe987f2f77486a54 | [
"MIT"
] | null | null | null | baseline2D.py | lruthotto/NeuralOC | 3f37c7349527fb1f5890077ebe987f2f77486a54 | [
"MIT"
] | 3 | 2021-04-20T08:07:50.000Z | 2022-02-15T22:37:12.000Z | # baseline2D.py
# baseline method for problems using the Cross2D object
import math
import torch
import argparse
from src.utils import normpdf
from src.initProb import *
from src.OCflow import ocG
from src.plotter import *
parser = argparse.ArgumentParser('Baseline')
parser.add_argument(
'--data', choices=['softcorridor','swap2','swap12','swarm',
'swap12_1pair', 'swap12_2pair', 'swap12_3pair', 'swap12_4pair', 'swap12_5pair', # for CoD experiment
'midcross2', 'midcross4', 'midcross20', 'midcross30'],
type=str, default='softcorridor')
parser.add_argument("--nt" , type=int, default=50, help="number of time steps")
parser.add_argument('--alph' , type=str, default='100.0, 10000.0, 300.0')
# alphas: G, Q (obstacle), W (interaction)
parser.add_argument('--niters', type=int, default=600)
parser.add_argument('--prec' , type=str, default='single', choices=['single','double'], help="single or double precision")
parser.add_argument('--save' , type=str, default='experiments/oc/baseline', help="define the save directory")
parser.add_argument('--resume', type=str, default=None, help="for loading a pretrained model")
args = parser.parse_args()
args.alph = [float(item) for item in args.alph.split(',')]
if args.prec =='double':
argPrec = torch.float64
else:
argPrec = torch.float32
device= torch.device('cpu')
cvt = lambda x: x.type(argPrec).to(device, non_blocking=True)
nt = args.nt
def loss_fun(U, Z_0, prob, nt, alphG):
"""
compute loss
:param U: nt-by-d tensor, set of controls
:param Z_0: d-dim vector, initial point
:param prob: problem Object
:param nt: int, number of time steps
:param alphG: float, alpha_0 on the terminal cost G
:return: float, loss value
"""
h = 1. / nt
Z = Z_0
loss = 0
for i in range(nt):
Z = Z + h * U[i,:]
L, _ , _ , _ = prob.calcLHQW(Z.view(1,-1), U[i,:].view(1,-1))
loss = loss + h * L
cG = 0.5 * torch.sum(ocG(Z.view(1,-1), prob.xtarget)**2, 1, keepdims=True)
loss = loss + alphG * cG
return loss
def trainBaseline(z0,prob,nt=10, nIters = 600, alphG=100., u=None):
"""
method to train the baseline model, a discrete optimization approach
:param z0: d-dim vector, initial point
:param prob: problem Object
:param nt: int, number of time steps
:param nIters: int, max number of iterations
:param alphG: float, alpha_0 on the terminal cost G
:param u: nt-by-d Parameters, the controls, initial guess
:return: nt-by-d Parameters, the optimized u
"""
if u is None:
# initialize with noisy straight lines
y = prob.xtarget - z0
u = y * torch.ones((nt,z0.numel()), device=y.device, dtype=y.dtype) + 0.1*torch.randn(nt, z0.numel(), device=y.device, dtype=y.dtype)
u = torch.nn.Parameter(u)
bestLoss = float("inf")
ubest = torch.zeros_like(u.data)
lr = 0.1
optim = torch.optim.Adam([{'params': u}], lr=lr, weight_decay=0.0 )
for i in range(nIters):
optim.zero_grad()
err = loss_fun(u, z0, prob, nt, alphG) # calc loss
if err.item() < bestLoss:
bestLoss= err.item()
ubest.data = copy.deepcopy(u.data)
err.backward() # backprop
optim.step()
if i % 10 == 0: # log_freq
print(i, err.item())
        if i > 0 and i % (nIters // 4) == 0: # lower lr every quarter of the iterations
            lr = lr * 0.1
            optim.param_groups[0]['lr'] = lr
            print('lr: ', lr)
return ubest
if __name__ == '__main__':
alphG = args.alph[0]
prob, _, _, xInit = initProb(args.data, 10, 10, var0=1.0, cvt=cvt,
alph=[alphG, args.alph[1], args.alph[2], 0.0, 0.0, 0.0])
prob.train()
d = xInit.numel()
strTitle = 'baseline_' + args.data + '_{:}_{:}_{:}'.format(int(alphG), int(prob.alph_Q),int(prob.alph_W))
x0 = xInit # x0 can be more than one point
traj = cvt(torch.zeros(x0.size(0), d, nt+1))
h = 1. / nt
for i in range(x0.size(0)):
z0 = x0[i,:]
if args.resume is not None: # load a previous model
# if loading a pretrained model, check that alph values are set appropriately
uopt = cvt(torch.load(args.resume))
else:
uopt = trainBaseline(z0, prob, nt=nt, nIters = args.niters, alphG=alphG, u=None)
# save weights
torch.save(uopt, args.save + '/' + strTitle + '.pth')
# Visualization
prob.eval() # set problem to eval mode
traj[i,:,0] = z0
accL = 0 # accumulated along trajectory
accQ = 0
accW = 0
for j in range(nt):
L, _, Q, W = prob.calcLHQW(traj[i,:,j].view(1, -1), uopt[j, :].view(1, -1))
accL = accL + h * L
accQ = accQ + h * Q
accW = accW + h * W
traj[i,:,j+1] = traj[i,:,j] + h * uopt[j,:]
cG = 0.5 * torch.sum(ocG(traj[i,:,-1].view(1, -1), prob.xtarget) ** 2, 1, keepdims=True)
G = alphG * cG
totLoss = G + accL
print('{:10s} {:10s} {:10s} {:10s} {:10s}'.format('loss', 'L', 'G', 'Q', 'W'))
print('{:10.4e} {:10.4e} {:10.4e} {:10.4e} {:10.4e}'.format(totLoss.item(), accL.item(), G.item(), accQ.item(), accW.item()))
sPath = args.save + '/figs/' + strTitle + '.pdf' # '.png'
if args.data == 'corridor' \
or args.data == 'softcorridor' \
or args.data == 'hardcorridor' \
or args.data[0:4] == 'swap' \
or args.data[0:8] == 'midcross':
plotMidCross(traj[:,:,0], traj, prob, nt, sPath, sTitle='baseline', approach='baseline')
# plotMidCrossJustFinal(traj[:,:,0], traj, prob, nt, sPath, sTitle=strTitle, approach='baseline')
print('plot saved to ' + sPath)
| 34.891566 | 141 | 0.577866 | 825 | 5,792 | 4.002424 | 0.294545 | 0.019079 | 0.036039 | 0.009691 | 0.162932 | 0.137795 | 0.12871 | 0.112962 | 0.106905 | 0.068141 | 0 | 0.039271 | 0.261395 | 5,792 | 165 | 142 | 35.10303 | 0.732585 | 0.200276 | 0 | 0.038835 | 0 | 0.009709 | 0.128064 | 0.005078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019417 | false | 0 | 0.067961 | 0 | 0.106796 | 0.048544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff38bd994fdfd83262984d03a2c5fd5b0d4567e2 | 1,419 | py | Python | mbus_ref/mbus_controlframe.py | gideoncaringal/pyMeterBus | f2489425f156ec43dc408e343f69ece074cb1fba | [
"BSD-3-Clause"
] | null | null | null | mbus_ref/mbus_controlframe.py | gideoncaringal/pyMeterBus | f2489425f156ec43dc408e343f69ece074cb1fba | [
"BSD-3-Clause"
] | null | null | null | mbus_ref/mbus_controlframe.py | gideoncaringal/pyMeterBus | f2489425f156ec43dc408e343f69ece074cb1fba | [
"BSD-3-Clause"
] | 1 | 2019-12-05T13:49:04.000Z | 2019-12-05T13:49:04.000Z | from mbus_h import *
from mbus_telegram import *
from exceptions import *
class MBusControlFrame(MBusTelegram):
def __init__(self, dbuf=None):
self.base_size = MBUS_FRAME_BASE_SIZE_CONTROL
return
def compute_crc(self):
return (self.control + self.address + self.control_information) % 256
def check_crc(self):
return self.compute_crc() == self.checksum
@staticmethod
def parse(data):
		if not data or len(data) < MBUS_FRAME_BASE_SIZE_CONTROL:
raise MBusFrameDecodeError("Invalid M-Bus length")
if data[0] != MBUS_FRAME_CONTROL_START:
raise MBusFrameDecodeError("Wrong start byte")
base_frame = MBusControlFrame()
base_frame.type = MBUS_FRAME_TYPE_CONTROL
base_frame.base_size = MBUS_FRAME_BASE_SIZE_CONTROL
base_frame.start1 = data[0]
base_frame.length1 = data[1]
base_frame.length2 = data[2]
base_frame.start2 = data[3]
base_frame.control = data[4]
base_frame.address = data[5]
base_frame.control_information = data[6]
base_frame.checksum = data[7]
base_frame.stop = data[8]
if base_frame.length1 > 3 or base_frame.length1 != base_frame.length2:
raise MBusFrameDecodeError("Invalid M-Bus length1 value")
if not base_frame.check_crc():
raise MBusFrameCRCError(base_frame.compute_crc(), base_frame.checksum)
return base_frame
| 30.847826 | 73 | 0.693446 | 185 | 1,419 | 5.037838 | 0.324324 | 0.183476 | 0.055794 | 0.054721 | 0.171674 | 0.06867 | 0.06867 | 0 | 0 | 0 | 0 | 0.01991 | 0.221283 | 1,419 | 45 | 74 | 31.533333 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0.044397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.088235 | 0.058824 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff3de2b458b0ec727bd41816ccf0ff6f78bbd977 | 33,996 | py | Python | pytorch_library/utils_training.py | MarioProjects/pytorchlib | 81ea32304d899fbd10ae1efe1d124c0d7bc96f5c | [
"MIT"
] | 1 | 2018-12-05T17:11:47.000Z | 2018-12-05T17:11:47.000Z | pytorch_library/utils_training.py | MarioProjects/pytorchlib | 81ea32304d899fbd10ae1efe1d124c0d7bc96f5c | [
"MIT"
] | null | null | null | pytorch_library/utils_training.py | MarioProjects/pytorchlib | 81ea32304d899fbd10ae1efe1d124c0d7bc96f5c | [
"MIT"
] | null | null | null | import types
import numpy as np
import torch
import math
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd.variable import Variable
CROSS_ENTROPY_ONE_HOT_WARNING = False
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
# Example
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
def get_optimizer(optmizer_type, model_params, lr=0.1, pmomentum=0.9, pweight_decay=5e-4, palpha=0.9):
    # Function to rebuild the optimizer -> helps when changing the learning rate
if optmizer_type=="SGD":
return optim.SGD(filter(lambda p: p.requires_grad, model_params), lr=lr, momentum=pmomentum)
elif optmizer_type=="Adam":
return optim.Adam(filter(lambda p: p.requires_grad, model_params), lr=lr, weight_decay=pweight_decay)
elif optmizer_type=="RMSprop":
return optim.RMSprop(filter(lambda p: p.requires_grad, model_params), lr=lr, alpha=palpha)
assert False, 'No optimizers with that name!'
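### Usage sketch (hedged; 'model' below is an assumed nn.Module):
# model = nn.Linear(10, 2)
# optimizer = get_optimizer("SGD", model.parameters(), lr=0.01, pmomentum=0.9)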
def get_current_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def anneal_lr_lineal(models, lr_init, total_epochs, current_epoch, optimizer_type, flag=True):
    # flag indicates whether we actually want to anneal the lr of the models
if not flag: lr_new = lr_init
else: lr_new = -(lr_init/total_epochs) * current_epoch + lr_init
redes_resultado = []
for model in models:
redes_resultado.append(get_optimizer(optimizer_type, model.parameters(), lr=lr_new))
if len(redes_resultado) == 1: return lr_new, redes_resultado[0]
return lr_new, redes_resultado
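### Worked example (hedged; 'model' is an assumption): with lr_init=0.1 and
### total_epochs=100, epoch 50 gives lr_new = -(0.1/100)*50 + 0.1 = 0.05
# lr_new, optimizer = anneal_lr_lineal([model], 0.1, 100, 50, "SGD")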
def defrost_model_params(model):
    # Function to unfreeze networks!
for param in model.parameters():
param.requires_grad = True
def simple_target_creator(samples, value):
"""
    Function to create a vector used to assign the class of the
    different samples during training, of size 'samples'.
    The vector will be (samples x 1) filled with 'value'.
"""
return Variable(torch.ones(samples, 1)).type(torch.cuda.FloatTensor)*value
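### Usage sketch (hedged): per-sample GAN labels on the GPU
# real_labels = simple_target_creator(64, 1)  # 64x1 CUDA tensor of ones
# fake_labels = simple_target_creator(64, 0)  # 64x1 CUDA tensor of zeros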
def train_simple_model(model, data, target, loss, optimizer, out_pos=-1, target_one_hot=False, net_type="convolutional", do_step=True, get_corrects=False):
# Losses: https://pytorch.org/docs/stable/nn.html
if(model.training==False): model.train()
if net_type == "fully-connected":
model_out = model.forward(Variable(data.float().view(data.shape[0], -1)))
elif net_type == "convolutional":
model_out = model.forward(Variable(data.float()))
    # Some models return several outputs, such as the reshape
    # layer and the logits, etc. To know which output to use in the
    # loss we take the one indicated by the out_pos parameter
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[out_pos]
if target_one_hot: _, target = target.max(dim=1)
n_correct = (torch.max(model_out, 1)[1].view(target.size()) == target).sum().item()
    # Compute the obtained loss
    # Careful with one-hot encoding! https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/8
try: cost = loss(model_out, target)
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
cost.backward()
if do_step:
        # Update weights and reset gradients
optimizer.step()
optimizer.zero_grad()
if get_corrects: return n_correct, cost.item()
else: return cost.item()
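### Usage sketch (hedged; model, optimizer and trainloader are assumptions):
# loss_fn = nn.CrossEntropyLoss()
# for data, target in trainloader:
#     cost = train_simple_model(model, data.cuda(), target.cuda(), loss_fn, optimizer)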
def evaluate_accuracy_models_generator(models, data, max_data=0, topk=(1,), target_one_hot=False, net_type="convolutional"):
"""Computes the accuracy (sobre 1) over the k top predictions for the specified values of k"""
# Si paso un modelo y topk(1,5) -> acc1, acc5,
# Si paso dos modelo y topk(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, total_samples = [0]*len(models), 0
for batch_idx, (batch, target) in enumerate(data):
if target_one_hot: _, target = target.max(dim=1)
batch_size = target.size(0)
            # Compute predictions for the test error of all the models
            # Run the forward pass for each model and check which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
                # Some models return several outputs, such as the reshape
                # layer and the logits, etc. So the convention is that the
                # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
                # Turn the logits into an output: the index with the highest
                # value among the tuples that contain the logits
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
total_samples += batch_size
if max_data != 0 and total_samples >= max_data: break
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
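### Usage sketch (hedged; test_loader is an assumed DataLoader):
# acc1, acc5 = evaluate_accuracy_models_generator([model], test_loader, topk=(1, 5))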
def evaluate_accuracy_loss_models_generator(models, data, loss, max_data=0, topk=(1,), target_one_hot=False, net_type="convolutional"):
"""Computes the accuracy (sobre 1) over the k top predictions for the specified values of k"""
# Si paso un modelo y topk(1,5) -> acc1, acc5,
# Si paso dos modelo y topk(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, loss_models, total_samples = [0]*len(models), [0]*len(models), 0
for batch_idx, (batch, target) in enumerate(data):
if target_one_hot: _, target = target.max(dim=1)
batch_size = target.size(0)
            # Compute predictions for the test error of all the models
            # Run the forward pass for each model and check which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
                # Some models return several outputs, such as the reshape
                # layer and the logits, etc. So the convention is that the
                # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
                # Turn the logits into an output: the index with the highest
                # value among the tuples that contain the logits
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
try: cost = loss(model_out, target.cuda())
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
loss_models[model_indx] += cost.item()
total_samples += batch_size
if max_data != 0 and total_samples >= max_data: break
"""
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
"""
accuracies, losses = [], []
for indx, result_model in enumerate(correct_models):
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
losses.append((loss_models[indx]*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0], losses[0]
return accuracies[0], accuracies[1], losses[0]
#zipped = [a for a in zip(accuracies,losses)]
#return [item for sublist in zipped for item in sublist]
def evaluate_accuracy_models_data(models, X_data, y_data, batch_size=100, max_data=0, topk=(1,), net_type="convolutional"):
"""Computes the accuracy over the k top predictions for the specified values of k"""
    # If one model is passed and topk=(1,5) -> acc1, acc5
    # If two models are passed and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, total_samples = [0]*len(models), 0
total_samples = 0
while True:
            # Make sure we do not overshoot with the batch_size
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
            # Compute predictions for the test error of all the models
            # Run the forward pass for each model and check which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
                # Some models return several outputs, such as the reshape
                # layer and the logits, etc. So the convention is that the
                # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
                # Turn the logits into an output: the index with the highest
                # value among the tuples that contain the logits
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(X_data): break
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
def evaluate_accuracy_loss_models_data(models, X_data, y_data, loss, batch_size=100, max_data=0, topk=(1,), net_type="convolutional"):
"""Computes the accuracy over the k top predictions for the specified values of k"""
    # If one model is passed and topk=(1,5) -> acc1, acc5
    # If two models are passed and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, loss_models, total_samples = [0]*len(models), [0]*len(models), 0
total_samples = 0
while True:
            # Make sure we do not overshoot with the batch_size
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
            # Compute predictions for the test error of all the models
            # Run the forward pass for each model and check which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
                # Some models return several outputs, such as the reshape
                # layer and the logits, etc. So the convention is that the
                # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
                # Turn the logits into an output: the index with the highest
                # value among the tuples that contain the logits
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
try: cost = loss(model_out, target.cuda())
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
loss_models[model_indx] += cost.item()
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(X_data): break
accuracies, losses = [], []
for indx, result_model in enumerate(correct_models):
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
losses.append((loss_models[indx]*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0], losses[0]
return accuracies[0], accuracies[1], losses[0]
#zipped = [a for a in zip(accuracies,losses)]
#return [item for sublist in zipped for item in sublist]
def evaluate_accuracy_model_predictions(model_out, y_data, batch_size=100, max_data=0, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
    # If one model is passed and topk=(1,5) -> acc1, acc5
    # Only allows passing a single output, model_out!
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
        # Some models return several outputs, such as the reshape
        # layer and the logits, etc. So the convention is that the
        # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
correct_models, total_samples = 0, 0
total_samples = 0
while True:
            # Make sure we do not overshoot with the batch_size
if total_samples + batch_size >= len(model_out): batch_size = (len(model_out)) - total_samples
batch_out = model_out[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
            # Turn the logits into an output: the index with the highest
            # value among the tuples that contain the logits
res_topk = np.array(topk_accuracy(batch_out, target.cuda(), topk=topk))
correct_models += res_topk
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(model_out): break
return (correct_models*1.0 / total_samples)
def predictions_models_data(models, X_data, batch_size=100, net_type="convolutional"):
"""Computes the predictions for the specified data X_data"""
with torch.no_grad():
outs_models, total_samples = [torch.zeros(0,0).cuda()]*len(models), 0
total_samples = 0
while True:
            # Make sure we do not overshoot with the batch_size
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
            # Compute predictions for the test error of all the models
            # Run the forward pass for each model and check which classes it gets right
for model_indx, model in enumerate(models):
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
                # Some models return several outputs, such as the reshape
                # layer and the logits, etc. So the convention is that the
                # last output holds the model logits used for classification
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
                outs_models[model_indx] = torch.cat((outs_models[model_indx], model_out))
total_samples+=batch_size
if total_samples == len(X_data): break
if len(outs_models) == 1: return outs_models[0]
return outs_models
# INPUTS: output have shape of [batch_size, category_count]
# and target in the shape of [batch_size] * there is only one true class for each sample
# topk is tuple of classes to be included in the precision
# topk have to a tuple so if you are giving one number, do not forget the comma
def topk_accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
#we do not need gradient calculation for those
with torch.no_grad():
#we will use biggest k, and calculate all precisions from 0 to k
maxk = max(topk)
batch_size = target.size(0)
#topk gives biggest maxk values on dimth dimension from output
#output was [batch_size, category_count], dim=1 so we will select biggest category scores for each batch
# input=maxk, so we will select maxk number of classes
#so result will be [batch_size,maxk]
#topk returns a tuple (values, indexes) of results
# we only need indexes(pred)
_, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
# then we transpose pred to be in shape of [maxk, batch_size]
pred = pred.t()
#we flatten target and then expand target to be like pred
# target [batch_size] becomes [1,batch_size]
# target [1,batch_size] expands to be [maxk, batch_size] by repeating same correct class answer maxk times.
# when you compare pred (indexes) with expanded target, you get 'correct' matrix in the shape of [maxk, batch_size] filled with 1 and 0 for correct and wrong class assignments
correct = pred.eq(target.view(1, -1).expand_as(pred))
""" correct=([[0, 0, 1, ..., 0, 0, 0],
[1, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 1, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 1, 0, ..., 0, 0, 0]], device='cuda:0', dtype=torch.uint8) """
res = []
# then we look for each k summing 1s in the correct matrix for first k element.
for k in topk:
res.append(correct[:k].view(-1).float().sum(0, keepdim=True))
return res
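### Usage sketch (hedged; logits and targets are assumed CUDA tensors):
# top1, top5 = topk_accuracy(logits, targets, topk=(1, 5))  # correct counts per k
# acc1 = top1.item() / targets.size(0)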
##########################################################################################################
##########################################################################################################
##########################################################################################################
def findLR(model, optimizer, criterion, trainloader, final_value=10, init_value=1e-8, verbose=1):
#https://medium.com/coinmonks/training-neural-networks-upto-10x-faster-3246d84caacd
'''
findLR plots the graph for the optimum learning rates for the model with the
corresponding dataset.
The technique is quite simple. For one epoch,
1. Start with a very small learning rate (around 1e-8) and increase the learning rate linearly.
2. Plot the loss at each step of LR.
3. Stop the learning rate finder when loss stops going down and starts increasing.
A graph is created with the x axis having learning rates and the y axis
having the losses.
Arguments:
1. model - (torch.nn.Module) The deep learning pytorch network.
    2. optimizer: (torch.optim) The optimizer for the model, e.g. SGD, Adam, etc.
3. criterion: (torch.nn) The loss function that is used for the model.
4. trainloader: (torch.utils.data.DataLoader) The data loader that loads data in batches for input into model
5. final_value: (float) Final value of learning rate
6. init_value: (float) Starting learning rate.
Returns:
learning rates used and corresponding losses
'''
model.train() # setup model for training configuration
num = len(trainloader) - 1 # total number of batches
mult = (final_value / init_value) ** (1/num)
losses = []
lrs = []
best_loss = 0.
avg_loss = 0.
beta = 0.98 # the value for smooth losses
lr = init_value
for batch_num, (inputs, targets) in enumerate(trainloader):
if verbose==1: print("Testint LR: {}".format(lr))
optimizer.param_groups[0]['lr'] = lr
batch_num += 1 # for non zero value
inputs, targets = inputs.cuda(), targets.cuda() # convert to cuda for GPU usage
optimizer.zero_grad() # clear gradients
outputs = model(inputs) # forward pass
loss = criterion(outputs, targets.long().cuda()) # compute loss
#Compute the smoothed loss to create a clean graph
avg_loss = beta * avg_loss + (1-beta) *loss.item()
smoothed_loss = avg_loss / (1 - beta**batch_num)
#Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
# append loss and learning rates for plotting
lrs.append(math.log10(lr))
losses.append(smoothed_loss)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
# backprop for next step
loss.backward()
optimizer.step()
# update learning rate
lr = mult*lr
#plt.xlabel('Learning Rates')
#plt.ylabel('Losses')
#plt.plot(lrs,losses)
#plt.show()
return lrs, losses
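### Usage sketch (hedged; model, optimizer and trainloader are assumptions):
# lrs, losses = findLR(model, optimizer, nn.CrossEntropyLoss(), trainloader)
# plt.plot(lrs, losses)  # pick the lr just before the loss starts climbing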
def get_total_parameters(model):
return sum(p.numel() for p in model.parameters())
def get_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
##########################################################################################################
##########################################################################################################
##########################################################################################################
def train_discriminator(discriminator_net, discriminator_optimizer, real_data, fake_data, loss):
    num_samples = real_data.size(0) # to get the number of samples
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# 1.1 ----> Train with real
    # Reset the gradients
discriminator_optimizer.zero_grad()
discriminator_net.zero_grad()
# prediction on Real Data
prediction_real = discriminator_net(real_data)
# Calculate error and backpropagate
    # Remember these are real samples -> targets are 1s
error_real = loss(prediction_real, simple_target_creator(num_samples, 1))
error_real.backward()
# 1.2 ----> Train on Fake Data
prediction_fake = discriminator_net(fake_data)
# Calculate error and backpropagate
    # Remember these are fake samples -> targets are 0s
error_fake = loss(prediction_fake, simple_target_creator(num_samples, 0))
error_fake.backward()
# 1.3 Update weights with gradients of discriminator
discriminator_optimizer.step()
# Return error
return error_real.item() + error_fake.item()
def train_generator(discriminator_net, generator_optimizer, fake_data, loss):
    num_samples = fake_data.size(0)  # number of samples in the batch
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
    # Reset the gradients
generator_optimizer.zero_grad()
    # Run our fake data through the discriminator so that we can
    # then try to 'fool' it
prediction = discriminator_net(fake_data)
# Calculate error and backpropagate
    # IMPORTANT -> We want the generator to learn to have its
    # samples classified as real, so
    # WE COMPUTE THE LOSS WITH 1s! as if they were real
error = loss(prediction, simple_target_creator(num_samples, 1))
error.backward()
    # 3. Update the generator's weights
generator_optimizer.step()
# Return error
return error.item()
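# Example epoch loop tying the two helpers together -- a sketch, not part of
# the original file. It assumes `generator`, `discriminator`, their optimizers,
# a `loss` (e.g. nn.BCELoss()) and a `noise(n)` sampler are defined elsewhere,
# as is simple_target_creator:
#
#   for real_batch, _ in dataloader:
#       real_data = real_batch.cuda()
#       n = real_data.size(0)
#       fake_data = generator(noise(n)).detach()  # detach: D step must not update G
#       d_error = train_discriminator(discriminator, d_optimizer,
#                                     real_data, fake_data, loss)
#       fake_data = generator(noise(n))           # fresh batch, kept in the graph for G
#       g_error = train_generator(discriminator, g_optimizer, fake_data, loss)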
def loss_fn_kd_kldivloss(outputs, teacher_outputs, labels, temperature, alpha=0.9):
"""
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
and student expects the input tensor to be log probabilities! See Issue #2
source: https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
"""
    T = temperature
    # reduction='batchmean' matches the KL-divergence definition; the default
    # 'mean' averages over every element and silently scales the loss down
    KD_loss = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(outputs/T, dim=1),
                                                  F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \
              F.cross_entropy(outputs, labels) * (1. - alpha)
    return KD_loss
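# Example usage -- a sketch, assuming `student`, `teacher` and a batch
# (inputs, labels) exist; teacher logits are computed without gradients:
#
#   with torch.no_grad():
#       teacher_logits = teacher(inputs)
#   loss = loss_fn_kd_kldivloss(student(inputs), teacher_logits, labels,
#                               temperature=4.0, alpha=0.9)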
'''
mixup: BEYOND EMPIRICAL RISK MINIMIZATION: https://arxiv.org/abs/1710.09412
https://github.com/facebookresearch/mixup-cifar10
'''
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
### Usage example
# inputs, targets_a, targets_b, lam = mixup_data(batch_data, batch_target, alpha_mixup)
# inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))
# outputs = model(inputs)
# loss = mixup_criterion(loss_ce, outputs, targets_a, targets_b, lam)
# total_loss += loss.item()
''' ######################################################################## '''
''' ############################# CUTOUT ################################## '''
''' ######################################################################## '''
# https://github.com/uoguelph-mlrg/Cutout
# To use this while also using albumentations, add a separate transform
# (e.g. transforms_torchvision) and apply it via self.torchvision_transform(feature)
# There is an example in the LFW dataloader -> data_generator.py -> NPDatasetLFW
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
        mask = torch.from_numpy(mask)
        mask = mask.expand_as(img)
        # NOTE: assumes a CUDA device; the masked image is returned on the GPU
        img = img.cuda() * mask.cuda()
        return img
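# Example usage -- a sketch, assuming torchvision is available. Cutout expects
# a (C, H, W) tensor, so it goes after ToTensor; note that __call__ above also
# moves the image to the GPU:
#
#   from torchvision import transforms
#   train_transform = transforms.Compose([
#       transforms.ToTensor(),
#       Cutout(n_holes=1, length=16),
#   ])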
class BatchCutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, imgs):
"""
Args:
img (Tensor): Tensor image of size (Batch, C, H, W).
Returns:
Tensor: Images with n_holes of dimension length x length cut out of it.
"""
h = imgs.size(2)
w = imgs.size(3)
outputs = torch.empty(imgs.shape)
for index, img in enumerate(imgs):
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
#imgs[index] = img.cuda() * mask.cuda()
outputs[index] = img.cuda() * mask.cuda()
#return imgs
        return outputs.cuda()

# ---- file: augly/utils/base_paths.py (repo: Ierezell/AugLy, license: MIT) ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
MODULE_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# asset paths
ASSETS_BASE_DIR = os.path.join(MODULE_BASE_DIR, "assets")
AUDIO_ASSETS_DIR = os.path.join(ASSETS_BASE_DIR, "audio")
TEXT_DIR = os.path.join(ASSETS_BASE_DIR, "text")
EMOJI_DIR = os.path.join(ASSETS_BASE_DIR, "twemojis")
FONTS_DIR = os.path.join(ASSETS_BASE_DIR, "fonts")
IMG_MASK_DIR = os.path.join(ASSETS_BASE_DIR, "masks")
SCREENSHOT_TEMPLATES_DIR = os.path.join(ASSETS_BASE_DIR, "screenshot_templates")
TEMPLATE_PATH = os.path.join(SCREENSHOT_TEMPLATES_DIR, "web.png")
TEST_URI = os.path.join(ASSETS_BASE_DIR, "tests")
# test paths
METADATA_BASE_PATH = os.path.join(MODULE_BASE_DIR, "utils", "expected_output")
METADATA_FILENAME = "expected_metadata.json"
AUDIO_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "audio_tests", METADATA_FILENAME)
IMAGE_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "image_tests", METADATA_FILENAME)
TEXT_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "text_tests", METADATA_FILENAME)
VIDEO_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "video_tests", METADATA_FILENAME)
# ---- file: venv/Lib/site-packages/transliterate/tests/data/default.py (repo: jedeland/jedel_name_generator, license: MIT) ----
# -*- coding: utf-8 -*-
__title__ = 'transliterate.tests.data.default'
__author__ = 'Artur Barseghyan'
__copyright__ = '2013-2018 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
latin_text = u"Lorem ipsum dolor sit amet"
armenian_text = u'Լօրեմ իպսում դօլօր սիտ ամետ'
cyrillic_text = u'Лорем ипсум долор сит амет'
ukrainian_cyrillic_text = u'Лорем іпсум долор сіт амет'
bulgarian_cyrillic_text = u'Лорем ипсум долор сит амет'
georgian_text = u'ლორემ იპსუმ დოლორ სით ამეთ'
greek_text = u'Λορεμ ιψυμ δολορ σιτ αμετ'
hebrew_text = u'Lורeמ יpסuמ דולור סית אמeת'
mongolian_cyrillic_text = u'Лорэм ипсүм долор сит амэт'
serbian_cyrillic_text = u'Лорем ипсум долор сит амет'
pangram_serbian_cyrillic_text = u'Фијуче ветар у шибљу, леди пасаже и куће ' \
u'иза њих и гунђа у оџацима'
pangram_serbian_latin_text = u'Fijuče vetar u šiblju, ledi pasaže i kuće ' \
u'iza njih i gunđa u odžacima'
test_15_register_custom_language_pack_mapping = (
u"abcdefghij",
u"1234567890",
)
test_33_register_unregister_mapping = (
u"abcdefghij",
u"1234567890",
)
test_34_latin_to_latin_mapping = (
u"abgdezilxkhmjnpsvtrcqw&ofABGDEZILXKHMJNPSVTRCQOFW",
u"zbgdeailxkhnjmpswtrcqv&ofZBGDEAILXKHNJMPSWTRCQOFV",
)
test_34_latin_to_latin_characters = u"abgdezilxkhmjnpsvtrcqw&of" \
u"ABGDEZILXKHMJNPSVTRCQOFW"
test_34_latin_to_latin_reversed_characters = u"abgdezilxkhmjnpsvtrcqw&of" \
u"ABGDEZILXKHMJNPSVTRCQOFW"
test_34_latin_to_latin_text = u"Lorem ipsum dolor sit amet 123453254593485938"
# ---- file: PLOT/plot_orbits2.py (repo: itpplasma/SIMPLE, license: MIT) ----
#%%
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from neo_orb_fffi import *
from os import path
import exportfig
#basedir = '/home/calbert/net/cobra/ipp/ishw19/scripts'
basedir = '.'
zs = np.load(path.join(basedir, 'z.npy'))
var_tips = np.load(path.join(basedir, 'ztip.npy'))
#%%
libneo_orb.compile(verbose=1)
neo_orb.load()
new_vmec_stuff.load()
neo_orb.init_field(5, 5, 3, -1)
#%%
s = libneo_orb._ffi.new('double*', 0.7)
theta = libneo_orb._ffi.new('double*', 0.0)
varphi = libneo_orb._ffi.new('double*', 0.0)
theta_vmec = libneo_orb._ffi.new('double*', 0.0)
varphi_vmec = libneo_orb._ffi.new('double*', 0.0)
A_phi = libneo_orb._ffi.new('double*', 0.0)
A_theta = libneo_orb._ffi.new('double*', 0.0)
dA_phi_ds = libneo_orb._ffi.new('double*', 0.0)
dA_theta_ds = libneo_orb._ffi.new('double*', 0.0)
aiota = libneo_orb._ffi.new('double*', 0.0)
R = libneo_orb._ffi.new('double*', 0.0)
Z = libneo_orb._ffi.new('double*', 0.0)
alam = libneo_orb._ffi.new('double*', 0.0)
dR_ds = libneo_orb._ffi.new('double*', 0.0)
dR_dt = libneo_orb._ffi.new('double*', 0.0)
dR_dp = libneo_orb._ffi.new('double*', 0.0)
dZ_ds = libneo_orb._ffi.new('double*', 0.0)
dZ_dt = libneo_orb._ffi.new('double*', 0.0)
dZ_dp = libneo_orb._ffi.new('double*', 0.0)
dl_ds = libneo_orb._ffi.new('double*', 0.0)
dl_dt = libneo_orb._ffi.new('double*', 0.0)
dl_dp = libneo_orb._ffi.new('double*', 0.0)
Bctrvr_vartheta = libneo_orb._ffi.new('double*', 0.0)
Bctrvr_varphi = libneo_orb._ffi.new('double*', 0.0)
Bcovar_r = libneo_orb._ffi.new('double*', 0.0)
Bcovar_vartheta = libneo_orb._ffi.new('double*', 0.0)
Bcovar_varphi = libneo_orb._ffi.new('double*', 0.0)
sqg = libneo_orb._ffi.new('double*', 0.0)
# %%
phase = -np.pi
RR = np.zeros_like(zs[0, :])
ZZ = np.zeros_like(RR)
PP = np.zeros_like(RR)
for k in np.arange(len(RR)):
s[0] = zs[0, k]
theta[0] = zs[1, k]
varphi[0] = zs[2, k]
if s[0] <= 0.0 or s[0] >= 1.0:
break
neo_orb.spline_vmec(
s, theta, varphi, A_phi, A_theta, dA_phi_ds, dA_theta_ds, aiota, R, Z,
alam, dR_ds, dR_dt, dR_dp, dZ_ds, dZ_dt, dZ_dp, dl_ds, dl_dt, dl_dp
)
RR[k] = R[0]/100.0
ZZ[k] = Z[0]/100.0
PP[k] = varphi[0]
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(111, projection='3d')
ax._axis3don = False
ph = np.linspace(0, 2 * np.pi, endpoint=True, num=75)
th = np.linspace(0, 2 * np.pi, endpoint=True, num=75)
th, ph = np.meshgrid(th, ph)
thflat = th.flatten()
phflat = ph.flatten()
x = np.empty_like(thflat)
y = np.empty_like(thflat)
z = np.empty_like(thflat)
for k in np.arange(len(thflat)):
s[0] = 1.0
theta[0] = thflat[k]
varphi[0] = phflat[k]
neo_orb.spline_vmec(
s, theta, varphi, A_phi, A_theta, dA_phi_ds, dA_theta_ds, aiota, R, Z,
alam, dR_ds, dR_dt, dR_dp, dZ_ds, dZ_dt, dZ_dp, dl_ds, dl_dt, dl_dp
)
x[k] = np.cos(-varphi[0]) * R[0]/100.0
y[k] = np.sin(-varphi[0]) * R[0]/100.0
z[k] = Z[0]/100.0
x = x.reshape(th.shape)
y = y.reshape(th.shape)
z = z.reshape(th.shape)
ax.plot_surface(x, y, z, rstride=2, cstride=2, color='tab:blue', alpha=0.1)
ax.plot(RR*np.cos(-PP), RR*np.sin(-PP),
ZZ, linewidth=0.5, alpha=0.7, color='k')
ax.set_xlim(-16, 16)
ax.set_ylim(-16, 16)
ax.set_zlim(-5, 5)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.tight_layout()
RR = np.zeros_like(var_tips[0, :])
ZZ = np.zeros_like(RR)
PP = np.zeros_like(RR)
for k in np.arange(len(RR)):
s[0] = var_tips[0, k]
theta[0] = var_tips[1, k]
varphi[0] = var_tips[2, k]
if s[0] <= 0.0 or s[0] >= 1.0:
break
neo_orb.spline_vmec(
s, theta, varphi, A_phi, A_theta, dA_phi_ds, dA_theta_ds, aiota, R, Z,
alam, dR_ds, dR_dt, dR_dp, dZ_ds, dZ_dt, dZ_dp, dl_ds, dl_dt, dl_dp
)
RR[k] = R[0]/100.0
ZZ[k] = Z[0]/100.0
PP[k] = varphi[0]
ax.plot(RR*np.cos(-PP), RR*np.sin(-PP),
ZZ, 'o', markersize=1, color='tab:red')
plt.savefig('orbit2.png', dpi=150)
exportfig.exportfig('orbit2')
exportfig.exporteps('orbit2')
# %%
fig = plt.figure(figsize=(2.0, 2.0))
#ax = fig.add_subplot(111, projection='3d')
#ax._axis3don = False
th = np.linspace(0, 2*np.pi, 100)
#plt.plot(RR, ZZ, ',')
thring = np.linspace(0, 2 * np.pi, endpoint=True, num=len(PP))
x = np.empty_like(thring)
y = np.empty_like(thring)
z = np.empty_like(thring)
for k in np.arange(len(RR)):
s[0] = 1.0
theta[0] = var_tips[1, k]
varphi[0] = np.mod(var_tips[2, k]+2.0*np.pi/10.0, 2.0*np.pi/5.0)-2.0*np.pi/10.0
neo_orb.spline_vmec(
s, theta, varphi, A_phi, A_theta, dA_phi_ds, dA_theta_ds, aiota, R, Z,
alam, dR_ds, dR_dt, dR_dp, dZ_ds, dZ_dt, dZ_dp, dl_ds, dl_dt, dl_dp
)
x[k] = R[0]/100.0
#y[k] = np.sin(-varphi[0]) * R[0]/100.0
z[k] = Z[0]/100.0
plt.plot(x, z, '.', markersize=1, color='tab:blue')
plt.plot(RR, ZZ, '.', markersize=0.5, color='tab:red')
plt.xlabel(r'$R$ / m', labelpad=2)
plt.ylabel(r'$Z$ / m', labelpad=-2)
plt.axis('equal')
plt.tight_layout()
plt.savefig('orbit2_proj.png', dpi=300)
exportfig.exporteps('orbit2_proj')
# %%
plt.figure(figsize=(1.8, 1.8))
plt.plot(np.cos(thring), np.sin(thring), '-', color='tab:blue')
plt.plot(np.sqrt(var_tips[0, :])*np.cos(var_tips[1, :]),
np.sqrt(var_tips[0, :])*np.sin(var_tips[1, :]),
'o', markersize=0.5, color='tab:red')
plt.xlabel(r'$R$')
plt.ylabel(r'$Z$')
plt.axis('equal')
plt.tight_layout()
plt.savefig('orbit2_proj_topo.png', dpi=300)
exportfig.exporteps('orbit2_proj_topo')
# %%
# ---- file: helpers/extract_edges.py (repo: aliborji/ShapeDefence, license: MIT) ----
# calc edge
import os
from edge_detector import *
from PIL import Image
# import torch
from image_transform import ImageTransform
from config import *
# from matplotlib import pyplot as plt
# import glob
# import numpy as np
import argparse
'''
Usage (the flags must match the argparse arguments defined below):
python extract_edges.py --indir ./dog-breed-identification/test/ --outdir ./dog-breed-identification/testedge/
'''
def main(img_dir, dest_dir):
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
im_tran = ImageTransform((HEIGHT, WIDTH), MEAN, STD)
files = os.listdir(img_dir)
for img_name in files:
print(img_name)
if not img_name.endswith('.jpg'): continue
fullname = os.path.join(img_dir, img_name)
im = Image.open(fullname)
im = im_tran(im, 'train')
edge_map = detect_edge_new(im[:3].permute(1,2,0)) # make it XxYx3!!!
savename = os.path.join(dest_dir, img_name)
Image.fromarray(edge_map).save(savename)
def mainSobel(img_dir, dest_dir):
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
im_tran = ImageTransform((HEIGHT, WIDTH), MEAN, STD)
files = os.listdir(img_dir)
for img_name in files:
print(img_name)
if not img_name.endswith('.jpg'): continue
fullname = os.path.join(img_dir, img_name)
im = Image.open(fullname)
im = im_tran(im, 'train')
edge_map = compute_energy_matrix(im[:3].permute(1,2,0)) # make it XxYx3!!!
savename = os.path.join(dest_dir, img_name)
Image.fromarray(edge_map).save(savename)
# for img_name in glob.glob(img_dir+'*.jpg'):
# file, ext = os.path.splitext(infile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Edge Extraction on a Folder')
parser.add_argument('--indir', type=str, help='Input dir for images')
parser.add_argument('--outdir', type=str, help='Output dir for edges')
args = parser.parse_args()
# import pdb; pdb.set_trace()
# print(args.indir)
mainSobel(args.indir, args.outdir)
print('Done!')
# main(sys.argv[1], float(sys.argv[2]))

# ---- file: bootstrap_scoped/util/_test.py (repo: achillesrasquinha/bootstrap-scoped, license: MIT) ----
# imports - compatibility imports
from bootstrap_scoped._compat import StringIO, input
# imports - standard imports
import sys
from contextlib import contextmanager
# imports - module imports
from bootstrap_scoped.util import get_if_empty
__STDIN__ = sys.stdin
@contextmanager
def mock_input(args):
# https://stackoverflow.com/a/36491341
sys.stdin = args
yield
sys.stdin = __STDIN__
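# Example (sketch): feed scripted stdin to code that calls input()
#   with mock_input(StringIO('yes\n')):
#       assert input('continue? ') == 'yes'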
def assert_stdout(capfd, output):
stdout, _ = capfd.readouterr()
assert output == stdout
def assert_input(capfd, text, output, expected = None, input_ = None, stdout = None):
    if expected is None:
        expected = output
input_ = get_if_empty(input_, input)
stdout = get_if_empty(stdout, text)
with mock_input(StringIO(output)):
assert input_(text) == expected
        assert_stdout(capfd, stdout)

# ---- file: Application/Script/script/pyexpect/arp/Cisco.py (repo: onlysheep5200/gperf, license: Apache-2.0) ----
#py
proc.sendline('show arp')
proc.expect('Type Interface')
result = []
while True:
index = proc.expect(['--More--', '#'])
lines = proc.before.strip().replace('\x08', '').splitlines()
for line in lines:
cols = line.strip().split()
length = len(cols)
        if length == 6 or length == 5:
            # skip rows whose hardware-address column is not in dotted form
            if cols[3].find('.') == -1:
                continue
            # Cisco prints MACs as aaaa.bbbb.cccc: strip the dots, then
            # regroup the 12 hex digits into colon-separated pairs
            cols[3] = cols[3].replace('.', '')
            mac = []
            for i in range(6):
                mac.append(cols[3][2*i] + cols[3][2*i+1])
            cols[3] = ':'.join(mac)
if length == 5:
cols.append('0')
result.append(dict(zip(
('protocol', 'ip', 'age', 'mac', 'type', 'interface'), cols)))
if index == 0:
proc.send(' ')
elif index == 1:
break
proc.ret_value = result
| 29.413793 | 78 | 0.458382 | 102 | 853 | 3.823529 | 0.5 | 0.076923 | 0.030769 | 0.035897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033989 | 0.344666 | 853 | 28 | 79 | 30.464286 | 0.663685 | 0.002345 | 0 | 0 | 0 | 0 | 0.083529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff482cff2e58cffc22549bec344549101d45cd35 | 682 | py | Python | ch9/crx_gates.py | PacktPublishing/Learn-Quantum-Computing-with-Python | e74caf12609ce848de2636019dd15f90d1f3189f | [
"MIT"
] | 1 | 2020-07-09T15:38:59.000Z | 2020-07-09T15:38:59.000Z | ch9/crx_gates.py | PacktPublishing/Learn-Quantum-Computing-with-Python | e74caf12609ce848de2636019dd15f90d1f3189f | [
"MIT"
] | null | null | null | ch9/crx_gates.py | PacktPublishing/Learn-Quantum-Computing-with-Python | e74caf12609ce848de2636019dd15f90d1f3189f | [
"MIT"
] | 2 | 2019-04-05T14:42:23.000Z | 2019-04-19T16:09:27.000Z | from pyquil import Program
from pyquil.parameters import Parameter, quil_sin, quil_cos
from pyquil.quilbase import DefGate
from pyquil.gates import *
import numpy as np
thetaParameter = Parameter('theta')
controlledRx = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, quil_cos(thetaParameter / 2), -1j * quil_sin(thetaParameter / 2)],
[0, 0, -1j * quil_sin(thetaParameter / 2), quil_cos(thetaParameter / 2)]
])
gate_definition = DefGate('CRX', controlledRx, [thetaParameter])
CONTROLRX = gate_definition.get_constructor()
program = Program()
program = program + gate_definition
program = program + H(0)
program = program + CONTROLRX(np.pi/2)(0, 1)
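# Optional sanity check -- a sketch, assuming a local QVM is running and this
# pyquil version ships WavefunctionSimulator:
#   from pyquil.api import WavefunctionSimulator
#   print(WavefunctionSimulator().wavefunction(program))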
print(program)

# ---- file: LexNormNew.py (repo: AnneDirkson/lex_normalization, license: BSD-3-Clause) ----
#!/usr/bin/env python
# coding: utf-8
# # Lexical normalization pipeline
#
# author - AR Dirkson
# date - 15-7-2019
#
# Python 3 script
#
# This pipeline takes raw text data and performs:
# - Removes URLs, email addresses
# - Tokenization with NLTK
# - Removes non-English posts (conservatively) using the langid module with the top 10 languages and a log-probability threshold of -100
# - British English to American English
# - Normalization of contractions
# - Normalization of generic abbreviations and slang
# - Normalization of domain-specific (patient forum) abbreviations
# - Spelling correction
# In[1]:
import os
import pickle
import numpy as np
import random
import pandas as pd
from collections import Counter, defaultdict, OrderedDict
from nltk import pos_tag, word_tokenize
import re
import seaborn as sns
import matplotlib.pyplot as plt
import editdistance
import kenlm
from sklearn.metrics import recall_score, precision_score, f1_score, fbeta_score
from nltk.tokenize.treebank import TreebankWordDetokenizer
from gensim.models import KeyedVectors
import langid
# In[2]:
class Normalizer ():
def __init__(self):
pass
    #to use this function the files need to be stored in an /obj_lex/ folder next to the script
    def load_obj(self, name):
        with open(os.path.join('obj_lex', name + '.pkl'), 'rb') as f:
            return pickle.load(f, encoding='latin1')
def load_files(self):
self.abbr_dict = self.load_obj ('abbreviations_dict')
self.aspell_dict = self.load_obj ('aspell_dict_lower')
self.short_expanse_dict = self.load_obj ('short_expansions_dict')
self.cList = self.load_obj ('contractionslistone')
self.cList2 = self.load_obj ('contractionslisttwo')
self.drugnames = self.load_obj ('fdadrugslist')
def change_tup_to_list(self, tup):
thelist = list(tup)
return thelist
def change_list_to_tup(self,thelist):
tup = tuple(thelist)
return tup
#---------Remove URls, email addresses and personal pronouns ------------------
def replace_urls(self,list_of_msgs):
list_of_msgs2 = []
for msg in list_of_msgs:
nw_msg = re.sub(
r'\b' + r'((\(<{0,1}https|\(<{0,1}http|\[<{0,1}https|\[<{0,1}http|<{0,1}https|<{0,1}http)(:|;| |: )\/\/|www.)[\w\.\/#\?\=\+\;\,\&\%_\n-]+(\.[a-z]{2,4}\]{0,1}\){0,1}|\.html\]{0,1}\){0,1}|\/[\w\.\?\=#\+\;\,\&\%_-]+|[\w\/\.\?\=#\+\;\,\&\%_-]+|[0-9]+#m[0-9]+)+(\n|\b|\s|\/|\]|\)|>)',
'-URL-', msg)
list_of_msgs2.append(nw_msg)
return list_of_msgs2
def replace_email(self,list_of_msgs):
list_of_msgs2 = []
for msg in list_of_msgs:
nw_msg = re.sub (r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+[. ])", ' ', msg) #remove email
nw_msg2 = re.sub (r"(@[a-zA-Z0-9]+[. ])", ' ', nw_msg) #remove usernames
# nw_msg3 = re.sub(r"(@ [a-zA-Z0-9]+[. ])", ' ', nw_msg2) #remove usernames
list_of_msgs2.append(nw_msg2)
return list_of_msgs2
def remove_empty (self,list_of_msgs):
empty = []
check_msgs3 =[]
for a, i in enumerate (list_of_msgs):
if len(i) == 0:
print('empty')
else:
check_msgs3.append(i)
return check_msgs3
def remove_registered_icon (self, msg):
nw_msg = re.sub ('\u00AE', '', msg)
nw_msg2 = re.sub ('\u00E9', 'e', nw_msg)
return nw_msg2
def escape_char (self, msg):
msg1 = msg.replace('\x08', '')
msg2 = msg1.replace ('\x8d', '')
msg3 = msg2.replace('ðŸ', '')
return msg3
def anonymize (self, posts):
posts2 = self.replace_urls (posts)
posts3 = self.replace_email (posts2)
posts4 = self.remove_empty(posts3)
posts5 = [self.remove_registered_icon(p) for p in posts4]
posts6 = [self.escape_char(p) for p in posts5]
return posts6
#---------Convert to lowercase ----------------------------------------------------
def lowercase (self, post):
post1 = []
for word in post:
word1 = word.lower()
post1.append (word1)
return post1
#---------Remove non_English posts -------------------------------------------------
def language_identify_basic (self, posts):
nw = []
tally = 0
list_removed = []
for post in posts:
out = langid.classify (post)
out2 = list(out)
if out2[0]=='en':
nw.append(post)
else:
tally += 1
list_removed.append(tuple ([post, out2[0], out2[1]]))
return nw, tally, list_removed
def language_identify_thres (self, msgs, lang_list, thres):
nw = []
tally = 0
list_removed = []
for post in msgs:
langid.set_languages(lang_list)
out = langid.classify (post)
out2 = list(out)
if out2[0]=='en':
nw.append(post)
elif out2[1] > thres:
nw.append(post)
else:
tally += 1
list_removed.append(tuple ([post, out2[0], out2[1]]))
return nw, tally, list_removed
def remove_non_english(self, posts):
# d = TreebankWordDetokenizer()
# posts2 = [d.detokenize(m) for m in posts]
posts_temp, tally, list_removed = self.language_identify_basic(posts)
lang = []
for itm in list_removed:
lang.append(itm[1])
c = Counter(lang)
lang_list = ['en']
for itm in c.most_common(10):
z = list(itm)
lang_list.append(z[0])
print("Most common 10 languages in the data are:" + str(lang_list))
posts3, tally_nw, list_removed_nw = self.language_identify_thres(posts, lang_list, thres = -100)
return posts3
## --- Contraction expansions ------------------------------##
def prepareContractions(self):
self.c_re = re.compile('(%s)' % '|'.join(self.cList.keys()))
self.c_re2 = re.compile('(%s)' % '|'.join(self.cList2.keys()))
def remove_apos (self, sent):
sent2 = re.sub ("'",'', sent)
return sent2
# except TypeError:
# pass
def expandContractions (self, text):
def replace(match):
return self.cList[match.group(0)]
return self.c_re.sub(replace, text)
#needs to happen after tokenization
def expandContractions_second (self, text):
text2 = []
for w in text:
if w.lower() in self.cList2:
v = word_tokenize(self.cList2[w.lower()])
for i in v:
text2.append(i)
else:
text2.append(w)
return text2
###--- 1-2 letter expansions -------------------------------##
def load_ngrammodel(self):
        path = os.path.join('obj_lex', 'tetragram_model.binary')
self.model = kenlm.Model(path)
def get_parameters_ngram_model (self, word, sent):
i = sent.index(word)
if ((i-2) >= 0) and (len(sent)>(i+2)):
out = sent[(i-2):(i+3)]
bos = False
eos = False
elif ((i-2) < 0) and (len(sent)> (i+2)) : #problem with beginning
bos = True
eos = False
out = sent[0:(i+3)]
elif ((i-2) >= 0) and (len(sent) <= (i+2)): #problem with end
bos = False
eos = True
out = sent[(i-2):]
else: #problem with both
out = sent
eos = True
bos = True
d = TreebankWordDetokenizer()
out2 = d.detokenize(out)
return bos, eos, out2
def get_prob(self, word, token, out, bos, eos): #token is candidate
out_nw = out.replace(word, token)
p = self.model.score(out_nw, bos = bos, eos = eos)
return p
def short_abbr_expansion(self, sent):
sent2 = []
for word in sent:
if len(word) > 2:
sent2.append(word)
else:
if word in self.short_expanse_dict .keys():
cand = self.short_expanse_dict [word]
final_p = -100
bos, eos, out = self.get_parameters_ngram_model(word,sent)
for i in cand:
p = self.get_prob(word, i, out, bos, eos)
if p > final_p:
final_p = p
correct = i
sent2.append(correct)
else:
sent2.append(word)
return sent2
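    # Illustration (not executed): for ['i', 'am', 'on', 'tx'], if
    # short_expanse_dict['tx'] were ['treatment', 'texas'] (hypothetical
    # entries), each candidate is substituted into the token window around
    # 'tx' and scored with the kenlm tetragram model; the best-scoring
    # expansion is kept.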
#---------Lexical normalization pipeline (Sarker, 2017) -------------------------------
def loadItems(self):
'''
This is the primary load function.. calls other loader functions as required..
'''
global english_to_american
global noslang_dict
global IGNORE_LIST_TRAIN
global IGNORE_LIST
english_to_american = {}
lexnorm_oovs = []
IGNORE_LIST_TRAIN = []
IGNORE_LIST = []
english_to_american = self.loadEnglishToAmericanDict()
noslang_dict = self.loadDictionaryData()
for key, value in noslang_dict.items ():
value2 = value.lower ()
value3 = word_tokenize (value2)
noslang_dict[key] = value3
return None
def loadEnglishToAmericanDict(self):
etoa = {}
english = open('obj_lex/englishspellings.txt')
american = open('obj_lex/americanspellings.txt')
for line in english:
etoa[line.strip()] = american.readline().strip()
return etoa
def loadDictionaryData(self):
'''
this function loads the various dictionaries which can be used for mapping from oov to iv
'''
n_dict = {}
infile = open('obj_lex/noslang_mod.txt')
for line in infile:
items = line.split(' - ')
if len(items[0]) > 0 and len(items) > 1:
n_dict[items[0].strip()] = items[1].strip()
return n_dict
def preprocessText(self, tokens, IGNORE_LIST, ignore_username=False, ignore_hashtag=False, ignore_repeated_chars=True, eng_to_am=True, ignore_urls=False):
'''
Note the reason it ignores hashtags, @ etc. is because there is a preprocessing technique that is
designed to remove them
'''
normalized_tokens =[]
#print tokens
text_string = ''
# NOTE: if nesting if/else statements, be careful about execution sequence...
# tokens2 = [t[0] for t in tokens]
for t in tokens:
t_lower = t.strip().lower()
# if the token is not in the IGNORE_LIST, do various transformations (e.g., ignore usernames and hashtags, english to american conversion
# and others..
if t_lower not in IGNORE_LIST:
# ignore usernames '@'
if re.match('@', t) and ignore_username:
IGNORE_LIST.append(t_lower)
text_string += t_lower + ' '
#ignore hashtags
elif re.match('#', t_lower) and ignore_hashtag:
IGNORE_LIST.append(t_lower)
text_string += t_lower + ' '
#convert english spelling to american spelling
elif t.strip().lower() in english_to_american.keys() and eng_to_am:
text_string += english_to_american[t.strip().lower()] + ' '
#URLS
elif re.search('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', t_lower) and ignore_urls:
IGNORE_LIST.append(t_lower)
text_string += t_lower + ' '
elif not ignore_repeated_chars and not re.search(r'[^a-zA-Z]', t_lower):
# if t_lower only contains alphabetic characters
t_lower = re.sub(r'([a-z])\1+', r'\1\1', t_lower)
text_string += t_lower + ' '
# print t_lower
# if none of the conditions match, just add the token without any changes..
else:
text_string += t + ' '
else: # i.e., if the token is in the ignorelist..
text_string += t_lower + ' '
normalized_tokens = text_string.split()
return normalized_tokens, IGNORE_LIST
def dictionaryBasedNormalization(self, tokens, I_LIST, M_LIST):
tokens2 =[]
for t in (tokens):
t_lower = t.strip().lower()
if t_lower in noslang_dict.keys() and len(t_lower)>2:
nt = noslang_dict[t_lower]
[tokens2.append(m) for m in nt]
if not t_lower in M_LIST:
M_LIST.append(t_lower)
if not nt in M_LIST:
M_LIST.append(nt)
else:
tokens2.append (t)
return tokens2, I_LIST, M_LIST
#----Using the Sarker normalization functions ----------------------------
#Step 1 is the English normalization and step 2 is the abbreviation normalization
def normalize_step1(self, tokens, oovoutfile=None):
global IGNORE_LIST
global il
MOD_LIST = []
# Step 1: preprocess the text
normalized_tokens, il = self.preprocessText(tokens, IGNORE_LIST)
return normalized_tokens
def normalize_step2(self, normalized_tokens, oovoutfile=None):
global IGNORE_LIST
global il
MOD_LIST = []
ml = MOD_LIST
normalized_tokens, il, ml = self.dictionaryBasedNormalization(normalized_tokens, il, ml)
return normalized_tokens
def sarker_normalize (self,list_of_msgs):
self.loadItems()
msgs_normalized = [self.normalize_step1(m) for m in list_of_msgs]
msgs_normalized2 = [self.normalize_step2(m) for m in msgs_normalized]
return msgs_normalized2
    #-------Domain-specific abbreviation expansion ----------------------------
    # The abbreviations are input as a dictionary whose values are tokenized expansions
def domain_specific_abbr (self, tokens, abbr):
post2 = []
for t in tokens:
if t.lower() in abbr.keys():
nt = abbr[t.lower()]
[post2.append(m) for m in nt]
else:
post2.append(t)
return post2
def expand_abbr (self, data, abbr):
data2 = []
for post in data:
post2 = self.domain_specific_abbr (tokens = post, abbr= abbr)
data2.append(post2)
return data2
#-------Spelling correction -------------------------------------------------
def flev_rel (self, cand, token):
abs_edit_dist = editdistance.eval(cand, token)
rel_edit_dist = abs_edit_dist / len(token)
return rel_edit_dist
def modelsim (self,cand,token, model):
try:
similarity = model.similarity(cand, token)
except KeyError:
similarity = 0
return similarity
def run_low_emb (self, word, voc, model, w1 =0.4, w2= 0.6):
replacement = [' ',100]
for token in voc:
sim1 = self.flev_rel(word, token) #lower is better
sim2 = self.modelsim (word, token, model)
sim = w1 * sim1 + w2 * (1-sim2)
if sim < replacement[1]:
replacement[1] = sim
replacement[0] = token
return replacement
def wrong_concatenation(self, token, token_freq):
best_plausibility = 0
best_split = 0
t = token_freq[token]
limit = 9*t
NUMBER = re.compile('[0-9]+') # Only letters and dashes
if '-' in token:
return token
else:
for i in range(3, len(token)):
left, right = token[:i], token[i:]
if len(right) < 3:
continue
elif NUMBER.fullmatch(left) and right in token_freq:
best_split= (left,right)
elif NUMBER.fullmatch(right) and left in token_freq:
best_split = (left, right)
else:
if left not in token_freq or right not in token_freq:
continue
if token_freq[left] < limit or token_freq[right] < limit:
# print('too low')
continue
plausibility = min(token_freq[left], token_freq[right])
if plausibility > best_plausibility:
best_plausibility = plausibility
best_split = (left, right)
if best_split != 0:
return list(best_split)
else:
return token
def spelling_correction (self, post, min_rel_freq = 9, max_flev_rel = 0.76):
post2 = []
cnt = 0
tagged_post = pos_tag(post)
tags = [t[1] for t in tagged_post]
for a, token in enumerate (post):
token2 = token.lower()
if (tags[a] == 'NNP') or (tags[a] == 'NNPS'):
post2.append(token)
else:
if self.TRUE_WORD.fullmatch(token2) and (token2 != '-url-') and (token2 != '-') and (token2 != '--'):
# if token2 in self.spelling_corrections:
# correct = self.spelling_corrections[token2]
# if len(correct) >1:
# [post2.append(i) for i in correct]
# else:
# post2.append(correct)
# cnt +=1
# self.replaced.append(token2)
# self.replaced_with.append(correct)
if token2 in self.aspell_dict:
post2.append(token)
elif token2 in self.drugnames:
post2.append(token)
else:
freq_word = self.token_freq[token2]
limit = freq_word * min_rel_freq
subset = [t[0] for t in self.token_freq_ordered2 if t[1]>= limit]
candidate = self.run_low_emb(token2, subset, self.model2)
if candidate[1] > max_flev_rel:
x = self.wrong_concatenation(token2, self.token_freq)
if x != token2:
[post2.append(i) for i in x]
cnt +=1
self.replaced.append(token2)
self.replaced_with.append( " ".join(x))
self.spelling_corrections [token2] = x
else:
post2.append(token)
else:
post2.append(candidate[0])
cnt +=1
self.replaced.append(token2)
self.replaced_with.append(candidate[0])
self.spelling_corrections [token2] = candidate[0]
else:
post2.append(token)
self.total_cnt.append (cnt)
return post2
def initialize_files_for_spelling(self):
total_cnt = []
replaced = []
replaced_with = []
spelling_corrections= {}
return total_cnt, replaced, replaced_with, spelling_corrections
def change_tup_to_list (self, tup):
thelist = list(tup)
return thelist
def load_model (self):
        filename = os.path.join('obj_lex', 'Health_2.5mreviews.s200.w10.n5.v15.cbow.bin')
self.model2 = KeyedVectors.load_word2vec_format(filename, binary=True)
def create_token_freq (self, data):
flat_data = [item for sublist in data for item in sublist]
flat_data2 = [i.lower() for i in flat_data]
flat_data3 = []
for token2 in flat_data2:
if self.TRUE_WORD.fullmatch(token2) and (token2 != '-url-') and (token2 != '-') and (token2 != '--'):
flat_data3.append(token2)
self.token_freq = Counter(flat_data3)
token_freq_ordered = self.token_freq.most_common ()
self.token_freq_ordered2 = [self.change_tup_to_list(m) for m in token_freq_ordered]
def correct_spelling_mistakes(self, data, different_token_freq = False):
self.load_model()
self.load_files ()
self.total_cnt, self.replaced, self.replaced_with, self.spelling_corrections = self.initialize_files_for_spelling()
self.TRUE_WORD = re.compile('[-a-z]+') # Only letters and dashes
if different_token_freq == False:
self.create_token_freq(data)
else:
self.token_freq = self.load_obj('token_freq')
token_freq_ordered = self.token_freq.most_common ()
self.token_freq_ordered2 = [self.change_tup_to_list(m) for m in token_freq_ordered]
out = []
for num, m in enumerate(data):
if num%1000 == 0:
print(num)
out.append(self.spelling_correction (m))
return out, self.total_cnt, self.replaced, self.replaced_with, self.spelling_corrections
#--------Overall normalization function--------------------------------------
def normalize (self, posts, anonymize = True, remove_foreign = False):
self.load_files ()
posts0 = [str(m) for m in posts]
if anonymize == True:
posts1 = self.anonymize(posts0)
print(posts1[0])
else:
posts1 = posts0
if remove_foreign == True:
posts1b = self.remove_non_english(posts1)
else:
posts1b = posts1
posts2 = [i.replace ('’', "'") for i in posts1b]
self.prepareContractions()
posts3 = [self.expandContractions(m) for m in posts2]
posts4 = [self.remove_apos(m) for m in posts3]
posts5 = [word_tokenize(m) for m in posts4]
print('done with tokenizing')
print(posts5[0])
posts6 = [self.expandContractions_second(m) for m in posts5]
print(posts6[0])
self.load_ngrammodel()
posts8 = [self.sarker_normalize(posts6)]
posts8b = posts8[0]
posts9 = [self.short_abbr_expansion(m) for m in posts8b]
posts10 = [self.expand_abbr(posts9, self.abbr_dict)]
print(posts10[0][0])
return posts10[0]
# In[4]:
#example of usage
test = ['my bff is 4ever', 'the colour of the moon is grey']
#normalize but not correct spelling mistakes
test2 = Normalizer().normalize(test)
print(test2)
#correct spelling mistakes - input must be tokenized
test3, total_cnt, replaced, replaced_with, spelling_corrections = Normalizer().correct_spelling_mistakes(test2)
print(test3)
# In[ ]:
print('The total number of spelling mistakes found is: ' + str(sum(total_cnt)))
# In[ ]:
# ---- file: examples/households/VisualizeOccupancy.py (repo: a-buntjer/tsib, license: MIT) ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import os
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
import numpy as np
# %%
import tsib.household.profiles
# %% [markdown]
# ### Get plot style
# %%
# NOTE: the CSV data loader lives in the legacy 'tsorb' package (the
# predecessor of tsib), which this script assumes is installed as well
import tsorb.utils.InputData
data = tsorb.utils.InputData.DataExchangeCsv()
# %%
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# %%
EXPORT_PATH = os.path.join('plots')
# %%
import matplotlib.colors as mcolors
def make_colormap(seq):
"""
Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
# %%
rvb = make_colormap([np.array([2., 61., 100.])/255,
np.array([20., 129., 129.])/255,0.25,
np.array([20., 129., 129.])/255,
np.array([108., 139., 70.])/255,0.5,
np.array([108., 139., 70.])/255,
np.array([250.,150.,90.])/255,0.75,
np.array([250.,150.,90.])/255,
np.array([255.,192.,0.])/255, ])
# %% [markdown]
# ### Plot state activity
# %%
residents = 2
# %%
day_of_week = 'wd'
# %%
num_row_jump = int((residents + 1)**2)
# %%
startstate = data.get_four_state_start_state[day_of_week][:,residents-1]
# %%
startprob = []
labels = []
for i in range(residents+1):
for ii,state in enumerate(startstate[i*7:i*7+residents+1]):
startprob.append(state)
labels.append(str(i) + ' home and ' + str(ii) + ' active')
startprob = np.array(startprob)
# %%
act_transition = data.get_four_state_trans_data[day_of_week + str(residents)]
# %%
# %%
states = [startprob]
index = []
# loop over whole day
for interval in range(0, 144):
transition_prob = act_transition[num_row_jump * interval:
num_row_jump * (interval+1), 2:]
index.append(float(interval)/6.)
states.append(np.dot(states[interval],transition_prob))
# %%
np.array(states[1:]).shape
# %%
len(index)
# %%
colorset = rvb(np.linspace(0, 1, len(labels)))
# %%
plt.figure()
ax = plt.subplot(1,1,1)
ax.stackplot(index,np.array(states[1:]).T, labels = labels, colors = colorset)
handles, labels = ax.get_legend_handles_labels()
ax.legend(list(reversed(handles)), list(reversed(labels)), loc='center left', ncol = 1, bbox_to_anchor=(1., 0.4))
ax.set_ylabel('Activity share [-]')
ax.set_xlabel('Time [h]')
#ax.legend(bbox_to_anchor=(1.0, 1.0),edgecolor = 'w', )
NAME = 'method_occActivity_'+ str(residents) + '_'+day_of_week
# %% [markdown]
# ### Plot activity type
# %%
activity = data.get_activities
# %%
df = pd.DataFrame(activity,columns = ['day','occ','activity'] + index )
# %%
df.loc[df['day']==0,'day'] = 'wd'
df.loc[df['day']==1,'day'] = 'we'
# %%
df = df.set_index(['day','occ'])
# %% [markdown]
# Select day and occ
# %%
residents = 3
# %%
day_of_week = 'we'
# %%
activity = df.loc[(day_of_week,residents)].set_index(['activity'])
# %%
# activity.loc['Other',:] = residents - activity.sum()
# %%
activity.index = activity.index.str.replace('Act_','')
activity.index = activity.index.str.replace('WashDress','Washing/\nDressing')
activity.index = activity.index.str.replace('HouseClean','House\ncleaning')
# %%
colorset = rvb(np.linspace(0, 1, len(activity.index)))
# %%
plt.figure()
ax = plt.subplot(1,1,1)
activity.T.plot(color=colorset, ax=ax)  # pandas' plot expects 'color', not 'colors'
ax.set_xlabel('Time [h]')
ax.set_ylabel('Activity probability [-]')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.legend(loc='center left', ncol = 1, bbox_to_anchor=(1., 0.4))
NAME = 'method_activityProbability_'+ str(residents) + '_'+day_of_week
# %%
# ---- file: exsLer Arquivo/ex2 alunos/_main.py (repo: SpaceTheArcher/LP2_2s2017, license: Apache-2.0) ----
# coding=utf-8
# Open the output file once: opening it with 'w' inside the loop, as the
# original did, truncates it on every iteration and keeps only the last line.
with open('Alunos.txt') as arquivo, open('saida.txt', 'w') as arquivo2:
    for linha in arquivo:
        lista = linha.split(";")
        # average of the two grades (the original summed them without dividing by 2)
        media = (float(lista[1]) + float(lista[2])) / 2
        varLinha = "{};{}\n".format(linha.strip(), media)
        arquivo2.write(varLinha)
# ---- file: Leetcode/Python Solutions/Common Algorithm Templates/DisjointSet.py (repo: Mostofa-Najmus-Sakib/Applied-Algorithm, license: MIT) ----
"""
Language: Python
Written by: Mostofa Adib Shakib
Optimization Techniques:
1) Union by Rank: We always attach the root of the smaller (shallower) tree under the root of the
larger tree, since it is the depth of the tree that affects the running time. The depth of the
resulting tree only increases when both trees have the same depth.
2) Path compression: The idea is that each node visited on the way to a root node may as well be
attached directly to the root node; they all share the same representative.
"""
class DisjointSet:
def __init__(self):
self.parent = {}
self.rank = {}
    # Creates a singleton set for each element of the given universe
def makeSet(self, universe):
for i in universe:
self.parent[i] = i
self.rank[i] = 0
    # Determines which subset a particular element is in and returns
    # the representative of that set
def find(self, k):
if self.parent[k] != k:
# path compression
# recursively finds the parent once
self.parent[k] = self.find(self.parent[k])
return self.parent[k]
    # Merges two different subsets into a single subset; the representative
    # of one set becomes the representative of the other
def union(self, x, y):
# finds the parent of a node
xRoot = self.find(x)
yRoot = self.find(y)
# returns if both the nodes share the same parent
if xRoot == yRoot:
return
# make the tree with the least depth the child of the tree with higher depth
if self.rank[xRoot] > self.rank[yRoot]:
self.parent[yRoot] = xRoot
elif self.rank[xRoot] < self.rank[yRoot]:
self.parent[xRoot] = yRoot
else:
# When the nodes in both trees have the same depth
self.parent[xRoot] = yRoot
self.rank[yRoot] += 1
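    # Worked illustration (not executed): union(1, 2) on fresh singletons sets
    # parent[1] = 2 and rank[2] = 1; a later union(3, 2) attaches 3 under 2
    # without increasing any rank, keeping find() paths short.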
# A utility function needed to print the disjointSet
def printSets(universe, ds):
print([ds.find(i) for i in universe])
if __name__ == '__main__':
# universe of items
universe = [1, 2, 3, 4, 5]
# initialize DisjointSet class
ds = DisjointSet()
# create singleton set for each element of universe
ds.makeSet(universe)
printSets(universe, ds)
ds.union(4, 3) # 4 and 3 are in same set
printSets(universe, ds)
ds.union(2, 1) # 1 and 2 are in same set
printSets(universe, ds)
ds.union(1, 3) # 1, 2, 3, 4 are in same set
printSets(universe, ds)
| 27.068182 | 132 | 0.685978 | 384 | 2,382 | 4.223958 | 0.335938 | 0.055487 | 0.05857 | 0.038841 | 0.147349 | 0.110358 | 0.110358 | 0.091245 | 0.046856 | 0 | 0 | 0.01261 | 0.234257 | 2,382 | 88 | 133 | 27.068182 | 0.876645 | 0.548279 | 0 | 0.162162 | 0 | 0 | 0.007605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0 | 0 | 0.216216 | 0.162162 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff52620b1cd6c61d10575db074cd0d1ccf5ea327 | 19,761 | py | Python | tests/test_abstract.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | 1 | 2019-07-02T08:17:59.000Z | 2019-07-02T08:17:59.000Z | tests/test_abstract.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | 13 | 2019-12-17T02:28:30.000Z | 2021-11-17T03:46:10.000Z | tests/test_abstract.py | fish2000/CLU | 80bc2df5f001b5639d79ba979e19ec77a9931425 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import clu.abstract
import copy
import os
import pytest
abstract = abc.abstractmethod
@pytest.fixture(scope='module')
def strings():
yield ('yo', 'dogg', 'iheard', 'youlike')
@pytest.fixture(scope='module')
def capstrings():
yield ('YoDogg',
'iHeardYouLike',
'VariouslyCapitalizedStrings')
class TestAbstractMetas(object):
""" Run the tests for the clu.abstract module’s metaclasses. """
def test_metaclass_Slotted(self, strings):
from clu.predicates import slots_for
class Base(abc.ABC, metaclass=clu.abstract.Slotted):
pass
class DerivedOne(Base):
__slots__ = ('yo', 'dogg')
class DerivedTwo(DerivedOne):
pass
class DerivedThree(DerivedTwo):
__slots__ = ('iheard', 'youlike')
assert slots_for(DerivedThree) == strings
assert Base.__slots__ == tuple()
assert DerivedOne.__slots__ == ('yo', 'dogg')
assert DerivedTwo.__slots__ == tuple()
assert DerivedThree.__slots__ == ('iheard', 'youlike')
def test_metaclass_NonSlotted(self):
from clu.predicates import getpyattr, slots_for
class Base(abc.ABC, metaclass=clu.abstract.NonSlotted):
pass
class DerivedOne(Base):
__slots__ = ('yo', 'dogg')
class DerivedTwo(DerivedOne):
pass
class DerivedThree(DerivedTwo):
__slots__ = ('iheard', 'youlike')
assert slots_for(DerivedThree) == tuple()
# assert nopyattr(Base(), 'slots')
# assert nopyattr(DerivedOne(), 'slots')
# assert nopyattr(DerivedTwo(), 'slots')
# assert nopyattr(DerivedThree(), 'slots')
assert '__slots__' not in Base().__dict__
assert '__slots__' not in DerivedOne().__dict__
assert '__slots__' not in DerivedTwo().__dict__
assert '__slots__' not in DerivedThree().__dict__
assert '__slots__' not in Base.__dict__
assert '__slots__' not in DerivedOne.__dict__
assert '__slots__' not in DerivedTwo.__dict__
assert '__slots__' not in DerivedThree.__dict__
assert getpyattr(Base, 'slots') == tuple()
assert getpyattr(DerivedOne, 'slots') == tuple()
assert getpyattr(DerivedTwo, 'slots') == tuple()
assert getpyattr(DerivedThree, 'slots') == tuple()
def test_metaclass_BasePath(self, dirname):
from clu.fs import pypath
# Ensure “sys.path” contains the “yodogg” package:
basepath = dirname.subdirectory('yodogg')
assert basepath.exists
pypath.enhance(basepath)
class AppConfigBase(abc.ABC, metaclass=clu.abstract.BasePath):
pass
class AppConfig(AppConfigBase, basepath=basepath):
pass
appconfig = AppConfig()
assert appconfig.basepath == AppConfig.basepath
assert appconfig.basepath == os.fspath(basepath)
assert os.path.exists(appconfig.basepath)
class TestAbstractABCs(object):
""" Run the tests for the clu.abstract module’s abstract base classes. """
@pytest.mark.TODO
def test_abc_Unhashable(self):
import collections.abc
class Ancestor(abc.ABC, metaclass=clu.abstract.Slotted):
@abstract
def __hash__(self):
...
class HashMe(Ancestor):
def __hash__(self):
return hash(type(self))
class DontHashMe(HashMe, clu.abstract.Unhashable):
def __hash__(self):
return hash(type(self))
class Rando(clu.abstract.Unhashable):
pass
# class Unrelated(abc.ABC):
# pass
assert hash(HashMe())
assert isinstance(HashMe(), collections.abc.Hashable)
assert not isinstance(HashMe(), clu.abstract.Unhashable)
with pytest.raises(TypeError) as exc:
hash(DontHashMe())
assert "unhashable type" in str(exc)
assert "DontHashMe" in str(exc)
assert not isinstance(DontHashMe(), collections.abc.Hashable)
assert isinstance(DontHashMe(), clu.abstract.Unhashable)
assert not isinstance(Rando(), collections.abc.Hashable)
assert isinstance(Rando(), clu.abstract.Unhashable)
# TODO: make it work for random, unrelated ABC classes:
# assert not isinstance(Unrelated(), collections.abc.Hashable)
# assert isinstance(Unrelated(), clu.abstract.Unhashable)
def test_abc_Cloneable(self):
class YoDogg(clu.abstract.Cloneable, metaclass=clu.abstract.Slotted):
__slots__ = ('yo', 'dogg', 'yo_dogg')
def __init__(self, **kwargs):
self.yo = kwargs.pop('yo', 'YO')
self.dogg = kwargs.pop('dogg', 'DOGG')
self.yo_dogg = kwargs.pop('yo_dogg', 'YO_DOGG')
def __eq__(self, other):
return self.yo == other.yo and \
self.dogg == other.dogg and \
self.yo_dogg == other.yo_dogg
def clone(self, deep=False, memo=None):
kwargs = dict(zip(self.__slots__,
(getattr(self, slot, None) for slot in self.__slots__)))
return type(self)(**kwargs)
i0 = YoDogg()
i1 = i0.clone()
i2 = copy.copy(i0)
i3 = copy.deepcopy(i0)
assert i0 == i1
assert i0 == i2
assert i0 == i3
assert i0 == YoDogg(yo='YO', dogg='DOGG', yo_dogg='YO_DOGG')
def test_abc_ReprWrapper(self):
from clu.repr import compare_instance_reprs
class YoDogg(clu.abstract.ReprWrapper, metaclass=clu.abstract.Slotted):
__slots__ = ('yo', 'dogg', 'yo_dogg')
def __init__(self, **kwargs):
self.yo = kwargs.pop('yo', 'YO')
self.dogg = kwargs.pop('dogg', 'DOGG')
self.yo_dogg = kwargs.pop('yo_dogg', 'YO_DOGG')
def __eq__(self, other):
return self.yo == other.yo and \
self.dogg == other.dogg and \
self.yo_dogg == other.yo_dogg
def inner_repr(self):
return f"yo=“{self.yo}”, dogg=“{self.dogg}”, yo_dogg=“{self.yo_dogg}”"
i0 = YoDogg()
i1 = YoDogg(yo='YO', dogg='DOGG', yo_dogg='YO_DOGG')
i2 = YoDogg(yo='Yo', dogg='Dogg', yo_dogg='Yo Dogg')
assert i0 == i1
assert i0 != i2
assert compare_instance_reprs(i0, i1)
assert not compare_instance_reprs(i0, i2)
assert not compare_instance_reprs(i0, i1, i2)
def test_abc_Format(self, capstrings):
class UpperCaser(clu.abstract.Format):
def render(self, string):
return str(string).upper()
class CaseFolder(clu.abstract.Format):
def render(self, string):
return str(string).casefold()
uppercaser = UpperCaser()
casefolder = CaseFolder()
do_nothing = clu.abstract.NonFormat()
for string in capstrings:
assert uppercaser.render(string) == string.upper()
assert uppercaser.render(string).isupper()
assert casefolder.render(string) == string.casefold()
assert casefolder.render(string).islower()
assert do_nothing.render(string) == str(string)
assert do_nothing.render(string).isprintable() # why not
def test_abc_AppName(self):
class AppConfigBase(clu.abstract.AppName):
pass
class AppConfig(AppConfigBase, appname='YoDogg'):
pass
appconfig = AppConfig()
assert appconfig.appname == 'YoDogg'
assert appconfig.appname == AppConfig.appname
with pytest.raises(LookupError) as exc:
_ = AppConfigBase()
assert "Cannot instantiate a base config" in str(exc.value)
def test_abc_ManagedContext(self):
import contextlib
from clu.typology import (subclasscheck,
iscontextmanager,
isabstractcontextmanager)
class Managed(clu.abstract.ManagedContext):
def setup(self):
return self
def teardown(self):
pass
assert iscontextmanager(Managed)
assert isabstractcontextmanager(Managed)
assert subclasscheck(Managed, contextlib.AbstractContextManager)
assert issubclass(Managed, contextlib.AbstractContextManager)
with Managed() as m:
assert iscontextmanager(m)
assert isabstractcontextmanager(m)
assert subclasscheck(m, Managed)
assert isinstance(m, Managed)
class TestAbstractReprClasses(object):
""" Run the tests for the clu.abstract module’s repr classes. """
def test_repr_SlottedRepr(self):
from clu.repr import compare_instance_reprs
class YoDogg(clu.abstract.SlottedRepr):
__slots__ = ('yo', 'dogg', 'yo_dogg')
def __init__(self, **kwargs):
self.yo = kwargs.pop('yo', 'YO')
self.dogg = kwargs.pop('dogg', 'DOGG')
self.yo_dogg = kwargs.pop('yo_dogg', 'YO_DOGG')
def __eq__(self, other):
return self.yo == other.yo and \
self.dogg == other.dogg and \
self.yo_dogg == other.yo_dogg
i0 = YoDogg()
i1 = YoDogg(yo='YO', dogg='DOGG', yo_dogg='YO_DOGG')
i2 = YoDogg(yo='Yo', dogg='Dogg', yo_dogg='Yo Dogg')
assert i0 == i1
assert i0 != i2
assert compare_instance_reprs(i0, i1)
assert not compare_instance_reprs(i0, i2)
assert not compare_instance_reprs(i0, i1, i2)
@pytest.mark.TODO
def test_repr_MappingViewRepr(self, dirname):
# TODO: this test makes no sense – rewrite it
from clu.repr import compare_instance_reprs
data = dirname.subdirectory('data')
keys = repr(data.keys())
values = repr(data.values())
items = repr(data.items())
assert compare_instance_reprs(keys.replace('KeysView', 'ItemsView'), items)
assert compare_instance_reprs(values.replace('ValuesView', 'ItemsView'), items)
assert not compare_instance_reprs(keys, items)
assert not compare_instance_reprs(values, items)
class TestAbstractFormats(object):
""" Run the tests for the clu.abstract module’s format types. """
def test_format_SlottedFormat(self, capstrings):
class HTMLTagger(clu.abstract.SlottedFormat):
def __init__(self, tag_name):
self.opstring = "<" + tag_name.casefold() \
+ ">" + "{0}" \
+ "</" + tag_name.casefold() \
+ ">"
def render(self, string):
return self.opstring.format(string)
boldizer = HTMLTagger('b')
italizer = HTMLTagger('i')
strengthener = HTMLTagger('strong')
emphasizer = HTMLTagger('em')
for string in capstrings:
assert boldizer.render(string) == f"<b>{string}</b>"
assert italizer.render(string) == f"<i>{string}</i>"
assert strengthener.render(string) == f"<strong>{string}</strong>"
assert emphasizer.render(string) == f"<em>{string}</em>"
class TestAbstractDescriptors(object):
""" Run the tests for the clu.abstract module’s descriptor classes. """
def test_descriptor_Descriptor(self):
pass
def test_descriptor_ValueDescriptor(self, environment, consts):
from clu.fs.misc import gethomedir
from clu.predicates import isclasstype, pyattr
from clu.predicates import attr, attrs, stattr, stattrs
# Data descriptor wrapping a R/O value:
from clu.abstract import ValueDescriptor
# We have to use these irritating default environment values everywhere,
# in case the testing environment is kertwanged:
home = environment.get('HOME', gethomedir())
user = environment.get('USER', consts.USER)
# Non-data descriptor wrapping R/O access to a named environment variable,
        # a default value for that variable, and the variable’s name:
class EnvironmentName(object):
__slots__ = ('name', 'default')
def __init__(self, name=None, default=None):
self.default = default
if name is not None:
self.name = name
def __set_name__(self, cls, name):
if name is not None:
self.name = name
def __get__(self, instance=None, cls=None):
if instance is not None:
return environment.get(self.name,
self.default)
if isclasstype(cls):
return self.name
def __repr__(self):
clsname = pyattr(type(self), 'name', 'qualname')
selfname = attr( self, 'name', 'default')
return f"{clsname}<[{selfname}]>"
# Slotted type with non-data descriptors,
# wrapping values and environment variables:
class Slotted(object):
__slots__ = ('yo', 'dogg', 'wtf')
hax = ValueDescriptor('HAXXX')
HOME = EnvironmentName(default=gethomedir())
USER = EnvironmentName(default=consts.USER)
def __init__(self):
self.yo: str = "YO"
self.dogg: str = "DOGG"
self.wtf: str = "WTFFF"
# Non-slotted (“dictish”) type with data descriptors,
# wrapping both values and environment variables:
class Dictish(object):
yo: str = "YO"
dogg: str = "DOGG"
wtf = ValueDescriptor('WTFFF')
HOME = EnvironmentName(default=gethomedir())
USER = EnvironmentName(default=consts.USER)
def __init__(self):
self.hax: str = "HAXXX"
# The basics – Slotted type and instance attributes:
assert attr(Slotted, 'hax', 'HOME', 'USER') == 'HAXXX'
assert attr(Slotted(), 'hax', 'HOME', 'USER') == 'HAXXX'
assert attrs(Slotted, 'hax', 'HOME', 'USER') == ('HAXXX', 'HOME', 'USER')
assert attrs(Slotted(), 'hax', 'HOME', 'USER') == ('HAXXX', home, user)
# More basics – Dictish type and instance attributes:
assert attr(Dictish, 'wtf', 'HOME', 'USER') == 'WTFFF'
assert attr(Dictish(), 'wtf', 'HOME', 'USER') == 'WTFFF'
assert attrs(Dictish, 'wtf', 'HOME', 'USER') == ('WTFFF', 'HOME', 'USER')
assert attrs(Dictish(), 'wtf', 'HOME', 'USER') == ('WTFFF', home, user)
# Check statically obtained ValueDescriptor instances,
# from Slotted types and instances:
assert repr(stattr(Slotted, 'hax', 'HOME', 'USER')) == 'HAXXX'
assert repr(stattr(Slotted(), 'hax', 'HOME', 'USER')) == 'HAXXX'
assert repr(stattr(Slotted, 'hax', 'HOME', 'USER')) == repr(ValueDescriptor('HAXXX'))
assert repr(stattr(Slotted(), 'hax', 'HOME', 'USER')) == repr(ValueDescriptor('HAXXX'))
assert type(stattr(Slotted, 'hax', 'HOME', 'USER')) is ValueDescriptor
assert type(stattr(Slotted(), 'hax', 'HOME', 'USER')) is ValueDescriptor
# Check statically obtained ValueDescriptor instances,
# from Dictish types and instances:
assert repr(stattr(Dictish, 'wtf', 'HOME', 'USER')) == 'WTFFF'
assert repr(stattr(Dictish(), 'wtf', 'HOME', 'USER')) == 'WTFFF'
assert repr(stattr(Dictish, 'wtf', 'HOME', 'USER')) == repr(ValueDescriptor('WTFFF'))
assert repr(stattr(Dictish(), 'wtf', 'HOME', 'USER')) == repr(ValueDescriptor('WTFFF'))
assert type(stattr(Dictish, 'wtf', 'HOME', 'USER')) is ValueDescriptor
assert type(stattr(Dictish(), 'wtf', 'HOME', 'USER')) is ValueDescriptor
# Check statically obtained EnvironmentName instances,
# from both Slotted and Dictish types and instances:
assert repr(stattr(Slotted, 'HOME')) == repr(EnvironmentName('HOME', default=gethomedir()))
assert repr(stattr(Slotted(), 'HOME')) == repr(EnvironmentName('HOME', default=gethomedir()))
assert repr(stattr(Slotted, 'USER')) == repr(EnvironmentName('USER', default=consts.USER))
assert repr(stattr(Slotted(), 'USER')) == repr(EnvironmentName('USER', default=consts.USER))
assert repr(stattr(Slotted, 'HOME')) == "EnvironmentName<[HOME]>"
assert repr(stattr(Slotted(), 'HOME')) == "EnvironmentName<[HOME]>"
assert repr(stattr(Slotted, 'USER')) == "EnvironmentName<[USER]>"
assert repr(stattr(Slotted(), 'USER')) == "EnvironmentName<[USER]>"
assert repr(stattr(Dictish, 'HOME')) == repr(EnvironmentName('HOME', default=gethomedir()))
assert repr(stattr(Dictish(), 'HOME')) == repr(EnvironmentName('HOME', default=gethomedir()))
assert repr(stattr(Dictish, 'USER')) == repr(EnvironmentName('USER', default=consts.USER))
assert repr(stattr(Dictish(), 'USER')) == repr(EnvironmentName('USER', default=consts.USER))
assert repr(stattr(Dictish, 'HOME')) == "EnvironmentName<[HOME]>"
assert repr(stattr(Dictish(), 'HOME')) == "EnvironmentName<[HOME]>"
assert repr(stattr(Dictish, 'USER')) == "EnvironmentName<[USER]>"
assert repr(stattr(Dictish(), 'USER')) == "EnvironmentName<[USER]>"
# Slotted attributes are named descriptors that can compare with __eq__():
assert stattr(Slotted, 'yo') == stattr(Slotted(), 'yo') == attr(Slotted, 'yo')
assert stattr(Slotted, 'dogg') == stattr(Slotted(), 'dogg') == attr(Slotted, 'dogg')
assert stattr(Slotted, 'wtf') == stattr(Slotted(), 'wtf') == attr(Slotted, 'wtf')
# Dictish attributes are just attributes -- statically obtained attributes come from
# the class __dict__, while “normally” obtained attributes are probably coming out of
# the instance __dict__:
atts = ('yo', 'dogg', 'wtf')
assert stattrs(Dictish, *atts) == stattrs(Dictish(), *atts) != attrs(Dictish(), *atts)
# N.B. Dictish.hax is a ValueDescriptor instance:
atts = ('yo', 'dogg', 'hax')
assert stattrs(Dictish, *atts) != stattrs(Dictish(), *atts) == attrs(Dictish(), *atts)
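
# ------------------------------------------------------------------
# Illustrative sketch (not part of clu.abstract): a hypothetical
# metaclass, “MiniSlotted”, with the slot-defaulting behavior the
# Slotted tests above assert: any class that omits __slots__ gets an
# empty tuple, so no instance __dict__ appears anywhere in the MRO.
class MiniSlotted(abc.ABCMeta):
    def __new__(metacls, name, bases, attributes, **kwargs):
        if '__slots__' not in attributes:
            attributes['__slots__'] = tuple()
        return super().__new__(metacls, name, bases, attributes, **kwargs)

def test_metaclass_MiniSlotted_sketch():
    class Base(abc.ABC, metaclass=MiniSlotted):
        pass
    class Derived(Base):
        pass
    assert Base.__slots__ == tuple()
    assert Derived.__slots__ == tuple()
    assert not hasattr(Derived(), '__dict__')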
| 40.577002 | 101 | 0.556095 | 1,947 | 19,761 | 5.491525 | 0.134052 | 0.026375 | 0.035915 | 0.025814 | 0.491957 | 0.438085 | 0.415918 | 0.393191 | 0.374486 | 0.324355 | 0 | 0.003303 | 0.325793 | 19,761 | 486 | 102 | 40.660494 | 0.798994 | 0.098882 | 0 | 0.291411 | 0 | 0 | 0.080036 | 0.015951 | 0 | 0 | 0 | 0.002058 | 0.340491 | 1 | 0.119632 | false | 0.033742 | 0.058282 | 0.030675 | 0.315951 | 0.006135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff56f2ff008767215761ca02c50006457d3c8cc8 | 8,395 | py | Python | text_run.py | cjymz886/text_bert_cnn | 0b7fc34ee4ee3d1486b934737ebe574a054df9ce | [
"MIT"
] | 72 | 2019-03-29T12:13:54.000Z | 2022-03-13T15:42:56.000Z | text_run.py | lvcheer/text_bert_cnn | 2a781881eea45d888ba3b2f32e20a6b706b07b9e | [
"MIT"
] | 3 | 2019-09-25T03:04:18.000Z | 2021-06-15T08:27:14.000Z | text_run.py | lvcheer/text_bert_cnn | 2a781881eea45d888ba3b2f32e20a6b706b07b9e | [
"MIT"
] | 19 | 2019-04-03T07:56:38.000Z | 2022-03-21T13:37:12.000Z | import sys
import os
import time
from sklearn import metrics
from text_model import *
from loader import *
def evaluate(sess,dev_data):
    '''Compute the average loss and accuracy over a dev/test set, batch by batch.'''
data_len = 0
total_loss = 0.0
total_acc = 0.0
for batch_ids,batch_mask,batch_segment,batch_label in batch_iter(dev_data,config.batch_size):
batch_len = len(batch_ids)
data_len+=batch_len
feed_dict = feed_data(batch_ids,batch_mask,batch_segment,batch_label, 1.0)
loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
total_loss += loss * batch_len
total_acc += acc * batch_len
return total_loss/data_len, total_acc/data_len
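
# Arithmetic illustration (made-up numbers, not from the real datasets): the
# loop above forms a length-weighted average, so batches of sizes 3 and 1
# with losses 2.0 and 6.0 give (2.0*3 + 6.0*1) / 4 == 3.0, not the naive
# mean of 4.0. The same arithmetic as a self-contained sketch:
def _weighted_mean_sketch(values, lengths):
    return sum(v * n for v, n in zip(values, lengths)) / sum(lengths)

assert _weighted_mean_sketch([2.0, 6.0], [3, 1]) == 3.0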
def feed_data(batch_ids,batch_mask,batch_segment,batch_label,keep_prob):
    '''Build the feed dict expected by text_model.'''
feed_dict = {
model.input_ids: np.array(batch_ids),
model.input_mask: np.array(batch_mask),
model.segment_ids: np.array(batch_segment),
model.labels: np.array(batch_label),
model.keep_prob:keep_prob
}
return feed_dict
def optimistic_restore(session, save_file):
"""载入bert模型"""
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for
var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
restore_vars = []
name2var = dict(zip(map(lambda x: x.name.split(':')[0],tf.global_variables()),tf.global_variables()))
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = name2var[saved_var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
# print("going to restore.var_name:",var_name,";saved_var_name:",saved_var_name)
restore_vars.append(curr_var)
else:
print("variable not trained.var_name:",var_name)
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
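
# Usage sketch (hypothetical checkpoint path; in this script the real path
# comes from config.init_checkpoint, as used in train() below):
#     sess = tf.Session()
#     sess.run(tf.global_variables_initializer())
#     optimistic_restore(sess, "uncased_L-12_H-768_A-12/bert_model.ckpt")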
def train():
    '''Train the text_bert_cnn model.'''
tensorboard_dir=os.path.join(config.output_dir, "tensorboard/textcnn")
save_dir=os.path.join(config.output_dir, "checkpoints/textcnn")
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'best_validation')
start_time = time.time()
tf.logging.info("*****************Loading training data*****************")
train_examples = TextProcessor().get_train_examples(config.data_dir)
    train_data = convert_examples_to_features(train_examples, label_list, config.seq_length, tokenizer)
tf.logging.info("*****************Loading dev data*****************")
dev_examples = TextProcessor().get_dev_examples(config.data_dir)
dev_data = convert_examples_to_features(dev_examples, label_list, config.seq_length, tokenizer)
tf.logging.info("Time cost: %.3f seconds...\n" % (time.time() - start_time))
tf.logging.info("Building session and restore bert_model...\n")
session = tf.Session()
saver = tf.train.Saver()
session.run(tf.global_variables_initializer())
tf.summary.scalar("loss", model.loss)
tf.summary.scalar("accuracy", model.acc)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(tensorboard_dir)
writer.add_graph(session.graph)
optimistic_restore(session, config.init_checkpoint)
tf.logging.info('Training and evaluating...\n')
best_acc= 0
last_improved = 0 # record global_step at best_val_accuracy
flag=False
for epoch in range(config.num_epochs):
        batch_train = batch_iter(train_data, config.batch_size)
start = time.time()
tf.logging.info('Epoch:%d'%(epoch + 1))
for batch_ids,batch_mask,batch_segment,batch_label in batch_train:
feed_dict = feed_data(batch_ids,batch_mask,batch_segment,batch_label, config.keep_prob)
_, global_step, train_summaries, train_loss, train_accuracy = session.run([model.optim, model.global_step,
merged_summary, model.loss,
model.acc], feed_dict=feed_dict)
if global_step % config.print_per_batch == 0:
end = time.time()
val_loss,val_accuracy=evaluate(session,dev_data)
merged_acc=(train_accuracy+val_accuracy)/2
if merged_acc > best_acc:
saver.save(session, save_path)
best_acc = merged_acc
last_improved=global_step
improved_str = '*'
else:
improved_str = ''
tf.logging.info("step: {},train loss: {:.3f}, train accuracy: {:.3f}, val loss: {:.3f}, val accuracy: {:.3f},training speed: {:.3f}sec/batch {}".format(
global_step, train_loss, train_accuracy, val_loss, val_accuracy,(end - start) / config.print_per_batch,improved_str))
start = time.time()
if global_step - last_improved > config.require_improvement:
tf.logging.info("No optimization over 1500 steps, stop training")
flag = True
break
if flag:
break
config.lr *= config.lr_decay
def test():
    '''Evaluate the best saved checkpoint on the test set.'''
save_dir = os.path.join(config.output_dir, "checkpoints/textcnn")
save_path = os.path.join(save_dir, 'best_validation')
if not os.path.exists(save_dir):
tf.logging.info("maybe you don't train")
exit()
tf.logging.info("*****************Loading testing data*****************")
test_examples = TextProcessor().get_test_examples(config.data_dir)
test_data= convert_examples_to_features(test_examples, label_list, config.seq_length,tokenizer)
input_ids,input_mask,segment_ids=[],[],[]
for features in test_data:
input_ids.append(features['input_ids'])
input_mask.append(features['input_mask'])
segment_ids.append(features['segment_ids'])
config.is_training = False
session=tf.Session()
session.run(tf.global_variables_initializer())
saver=tf.train.Saver()
saver.restore(sess=session,save_path=save_path)
tf.logging.info('Testing...')
test_loss,test_accuracy = evaluate(session,test_data)
msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
tf.logging.info(msg.format(test_loss, test_accuracy))
batch_size=config.batch_size
data_len=len(test_data)
num_batch=int((data_len-1)/batch_size)+1
y_test_cls=[features['label_ids'] for features in test_data]
y_pred_cls=np.zeros(shape=data_len,dtype=np.int32)
for i in range(num_batch):
start_id=i*batch_size
end_id=min((i+1)*batch_size,data_len)
feed_dict={
model.input_ids: np.array(input_ids[start_id:end_id]),
model.input_mask: np.array(input_mask[start_id:end_id]),
model.segment_ids: np.array(segment_ids[start_id:end_id]),
model.keep_prob:1.0,
}
y_pred_cls[start_id:end_id]=session.run(model.y_pred_cls,feed_dict=feed_dict)
    # Detailed evaluation: per-class precision/recall/F1 and the confusion matrix
tf.logging.info("Precision, Recall and F1-Score...")
tf.logging.info(metrics.classification_report(y_test_cls, y_pred_cls, target_names=label_list))
tf.logging.info("Confusion Matrix...")
cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
tf.logging.info(cm)
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:
raise ValueError("""usage: python run_cnn.py [train / test]""")
tf.logging.set_verbosity(tf.logging.INFO)
config = TextConfig()
label_list = TextProcessor().get_labels()
tokenizer = tokenization.FullTokenizer(vocab_file=config.vocab_file, do_lower_case=False)
model = TextCNN(config)
if sys.argv[1] == 'train':
train()
elif sys.argv[1] == 'test':
test()
else:
exit()
| 40.752427 | 169 | 0.629065 | 1,088 | 8,395 | 4.57261 | 0.201287 | 0.032563 | 0.044422 | 0.017085 | 0.254271 | 0.196181 | 0.156784 | 0.12201 | 0.108744 | 0.093065 | 0 | 0.007093 | 0.244312 | 8,395 | 205 | 170 | 40.95122 | 0.777112 | 0.026206 | 0 | 0.141975 | 0 | 0.006173 | 0.100604 | 0.016998 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030864 | false | 0 | 0.037037 | 0 | 0.080247 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff57d1d8442a73a3ba2a1260389c32c6a812ced5 | 2,633 | py | Python | src/azure_run_train.py | yohskua/support-ticket-classification | ba26373583c8dc56902b8e9e4ba716f400a730b9 | [
"MIT"
] | 2 | 2020-10-27T19:49:07.000Z | 2020-11-04T12:35:15.000Z | src/azure_run_train.py | yohskua/support-ticket-classification | ba26373583c8dc56902b8e9e4ba716f400a730b9 | [
"MIT"
] | 3 | 2020-11-13T18:57:14.000Z | 2022-02-10T01:54:56.000Z | src/azure_run_train.py | yohskua/support-ticket-classification | ba26373583c8dc56902b8e9e4ba716f400a730b9 | [
"MIT"
] | null | null | null | """
Endpoint to launch an experiment on AzureML.
"""
import os
from os.path import dirname
from typing import Optional
from azureml.train.estimator import Estimator
from azureml.core import Workspace, Datastore, Experiment, Run
from src.utils import pip_packages
from src.azure_utils import load_azure_conf
def run_azure_experiment_with_storage(
subscription_id: str,
resource_group: str,
workspace_name: str,
datastore_name: str,
container_name: str,
storage_account_name: str,
storage_account_key: str,
compute_name: str,
experiment_name: Optional[str] = None,
source_directory: Optional[str] = None,
image_name: Optional[str] = None,
use_gpu=True,
) -> Run:
workspace = Workspace(subscription_id, resource_group, workspace_name,)
data_store = Datastore.register_azure_blob_container(
workspace=workspace,
datastore_name=datastore_name,
container_name=container_name,
account_name=storage_account_name,
account_key=storage_account_key,
)
source_directory = source_directory or dirname(__file__)
assert (
compute_name in workspace.compute_targets
), f"compute {compute_name} is not created in {workspace_name} workspace"
estimator = Estimator(
source_directory=source_directory,
script_params={"--data-folder": data_store.as_mount()},
compute_target=workspace.compute_targets[compute_name],
pip_packages=pip_packages(),
entry_script=os.path.join(source_directory, "azure_train.py"),
use_gpu=use_gpu,
custom_docker_image=image_name,
)
experiment_name = experiment_name or __file__.split(os.sep)[-1].split(".py")[0]
experiment = Experiment(workspace=workspace, name=experiment_name)
run = experiment.submit(estimator)
return run
if __name__ == "__main__":
azure_conf = load_azure_conf()
run = run_azure_experiment_with_storage(
subscription_id=azure_conf["SUBSCRIPTION_ID"],
resource_group=azure_conf["RESOURCE_GROUP"],
workspace_name=azure_conf["WORKSPACE_NAME"],
datastore_name=azure_conf["DATASTORE_NAME"],
container_name=azure_conf["CONTAINER_NAME"],
storage_account_name=azure_conf["STORAGE"]["AccountName"],
storage_account_key=azure_conf["STORAGE"]["AccountKey"],
compute_name=azure_conf["COMPUTE_NAME"],
experiment_name=__file__.split(os.sep)[-1].split(".py")[0],
        # source_directory is the whole src directory
source_directory=os.path.dirname(__file__),
image_name=azure_conf["IMAGE_NAME"],
use_gpu=True,
)
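    # Optional follow-up sketch: Run.wait_for_completion is a standard
    # azureml.core API that blocks until the remote job finishes and, with
    # show_output=True, streams the driver log to stdout.
    # run.wait_for_completion(show_output=True)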
| 35.581081 | 83 | 0.720091 | 322 | 2,633 | 5.490683 | 0.254658 | 0.061086 | 0.044118 | 0.024887 | 0.074661 | 0.074661 | 0.074661 | 0.026018 | 0 | 0 | 0 | 0.001861 | 0.183821 | 2,633 | 73 | 84 | 36.068493 | 0.820847 | 0.032283 | 0 | 0.032258 | 0 | 0 | 0.092913 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.016129 | false | 0 | 0.112903 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff57f5081cf328886c5e3633e7f6b890aa504bf4 | 2,386 | py | Python | utils.py | X-Gamer-Guide/twitch-mod-activity-tracker | b2f59c06732ae5e84e2407540cb63a6842ba27e4 | [
"MIT"
] | 2 | 2022-03-09T17:54:53.000Z | 2022-03-11T13:49:55.000Z | utils.py | X-Gamer-Guide/twitch-mod-activity-tracker | b2f59c06732ae5e84e2407540cb63a6842ba27e4 | [
"MIT"
] | null | null | null | utils.py | X-Gamer-Guide/twitch-mod-activity-tracker | b2f59c06732ae5e84e2407540cb63a6842ba27e4 | [
"MIT"
] | null | null | null | ##################################################################
# #
# twitch mod activity tracker #
# #
# Copyright (C) 2022 X Gamer Guide #
# https://github.com/X-Gamer-Guide/twitch-mod-activity-tracker #
# #
##################################################################
import os
import shelve
from config import data_path, language
def get_actions(mod_format: bool = False) -> dict:
"Load all mod actions from a file"
actions = {}
with shelve.open(os.path.join(data_path, "actions")) as db:
for action in db.keys():
if mod_format:
for mod in db[action]:
if mod not in actions:
actions[mod] = {}
actions[mod][action] = db[action][mod]
else:
actions[action] = db[action]
return actions
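
# Minimal pure-dict illustration of the mod_format pivot above (made-up
# counts, no shelve involved): the same numbers re-keyed by moderator first.
def _pivot_sketch(actions_by_action):
    by_mod = {}
    for action, mods in actions_by_action.items():
        for mod, count in mods.items():
            by_mod.setdefault(mod, {})[action] = count
    return by_mod

assert _pivot_sketch({"ban": {"modA": 2}}) == {"modA": {"ban": 2}}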
def put_actions(actions: dict) -> None:
"Saves mod actions to a file"
with shelve.open(os.path.join(data_path, "actions")) as db:
for key in actions:
db[key] = actions[key]
def get_command(command_display_name) -> str:
"Returns the original name of a command"
for command in language.commands:
if language.commands[command].display_name == command_display_name:
return command
def get_action(action_display_name) -> str:
"Returns the original name of a command action"
for action in language.actions:
if language.actions[action] == action_display_name:
return action
def command_help(command) -> str:
"Returns how a command should be used"
description = f"**/{language.commands[command].display_name}**"
if "arguments" in language.commands[command]:
description += " "
for argument in language.commands[command].arguments:
if language.commands[command].arguments[argument].marked_as_optional:
description += f"**[** `{language.commands[command].arguments[argument].display_name}` **]**"
else:
description += f"**<** `{language.commands[command].arguments[argument].display_name}` **>**"
return description
| 37.28125 | 109 | 0.523889 | 243 | 2,386 | 5.045267 | 0.288066 | 0.071778 | 0.131321 | 0.104405 | 0.350734 | 0.252855 | 0.252855 | 0.252855 | 0.252855 | 0.150082 | 0 | 0.002483 | 0.324811 | 2,386 | 63 | 110 | 37.873016 | 0.758535 | 0.209975 | 0 | 0.095238 | 0 | 0 | 0.214209 | 0.092573 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff59619cfc20a7cda244fc4d89652be6e3d0a62b | 644 | py | Python | setup.py | Newky/when_can_i_run | e7f5a97acb32db3601ea0e22cd382c270802c6e1 | [
"MIT"
] | null | null | null | setup.py | Newky/when_can_i_run | e7f5a97acb32db3601ea0e22cd382c270802c6e1 | [
"MIT"
] | null | null | null | setup.py | Newky/when_can_i_run | e7f5a97acb32db3601ea0e22cd382c270802c6e1 | [
"MIT"
] | null | null | null | from setuptools import setup
def parse_requirements(requirements_path):
requirements = []
try:
        for line in open(requirements_path):
            line = line.strip()
            if line and '#' not in line:
                requirements.append(line)
except IOError:
print("Could not open requirements file {}".format(requirements_path))
return requirements
setup(
name='when_can_i_run',
version='0.0.1',
description="Script which tells you at what times you can run.",
author="Richard Delaney",
author_email="richdel1991@gmail.com",
install_requires=parse_requirements('requirements.txt'),
scripts=['bin/when_can_i_run']
)
| 25.76 | 78 | 0.675466 | 78 | 644 | 5.410256 | 0.653846 | 0.113744 | 0.137441 | 0.052133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013944 | 0.220497 | 644 | 24 | 79 | 26.833333 | 0.826693 | 0 | 0 | 0 | 0 | 0 | 0.270186 | 0.032609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff5c22e528b0317b09aca3288a9e47cff9255e98 | 1,864 | py | Python | notebook/numpy/frac_dimension.py | pipepipexiaji/matplotlib | f7a25f22afeceb11f37354ff8249790090ab3df6 | [
"MIT"
] | null | null | null | notebook/numpy/frac_dimension.py | pipepipexiaji/matplotlib | f7a25f22afeceb11f37354ff8249790090ab3df6 | [
"MIT"
] | null | null | null | notebook/numpy/frac_dimension.py | pipepipexiaji/matplotlib | f7a25f22afeceb11f37354ff8249790090ab3df6 | [
"MIT"
] | null | null | null | import sys, os
import numpy as np
def frac_dimension(z, threshold=0.9):
def pointcount(z,k):
s=np.add.reduceat(np.add.reduceat(
z, np.arange(0, z.shape[0], k), axis=0 ),
np.arange(0, z.shape[1], k), axis=1)
return len(np.where( ( s>0 ) & (s<k*k) )[0])
z=(z<threshold)
p = min(z.shape)
n=2**np.floor(np.log(p)/np.log(2))
n=int(np.log(n)/np.log(2))
sizes=2**np.arange(n, 1, -1)
counts = []
for size in sizes:
counts.append(pointcount(z, size))
coeffs = np.polyfit(np.log(sizes), np.log(counts), 1)
return -coeffs[0]
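
# Quick self-check sketch (assumed toy input, not part of the original
# script): a one-pixel-wide straight line is a 1-D set, so the box-counting
# slope should come out at about 1.
def _line_dimension_sketch():
    img = np.ones((256, 256))
    img[128, :] = 0.0  # dark pixels fall below the threshold, i.e. "occupied"
    assert abs(frac_dimension(img, threshold=0.5) - 1.0) < 0.1

_line_dimension_sketch()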
if __name__=='__main__':
    import imageio.v2 as imageio  # scipy.misc.imread was removed in SciPy 1.2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
    z = 1.0 - imageio.imread("https://shrtm.nu/zw04") / 255
print(frac_dimension(z, threshold=0.25))
sizes = 128, 64, 32
xmin, xmax = 0, z.shape[1]
ymin, ymax = 0, z.shape[0]
fig = plt.figure(figsize=(10, 5))
for i, size in enumerate(sizes):
ax = plt.subplot(1, len(sizes), i+1, frameon=False)
        ax.imshow(1-z, plt.cm.gray, interpolation="bicubic", vmin=0, vmax=1,
extent=[xmin, xmax, ymin, ymax], origin="upper")
ax.set_xticks([])
ax.set_yticks([])
for y in range(z.shape[0]//size+1):
for x in range(z.shape[1]//size+1):
s = (z[y*size:(y+1)*size, x*size:(x+1)*size] > 0.25).sum()
if s > 0 and s < size*size:
rect = patches.Rectangle(
(x*size, z.shape[0]-1-(y+1)*size),
width=size, height=size,
linewidth=.5, edgecolor='.25',
facecolor='.75', alpha=.5)
ax.add_patch(rect)
plt.tight_layout()
plt.savefig("fractal-dimension.png")
plt.show() | 34.518519 | 76 | 0.526288 | 285 | 1,864 | 3.392982 | 0.385965 | 0.049638 | 0.028956 | 0.04757 | 0.080662 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050652 | 0.300966 | 1,864 | 54 | 77 | 34.518519 | 0.691481 | 0 | 0 | 0 | 0 | 0 | 0.036461 | 0.01126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.106383 | 0 | 0.191489 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff5d5019c2e1556f979ae8023b4f76dfde167be2 | 705 | py | Python | tests/test_quotient.py | ICB-DCM/fiddy | c6300f60e7b67ca3b5255cc27a22293aa2690a96 | [
"BSD-3-Clause"
] | 3 | 2022-03-04T13:37:10.000Z | 2022-03-04T13:37:16.000Z | tests/test_quotient.py | ICB-DCM/fiddy | c6300f60e7b67ca3b5255cc27a22293aa2690a96 | [
"BSD-3-Clause"
] | 9 | 2022-02-04T14:03:15.000Z | 2022-02-24T15:41:15.000Z | tests/test_quotient.py | ICB-DCM/fiddy | c6300f60e7b67ca3b5255cc27a22293aa2690a96 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import fiddy
import numpy as np
@pytest.fixture
def line():
def function(x):
return (3 * x + 2)[0]
def derivative(x):
return 3
return {
"function": function,
"derivative": derivative,
"point": np.array([3]),
"dimension": np.array([0]),
"size": 1e-10,
}
def test_forward(line):
step = fiddy.step.dstep(
point=line["point"],
dimension=line["dimension"],
size=line["size"],
)
fd = fiddy.quotient.forward(
function=line["function"],
point=line["point"],
step=step,
)
expected = line["derivative"](line["point"])
assert np.isclose(fd, expected)
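
def test_forward_quadratic_sketch():
    # Extra illustration (mirrors the fiddy calls above, quadratic instead of
    # linear): a forward difference quotient recovers f'(3) = 6 for f(x) = x**2.
    point = np.array([3.0])
    step = fiddy.step.dstep(point=point, dimension=np.array([0]), size=1e-6)
    fd = fiddy.quotient.forward(
        function=lambda x: (x ** 2)[0],
        point=point,
        step=step,
    )
    assert np.isclose(fd, 6.0, atol=1e-3)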
| 19.054054 | 48 | 0.547518 | 80 | 705 | 4.8125 | 0.3875 | 0.07013 | 0.041558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018145 | 0.296454 | 705 | 36 | 49 | 19.583333 | 0.758065 | 0 | 0 | 0.068966 | 0 | 0 | 0.116312 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.137931 | false | 0 | 0.103448 | 0.068966 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff5d8d4b13202abe6fea868590e72915ddb862ae | 2,388 | py | Python | library/dbscripts_mkdirs.py | Acidburn0zzz/upstream-infra | 82b91b4de88782d924ce01ae2b4b1b6b34eed6f7 | [
"MIT"
] | 73 | 2019-09-25T05:11:53.000Z | 2022-03-21T13:18:57.000Z | library/dbscripts_mkdirs.py | Acidburn0zzz/upstream-infra | 82b91b4de88782d924ce01ae2b4b1b6b34eed6f7 | [
"MIT"
] | 5 | 2019-12-14T23:36:38.000Z | 2021-03-19T06:14:23.000Z | library/dbscripts_mkdirs.py | Acidburn0zzz/upstream-infra | 82b91b4de88782d924ce01ae2b4b1b6b34eed6f7 | [
"MIT"
] | 20 | 2019-12-14T23:24:09.000Z | 2022-01-13T04:25:03.000Z | #!/usr/bin/python
import errno
import grp
import os
import pwd
# simple module that creates many directories for users
# initially created for dbscripts to create staging directories in the user homes
def main():
module = AnsibleModule(
argument_spec = dict(
permissions = dict(required=True),
users = dict(required=True, type='list'),
group = dict(required=True),
directories = dict(required=True, type='list'),
pathtmpl = dict(required=True),
),
supports_check_mode=True,
)
users = module.params['users']
directories = module.params['directories']
permissions = int(module.params['permissions'], 8)
pathtmpl = module.params['pathtmpl']
group = module.params['group']
gid = grp.getgrnam(group).gr_gid
changed = 0
changed_dirs = []
for user in users:
uid = pwd.getpwnam(user).pw_uid
for dirname in directories:
path = pathtmpl.format(**{"user": user, "dirname": dirname})
permissions_incorrect = True
dirmode = None
if os.path.exists(path):
stat = os.stat(path)
dirmode = oct(stat.st_mode & 0o777)
diruid = stat.st_uid
dirgid = stat.st_gid
permissions_incorrect = diruid != uid or dirgid != gid
if not os.path.isdir(path) or dirmode != oct(permissions) or permissions_incorrect:
changed += 1
changed_dirs.append(path)
if not module.check_mode:
try:
try:
os.mkdir(path, permissions)
except OSError as ex:
if not (ex.errno == errno.EEXIST and os.path.isdir(path)):
raise
except Exception as e:
module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (path, str(e)))
os.chmod(path, permissions)
os.chown(path, uid, gid)
module.exit_json(changed=changed > 0, msg="%s directories changed" % (changed), changed_dirs=changed_dirs)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
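
# Example playbook task for this module (values are illustrative; adjust
# them to your environment):
#
#     - name: create staging directories in user homes
#       dbscripts_mkdirs:
#         permissions: "0755"
#         users: [alice, bob]
#         group: staff
#         directories: [staging, staging-community]
#         pathtmpl: "/home/{user}/{dirname}"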
| 33.166667 | 123 | 0.55402 | 260 | 2,388 | 4.984615 | 0.396154 | 0.046296 | 0.061728 | 0.030864 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005161 | 0.350921 | 2,388 | 71 | 124 | 33.633803 | 0.830968 | 0.062814 | 0 | 0.037037 | 0 | 0 | 0.060877 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.111111 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff5dab03c7e7990a7a73aa7873bb629886472ec5 | 8,264 | py | Python | setup.py | jessa/PySDD | 57ccc22c4720ece54f11a593900e0a885023c6cc | [
"Apache-2.0"
] | 34 | 2018-03-21T01:26:57.000Z | 2022-03-10T08:53:18.000Z | setup.py | jessa/PySDD | 57ccc22c4720ece54f11a593900e0a885023c6cc | [
"Apache-2.0"
] | 12 | 2019-01-22T16:25:02.000Z | 2022-02-01T10:21:29.000Z | setup.py | jessa/PySDD | 57ccc22c4720ece54f11a593900e0a885023c6cc | [
"Apache-2.0"
] | 14 | 2018-12-17T17:40:48.000Z | 2022-03-22T02:53:01.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
setup.py
~~~~~~~~
Usage: python3 setup.py build_ext --inplace
:author: Wannes Meert
:copyright: Copyright 2017-2019 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as BuildExtCommand
from setuptools import Distribution
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import platform
import os
import re
from pathlib import Path
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
try:
import cysignals
except ImportError as exc:
print(f"cysignals not found\n{exc}")
cysignals = None
class MyDistribution(Distribution):
global_options = Distribution.global_options + [
('debug', None, 'Compile with debug options on (PySDD option)'),
('usecysignals', None, 'Compile with CySignals (PySDD option)')
]
def __init__(self, attrs=None):
self.debug = 0
self.usecysignals = 0
super().__init__(attrs)
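
# Usage sketch for the two custom global flags declared above (command
# lines are illustrative):
#
#     python3 setup.py build_ext --inplace                 # regular build
#     python3 setup.py --debug build_ext --inplace         # -O0/-g, forced rebuild
#     python3 setup.py --usecysignals build_ext --inplace  # compile with CySignals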
# build_type = "debug"
build_type = "optimized"
here = Path(".") # setup script requires relative paths
with (here / "pysdd" / "__init__.py").open('r') as fd:
wrapper_version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not wrapper_version:
raise RuntimeError('Cannot find version information')
sdd_version = "2.0"
libwrapper_path = here / "pysdd" / "lib"
sdd_path = libwrapper_path / f"sdd-{sdd_version}"
lib_path = sdd_path / "lib"
inc_path = sdd_path / "include"
src_path = sdd_path / "src"
csrc_path = here / "pysdd" / "src"
# c_files_paths = src_path.glob("**/*.c")
c_files_paths = (src_path / "fnf").glob("*.c")
sdd_extra_inc_path = libwrapper_path / "sdd_extra" / "include"
# weight optimization wrapper
wo_path = libwrapper_path / "weight_optimization"
wo_inc_path = wo_path / "include"
wo_src_path = wo_path / "src"
wo_c_files_paths = wo_src_path.glob("*.c")
c_dirs_paths = set(p.parent for p in src_path.glob("**/*.c")) | {wo_src_path}
all_c_file_paths = [str(p) for p in c_files_paths] + [str(p) for p in wo_c_files_paths]
# print("Found c files: ", ", ".join([str(p) for p in all_c_file_paths]))
os.environ["LDFLAGS"] = f"-L{lib_path}"
os.environ["CPPFLAGS"] = f"-I{inc_path} " + f"-I{wo_inc_path} " + f"-I{sdd_extra_inc_path} " + f"-I{csrc_path} " + \
" ".join(f"-I{p}" for p in c_dirs_paths)
compile_time_env = {'HAVE_CYSIGNALS': False}
# if cysignals is not None:
# compile_time_env['HAVE_CYSIGNALS'] = True
c_args = {
'unix': ['-O3', '-march=native'],
'msvc': ['/Ox', '/fp:fast', '/favor:INTEL64', '/Og'],
'mingw32': ['-O3', '-march=native']
}
c_args_debug = {
'unix': ["-march=native", "-O0", '-g'],
'msvc': [["-Zi", "/Od"]],
'mingw32': ["-march=native", "-O0", '-g']
}
l_args = {
'unix': [],
'msvc': [],
'mingw32': []
}
l_args_debug = {
'unix': ['-g'],
'msvc': ["-debug"],
'mingw32': ['-g']
}
class MyBuildExtCommand(BuildExtCommand):
def build_extensions(self):
global lib_path
c = self.compiler.compiler_type
print("Compiler type: {}".format(c))
compiler_name = self.compiler.compiler[0]
print("Compiler name: {}".format(compiler_name))
print("--debug: {}".format(self.distribution.debug))
print("--usecysignals: {}".format(self.distribution.usecysignals))
# Compiler and linker options
if self.distribution.debug:
self.force = True # force full rebuild in debugging mode
cur_c_args = c_args_debug
cur_l_args = l_args_debug
else:
cur_c_args = c_args
cur_l_args = l_args
if "gcc" in compiler_name:
cur_c_args["unix"].append("-std=c99")
if c in cur_c_args:
args = cur_c_args[c]
for e in self.extensions: # type: Extension
e.extra_compile_args = args
else:
print("Unknown compiler type: {}".format(c))
if c in cur_l_args:
args = cur_l_args[c]
for e in self.extensions: # type: Extension
e.extra_link_args = args
if self.distribution.usecysignals:
if cysignals is not None:
if self.cython_compile_time_env is None:
self.cython_compile_time_env = {'HAVE_CYSIGNALS': True}
else:
self.cython_compile_time_env['HAVE_CYSIGNALS'] = True
else:
print("Warning: import cysignals failed")
# Extra objects
if "Darwin" in platform.system():
cur_lib_path = lib_path / "Darwin"
if build_type == "debug":
cur_lib_path = cur_lib_path / "debug"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Linux" in platform.system():
cur_lib_path = lib_path / "Linux"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Windows" in platform.system():
cur_lib_path = lib_path / "Windows"
libsdd_path = cur_lib_path / "libsdd.dll"
else:
libsdd_path = lib_path / "libsdd.a"
for e in self.extensions: # type: Extension
e.extra_objects = [str(libsdd_path)]
BuildExtCommand.build_extensions(self)
if cythonize is not None:
ext_modules = cythonize([
Extension(
"pysdd.sdd", [str(here / "pysdd" / "sdd.pyx")] + all_c_file_paths
# extra_objects=[str(libsdd_path)],
# extra_compile_args=extra_compile_args,
# extra_link_args=extra_link_args
# include_dirs=[numpy.get_include()]
)],
compiler_directives={'embedsignature': True},
# gdb_debug=gdb_debug,
compile_time_env=compile_time_env)
else:
ext_modules = []
print('**********************************************')
print('Cython not yet available, skipping compilation')
print('**********************************************')
# install_requires = ['numpy', 'cython']
install_requires = ['cython>=0.29.6']
setup_requires = ['setuptools>=18.0', 'cython>=0.29.6']
tests_require = ['pytest']
with (here / 'README.rst').open('r', encoding='utf-8') as f:
long_description = f.read()
setup_kwargs = {}
def set_setup_kwargs(**kwargs):
global setup_kwargs
setup_kwargs = kwargs
set_setup_kwargs(
name='PySDD',
version=wrapper_version,
description='Sentential Decision Diagrams',
long_description=long_description,
author='Wannes Meert, Arthur Choi',
author_email='wannes.meert@cs.kuleuven.be',
url='https://github.com/wannesm/PySDD',
project_urls={
'PySDD documentation': 'http://pysdd.readthedocs.io/en/latest/',
'PySDD source': 'https://github.com/wannesm/PySDD'
},
packages=["pysdd"],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require={
'all': ['cysignals', 'numpy']
},
include_package_data=True,
package_data={
'': ['*.pyx', '*.pxd', '*.h', '*.c', '*.so', '*.a', '*.dll', '*lib'],
},
distclass=MyDistribution,
cmdclass={
'build_ext': MyBuildExtCommand
},
entry_points={
'console_scripts': [
'pysdd = pysdd.cli:main'
]},
python_requires='>=3.6',
license='Apache 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords='sdd, knowledge compilation',
ext_modules=ext_modules,
zip_safe=False
)
try:
setup(**setup_kwargs)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, SystemExit) as exc:
print("********************************************")
print("ERROR: The C extension could not be compiled")
print("********************************************")
print(exc)
raise exc
| 32.924303 | 116 | 0.60818 | 998 | 8,264 | 4.803607 | 0.274549 | 0.021902 | 0.016688 | 0.005841 | 0.162286 | 0.096996 | 0.078849 | 0.078849 | 0.045265 | 0.018356 | 0 | 0.008197 | 0.232333 | 8,264 | 250 | 117 | 33.056 | 0.747478 | 0.106123 | 0 | 0.091371 | 0 | 0 | 0.238076 | 0.036962 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015228 | false | 0 | 0.071066 | 0 | 0.101523 | 0.071066 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff66fc191e5cd060a3a30c03c46d05cc46439ecb | 2,578 | py | Python | benchmark/d3pe/scripts/eval_ope.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 50 | 2021-02-07T08:10:28.000Z | 2022-03-25T09:10:26.000Z | benchmark/d3pe/scripts/eval_ope.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 7 | 2021-07-29T14:58:31.000Z | 2022-02-01T08:02:54.000Z | benchmark/d3pe/scripts/eval_ope.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 4 | 2021-04-01T16:30:15.000Z | 2022-03-31T17:38:05.000Z | import os
import json
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from d3pe.metric.score import RC_score, TopK_score, get_policy_mean
BenchmarkFolder = 'benchmarks'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--domain', type=str)
parser.add_argument('-l', '--level', type=str)
parser.add_argument('-a', '--amount', type=int)
parser.add_argument('-en', '--evaluate_name', type=str)
args = parser.parse_args()
if not os.path.exists('ope_plots'): os.makedirs('ope_plots')
''' load ope and gt values '''
task = f'{args.domain}-{args.level}-{args.amount}'
task_folder = os.path.join(BenchmarkFolder, task)
evaluate_file = os.path.join(task_folder, args.evaluate_name + '.json')
with open(evaluate_file, 'r') as f:
evaluate_json = json.load(f)
gt_file = os.path.join(task_folder, 'gt.json')
with open(gt_file, 'r') as f:
gt_json = json.load(f)
real_score = []
    estimate_score = []
for key in gt_json.keys():
real_score.append(gt_json[key])
        estimate_score.append(evaluate_json[key])
''' evaluation '''
    print('RC score:', RC_score(real_score, estimate_score))
    print('Top 1 mean:', TopK_score(real_score, estimate_score, 1, 'mean'))
    print('Top 3 mean:', TopK_score(real_score, estimate_score, 3, 'mean'))
    print('Top 5 mean:', TopK_score(real_score, estimate_score, 5, 'mean'))
    print('Top 1 max:', TopK_score(real_score, estimate_score, 1, 'max'))
    print('Top 3 max:', TopK_score(real_score, estimate_score, 3, 'max'))
    print('Top 5 max:', TopK_score(real_score, estimate_score, 5, 'max'))
print('Policy Mean Score:', get_policy_mean(real_score))
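    # Shape note (made-up keys for illustration): real_score and
    # estimate_score are parallel lists, one entry per policy key, so
    # gt.json like {"policy_0": 12.3, ...} lines up with the values in
    # <evaluate_name>.json.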
''' plot '''
plt.figure()
gt_scale = 1 / (1 - 0.99) / 1000.0 if not 'finance' in task else 1 / (1 - 0.99) / 2516
try:
        r = pearsonr(np.array(real_score) * gt_scale, np.array(estimate_score))[0]
except:
r = float('nan')
    plt.scatter(np.array(real_score) * gt_scale, np.array(estimate_score))
    max_value = max((np.array(real_score) * gt_scale).max(), np.array(estimate_score).max())
    min_value = min((np.array(real_score) * gt_scale).min(), np.array(estimate_score).min())
plt.plot([min_value, max_value], [min_value, max_value], 'k--')
plt.title(task + ' (r=%.2f)' % r)
plt.xlabel('Ground Truth', {'size' : 12})
plt.ylabel(args.evaluate_name.upper(), {'size' : 12})
plt.savefig(f'ope_plots/{task}-{args.evaluate_name}.png') | 40.920635 | 92 | 0.655935 | 384 | 2,578 | 4.1875 | 0.273438 | 0.078358 | 0.084577 | 0.109453 | 0.307214 | 0.244403 | 0.185945 | 0.053483 | 0.053483 | 0.053483 | 0 | 0.017908 | 0.176881 | 2,578 | 63 | 93 | 40.920635 | 0.739868 | 0 | 0 | 0 | 0 | 0 | 0.131401 | 0.032156 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137255 | 0 | 0.137255 | 0.156863 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff685253b9c0863707f35b85f1ef58190321a525 | 1,185 | py | Python | python-deprecated/asset.py | unfixed/Dell-Warranty-Lookup | 8775636d4b0a8aa30945b7bd8cedbf1b85d885c6 | [
"WTFPL"
] | 3 | 2015-02-13T16:14:14.000Z | 2017-04-03T15:40:36.000Z | python-deprecated/asset.py | unfixed/Dell-Warranty-Lookup | 8775636d4b0a8aa30945b7bd8cedbf1b85d885c6 | [
"WTFPL"
] | null | null | null | python-deprecated/asset.py | unfixed/Dell-Warranty-Lookup | 8775636d4b0a8aa30945b7bd8cedbf1b85d885c6 | [
"WTFPL"
] | null | null | null | #Seperate File - Filename - asset.py
class Asset():
def __init__(self,service_tag,name,user,dept):
def GetAssetInformation(service_tag):
from suds.client import Client
import uuid
client = Client("http://xserv.dell.com/services/assetservice.asmx?WSDL")
return client.service.GetAssetInformation(uuid.uuid1(), "dell_asset_lookup", service_tag)
def warranty_end_date(asset_info):
dates = []
for n in range(len(asset_info[0][0][1][0])):
dates.append(asset_info[0][0][1][0][n][4].date())
return max(dates)
asset_info = GetAssetInformation(service_tag)
self.name = name.upper()
self.model = str(asset_info.Asset[0][0][4])
self.service_tag = str(asset_info.Asset[0][0][0])
self.shipped_date = str(asset_info.Asset[0][0][6].date())
self.warranty_end_date = str(warranty_end_date(asset_info))
self.user = user
self.dept = dept
def print_csv(self):
return str('%s,%s,%s,%s,%s,%s,%s\n' %(self.name,self.model,self.service_tag,self.shipped_date,self.warranty_end_date,self.user,self.dept))
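
# Usage sketch (hypothetical values; Dell's public asset SOAP endpoint
# queried above is a legacy service and may no longer respond):
#     a = Asset('ABC1234', 'lab-pc-01', 'jdoe', 'Support')
#     print(a.print_csv())  # name,model,service_tag,shipped,warranty_end,user,dept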
| 40.862069 | 146 | 0.624473 | 166 | 1,185 | 4.271084 | 0.325301 | 0.101551 | 0.021157 | 0.022567 | 0.25952 | 0.126939 | 0 | 0 | 0 | 0 | 0 | 0.020811 | 0.229536 | 1,185 | 28 | 147 | 42.321429 | 0.75575 | 0.029536 | 0 | 0 | 0 | 0 | 0.08007 | 0.019147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.045455 | 0.454545 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff69eebfb5fd5176bd938dc3c3b14c86aedd74b9 | 3,366 | py | Python | src/app.py | dyylaan/corona | 73e834ecdbcc167542bd169a9b19eae759218ab9 | [
"MIT"
] | null | null | null | src/app.py | dyylaan/corona | 73e834ecdbcc167542bd169a9b19eae759218ab9 | [
"MIT"
] | null | null | null | src/app.py | dyylaan/corona | 73e834ecdbcc167542bd169a9b19eae759218ab9 | [
"MIT"
] | null | null | null |
import datetime
import os
import yaml
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Directory containing the processed data files
PROCESSED_DIR = '../data/processed/'
# Main data table
ALL_DATA_FILE = 'all_data.csv'
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# Build the paths to the data files
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
params['directories']['processed'],
params['files']['all_data'])
# Read the data file
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
.assign(day=lambda _df: _df['Last Update'].dt.date)
.drop_duplicates(subset=['Country/Region', 'Province/State','day'])
[lambda df: df['day']<= datetime.date(2020, 3, 10)]
)
countries = [{'label': c, 'value': c} for c in sorted(epidemie_df['Country/Region'].unique())]
app = dash.Dash('corona Virus Explorer')  # the order of the components below determines their layout in the app (first on top, etc.)
app.layout = html.Div([
html.H1(['Corona Virus Explorer'], style={'textAlign': 'center'}),
html.Div([
dcc.Dropdown(
id='country',
options=countries
)
]),
html.Div([
dcc.Dropdown(
id='country2',
options=countries
)
]),
html.Div([
dcc.RadioItems(
id='variable',
options=[
{'label':'Confirmed','value': 'Confirmed'},
{'label':'Deaths','value': 'Deaths'},
{'label':'Recovered','value': 'Recovered'},
],
value= 'Confirmed',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Graph(id='graph1')
]),
])
@app.callback(
Output('graph1', 'figure'),
[
Input('country','value'),
Input('country2','value'),
Input('variable','value'),
]
)
def update_graph(country, country2, variable):  # arguments follow the order of the callback Inputs
print(country)
if country is None:
graph_df = epidemie_df.groupby('day').agg({variable: 'sum'}).reset_index()
else:
graph_df = (epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({variable: 'sum'})
.reset_index()
)
if country2 is not None:
graph2_df = (epidemie_df[epidemie_df['Country/Region'] == country2]
.groupby(['Country/Region', 'day'])
.agg({variable: 'sum'})
.reset_index()
)
return {
'data':[
dict(
x=graph_df['day'],
y=graph_df[variable],
type='line',
name=country if country is not None else'Total'
)
] + ([
dict(
x=graph2_df['day'],
y=graph2_df[variable],
type='line',
name=country2 if country is not None else []
)
        ] if country2 is not None else [])
}
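
# Callback behavior sketch (assumed values): update_graph("France", None,
# "Deaths") returns a single line trace of the daily summed Deaths for
# France; choosing a second country adds a second trace, and clearing both
# dropdowns falls back to the worldwide total.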
if __name__=='__main__':
app.run_server(debug=True) | 29.526316 | 117 | 0.541295 | 368 | 3,366 | 4.820652 | 0.396739 | 0.039459 | 0.033822 | 0.038895 | 0.228298 | 0.132469 | 0.092446 | 0.052988 | 0.052988 | 0 | 0 | 0.008666 | 0.31432 | 3,366 | 114 | 118 | 29.526316 | 0.759965 | 0.067439 | 0 | 0.226804 | 0 | 0 | 0.159962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010309 | false | 0 | 0.082474 | 0 | 0.103093 | 0.010309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff6cda50a149363e3fbb320406dd695af6260253 | 2,629 | py | Python | templating/core/src/main/python/jsonteng/tags/for_each_tag.py | vmware/json-template-engine | afa606aef47599f8ff7cf7d1f927877a9ce786e7 | [
"Apache-2.0"
] | 21 | 2019-09-30T08:41:01.000Z | 2022-03-15T08:23:28.000Z | templating/core/src/main/python/jsonteng/tags/for_each_tag.py | vmware/json-template-engine | afa606aef47599f8ff7cf7d1f927877a9ce786e7 | [
"Apache-2.0"
] | 1 | 2020-03-17T03:14:00.000Z | 2020-05-01T18:02:23.000Z | templating/core/src/main/python/jsonteng/tags/for_each_tag.py | vmware/json-template-engine | afa606aef47599f8ff7cf7d1f927877a9ce786e7 | [
"Apache-2.0"
] | 7 | 2019-06-18T14:35:57.000Z | 2021-12-06T23:01:30.000Z | # Copyright 2019 VMware, Inc.
# SPDX-License-Indentifier: Apache-2.0
from ..exception import TemplateEngineException
from .tag_base import TagBase
class ForEachTag(TagBase):
"""
Apply a list of binding data to a template repeatedly and return the
resolved templates in a list.
"""
name = "for-each"
def __init__(self, tag_resolver):
"""
Construct this Tag.
:param tag_resolver: Tag resolver
:type tag_resolver: 'TagResolver'
"""
super().__init__(tag_resolver)
self._element_resolver = tag_resolver.get_element_resolver()
self._template_loader = tag_resolver.get_template_loader()
def process(self, tag_tokens, binding_data_list):
"""
Process this tag.
:param tag_tokens: Tag arguments.
:type tag_tokens: 'list'
:param binding_data_list: Binding data used during the processing.
:type binding_data_list: 'list'
:return: JSON object
:rtype: JSON object
"""
if len(tag_tokens) < 2 or len(tag_tokens) > 3:
raise Exception(
"Tag \"{}\" requires 2 or 3 parameters."
" Parameters given {}".
format(ForEachTag.name, tag_tokens))
data_list = tag_tokens[0]
try:
template = self._element_resolver.resolve(
tag_tokens[1], binding_data_list)
except TemplateEngineException:
            # If resolution fails, treat the token itself as the template;
            # resolving it may require loop-dependent binding data.
template = tag_tokens[1]
template_json = self._template_loader.load(template)
resolved_data_list = self._element_resolver.resolve(
data_list, binding_data_list)
resolved_json = list()
for index, data in enumerate(resolved_data_list):
binding_data_list.insert(0, data)
binding_data_list.insert(0, {"_index_": index})
if len(tag_tokens) == 3:
condition_expr = self._element_resolver.resolve(
tag_tokens[2], binding_data_list)
if not self.safe_eval(condition_expr):
binding_data_list.pop(0)
binding_data_list.pop(0)
continue
resolved_template = self._element_resolver.resolve(
template_json, binding_data_list)
resolved_json.append(resolved_template)
binding_data_list.pop(0)
binding_data_list.pop(0)
self._template_loader.unload(template)
return resolved_json
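
# Minimal pure-Python sketch of the loop semantics implemented above
# (illustration only; no tag resolver or template loader involved): each
# element gets an extra {"_index_": i} scope for its pass, and an optional
# predicate filters passes out.
def _for_each_sketch(data_list, render, condition=None):
    results = []
    for index, data in enumerate(data_list):
        scope = dict(data, _index_=index)
        if condition is not None and not condition(scope):
            continue
        results.append(render(scope))
    return results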
| 38.101449 | 74 | 0.617345 | 299 | 2,629 | 5.12709 | 0.311037 | 0.088715 | 0.127202 | 0.067841 | 0.196999 | 0.095238 | 0.049576 | 0.049576 | 0.049576 | 0.049576 | 0 | 0.011463 | 0.303157 | 2,629 | 68 | 75 | 38.661765 | 0.825328 | 0.219855 | 0 | 0.097561 | 0 | 0 | 0.035454 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.04878 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff6d5561a4aef51c3287d4dc9af0364e55380487 | 2,882 | py | Python | src/the_tale/the_tale/game/cards/tests/test_give_stability.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/cards/tests/test_give_stability.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/cards/tests/test_give_stability.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class GiveStabilityMixin(helpers.CardsTestMixin):
CARD = None
def setUp(self):
super(GiveStabilityMixin, self).setUp()
places_tt_services.effects.cmd_debug_clear_service()
self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_2 = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account_1)
self.storage.load_account_data(self.account_2)
self.hero = self.storage.accounts_to_heroes[self.account_1.id]
self.card = self.CARD.effect.create_card(type=self.CARD, available_for_auction=True)
def test_use(self):
with mock.patch('the_tale.game.balance.constants.PLACE_BASE_STABILITY', 0.25):
self.place_1.refresh_attributes()
self.assertLess(self.place_1.attrs.stability, 1.0 - self.CARD.effect.modificator)
with self.check_almost_delta(lambda: self.place_1.attrs.stability, self.CARD.effect.modificator):
result, step, postsave_actions = self.CARD.effect.use(**self.use_attributes(hero=self.hero,
card=self.card,
value=self.place_1.id))
self.assertEqual((result, step, postsave_actions),
(game_postponed_tasks.ComplexChangeTask.RESULT.SUCCESSED, game_postponed_tasks.ComplexChangeTask.STEP.SUCCESS, ()))
def test_use_for_wrong_place_id(self):
with mock.patch('the_tale.game.balance.constants.PLACE_BASE_STABILITY', 0.25):
self.place_1.refresh_attributes()
self.assertLess(self.place_1.attrs.stability, 1.0 - self.CARD.effect.modificator)
with self.check_not_changed(lambda: self.place_1.attrs.stability):
self.assertEqual(self.CARD.effect.use(**self.use_attributes(hero=self.hero, value=666, storage=self.storage)),
(game_postponed_tasks.ComplexChangeTask.RESULT.FAILED,
game_postponed_tasks.ComplexChangeTask.STEP.ERROR,
()))
class GiveStabilityUncommonTests(GiveStabilityMixin, utils_testcase.TestCase):
CARD = types.CARD.GIVE_STABILITY_UNCOMMON
class GiveStabilityRareTests(GiveStabilityMixin, utils_testcase.TestCase):
CARD = types.CARD.GIVE_STABILITY_RARE
class GiveStabilityEpicTests(GiveStabilityMixin, utils_testcase.TestCase):
CARD = types.CARD.GIVE_STABILITY_EPIC
class GiveStabilityLegendaryTests(GiveStabilityMixin, utils_testcase.TestCase):
CARD = types.CARD.GIVE_STABILITY_LEGENDARY
| 41.171429 | 140 | 0.670715 | 324 | 2,882 | 5.709877 | 0.308642 | 0.048649 | 0.043243 | 0.032432 | 0.571892 | 0.450811 | 0.450811 | 0.374054 | 0.374054 | 0.233514 | 0 | 0.012733 | 0.236988 | 2,882 | 69 | 141 | 41.768116 | 0.828558 | 0 | 0 | 0.142857 | 0 | 0 | 0.036099 | 0.036099 | 0 | 0 | 0 | 0 | 0.095238 | 1 | 0.071429 | false | 0 | 0.047619 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff70ad31a7318cd530102a309a6d7b1760414492 | 1,831 | py | Python | 16.3-sum-closest.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | 16.3-sum-closest.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | 16.3-sum-closest.py | leonhx/leetcode-practice | 35fabe5a1b98c05a5dd5d6a62201e9cb54be69ec | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=16 lang=python3
#
# [16] 3Sum Closest
#
from typing import List


class Solution:
def __init__(self, *, debug=False):
super().__init__()
self.debug = debug
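    # Recursively find the element of the sorted list `nums` closest to `target`.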
def binarySearchClosest(self, nums: List[int], target: int) -> int:
if len(nums) == 0:
return None
if target <= nums[0]:
return nums[0]
if target >= nums[-1]:
return nums[-1]
mid_i = len(nums) // 2
mid_v = nums[mid_i]
if target == mid_v:
return target
elif target < mid_v:
x = self.binarySearchClosest(nums[:mid_i], target)
else:
x = self.binarySearchClosest(nums[mid_i + 1:], target)
        # `x` may legitimately be 0, so test against None rather than truthiness.
        return mid_v if x is None or abs(target - mid_v) < abs(target - x) else x
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
if self.debug:
print(nums)
closest = sum(nums[-3:])
for i in range(len(nums)):
if nums[i] * 3 > max([closest, target]):
break
for j in range(i + 1, len(nums) - 1):
sij = nums[i] + nums[j]
if sij + nums[j] > max([closest, target]):
break
tgt = target - sij
x = self.binarySearchClosest(nums[j + 1:], tgt)
if self.debug:
print(f'given {nums[i]}, {nums[j]}, find {tgt}, get {x}')
                if x is not None:  # x == 0 is a valid closest value
candidate = sij + x
if candidate == target:
return candidate
if abs(target - candidate) < abs(target - closest):
if self.debug:
print(f'{nums[i]} + {nums[j]} + {x}')
closest = candidate
return closest
| 34.54717 | 77 | 0.464227 | 214 | 1,831 | 3.892523 | 0.261682 | 0.054022 | 0.028812 | 0.10084 | 0.182473 | 0.141657 | 0.064826 | 0 | 0 | 0 | 0 | 0.016744 | 0.412889 | 1,831 | 52 | 78 | 35.211538 | 0.75814 | 0.028946 | 0 | 0.111111 | 0 | 0 | 0.041761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.244444 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff72d32718ca52261b3ee6803ac5fbc46065c1fa | 4,262 | py | Python | projects/mmdet3d_plugin/models/utils/detr.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 237 | 2021-10-13T05:29:29.000Z | 2022-03-31T13:04:13.000Z | projects/mmdet3d_plugin/models/utils/detr.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 23 | 2021-10-20T13:57:27.000Z | 2022-03-30T08:03:19.000Z | projects/mmdet3d_plugin/models/utils/detr.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 47 | 2021-10-14T05:38:30.000Z | 2022-03-31T09:15:59.000Z | import torch
from mmcv.cnn.bricks.registry import TRANSFORMER_LAYER_SEQUENCE
from mmcv.cnn.bricks.transformer import TransformerLayerSequence
def inverse_sigmoid(x, eps=1e-5):
"""Inverse function of sigmoid.
Args:
x (Tensor): The tensor to do the
inverse.
eps (float): EPS avoid numerical
overflow. Defaults 1e-5.
Returns:
Tensor: The x has passed the inverse
function of sigmoid, has same
shape with input.
"""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class Deformable3DDetrTransformerDecoder(TransformerLayerSequence):
"""Copy the decoder in DETR transformer.
Args:
return_intermediate (bool): Whether to return intermediate outputs.
coder_norm_cfg (dict): Config of last normalization layer. Default:
`LN`.
"""
def __init__(self, *args, return_intermediate=False, **kwargs):
super(Deformable3DDetrTransformerDecoder, self).__init__(*args, **kwargs)
self.return_intermediate = return_intermediate
def forward(self,
query,
*args,
reference_points=None,
valid_ratios=None,
reg_branches=None,
**kwargs):
"""Forward function for `TransformerDecoder`.
Args:
query (Tensor): Input query with shape
`(num_query, bs, embed_dims)`.
reference_points (Tensor): The reference
points of offset. has shape
(bs, num_query, 4) when as_two_stage,
                otherwise has shape (bs, num_query, 2).
valid_ratios (Tensor): The radios of valid
points on the feature map, has shape
(bs, num_levels, 2)
            reg_branches (obj:`nn.ModuleList`): Used for
refining the regression results. Only would
be passed when with_box_refine is True,
otherwise would be passed a `None`.
Returns:
Tensor: Results with shape [1, num_query, bs, embed_dims] when
return_intermediate is `False`, otherwise it has shape
[num_layers, num_query, bs, embed_dims].
"""
output = query
intermediate = []
intermediate_reference_points = []
for lid, layer in enumerate(self.layers):
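            # Scale the normalized reference points by each level's valid
            # ratios so the sampling locations stay inside the un-padded
            # regions of the feature maps.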
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] * \
torch.cat([valid_ratios, valid_ratios], -1)[:, None]
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * \
valid_ratios[:, None]
output = layer(
output,
*args,
reference_points=reference_points_input,
**kwargs)
output = output.permute(1, 0, 2)
if reg_branches is not None:
tmp = reg_branches[lid](output)
if reference_points.shape[-1] == 4:
new_reference_points = tmp + inverse_sigmoid(
reference_points)
new_reference_points = new_reference_points.sigmoid()
else:
assert reference_points.shape[-1] == 2
# This is to deal with the different output number (10).
# new_reference_points = tmp
new_reference_points = tmp[
..., :2] + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
reference_points = new_reference_points.detach()
output = output.permute(1, 0, 2)
if self.return_intermediate:
intermediate.append(output)
intermediate_reference_points.append(reference_points)
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(
intermediate_reference_points)
return output, reference_points
| 39.831776 | 81 | 0.572032 | 443 | 4,262 | 5.300226 | 0.309255 | 0.185264 | 0.061329 | 0.057496 | 0.237223 | 0.197615 | 0.109029 | 0.061329 | 0.061329 | 0.061329 | 0 | 0.012567 | 0.346551 | 4,262 | 106 | 82 | 40.207547 | 0.830521 | 0.320272 | 0 | 0.280702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035088 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.175439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff74ecb0d326e0eac765860b694710655beda7d9 | 1,594 | py | Python | mikaponics/production/serializers/production_list_serializer.py | mikaponics/mikaponics-back | 98e1ff8bab7dda3492e5ff637bf5aafd111c840c | [
"BSD-3-Clause"
] | 2 | 2019-04-30T23:51:41.000Z | 2019-05-04T00:35:52.000Z | mikaponics/production/serializers/production_list_serializer.py | mikaponics/mikaponics-back | 98e1ff8bab7dda3492e5ff637bf5aafd111c840c | [
"BSD-3-Clause"
] | 27 | 2019-04-30T20:22:28.000Z | 2022-02-10T08:10:32.000Z | mikaponics/production/serializers/production_list_serializer.py | mikaponics/mikaponics-back | 98e1ff8bab7dda3492e5ff637bf5aafd111c840c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string # HTML / TXT
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from foundation.models import Production
class ProductionListSerializer(serializers.ModelSerializer):
pretty_state = serializers.CharField(required=True, allow_blank=False, source="get_pretty_state")
pretty_environment = serializers.CharField(required=True, allow_blank=False, source="get_pretty_environment")
pretty_type_of = serializers.CharField(required=True, allow_blank=False, source="get_pretty_type_of")
pretty_grow_system = serializers.CharField(required=True, allow_blank=False, source="get_pretty_grow_system")
absolute_url = serializers.CharField(required=True, allow_blank=False, source="get_absolute_url")
class Meta:
model = Production
fields = (
'name',
'description',
'state',
'pretty_state',
'slug',
'is_commercial',
'environment',
'pretty_environment',
'type_of',
'pretty_type_of',
'grow_system',
'pretty_grow_system',
'grow_system_other',
'started_at',
'finished_at',
'absolute_url',
)
| 37.952381 | 113 | 0.693852 | 178 | 1,594 | 5.949438 | 0.38764 | 0.0661 | 0.1322 | 0.151086 | 0.287063 | 0.287063 | 0.287063 | 0.287063 | 0.287063 | 0.234183 | 0 | 0.003213 | 0.218946 | 1,594 | 41 | 114 | 38.878049 | 0.84739 | 0.020075 | 0 | 0 | 0 | 0 | 0.174471 | 0.028223 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.277778 | 0 | 0.472222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff75e873bc50af3add423f1f76edc56efdae6ca7 | 2,135 | py | Python | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/05-Lists-Advanced/02_Exercises/04-Office-Chairs.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/05-Lists-Advanced/02_Exercises/04-Office-Chairs.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/05-Lists-Advanced/02_Exercises/04-Office-Chairs.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | # 4. Office Chairs
# So you've found a meeting room - phew!
# You arrive there ready to present, and find that someone has taken one or more of the chairs!!
# You need to find some quickly... check all the other meeting rooms to see if all of the chairs are in use.
# You will be given a number n representing how many rooms there are.
# On the next n lines for each room you will get how many chairs there are and how many of them will be taken.
# The chairs will be represented by "X"s, then there will be a space " " and a number representing the taken places.
# Example: "XXXXX 4" (5 chairs and 1 of them is left free). Keep track of the free chairs, you will need them later.
# However if you get to a room where there are more people than chairs, print the following message:
# "{needed_chairs_in_room} more chairs needed in room {number_of_room}". If there is enough chairs in each room print: \
# "Game On, {total_free_chairs} free chairs left"
# rooms_count = int(input())
#
# free_chairs_in_each_room = []
# enough_chairs_in_each_room = True
#
# for room in range(1, rooms_count + 1):
# room_chairs = input().split()
# free_chairs_in_room = len(room_chairs[0]) - int(room_chairs[1])
# if free_chairs_in_room < 0:
# print(f"{abs(free_chairs_in_room)} more chairs needed in room {room}")
# enough_chairs_in_each_room = False
# else:
# free_chairs_in_each_room.append(free_chairs_in_room)
#
# if enough_chairs_in_each_room:
# print(f"Game On, {sum(free_chairs_in_each_room)} free chairs left")
rooms_count = int(input())
free_chairs_in_each_room = []
enough_chairs_in_each_room = True
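# Each room line looks like "XXXXX 4": the "X"s are the chairs and the number
# after the space is how many seats are taken.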
for room in range(1, rooms_count + 1):
room_chairs, n_people = input().split()
free_chairs_in_room = len(room_chairs) - int(n_people)
if free_chairs_in_room < 0:
print(f"{abs(free_chairs_in_room)} more chairs needed in room {room}")
enough_chairs_in_each_room = False
else:
free_chairs_in_each_room.append(free_chairs_in_room)
if enough_chairs_in_each_room:
print(f"Game On, {sum(free_chairs_in_each_room)} free chairs left") | 47.444444 | 120 | 0.719906 | 367 | 2,135 | 3.942779 | 0.26158 | 0.121631 | 0.116102 | 0.143746 | 0.542502 | 0.542502 | 0.523842 | 0.523842 | 0.500346 | 0.446441 | 0 | 0.006957 | 0.192037 | 2,135 | 45 | 121 | 47.444444 | 0.831884 | 0.709602 | 0 | 0 | 0 | 0 | 0.197302 | 0.096121 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff769261d192cb236045382aced01d930dd67c3c | 1,378 | py | Python | src/SamplePreprocessor.py | msfasha/Arabic-Deep-Learning-OCR | 10cd02937eb4aa7f3e40265ec6a4d50f4ef5ef29 | [
"MIT"
] | 7 | 2021-12-16T08:23:20.000Z | 2022-03-14T19:10:25.000Z | src/SamplePreprocessor.py | msfasha/ArabicDLOCR | 10cd02937eb4aa7f3e40265ec6a4d50f4ef5ef29 | [
"MIT"
] | 3 | 2021-09-17T14:31:34.000Z | 2021-12-09T05:32:12.000Z | src/SamplePreprocessor.py | msfasha/ArabicDLOCR | 10cd02937eb4aa7f3e40265ec6a4d50f4ef5ef29 | [
"MIT"
] | 1 | 2021-02-21T20:11:30.000Z | 2021-02-21T20:11:30.000Z | from __future__ import division
from __future__ import print_function
import random
import numpy as np
import cv2
import Config as config
def preprocess(img):
"scale image into the desired imgSize, transpose it for TF and normalize gray-values"
# increase dataset size by applying random stretches to the images
if config.AUGMENT_IMAGE:
stretch = (random.random() - 0.5) # -0.5 .. +0.5
# random width, but at least 1
wStretched = max(int(img.shape[1] * (1 + stretch)), 1)
# stretch horizontally by factor 0.5 .. 1.5
img = cv2.resize(img, (wStretched, img.shape[0]))
# create target image and copy sample image into it
(h, w) = img.shape
fx = w / config.IMAGE_WIDTH
fy = h / config.IMAGE_HEIGHT
f = max(fx, fy)
# scale according to f (result at least 1 and at most wt or ht)
newSize = (max(min(config.IMAGE_WIDTH, int(w / f)), 1),
max(min(config.IMAGE_HEIGHT, int(h / f)), 1))
img = cv2.resize(img, newSize)
target = np.ones([config.IMAGE_HEIGHT, config.IMAGE_WIDTH]) * 255
target[0:newSize[1], 0:newSize[0]] = img
# transpose for TF
img = cv2.transpose(target)
# normalize
(m, s) = cv2.meanStdDev(img)
m = m[0][0]
s = s[0][0]
img = img - m
img = img / s if s > 0 else img
return img
| 31.318182 | 90 | 0.609579 | 209 | 1,378 | 3.942584 | 0.37799 | 0.080097 | 0.058252 | 0.009709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035282 | 0.280116 | 1,378 | 43 | 91 | 32.046512 | 0.795363 | 0.269956 | 0 | 0 | 0 | 0 | 0.079885 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.214286 | 0 | 0.285714 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff769c71c4daf7a143069fafb6b927bdd86beb99 | 11,249 | py | Python | feature_examples/tensorflow/inspecting_tensors/pipelined_model.py | Paperspace/tutorials | 8e20ffb687080c44e75dabea594d2b57acc53713 | [
"MIT"
] | null | null | null | feature_examples/tensorflow/inspecting_tensors/pipelined_model.py | Paperspace/tutorials | 8e20ffb687080c44e75dabea594d2b57acc53713 | [
"MIT"
] | 78 | 2021-09-20T11:48:08.000Z | 2021-10-21T07:10:39.000Z | feature_examples/tensorflow/inspecting_tensors/pipelined_model.py | Paperspace/tutorials | 8e20ffb687080c44e75dabea594d2b57acc53713 | [
"MIT"
] | 1 | 2022-02-25T12:07:16.000Z | 2022-02-25T12:07:16.000Z | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import argparse
from functools import partial
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers
from tensorflow.python import ipu
from outfeed_optimizer import OutfeedOptimizer, OutfeedOptimizerMode
from maybe_outfeed_queue import MaybeOutfeedQueue
tf.disable_v2_behavior()
BATCH_SIZE = 32
LEARNING_RATE = 0.01
def parse_args():
# Handle command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--repeat-count", type=int, default=100,
help="The number of times the pipeline will be executed for each step."
" Set this to a small value (such as 1) if profiling.")
parser.add_argument("--epochs", type=float, default=3,
help="Total number of epochs to train for.")
parser.add_argument('--gradient-accumulation-count', type=int, default=16,
help="The number of times each pipeline stage will be executed in each pipeline execution.")
parser.add_argument('--outfeed-pre-accumulated-gradients', action='store_true',
help="Outfeed the pre-accumulated rather than accumulated gradients.")
parser.add_argument('--run-single-step', action="store_true",
help="Shorten the run for profiling: runs for a single step.")
args = parser.parse_args()
return args
def create_dataset():
# Prepare a tf dataset with mnist data
train_data, _ = mnist.load_data()
def normalise(x, y):
return x.astype("float32") / 255.0, y.astype("int32")
x_train, y_train = normalise(*train_data)
def generator():
return zip(x_train, y_train)
types = (x_train.dtype, y_train.dtype)
shapes = (x_train.shape[1:], y_train.shape[1:])
n_examples = len(x_train)
dataset = tf.data.Dataset.from_generator(generator, types, shapes)
dataset = dataset.shuffle(n_examples)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
dataset = dataset.repeat()
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return n_examples, dataset
# The following is a schematic representation of the model defined in this example,
# which also shows how it is split across two IPUs:
# ------------------------------------ Model Definition ------------------------------------
# <----------------------- ipu0 -----------------------> <------------- ipu1 ------------->
#
# inputs --|-- Dense --|-- Relu --|-- Dense --|-- Relu --|-- Dense--|-- SoftmaxCE --|-- Loss
# w0 --| w1 --| w2 --|
# b0 --| b1 --| b2 --|
def stage1(stage1_outfeed_queue, lr, images, labels):
# Stage 1 of the pipeline. Will be placed on the first IPU.
x = layers.Flatten()(images)
x = layers.Dense(256, activation=tf.nn.relu, name="dense1")(x)
stage1_outfeed_queue.maybe_outfeed("dense1", x)
x = layers.Dense(128, activation=tf.nn.relu, name="dense2")(x)
stage1_outfeed_queue.maybe_outfeed("dense2", x)
enqueue = stage1_outfeed_queue.maybe_enqueue()
if enqueue:
with tf.control_dependencies([enqueue]):
x = tf.identity(x)
return lr, x, labels
def stage2(stage2_outfeed_queue, lr, inputs, labels):
# Stage 2 of the pipeline. Will be placed on the second IPU.
logits = layers.Dense(10, name="dense3")(inputs)
stage2_outfeed_queue.maybe_outfeed("dense3", logits)
enqueue = stage2_outfeed_queue.maybe_enqueue()
if enqueue:
with tf.control_dependencies([enqueue]):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
else:
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
return lr, loss
def optimizer_function(optimizer_outfeed_queue, outfeed_optimizer_mode, lr, loss):
# Optimizer function used by the pipeline to automatically set up
# the gradient accumulation and weight update steps.
optimizer = tf.train.GradientDescentOptimizer(lr)
# Wrap the optimizer to outfeed the gradients for selected layers.
# OutfeedOptimizerMode.BEFORE_APPLY will enqueue the accumulated gradients.
# OutfeedOptimizerMode.AFTER_COMPUTE will enqueue the individual gradients.
outfeed_optimizer = OutfeedOptimizer(optimizer, optimizer_outfeed_queue,
outfeed_optimizer_mode=outfeed_optimizer_mode)
return ipu.pipelining_ops.OptimizerFunctionOutput(outfeed_optimizer, loss)
def model(stage1_outfeed_queue, stage2_outfeed_queue,
optimizer_outfeed_queue, outfeed_optimizer_mode, lr):
    # Defines a pipelined model which is split across two stages
with tf.variable_scope("FCModel", use_resource=True):
pipeline_op = ipu.pipelining_ops.pipeline(
computational_stages=[partial(stage1, stage1_outfeed_queue),
partial(stage2, stage2_outfeed_queue)],
gradient_accumulation_count=args.gradient_accumulation_count,
repeat_count=args.repeat_count,
inputs=[lr],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
optimizer_function=partial(optimizer_function,
optimizer_outfeed_queue,
outfeed_optimizer_mode),
outfeed_loss=True,
name="Pipeline")
return pipeline_op
def print_vals(vals, step):
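    """Print per-tensor summary statistics (mean/std/min/max/NaNs/infs) for a step."""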
data = []
index = 0
name_length = np.max([len(name) for name in vals.keys()]) + 5
for val_name, val in vals.items():
data_item = [index]
index += 1
data_item.append(val_name)
data_item.append(f'{np.mean(val):<4.6f}') # means
data_item.append(f'{np.std(val.astype(np.float64)):<4.6f}') # stds
data_item.append(f'{np.min(val):<4.6f}') # min extreme
data_item.append(f'{np.max(val):<4.6f}') # max extreme
data_item.append(f'{np.isnan(val).any()}') # nans?
data_item.append(f'{np.isinf(val).any()}') # infs?
data.append(data_item)
print(f"\nStep {step} - Summary Stats")
print(f'{"Index":<5} {"Name":<{name_length}} {"Mean":<12} {"Std":<12} {"Minimum":<12} {"Maximum":<12} {"NaNs":<7} {"infs":<7}')
for index, name, avg, std, dmin, dmax, nans, infs in data:
print(f"{index:<5} {name:<{name_length}} {avg:<12} {std:<12} {dmin:<12} {dmax:<12} {nans:<7} {infs:<7}")
print()
if __name__ == "__main__":
args = parse_args()
print(args)
if args.outfeed_pre_accumulated_gradients:
outfeed_optimizer_mode = OutfeedOptimizerMode.AFTER_COMPUTE
else:
outfeed_optimizer_mode = OutfeedOptimizerMode.BEFORE_APPLY
n_examples, dataset = create_dataset()
# Create the data queues to/from the IPU
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
# Create the outfeed queue for selected gradients
optimizer_outfeed_queue = MaybeOutfeedQueue(filters=["dense1", "dense2"])
# Create outfeed queues for selected activations in the pipeline stages
# The filters argument is optional
stage1_outfeed_queue = MaybeOutfeedQueue(filters=["dense1"])
stage2_outfeed_queue = MaybeOutfeedQueue()
# With batch size BS, gradient accumulation count GA and repeat count RPT,
# at every step n = (BS * GA * RPT) examples are used.
# So in order to evaluate at least N total examples, do ceil(N / n) steps
num_train_examples = int(args.epochs * n_examples)
examples_per_step = BATCH_SIZE * args.gradient_accumulation_count * args.repeat_count
steps = ((num_train_examples - 1) // examples_per_step) + 1
if args.run_single_step:
steps = 1
with tf.device('cpu'):
lr = tf.placeholder(np.float32, [])
with ipu.scopes.ipu_scope("/device:IPU:0"):
compiled_model = ipu.ipu_compiler.compile(
partial(model, stage1_outfeed_queue, stage2_outfeed_queue,
optimizer_outfeed_queue, outfeed_optimizer_mode),
inputs=[lr])
outfeed_op = outfeed_queue.dequeue()
# Get the dequeue op (or None) for each MaybeOutfeedQueue object
# (maybe_dequeue() returns None if nothing was enqueued)
optimizer_outfeed_op = optimizer_outfeed_queue.maybe_dequeue()
stage1_outfeed_queue_op = stage1_outfeed_queue.maybe_dequeue()
stage2_outfeed_queue_op = stage2_outfeed_queue.maybe_dequeue()
ipu.utils.move_variable_initialization_to_cpu()
init_op = tf.global_variables_initializer()
# Configure the IPU device
cfg = ipu.config.IPUConfig()
# Auto select as many IPUs as we want to pipeline across
cfg.auto_select_ipus = 2
cfg.configure_ipu_system()
with tf.Session() as sess:
# Initialize
sess.run(init_op)
sess.run(infeed_queue.initializer)
# Run
for step in range(steps):
sess.run(compiled_model, {lr: LEARNING_RATE})
# Read the outfeed for the training losses
losses = sess.run(outfeed_op)
# Read any activations that have been added to the pipeline stage outfeeds
activations = {}
if stage1_outfeed_queue_op:
vals1 = sess.run(stage1_outfeed_queue_op)
activations.update(vals1)
if stage2_outfeed_queue_op:
vals2 = sess.run(stage2_outfeed_queue_op)
activations.update(vals2)
for k, v in activations.items():
# The first dimension will be args.gradient_accumulation_count * args.repeat_count
# The second dimension is BATCH_SIZE
print(f"Activation key: {k} shape: {v.shape}")
# Print statistics for the selected activations
# cast to float32 to avoid overflow when calculating statistics
activations = {k: v.astype(np.float32) for k, v in activations.items()}
print_vals(activations, step)
# Read any gradients that have been added to the optimizer outfeed
if optimizer_outfeed_op:
gradients = sess.run(optimizer_outfeed_op)
for k, v in gradients.items():
# If using OutfeedOptimizerMode.BEFORE_APPLY then the first dimension will be args.repeat_count
# If using OutfeedOptimizerMode.AFTER_COMPUTE it will be args.gradient_accumulation_count * args.repeat_count
print(f"Gradient key: {k} shape: {v.shape}")
# Print statistics for the selected gradients
# cast to float32 to avoid overflow when calculating statistics
gradients = {k: v.astype(np.float32) for k, v in gradients.items()}
print_vals(gradients, step)
epoch = float(examples_per_step * step / n_examples)
print("Epoch {:.1f}, Mean loss: {:.3f}\n".format(
epoch, np.mean(losses)))
| 42.289474 | 131 | 0.649391 | 1,388 | 11,249 | 5.074207 | 0.25072 | 0.063041 | 0.030669 | 0.012779 | 0.242936 | 0.197217 | 0.158597 | 0.13659 | 0.1116 | 0.075536 | 0 | 0.016191 | 0.242333 | 11,249 | 265 | 132 | 42.449057 | 0.810161 | 0.224553 | 0 | 0.059172 | 0 | 0.011834 | 0.122334 | 0.021676 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053254 | false | 0 | 0.053254 | 0.011834 | 0.153846 | 0.065089 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff77cf7a8eea7809ab411aa9b0c87d02e9b83629 | 5,837 | py | Python | tests/rbac/test_policies.py | giangbui/fence | 5a28b77c30ce7fb11fd05b09a023d0aec1e57e16 | [
"Apache-2.0"
] | 2 | 2019-06-10T15:30:51.000Z | 2020-01-18T23:24:13.000Z | tests/rbac/test_policies.py | giangbui/fence | 5a28b77c30ce7fb11fd05b09a023d0aec1e57e16 | [
"Apache-2.0"
] | null | null | null | tests/rbac/test_policies.py | giangbui/fence | 5a28b77c30ce7fb11fd05b09a023d0aec1e57e16 | [
"Apache-2.0"
] | 1 | 2022-03-31T09:52:46.000Z | 2022-03-31T09:52:46.000Z | # pylint: disable=unused-argument
"""
Run tests for the policy endpoints in the RBAC blueprint.
Note that any test which will cause a call to
``fence.blueprints.rbac.lookup_policies`` must add the test policies to the
database, otherwise fence will raise an error from not finding a policy. Use
something like this:
# Put the example policies in the database first.
for policy in example_policies:
db_session.add(policy)
"""
import json
try:
import mock
except ImportError:
from unittest import mock
from fence.blueprints.rbac import _get_user_policy_ids
from fence.rbac.client import ArboristClient
from fence.models import Policy, User
def test_list_policies(db_session, client, example_policies):
"""
Test the ``/rbac/policy`` endpoint for listing existing policies.
"""
# Put the example policies in the database first.
for policy in example_policies:
db_session.add(policy)
policies_response = client.get("/rbac/policies/").json
assert "policies" in policies_response
policy_ids = policies_response["policies"]
assert set(policy_ids) == set(policy.id for policy in example_policies)
def test_list_user_policies(db_session, client, user_client, example_policies):
"""
Test listing the policies granted to the user using the
``/rbac/user/<user_id>/policies`` endpoint.
"""
# Set up the user to have the example policies.
user = db_session.query(User).filter_by(id=user_client.user_id).first()
user.policies = example_policies
path = "/rbac/user/{}/policies/".format(user_client.user_id)
policies_response = client.get(path)
assert "policies" in policies_response.json
policies_from_db = _get_user_policy_ids(user_client.user_id)
assert set(policies_from_db) == set(policies_response.json["policies"])
def test_grant_policy_to_user(
client, db_session, user_client, example_policies, mock_arborist_client
):
"""
Test granting an additional policy to a user and check in the policy
listing endpoint and the database that the change goes through correctly.
"""
# Put the example policies in the database first.
for policy in example_policies:
db_session.add(policy)
# Get the list of policies before adding a new one
path = "/rbac/user/{}/policies/".format(user_client.user_id)
policies_before = client.get(path).json["policies"]
# Grant user one additional policy for example
policies = {"policies": [example_policies[0].id]}
response = client.post(
path, data=json.dumps(policies), content_type="application/json"
)
assert response.status_code == 204
# Check that the new one was added correctly (shows up in endpoint).
policies_after = client.get(path).json["policies"]
assert len(policies_after) == len(policies_before) + 1
assert example_policies[0].id in policies_after
# Check new policy is in database.
db_policies = _get_user_policy_ids(user_client.user_id)
assert set(policies_after) == set(db_policies)
def test_replace_user_policies(
client, db_session, user_client, example_policies, mock_arborist_client
):
"""
Test overwriting the policies granted to a user and check in the policy
listing endpoint and the database that the change goes through correctly.
"""
# Put the example policies in the database first.
for policy in example_policies:
db_session.add(policy)
policies_even = example_policies[::2]
policies_odd = example_policies[1::2]
# Set up the user to have every odd example policy in the test list.
user = db_session.query(User).filter_by(id=user_client.user_id).first()
user.policies = policies_odd
# Hit the endpoint and change the user's policies to be every even test
# policy.
path = "/rbac/user/{}/policies/".format(user_client.user_id)
policies = {"policies": [policy.id for policy in policies_even]}
response = client.put(
path, data=json.dumps(policies), content_type="application/json"
)
assert response.status_code == 204
# Check policies from endpoint.
expected_policy_ids = [policy.id for policy in policies_even]
policies_after = client.get(path).json["policies"]
assert set(policies_after) == set(expected_policy_ids)
# Check policies in database.
user_policies_from_db = _get_user_policy_ids(user_client.user_id)
assert set(user_policies_from_db) == set(expected_policy_ids)
def test_revoke_user_policies(client, user_client):
"""
Test revoking all the policies granted to a user using the
``/rbac/user/policies/`` endpoint with a ``DELETE`` call.
"""
path = "/rbac/user/{}/policies/".format(user_client.user_id)
response = client.delete(path)
assert response.status_code == 204
# Check policies response for the user is empty.
policies_after = client.get(path).json["policies"]
assert policies_after == []
# Check policies in database are empty.
db_policies = _get_user_policy_ids(user_client.user_id)
assert db_policies == []
def test_create_policy(client, db_session):
"""
Test creating a policy using the ``/rbac/policies/`` endpoint, adding the
policy in the fence database and also registering it in arborist.
"""
policies = {"policies": ["test-policy-1", "test-policy-2"]}
with (
mock.patch.object(ArboristClient, "policies_not_exist", return_value=[])
) as mock_policies_not_exist:
response = client.post(
"/rbac/policies/",
data=json.dumps(policies),
content_type="application/json",
)
mock_policies_not_exist.assert_called_once()
assert response.status_code == 201
policy = db_session.query(Policy).filter(Policy.id == "test-policy-1").first()
assert policy
| 36.710692 | 82 | 0.716464 | 807 | 5,837 | 4.988848 | 0.173482 | 0.074516 | 0.034774 | 0.039742 | 0.475658 | 0.429459 | 0.409836 | 0.382514 | 0.338053 | 0.32762 | 0 | 0.004437 | 0.189138 | 5,837 | 158 | 83 | 36.943038 | 0.846186 | 0.31129 | 0 | 0.341463 | 0 | 0 | 0.081039 | 0.023669 | 0 | 0 | 0 | 0 | 0.207317 | 1 | 0.073171 | false | 0 | 0.085366 | 0 | 0.158537 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff7ba6f9e8860cad59ba673c821554190e51c56e | 4,773 | py | Python | source/evaluate.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | source/evaluate.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | source/evaluate.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | import torch
from models.mobilenetv2 import MobileNetV2
import sklearn.metrics
from tqdm import tqdm
import argparse
import logging
from utils.dataset import LoadImagesAndLabels, preprocess
from utils.torch_utils import select_device
import yaml
import pandas as pd
import os
import numpy as np
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
logging.basicConfig()
def evaluate(opt):
if isinstance(opt.val_csv,str):
opt.val_csv = [opt.val_csv]
df_val = []
for df in opt.val_csv:
df = pd.read_csv(df)
df_val.append(df)
df_val = pd.concat(df_val, axis=0)
# df_val = df.sample(frac=1).reset_index(drop_index=True)
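    # One output head per task; each head's size is that task's class count.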
model_config = []
for k,v in opt.classes .items():
model_config.append(len(v))
model = MobileNetV2(model_config)
if not os.path.isfile(opt.weights):
LOGGER.info(f"{opt.weights} is not a file")
exit()
checkpoint = torch.load(opt.weights)
model.load_state_dict(checkpoint['state_dict'])
padding = getattr(checkpoint['meta_data'], 'padding')
img_size = getattr(checkpoint['meta_data'], 'img_size')
ds_val = LoadImagesAndLabels(df_val,
data_folder=opt.DATA_FOLDER,
img_size = img_size,
padding= padding,
classes = opt.classes,
format_index = opt.format_index,
preprocess=preprocess,
augment=False)
loader_val = torch.utils.data.DataLoader(ds_val,
batch_size=opt.batch_size,
shuffle=True
)
loader = {'val':loader_val}
device = select_device(opt.device, model_name=getattr(checkpoint['meta_data'],'model_name'))
model = model.to(device)
model.eval()
y_true = []
y_pred = []
for _ in range(len(opt.classes)):
y_true.append([])
y_pred.append([])
with torch.no_grad():
for i,(imgs,labels,path) in tqdm(enumerate(loader['val']),total=len(loader['val'])):
imgs = imgs.to(device)
preds = model.predict(imgs)
labels = [label.to(device).cpu().numpy().ravel() for label in labels]
# LOGGER.info(f'len_labels: {len(labels[0])}')
preds = [x.detach().cpu().numpy().argmax(axis=-1).ravel() for x in preds]
for j in range(len(opt.classes)):
y_true[j].append(labels[j])
y_pred[j].append(preds[j])
y_true = [ np.concatenate(x, axis=0) for x in y_true ]
y_pred = [ np.concatenate(x, axis=0) for x in y_pred ]
LOGGER.debug(f"y_true[0]_len = {len(y_true[0])}")
LOGGER.debug(f"y_true[1]_len = {len(y_true[1])}")
LOGGER.debug(f"y_pred[0]_len = {len(y_pred[0])}")
LOGGER.debug(f"y_pred[1]_len = {len(y_true[1])}")
for i,(k,v) in enumerate(opt.classes.items()):
fi = sklearn.metrics.classification_report(y_true[i],y_pred[i],digits=4,zero_division=1,target_names=v)
with open(opt.logfile,'a') as f:
f.write(f'-------------{k}-----------\n')
f.write(fi+'\n')
print(f'-------------{k}-----------\n')
print(fi+'\n')
def parse_opt(know):
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='', help="checkpoint path")
parser.add_argument('--batch_size', type=int, default=64, help="batch size in evaluating")
parser.add_argument('--device', type=str, default='', help="select gpu")
parser.add_argument("--val_csv",type=str, default='',help='')
# parser.add_argument('--cfg',type=str,default='/u01/Intern/chinhdv/code/multi-task-classification/config/human_attribute_4/train_config.yaml')
parser.add_argument('--data',type=str,default='/u01/Intern/chinhdv/code/multi-task-classification/config/human_attribute_4/data_config.yaml')
parser.add_argument("--logfile", type=str, default="log.evaluate.txt", help="log the evaluating result")
    opt = parser.parse_known_args()[0] if know else parser.parse_args()
return opt
def main():
opt = parse_opt(True)
# with open(opt.cfg) as f:
# cfg = yaml.safe_load(f)
with open(opt.data) as f:
data = yaml.safe_load(f)
# for k,v in cfg.items():
# setattr(opt,k,v)
for k,v in data.items():
setattr(opt,k,v)
assert isinstance(opt.classes,dict), "Invalid format of classes in data_config.yaml"
# assert len(opt.task_weights) == len(opt.classes), "task weight should has the same length with classes"
evaluate(opt)
if __name__ =='__main__':
main()
| 40.449153 | 147 | 0.600251 | 648 | 4,773 | 4.256173 | 0.259259 | 0.019942 | 0.043147 | 0.018854 | 0.160261 | 0.102973 | 0.093546 | 0.075417 | 0.075417 | 0.056563 | 0 | 0.007832 | 0.250995 | 4,773 | 117 | 148 | 40.794872 | 0.763636 | 0.092185 | 0 | 0 | 0 | 0.010309 | 0.133503 | 0.034706 | 0 | 0 | 0 | 0 | 0.010309 | 1 | 0.030928 | false | 0 | 0.123711 | 0 | 0.164948 | 0.020619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff7c13128a462bb3be23d19e422d7d63b0a4d08d | 1,070 | py | Python | exec_command.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | exec_command.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | exec_command.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | import imp
import Default.exec
from . import build_tools
from . import logging
imp.reload(build_tools)
imp.reload(logging)
# *****************************************************************************
#
# *****************************************************************************
logger = logging.get_logger(__name__)
# *****************************************************************************
# A wrapper that tries to make sure we don't thrash while building
# *****************************************************************************
class CmaketoolsExecCommand(Default.exec.ExecCommand):
def run(self, id=None, **kwargs):
logger.info('cmaketools_exec called with {}'.format(kwargs))
if build_tools.is_building():
logger.info('Already building so we will wait')
return
build_tools.set_is_building(True)
super().run(**kwargs)
def on_finished(self, proc):
logger.info('cmaketools_exec finished')
super().on_finished(proc)
build_tools.set_is_building(False)
| 28.157895 | 79 | 0.483178 | 98 | 1,070 | 5.081633 | 0.530612 | 0.100402 | 0.080321 | 0.096386 | 0.092369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156075 | 1,070 | 37 | 80 | 28.918919 | 0.551495 | 0.350467 | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff7d34e02261e5588658057813346b3480351089 | 700 | py | Python | test/test_wire2.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | 2 | 2021-11-03T17:24:24.000Z | 2021-12-02T06:06:50.000Z | test/test_wire2.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | null | null | null | test/test_wire2.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | null | null | null | from NanoTCAD_ViDES import *
#a=array([5.4,0,0,10,6])
#rank = MPI.COMM_WORLD.Get_rank()
rank=0;
a=array([5.43,0,1,3.85,10.5])
[x,y,z]=atoms_coordinates_nanowire(a);
save_format_xyz("Z2.xyz",x/10.0,y/10.0,z/10.0,"Si");
[HH,n,Nc]=create_H_from_xyz(x,y,z,10,thop_Si,3,4);
ind=argsort(HH[:,1]);
savetxt("H",(real(HH[:,0:10])),fmt='%10.2f');
savetxt("H.sort",(real(HH[ind,0:10])),fmt='%10.2f');
H=Hamiltonian(n,Nc);
H.bands=1;
H.H=HH;
H.Eupper=10;
H.Elower=0;
H.eta=1e-12;
H.dE=0.1
#MPIze(H)
H.charge_T()
#H.charge_T()
a=[H.E,H.T]
if (rank==0): savetxt("T.SNW",transpose(a));
#MPI.Finalize()
string="transmission-test-Si.dat"
a=loadtxt(string);
plot(H.E,H.T);
hold
plot(a[:,0],a[:,1],'o');
show();
| 21.212121 | 52 | 0.631429 | 159 | 700 | 2.698113 | 0.433962 | 0.020979 | 0.032634 | 0.037296 | 0.04662 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088415 | 0.062857 | 700 | 32 | 53 | 21.875 | 0.565549 | 0.127143 | 0 | 0 | 0 | 0 | 0.094059 | 0.039604 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff81e93f9d63ba522af9720a31163a75f3b71887 | 2,127 | py | Python | setup.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | setup.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | setup.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | # We're linking against '../build/bin/libhumon-d.a' which is built by `../build.py`.
from setuptools import setup, Extension
with open ('README.md', 'r') as f:
long_desc = f.read()
setup(name="humon",
version='0.0.3',
description='A Python wrapper over humon\'s C API, for reading Humon token streams.',
long_description = long_desc,
long_description_content_type = 'text/markdown',
author='Trevor Schrock',
author_email='spacemeat@gmail.com',
url='https://github.com/spacemeat/humon-py',
packages=["humon"],
ext_package="humon",
ext_modules=[Extension("humon",
include_dirs = ['./clib/include/humon', './clib/src'],
extra_compile_args = ['-ggdb3', '-O0'],
sources = ["./clib/src/ansiColors.c",
"./clib/src/encoding.c",
"./clib/src/node.c",
"./clib/src/parse.c",
"./clib/src/printing.c",
"./clib/src/tokenize.c",
"./clib/src/trove.c",
"./clib/src/utils.c",
"./clib/src/vector.c",
"./humon/cpkg/enumConsts.c",
"./humon/cpkg/humonModule.c",
"./humon/cpkg/node-py.c",
"./humon/cpkg/token-py.c",
"./humon/cpkg/trove-py.c",
"./humon/cpkg/utils.c"])
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Topic :: Software Development"
],
extras_require = {
'dev': ['check-manifest', 'twine']
},
python_requires='>=3.8'
)
| 41.705882 | 91 | 0.416549 | 189 | 2,127 | 4.613757 | 0.566138 | 0.080275 | 0.073395 | 0.041284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00674 | 0.441937 | 2,127 | 50 | 92 | 42.54 | 0.727885 | 0.038552 | 0 | 0.046512 | 0 | 0 | 0.324841 | 0.100441 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.023256 | 0 | 0.023256 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8380265f376dd391d1a11edf2a540c53f53a18 | 3,755 | py | Python | indra/lib/python/indra/base/metrics.py | humbletim/archived-casviewer | 3b51b1baae7e7cebf1c7dca62d9c02751709ee57 | [
"Unlicense"
] | null | null | null | indra/lib/python/indra/base/metrics.py | humbletim/archived-casviewer | 3b51b1baae7e7cebf1c7dca62d9c02751709ee57 | [
"Unlicense"
] | null | null | null | indra/lib/python/indra/base/metrics.py | humbletim/archived-casviewer | 3b51b1baae7e7cebf1c7dca62d9c02751709ee57 | [
"Unlicense"
] | null | null | null | """\
@file metrics.py
@author Phoenix
@date 2007-11-27
@brief simple interface for logging metrics
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import sys
try:
import syslog
except ImportError:
# Windows
import sys
class syslog(object):
# wrap to a lame syslog for windows
_logfp = sys.stderr
        def syslog(msg):
            # Qualify _logfp: it is a class attribute, not a module-level name.
            syslog._logfp.write(msg)
            if not msg.endswith('\n'):
                syslog._logfp.write('\n')
syslog = staticmethod(syslog)
from indra.base.llsd import format_notation
def record_metrics(table, stats):
"Write a standard metrics log"
_log("LLMETRICS", table, stats)
def record_event(table, data):
"Write a standard logmessage log"
_log("LLLOGMESSAGE", table, data)
def set_destination(dest):
"""Set the destination of metrics logs for this process.
If you do not call this function prior to calling a logging
method, that function will open sys.stdout as a destination.
Attempts to set dest to None will throw a RuntimeError.
@param dest a file-like object which will be the destination for logs."""
if dest is None:
raise RuntimeError("Attempt to unset metrics destination.")
global _destination
_destination = dest
def destination():
"""Get the destination of the metrics logs for this process.
Returns None if no destination is set"""
global _destination
return _destination
class SysLogger(object):
"A file-like object which writes to syslog."
def __init__(self, ident='indra', logopt = None, facility = None):
try:
if logopt is None:
logopt = syslog.LOG_CONS | syslog.LOG_PID
if facility is None:
facility = syslog.LOG_LOCAL0
syslog.openlog(ident, logopt, facility)
import atexit
atexit.register(syslog.closelog)
except AttributeError:
# No syslog module on Windows
pass
def write(str):
syslog.syslog(str)
write = staticmethod(write)
def flush():
pass
flush = staticmethod(flush)
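# Example usage (a sketch):
#   set_destination(SysLogger())
#   record_metrics('login', {'duration': 0.42})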
#
# internal API
#
_sequence_id = 0
_destination = None
def _next_id():
global _sequence_id
next = _sequence_id
_sequence_id += 1
return next
def _dest():
global _destination
if _destination is None:
# this default behavior is documented in the metrics functions above.
_destination = sys.stdout
return _destination
def _log(header, table, data):
log_line = "%s (%d) %s %s" \
% (header, _next_id(), table, format_notation(data))
dest = _dest()
dest.write(log_line)
dest.flush()
| 30.778689 | 77 | 0.69241 | 506 | 3,755 | 5.05336 | 0.416996 | 0.034415 | 0.010168 | 0.014079 | 0.035198 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008045 | 0.238615 | 3,755 | 121 | 78 | 31.033058 | 0.886324 | 0.499867 | 0 | 0.171875 | 0 | 0 | 0.093443 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171875 | false | 0.03125 | 0.09375 | 0 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff83eb9a77537e2ab8fa2e7059f2473c401773ef | 955 | py | Python | CodeChef/KAVGMAT.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | 1 | 2021-01-10T13:29:21.000Z | 2021-01-10T13:29:21.000Z | CodeChef/KAVGMAT.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | CodeChef/KAVGMAT.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | # https://www.codechef.com/viewsolution/44674409
from itertools import accumulate
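# Approach: keep a 2-D prefix-sum matrix; for each square size and bottom row,
# binary-search the left-most bottom-right column whose square average reaches
# k. The averages are monotone along a row (the matrix is sorted), so every
# column to the right of it also counts.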
def binSearch(arr: tuple, r: int, lens: int, k: int, siz: int) -> int:
s, l, siz2, total = 1, lens, siz ** 2, lens + 1
while (s <= l):
m = int((s + l) / 2)
avg = (arr[r][m] - arr[r][m - siz] - arr[r - siz][m] + arr[r - siz][m - siz]) / siz2
if (avg >= k):
total = m
l = m - 1
else:
s = m + 1
return (lens - total + 1)
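# Fold each new row's horizontal prefix sums into the running row totals,
# building the 2-D prefix-sum matrix one row at a time.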
def update(inp: list) -> tuple:
global a
a = tuple(x + y for x, y in zip(a, accumulate(map(int, inp), lambda x, y : x + y)))
return a
for _ in range(int(input())):
n, m, k = map(int, input().split())
a = (0,) * (m + 1)
arr = (a,) + tuple(update([0] + input().split()) for _ in range(n))
ans = 0
for leng in range(1, min(n, m) + 1):
for row in range(leng, n + 1):
ans += binSearch(arr, row, m, k, leng)
print(ans) | 28.939394 | 92 | 0.489005 | 157 | 955 | 2.961783 | 0.33758 | 0.034409 | 0.021505 | 0.034409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037209 | 0.324607 | 955 | 33 | 93 | 28.939394 | 0.683721 | 0.048168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0 | 0.2 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff84bc8bc1a66d6322c20e43d9ff178f6a164a04 | 3,074 | py | Python | solver.py | koffes/professorspillet | 735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f | [
"MIT"
] | null | null | null | solver.py | koffes/professorspillet | 735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f | [
"MIT"
] | null | null | null | solver.py | koffes/professorspillet | 735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f | [
"MIT"
] | null | null | null | """Professor game solver."""
from deck import Deck
import tiles
from display import Display
from board import Board
import argparse
class Solver:
"""Solver for the professor game."""
def __init__(self):
"""Create the deck and start the solver."""
self.deck = Deck(tiles.DECK)
self.disp = Display()
self.board = Board()
self.sol_attemp = 0
self.sol_found = 0
self.tile_idx_last = 0
def _prog_monit(self, curr_pos, tile_idx):
self.sol_attemp += 1
if self.sol_attemp % 100000 == 0:
print('Solutions tried: {0}'.format(self.sol_attemp))
if curr_pos == 0 and tile_idx > self.tile_idx_last:
self.tile_idx_last = tile_idx
print('Prog {0:.2f} %'.format(tile_idx * 100 / tiles.TILES_NUM))
def _tile_rotation(self, curr_pos, row, col, curr_tile):
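        """Try every rotation of the tile at (row, col); recurse on a fit and
        save the board when the final position validates."""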
rot = 0
while rot < tiles.EDGE_NUM:
if self.board.tile_valid(row, col):
if curr_pos == (tiles.TILES_NUM - 1):
self.sol_found += 1
print('Solution! num: {0}'.format(self.sol_found))
                    # Honour the destination folder passed to solve().
                    save_filename = self.dest + 'sol_' + str(self.sol_found) + '.eps'
self.disp.board(self.board.board, False, save_filename)
else:
self._tile_recursive(curr_pos + 1)
curr_tile.rotate_cw()
rot += 1
else:
curr_tile.rotate_cw()
rot += 1
def _tile_recursive(self, curr_pos):
"""Curr pos indicates current board tile."""
for tile_idx in range(tiles.TILES_NUM):
curr_tile = self.deck.get_tile(self.tile_at_pos[curr_pos])
if curr_tile.get_in_use():
self.tile_at_pos[curr_pos] += 1
if self.tile_at_pos[curr_pos] >= tiles.TILES_NUM:
self.tile_at_pos[curr_pos] = 0
return
else:
row, col = self.board.tile_push(curr_tile)
self._prog_monit(curr_pos, tile_idx)
self._tile_rotation(curr_pos, row, col, curr_tile)
self.tile_at_pos[curr_pos] += 1
self.board.tile_pop()
if self.tile_at_pos[curr_pos] >= tiles.TILES_NUM:
self.tile_at_pos[curr_pos] = 0
return
self.tile_at_pos[curr_pos] = 0
return
    def solve(self, dest):
        """Invoke the solver, writing solutions to the ``dest`` folder."""
        self.dest = dest
        self.tile_at_pos = [0] * tiles.TILES_NUM
self._tile_recursive(0)
print('Solutions found: {}'.format(self.sol_found))
return self.sol_found
def main():
"""Run solver based on cmd line args."""
parser = argparse.ArgumentParser(description='Professor game solver')
parser.add_argument('-d', '--destination',
help='solution destination folder',
default='solutions/')
args = parser.parse_args()
s = Solver()
s.solve(args.destination)
if __name__ == '__main__':
main()
| 34.155556 | 79 | 0.559857 | 390 | 3,074 | 4.133333 | 0.233333 | 0.073821 | 0.055831 | 0.072581 | 0.233251 | 0.185484 | 0.137097 | 0.093672 | 0.076923 | 0.076923 | 0 | 0.015557 | 0.330839 | 3,074 | 89 | 80 | 34.539326 | 0.768109 | 0.058556 | 0 | 0.242857 | 0 | 0 | 0.059378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.071429 | 0 | 0.228571 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8661954cbb2044fab53dc99ce42a19a19a7154 | 720 | py | Python | main.py | henry2ee/danmu | 997c8a89caf24fb8735516208b79bd7353577818 | [
"MIT"
] | null | null | null | main.py | henry2ee/danmu | 997c8a89caf24fb8735516208b79bd7353577818 | [
"MIT"
] | null | null | null | main.py | henry2ee/danmu | 997c8a89caf24fb8735516208b79bd7353577818 | [
"MIT"
] | null | null | null | import time
import asyncio
from examples.test_bili_danmu import test_tcp_v2_danmu_client as bi_danmu
from examples.test_huya_danmu import test_ws_danmu_client as hy_danmu
from examples.test_douyu_danmu import test_ws_danmu_client as dy_danmu
# loop.run_until_complete(test_tcp_v2_danmu_client())
print('Choose a streaming platform: 1. Bilibili 2. Huya 3. Douyu')
platform = int(input())
print('Enter the room id for the chosen platform')
room_id = int(input())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if platform == 1:
loop.run_until_complete(bi_danmu(room_id))
elif platform == 2:
loop.run_until_complete(hy_danmu(room_id))
elif platform == 3:
loop.run_until_complete(dy_danmu(room_id))
loop.close()
print('Finished... please close...')
time.sleep(100000)
| 24.827586 | 73 | 0.791667 | 123 | 720 | 4.292683 | 0.373984 | 0.056818 | 0.090909 | 0.151515 | 0.276515 | 0.113636 | 0.113636 | 0 | 0 | 0 | 0 | 0.021638 | 0.101389 | 720 | 29 | 74 | 24.827586 | 0.794436 | 0.070833 | 0 | 0 | 0 | 0 | 0.082335 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff89c28a62c2b3ce0f84616a879b1b818bbc1672 | 653 | py | Python | dynamic_rest/renderers.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 690 | 2016-02-05T22:46:03.000Z | 2022-03-28T18:59:49.000Z | dynamic_rest/renderers.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 190 | 2015-03-06T16:57:21.000Z | 2022-02-02T21:56:07.000Z | dynamic_rest/renderers.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 117 | 2016-05-05T13:51:07.000Z | 2022-02-28T18:25:56.000Z | """This module contains custom renderer classes."""
from rest_framework.renderers import BrowsableAPIRenderer
class DynamicBrowsableAPIRenderer(BrowsableAPIRenderer):
"""Renderer class that adds directory support to the Browsable API."""
template = 'dynamic_rest/api.html'
def get_context(self, data, media_type, context):
from dynamic_rest.routers import get_directory
context = super(DynamicBrowsableAPIRenderer, self).get_context(
data,
media_type,
context
)
request = context['request']
context['directory'] = get_directory(request)
return context
| 31.095238 | 74 | 0.690658 | 65 | 653 | 6.8 | 0.553846 | 0.049774 | 0.058824 | 0.090498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.23124 | 653 | 20 | 75 | 32.65 | 0.880478 | 0.168453 | 0 | 0 | 0 | 0 | 0.069549 | 0.039474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8bd9285a91afad46966a1aacbf71a2f7a0647a | 4,839 | py | Python | pdffitx/tests/modeling/test_running.py | st3107/pdffitx | c746f6dfaf5656e9bb62508a9847c00567b34bbe | [
"BSD-3-Clause"
] | null | null | null | pdffitx/tests/modeling/test_running.py | st3107/pdffitx | c746f6dfaf5656e9bb62508a9847c00567b34bbe | [
"BSD-3-Clause"
] | null | null | null | pdffitx/tests/modeling/test_running.py | st3107/pdffitx | c746f6dfaf5656e9bb62508a9847c00567b34bbe | [
"BSD-3-Clause"
] | null | null | null | import pytest
from matplotlib import pyplot as plt
from pdffitx.modeling import F, multi_phase, optimize, fit_calib, MyParser
@pytest.mark.parametrize(
"data_key,kwargs,free_params,use_cf,expected",
[
(
"Ni_stru",
{
'values': {'scale_G0': 0.1, 'a_G0': 3.42, 'Biso_Ni_G0': 0.07, 'psize_f0': 300,
'delta2_G0': 2.5},
'bounds': {'scale_G0': [-1, 1], 'a_G0': [0, 6], 'Biso_Ni_G0': [0, 1], 'psize_f0': [2, 400],
'delta2_G0': [0, 5]}
},
True,
True,
{'scale_G0', 'a_G0', 'Biso_Ni_G0', 'psize_f0', 'delta2_G0'}
),
(
"Ni_stru_diffpy",
{
'values': {'scale_G0': 0.1, 'a_G0': 3.42, 'Biso_Ni_G0': 0.07, 'psize_f0': 300,
'delta2_G0': 2.5},
'bounds': {'scale_G0': [-1, 1], 'a_G0': [0, 6], 'Biso_Ni_G0': [0, 1], 'psize_f0': [2, 400],
'delta2_G0': [0, 5]},
},
False,
True,
{'Biso_Ni_G0', 'a_G0', 'alpha_G0', 'b_G0', 'beta_G0', 'c_G0', 'gamma_G0', 'delta2_G0',
'psize_f0', 'scale_G0'}
),
(
"ZrP_stru",
dict(),
False,
True,
{'Biso_O_G0', 'Biso_P_G0', 'Biso_Zr_G0', 'a_G0', 'b_G0', 'beta_G0', 'c_G0',
'delta2_G0', 'psize_f0', 'scale_G0'}
),
(
"ZrP_stru",
{
'bounds': {'x_0_G0': [-2, 2]}
},
True,
True,
{'Biso_O_G0', 'Biso_P_G0', 'Biso_Zr_G0', 'a_G0', 'b_G0', 'beta_G0', 'c_G0', 'delta2_G0',
'psize_f0', 'scale_G0', 'x_0_G0', 'x_1_G0', 'x_2_G0', 'x_3_G0', 'x_4_G0', 'x_5_G0', 'x_6_G0',
'x_7_G0', 'x_8_G0', 'x_9_G0', 'y_0_G0', 'y_1_G0', 'y_2_G0', 'y_3_G0', 'y_4_G0',
'y_5_G0', 'y_6_G0', 'y_7_G0', 'y_8_G0', 'y_9_G0', 'z_0_G0', 'z_1_G0', 'z_2_G0',
'z_3_G0', 'z_4_G0', 'z_5_G0', 'z_6_G0', 'z_7_G0', 'z_8_G0', 'z_9_G0'}
),
(
"Ni_stru",
{
'cf_params': ['psize_f0'],
'sg_params': dict()
},
True,
True,
{'psize_f0'}
),
(
"Ni_stru_diffpy",
{
'cf_params': list(),
'sg_params': {'G0': 225}
},
True,
True,
{'scale_G0', 'a_G0', 'Biso_Ni_G0', 'delta2_G0'}
),
(
"Ni_stru_diffpy",
{
'sg_params': {'G0': 225}
},
True,
False,
{'scale_G0', 'a_G0', 'Biso_Ni_G0', 'delta2_G0'}
),
(
"Ni_stru",
{},
True,
False,
{'scale_G0', 'a_G0', 'Biso_Ni_G0', 'delta2_G0'}
),
(
"Ni_stru",
{"add_eq": "A * exp(- B * r ** 2) * sin(C * r)"},
True,
False,
{'scale_G0', 'a_G0', 'Biso_Ni_G0', 'delta2_G0', "A", "B", "C"}
)
]
)
def test_multi_phase(db, data_key, kwargs, free_params, use_cf, expected):
parser = MyParser()
parser.parseFile(db['Ni_gr_file'])
phase = (F.sphericalCF, db[data_key]) if use_cf else db[data_key]
recipe = multi_phase(
[phase], parser,
fit_range=(2., 8.0, .1),
**kwargs
)
# xyz is added as fixed variables, free them for testing purpose
if free_params:
recipe.free("all")
# check parameters
if expected:
assert set(recipe.getNames()) == expected
# check default values
values = kwargs.get('values')
if values:
actual_values = dict(zip(recipe.getNames(), recipe.getValues()))
for name, expected_value in values.items():
assert actual_values[name] == expected_value
# check bounds
bounds = kwargs.get('bounds')
if bounds:
actual_bounds = dict(zip(recipe.getNames(), recipe.getBounds()))
for name, expected_bound in bounds.items():
assert actual_bounds[name] == expected_bound
@pytest.mark.parametrize(
"kwargs",
[
dict(tags=['G0_scale'], xtol=1e-2, gtol=1e-2, ftol=1e-2),
dict(tags=[('G0_scale', 'G0_lat'), 'G0_adp'], xtol=1e-2, gtol=1e-2, ftol=1e-2),
dict(tags=['G0_scale'], verbose=1, xtol=1e-2, gtol=1e-2, ftol=1e-2),
dict(tags=[('G0_scale', 'G0_lat'), 'G0_adp'], verbose=1, xtol=1e-2, gtol=1e-2, ftol=1e-2)
]
)
def test_optimize(filled_recipe, kwargs):
optimize(filled_recipe, **kwargs)
def test_fit_calib(db):
parser = MyParser()
parser.parseFile(db['Ni_gr_file'])
fit_calib(db['Ni_stru'], parser, fit_range=(2., 8., .1))
plt.clf()
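# The tests above rely on `db` and `filled_recipe` fixtures supplied by the
# suite's conftest.py (not shown here). A hypothetical sketch of the minimal
# shape `db` needs -- a mapping from the data keys used above to loaded
# structures and file paths (values assumed for illustration only):
#
# @pytest.fixture
# def db():
#     return {
#         'Ni_gr_file': 'tests/data/Ni.gr',   # PDF data file, path assumed
#         'Ni_stru': ...,                     # structure objects, loaders assumed
#         'Ni_stru_diffpy': ...,
#         'ZrP_stru': ...,
#     }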
| 32.695946 | 107 | 0.468899 | 634 | 4,839 | 3.242902 | 0.194006 | 0.047665 | 0.038911 | 0.024319 | 0.496109 | 0.428988 | 0.428988 | 0.422179 | 0.349222 | 0.314202 | 0 | 0.080064 | 0.357305 | 4,839 | 147 | 108 | 32.918367 | 0.581029 | 0.023352 | 0 | 0.382353 | 0 | 0 | 0.230883 | 0.009108 | 0 | 0 | 0 | 0 | 0.022059 | 1 | 0.022059 | false | 0 | 0.022059 | 0 | 0.044118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8cf0a690ddb21019a081d81240d10700344897 | 1,032 | py | Python | geo.py | biliarski/Geo | 3eb8f3073f734ed13d8fa508a2fa29828e13cd11 | [
"MIT"
] | null | null | null | geo.py | biliarski/Geo | 3eb8f3073f734ed13d8fa508a2fa29828e13cd11 | [
"MIT"
] | null | null | null | geo.py | biliarski/Geo | 3eb8f3073f734ed13d8fa508a2fa29828e13cd11 | [
"MIT"
] | null | null | null | from segpy.reader import create_reader
from segpy.writer import write_segy
import numpy as np
"""
Docs:
Segpy https://segpy.readthedocs.io/en/latest/
NumPy https://docs.scipy.org/doc/numpy-1.13.0/reference/index.html
"""
if __name__ == '__main__':
with open('data/CUTE.sgy', 'rb') as segy_in_file:
# The seg_y_dataset is a lazy-reader, so keep the file open throughout.
seg_y_dataset = create_reader(segy_in_file) # Non-standard Rev 1 little-endian
print(seg_y_dataset.num_traces())
# # Write the seg_y_dataset out to another file, in big-endian format
# with open('seismic_big.sgy', 'wb') as segy_out_file:
# write_segy(segy_out_file, seg_y_dataset, endian='>') # Standard Rev 1 big-endian
data = []
for i in range(0, seg_y_dataset.num_traces()):
trace = seg_y_dataset.trace_samples(i)
data.append(trace)
data_matrix = np.array(data, np.int32)
## each row is a trace
# print(data_matrix)
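# A follow-on sketch (not in the original): persisting the trace matrix with
# plain NumPy I/O for later analysis. File name assumed.
#
# np.save('data/traces.npy', data_matrix)   # binary .npy on disk
# loaded = np.load('data/traces.npy')       # shape: (num_traces, samples_per_trace)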
| 31.272727 | 95 | 0.655039 | 157 | 1,032 | 4.050955 | 0.464968 | 0.044025 | 0.121069 | 0.044025 | 0.062893 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011407 | 0.235465 | 1,032 | 32 | 96 | 32.25 | 0.794677 | 0.335271 | 0 | 0 | 0 | 0 | 0.04291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8da5b6c055840eb2495a1ff2b0ff2da60c7317 | 5,405 | py | Python | cogs/rng.py | AbstractUmbra/Okayu | 0a96ee4acbb82dd19302f90e49b6b7f550b1f8fc | [
"MIT"
] | 6 | 2020-05-05T14:46:45.000Z | 2020-08-07T21:48:48.000Z | cogs/rng.py | AbstractUmbra/Okayu | 0a96ee4acbb82dd19302f90e49b6b7f550b1f8fc | [
"MIT"
] | 1 | 2020-10-01T18:50:15.000Z | 2020-10-01T18:51:12.000Z | cogs/rng.py | AbstractUmbra/Robo-Hz | 0a96ee4acbb82dd19302f90e49b6b7f550b1f8fc | [
"MIT"
] | 4 | 2020-10-01T18:30:30.000Z | 2020-10-02T00:35:02.000Z | """
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from __future__ import annotations
import random
import re
from collections import Counter
from typing import TYPE_CHECKING, Optional, Union
import discord
from discord.ext import commands
from utils.context import Context
from utils.formats import plural, to_codeblock
if TYPE_CHECKING:
from bot import Kukiko
DICE_RE = re.compile(r"^(?P<rolls>\d+)[dD](?P<die>\d+)$")
DiceType = dict[str, Union[int, list[int]]]
class DiceRoll(commands.Converter):
async def convert(self, _: commands.Context, argument: str) -> DiceType:
search = DICE_RE.fullmatch(argument)
if not search:
raise commands.BadArgument("Dice roll doesn't seem valid, please use it in the format of `2d20`.")
search = search.groupdict()
rolls = max(min(int(search["rolls"]), 15), 1)
die = max(min(int(search["die"]), 1000), 2)
totals = [random.randint(1, die) for _ in range(rolls)]
return {"rolls": rolls, "die": die, "totals": totals}
class RNG(commands.Cog):
"""Utilities that provide pseudo-RNG."""
def __init__(self, bot: Kukiko):
self.bot = bot
@commands.group(pass_context=True)
async def random(self, ctx):
"""Displays a random thing you request."""
if ctx.invoked_subcommand is None:
await ctx.send(f"Incorrect random subcommand passed. Try {ctx.prefix}help random")
@random.command()
async def tag(self, ctx):
"""Displays a random tag.
A tag showing up in this does not get its usage count increased.
"""
tags = self.bot.get_cog("Tags")
if tags is None:
return await ctx.send("Tag commands currently disabled.")
tag = await tags.get_random_tag(ctx.guild, connection=ctx.db)
if tag is None:
return await ctx.send("This server has no tags.")
await ctx.send(f'Random tag found: {tag["name"]}\n{tag["content"]}')
@random.command()
async def number(self, ctx, minimum=0, maximum=100):
"""Displays a random number within an optional range.
The minimum must be smaller than the maximum and the maximum number
accepted is 1000.
"""
maximum = min(maximum, 1000)
if minimum >= maximum:
await ctx.send("Maximum is smaller than minimum.")
return
await ctx.send(random.randint(minimum, maximum))
@commands.command()
async def choose(self, ctx, *choices: commands.clean_content):
"""Chooses between multiple choices.
To denote multiple choices, you should use double quotes.
"""
if len(choices) < 2:
return await ctx.send("Not enough choices to pick from.")
await ctx.send(random.choice(choices))
@commands.command()
async def choosebestof(self, ctx, times: Optional[int], *choices: commands.clean_content):
"""Chooses between multiple choices N times.
To denote multiple choices, you should use double quotes.
You can only choose up to 10001 times and only the top 10 results are shown.
"""
if len(choices) < 2:
return await ctx.send("Not enough choices to pick from.")
if times is None:
times = (len(choices) ** 2) + 1
times = min(10001, max(1, times))
results = Counter(random.choice(choices) for _ in range(times))
builder = []
if len(results) > 10:
builder.append("Only showing top 10 results...")
for index, (elem, count) in enumerate(results.most_common(10), start=1):
builder.append(f"{index}. {elem} ({plural(count):time}, {count/times:.2%})")
await ctx.send("\n".join(builder))
@commands.command()
async def roll(self, ctx: commands.Context, *dice: DiceRoll):
"""Roll DnD die!"""
if len(dice) >= 25:
return await ctx.send("No more than 25 rolls per invoke, please.")
embed = discord.Embed(title="Rolls", colour=discord.Colour.random())
for i in dice:
fmt = ""
total = i["totals"]
die = i["die"]
rolls = i["rolls"]
# split = [total[x:x+5] for x in range(0, len(total), 5)]
builder = []
roll_sum = 0
for count, roll in enumerate(total, start=1):
builder.append(f"{count}: {roll}")
roll_sum += roll
fmt += "\n".join(builder)
fmt += f"\nSum: {roll_sum}\n"
embed.add_field(name=f"{rolls}d{die}", value=to_codeblock(fmt, language="prolog"))
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar.url)
await ctx.send(embed=embed)
@roll.error
async def roll_error(self, ctx: Context, error: BaseException) -> None:
error = getattr(error, "original", error)
if isinstance(error, commands.BadArgument):
await ctx.send(str(error), delete_after=5)
return
async def setup(bot: Kukiko):
await bot.add_cog(RNG(bot))
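# An offline sketch of the DiceRoll parsing (assumed usage, no discord needed):
#
#   DICE_RE.fullmatch("3d20").groupdict()  # -> {'rolls': '3', 'die': '20'}
#
# The converter then clamps rolls to 1..15 and die faces to 2..1000 before
# drawing each roll with random.randint(1, die).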
| 33.364198 | 111 | 0.595745 | 703 | 5,405 | 4.530583 | 0.330014 | 0.032653 | 0.04898 | 0.033909 | 0.141915 | 0.115542 | 0.100471 | 0.100471 | 0.065306 | 0.035793 | 0 | 0.016334 | 0.286401 | 5,405 | 161 | 112 | 33.571429 | 0.809437 | 0.052544 | 0 | 0.139785 | 0 | 0 | 0.140556 | 0.019846 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010753 | false | 0.021505 | 0.107527 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff8ef7348f50df3b9ddae02489685e3cf3e047f9 | 28,928 | py | Python | lib/silfont/scripts/psfrenameglyphs.py | silnrsi/pysilfont | 3c7855d21426590a3c0e235c965e8f90b76eccbb | [
"MIT"
] | 41 | 2015-05-21T21:12:26.000Z | 2022-02-17T17:23:14.000Z | lib/silfont/scripts/psfrenameglyphs.py | silnrsi/pysilfont | 3c7855d21426590a3c0e235c965e8f90b76eccbb | [
"MIT"
] | 63 | 2015-05-15T10:25:55.000Z | 2021-02-23T04:51:17.000Z | lib/silfont/scripts/psfrenameglyphs.py | silnrsi/pysilfont | 3c7855d21426590a3c0e235c965e8f90b76eccbb | [
"MIT"
] | 12 | 2015-06-12T11:52:08.000Z | 2020-09-23T10:40:59.000Z | #!/usr/bin/env python
__doc__ = '''Assign new working names to glyphs based on csv input file
- csv format oldname,newname'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'Bob Hallissy'
from silfont.core import execute
from xml.etree import ElementTree as ET
import re
import os
from glob import glob
argspec = [
('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
('-c', '--classfile', {'help': 'Classes file'}, {}),
('-i','--input',{'help': 'Input csv file'}, {'type': 'incsv', 'def': 'namemap.csv'}),
('--mergecomps',{'help': 'turn on component merge', 'action': 'store_true', 'default': False},{}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_renameglyphs.log'})]
csvmap = "" # Variable used globally
def doit(args) :
global csvmap, ksetsbymember
font = args.ifont
incsv = args.input
logger = args.logger
mergemode = args.mergecomps
failerrors = 0 # Keep count of errors that should cause the script to fail
csvmap = {} # List of all real maps in incsv, so excluding headers, blank lines, comments and identity maps
nameMap = {} # remember all glyphs actually renamed
kerngroupsrenamed = {} # List of all kern groups actually renamed
# List of secondary layers (ie layers other than the default)
secondarylayers = [x for x in font.layers if x.layername != "public.default"]
# Obtain lib.plist glyph order(s) and psnames if they exist:
publicGlyphOrder = csGlyphOrder = psnames = displayStrings = None
if hasattr(font, 'lib'):
if 'public.glyphOrder' in font.lib:
publicGlyphOrder = font.lib.getval('public.glyphOrder') # This is an array
if 'com.schriftgestaltung.glyphOrder' in font.lib:
csGlyphOrder = font.lib.getval('com.schriftgestaltung.glyphOrder') # This is an array
if 'public.postscriptNames' in font.lib:
psnames = font.lib.getval('public.postscriptNames') # This is a dict keyed by glyphnames
if 'com.schriftgestaltung.customParameter.GSFont.DisplayStrings' in font.lib:
displayStrings = font.lib.getval('com.schriftgestaltung.customParameter.GSFont.DisplayStrings')
else:
logger.log("no lib.plist found in font", "W")
# Renaming within the UFO is done in two passes to make sure we can handle circular renames such as:
# someglyph.alt = someglyph
# someglyph = someglyph.alt
# Note that the various objects with glyph names are all done independently since
# the same glyph names are not necessarily in all structures.
# First pass: process all records of csv, and for each glyph that is to be renamed:
# If the new glyphname is not already present, go ahead and rename it now.
# If the new glyph name already exists, rename the glyph to a temporary name
# and put relevant details in saveforlater[]
saveforlaterFont = [] # For the font itself
saveforlaterPGO = [] # For public.GlyphOrder
saveforlaterCSGO = [] # For GlyphsApp GlyphOrder (com.schriftgestaltung.glyphOrder)
saveforlaterPSN = [] # For public.postscriptNames
deletelater = [] # Glyphs we'll delete after merging
for r in incsv:
oldname = r[0].strip()
newname = r[1].strip()
# ignore header row and rows where the newname is blank or a comment marker
if oldname == "Name" or oldname.startswith('#') or newname == "" or oldname == newname:
continue
if len(oldname)==0:
logger.log('empty glyph oldname in glyph_data; ignored (newname: %s)' % newname, 'W')
continue
csvmap[oldname]=newname
# Handle font first:
if oldname not in font.deflayer:
logger.log("glyph name not in font: " + oldname , "I")
elif newname not in font.deflayer:
inseclayers = False
for layer in secondarylayers:
if newname in layer:
logger.log("Glyph %s is already in non-default layers; can't rename %s" % (newname, oldname), "E")
failerrors += 1
inseclayers = True
continue
if not inseclayers:
# Ok, this case is easy: just rename the glyph in all layers
for layer in font.layers:
if oldname in layer: layer[oldname].name = newname
nameMap[oldname] = newname
logger.log("Pass 1 (Font): Renamed %s to %s" % (oldname, newname), "I")
elif mergemode:
mergeglyphs(font.deflayer[oldname], font.deflayer[newname])
for layer in secondarylayers:
if oldname in layer:
if newname in layer:
mergeglyphs(layer[oldname], layer[newname])
else:
layer[oldname].name = newname
nameMap[oldname] = newname
deletelater.append(oldname)
logger.log("Pass 1 (Font): merged %s to %s" % (oldname, newname), "I")
else:
# newname already in font -- but it might get renamed later in which case this isn't actually a problem.
# For now, then, rename glyph to a temporary name and remember it for second pass
tempname = gettempname(lambda n : n not in font.deflayer)
for layer in font.layers:
if oldname in layer:
layer[oldname].name = tempname
saveforlaterFont.append( (tempname, oldname, newname) )
# Similar algorithm for public.glyphOrder, if present:
if publicGlyphOrder:
if oldname not in publicGlyphOrder:
logger.log("glyph name not in publicGlyphorder: " + oldname , "I")
else:
x = publicGlyphOrder.index(oldname)
if newname not in publicGlyphOrder:
publicGlyphOrder[x] = newname
nameMap[oldname] = newname
logger.log("Pass 1 (PGO): Renamed %s to %s" % (oldname, newname), "I")
elif mergemode:
del publicGlyphOrder[x]
nameMap[oldname] = newname
logger.log("Pass 1 (PGO): Removed %s (now using %s)" % (oldname, newname), "I")
else:
tempname = gettempname(lambda n : n not in publicGlyphOrder)
publicGlyphOrder[x] = tempname
saveforlaterPGO.append( (x, oldname, newname) )
# And for GlyphsApp glyph order, if present:
if csGlyphOrder:
if oldname not in csGlyphOrder:
logger.log("glyph name not in csGlyphorder: " + oldname , "I")
else:
x = csGlyphOrder.index(oldname)
if newname not in csGlyphOrder:
csGlyphOrder[x] = newname
nameMap[oldname] = newname
logger.log("Pass 1 (csGO): Renamed %s to %s" % (oldname, newname), "I")
elif mergemode:
del csGlyphOrder[x]
nameMap[oldname] = newname
logger.log("Pass 1 (csGO): Removed %s (now using %s)" % (oldname, newname), "I")
else:
tempname = gettempname(lambda n : n not in csGlyphOrder)
csGlyphOrder[x] = tempname
saveforlaterCSGO.append( (x, oldname, newname) )
# And for psnames
if psnames:
if oldname not in psnames:
logger.log("glyph name not in psnames: " + oldname , "I")
elif newname not in psnames:
psnames[newname] = psnames.pop(oldname)
nameMap[oldname] = newname
logger.log("Pass 1 (psn): Renamed %s to %s" % (oldname, newname), "I")
elif mergemode:
del psnames[oldname]
nameMap[oldname] = newname
logger.log("Pass 1 (psn): Removed %s (now using %s)" % (oldname, newname), "I")
else:
tempname = gettempname(lambda n: n not in psnames)
psnames[tempname] = psnames.pop(oldname)
saveforlaterPSN.append( (tempname, oldname, newname))
# Second pass: now we can reprocess those things we saved for later:
# If the new glyphname is no longer present, we can complete the renaming
# Otherwise we've got a fatal error
for j in saveforlaterFont:
tempname, oldname, newname = j
if newname in font.deflayer: # Only need to check deflayer, since (if present) it would have been renamed in all
# Ok, this really is a problem
logger.log("Glyph %s already in font; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
for layer in font.layers:
if tempname in layer:
layer[tempname].name = newname
nameMap[oldname] = newname
logger.log("Pass 2 (Font): Renamed %s to %s" % (oldname, newname), "I")
for j in saveforlaterPGO:
x, oldname, newname = j
if newname in publicGlyphOrder:
# Ok, this really is a problem
logger.log("Glyph %s already in public.GlyphOrder; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
publicGlyphOrder[x] = newname
nameMap[oldname] = newname
logger.log("Pass 2 (PGO): Renamed %s to %s" % (oldname, newname), "I")
for j in saveforlaterCSGO:
x, oldname, newname = j
if newname in csGlyphOrder:
# Ok, this really is a problem
logger.log("Glyph %s already in com.schriftgestaltung.glyphOrder; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
csGlyphOrder[x] = newname
nameMap[oldname] = newname
logger.log("Pass 2 (csGO): Renamed %s to %s" % (oldname, newname), "I")
for tempname, oldname, newname in saveforlaterPSN:
if newname in psnames:
# Ok, this really is a problem
logger.log("Glyph %s already in public.postscriptNames; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
psnames[newname] = psnames.pop(tempname)
nameMap[oldname] = newname
logger.log("Pass 2 (psn): Renamed %s to %s" % (oldname, newname), "I")
# Rebuild font structures from the modified lists we have:
# Rebuild glyph order elements:
if publicGlyphOrder:
array = ET.Element("array")
for name in publicGlyphOrder:
ET.SubElement(array, "string").text = name
font.lib.setelem("public.glyphOrder", array)
if csGlyphOrder:
array = ET.Element("array")
for name in csGlyphOrder:
ET.SubElement(array, "string").text = name
font.lib.setelem("com.schriftgestaltung.glyphOrder", array)
# Rebuild postscriptNames:
if psnames:
dict = ET.Element("dict")
for n in psnames:
ET.SubElement(dict, "key").text = n
ET.SubElement(dict, "string").text = psnames[n]
font.lib.setelem("public.postscriptNames", dict)
# Iterate over all glyphs, and fix up any components that reference renamed glyphs
for layer in font.layers:
for name in layer:
glyph = layer[name]
for component in glyph.etree.findall('./outline/component[@base]'):
oldname = component.get('base')
if oldname in nameMap:
component.set('base', nameMap[oldname])
logger.log(f'renamed component base {oldname} to {component.get("base")} in glyph {name} layer {layer.layername}', 'I')
lib = glyph['lib']
if lib:
if 'com.schriftgestaltung.Glyphs.ComponentInfo' in lib:
cielem = lib['com.schriftgestaltung.Glyphs.ComponentInfo'][1]
for component in cielem:
for i in range(0,len(component),2):
if component[i].text == 'name':
oldname = component[i+1].text
if oldname in nameMap:
component[i+1].text = nameMap[oldname]
logger.log(f'renamed component info {oldname} to {nameMap[oldname]} in glyph {name} layer {layer.layername}', 'I')
# Delete anything we no longer need:
for name in deletelater:
for layer in font.layers:
if name in layer: layer.delGlyph(name)
logger.log("glyph %s removed" % name, "I")
    # Other structures that contain glyph names are handled by looping over those
    # structures and replacing names in place, rather than looping over incsv
# Update Display Strings
if displayStrings:
changed = False
glyphRE = re.compile(r'/([a-zA-Z0-9_.-]+)') # regex to match / followed by a glyph name
        for i, dispstr in enumerate(displayStrings):  # re.sub() calls glyphsub once for
            displayStrings[i] = glyphRE.sub(glyphsub, dispstr)  # each non-overlapping match of the pattern
if displayStrings[i] != dispstr:
changed = True
if changed:
array = ET.Element("array")
for dispstr in displayStrings:
ET.SubElement(array, "string").text = dispstr
font.lib.setelem('com.schriftgestaltung.customParameter.GSFont.DisplayStrings', array)
logger.log("com.schriftgestaltung.customParameter.GSFont.DisplayStrings updated", "I")
# Process groups.plist and kerning.plist
# group names in the form public.kern[1|2].<glyph name> will automatically be renamed if the glyph name is in the csvmap
#
groups = kerning = None
kgroupprefixes = {"public.kern1.": 1, "public.kern2.": 2}
if "groups" in font.__dict__: groups = font.groups
if "kerning" in font.__dict__: kerning = font.kerning
if (groups or kerning) and mergemode:
logger.log("Note - Kerning and group data not processed when using mergecomps", "P")
elif groups or kerning:
kgroupsmap = ["", {}, {}] # Dicts of kern1/kern2 group renames. Outside the groups if statement, since also used with kerning.plist
if groups:
# Analyse existing data, building dict from existing data and building some indexes
gdict = {}
kgroupsbyglyph = ["", {}, {}] # First entry dummy, so index is 1 or 2 for kern1 and kern2
            kgroupduplicates = ["", [], []]  # First entry dummy; index 1/2 collect glyphs seen in more than one kern1/kern2 group
for gname in groups:
group = groups.getval(gname)
gdict[gname] = group
kprefix = gname[0:13]
if kprefix in kgroupprefixes:
ktype = kgroupprefixes[kprefix]
for glyph in group:
if glyph in kgroupsbyglyph[ktype]:
kgroupduplicates[ktype].append(glyph)
logger.log("In existing kern groups, %s is in more than one kern%s group" % (glyph, str(ktype)), "E")
failerrors += 1
else:
kgroupsbyglyph[ktype][glyph] = gname
# Now process the group data
glyphsrenamed = []
saveforlaterKgroups = []
for gname in list(gdict): # Loop round groups renaming glyphs within groups and kern group names
group = gdict[gname]
# Rename group if kern1 or kern2 group
kprefix = gname[:13]
if kprefix in kgroupprefixes:
ktype = kgroupprefixes[kprefix]
ksuffix = gname[13:]
if ksuffix in csvmap: # This is a kern group that we should rename
newgname = kprefix + csvmap[ksuffix]
if newgname in gdict: # Will need to be renamed in second pass
tempname = gettempname(lambda n : n not in gdict)
gdict[tempname] = gdict.pop(gname)
saveforlaterKgroups.append((tempname, gname, newgname))
else:
gdict[newgname] = gdict.pop(gname)
kerngroupsrenamed[gname] = newgname
logger.log("Pass 1 (Kern groups): Renamed %s to %s" % (gname, newgname), "I")
kgroupsmap[ktype][gname] = newgname
# Now rename glyphs within the group
# - This could lead to duplicate names, but that might be valid for arbitrary groups so not checked
# - kern group validity will be checked after all renaming is done
for (i, glyph) in enumerate(group):
if glyph in csvmap:
group[i] = csvmap[glyph]
if glyph not in glyphsrenamed: glyphsrenamed.append(glyph)
# Need to report glyphs renamed after the loop, since otherwise could report multiple times
for oldname in glyphsrenamed:
nameMap[oldname] = csvmap[oldname]
logger.log("Glyphs in groups: Renamed %s to %s" % (oldname, csvmap[oldname]), "I")
# Second pass for renaming kern groups. (All glyph renaming is done in first pass)
for (tempname, oldgname, newgname) in saveforlaterKgroups:
if newgname in gdict: # Can't rename
logger.log("Kern group %s already in groups.plist; can't rename %s" % (newgname, oldgname), "E")
failerrors += 1
else:
gdict[newgname] = gdict.pop(tempname)
kerngroupsrenamed[oldgname] = newgname
logger.log("Pass 2 (Kern groups): Renamed %s to %s" % (oldgname, newgname), "I")
# Finally check kern groups follow the UFO rules!
kgroupsbyglyph = ["", {}, {}] # Reset for new analysis
for gname in gdict:
group = gdict[gname]
kprefix = gname[:13]
if kprefix in kgroupprefixes:
ktype = kgroupprefixes[kprefix]
for glyph in group:
if glyph in kgroupsbyglyph[ktype]: # Glyph already in a kern group so we have a duplicate
if glyph not in kgroupduplicates[ktype]: # This is a newly-created duplicate so report
logger.log("After renaming, %s is in more than one kern%s group" % (glyph, str(ktype)), "E")
failerrors += 1
kgroupduplicates[ktype].append(glyph)
else:
kgroupsbyglyph[ktype][glyph] = gname
# Now need to recreate groups.plist from gdict
for group in list(groups): groups.remove(group) # Empty existing contents
for gname in gdict:
elem = ET.Element("array")
for glyph in gdict[gname]:
ET.SubElement(elem, "string").text = glyph
groups.setelem(gname, elem)
# Now process kerning data
if kerning:
k1map = kgroupsmap[1]
k2map = kgroupsmap[2]
kdict = {}
for setname in kerning: kdict[setname] = kerning.getval(setname) # Create a working dict from plist
saveforlaterKsets = []
# First pass on set names
for setname in list(kdict): # setname could be a glyph in csvmap or a kern1 group name in k1map
if setname in csvmap or setname in k1map:
newname = csvmap[setname] if setname in csvmap else k1map[setname]
if newname in kdict:
tempname = gettempname(lambda n : n not in kdict)
kdict[tempname] = kdict.pop(setname)
saveforlaterKsets.append((tempname, setname, newname))
else:
kdict[newname] = kdict.pop(setname)
if setname in csvmap: nameMap[setname] = newname # Change to kern set name will have been logged previously
logger.log("Pass 1 (Kern sets): Renamed %s to %s" % (setname, newname), "I")
# Now do second pass for set names
for (tempname, oldname, newname) in saveforlaterKsets:
if newname in kdict: # Can't rename
logger.log("Kern set %s already in kerning.plist; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
kdict[newname] = kdict.pop(tempname)
if oldname in csvmap: nameMap[oldname] = newname
logger.log("Pass 1 (Kern sets): Renamed %s to %s" % (oldname, newname), "I")
# Rename kern set members next.
# Here, since a member could be in more than one set, take different approach to two passes.
# - In first pass, rename to a temp (and invalid) name so duplicates are not possible. Name to include
# old name for reporting purposes
# - In second pass, set to correct new name after checking for duplicates
# Do first pass for set names
tempnames = []
for setname in list(kdict):
kset = kdict[setname]
for mname in list(kset): # mname could be a glyph in csvmap or a kern2 group name in k2map
if mname in csvmap or mname in k2map:
newname = csvmap[mname] if mname in csvmap else k2map[mname]
newname = "^" + newname + "^" + mname
if newname not in tempnames: tempnames.append(newname)
kset[newname] = kset.pop(mname)
# Second pass to change temp names to correct final names
# We need an index of which sets each member is in
ksetsbymember = {}
for setname in kdict:
kset = kdict[setname]
for member in kset:
if member not in ksetsbymember:
ksetsbymember[member] = [setname]
else:
ksetsbymember[member].append(setname)
# Now do the renaming
for tname in tempnames:
(newname, oldname) = tname[1:].split("^")
if newname in ksetsbymember: # Can't rename
logger.log("Kern set %s already in kerning.plist; can't rename %s" % (newname, oldname), "E")
failerrors += 1
else:
for ksetname in ksetsbymember[tname]:
kset = kdict[ksetname]
kset[newname] = kset.pop(tname)
ksetsbymember[newname] = ksetsbymember.pop(tname)
if tname in csvmap: nameMap[oldname] = newname
logger.log("Kern set members: Renamed %s to %s" % (oldname, newname), "I")
# Now need to recreate kerning.plist from kdict
for kset in list(kerning): kerning.remove(kset) # Empty existing contents
for kset in kdict:
elem = ET.Element("dict")
for member in kdict[kset]:
ET.SubElement(elem, "key").text = member
ET.SubElement(elem, "integer").text = str(kdict[kset][member])
kerning.setelem(kset, elem)
if failerrors:
logger.log(str(failerrors) + " issues detected - see errors reported above", "S")
logger.log("%d glyphs renamed in UFO" % (len(nameMap)), "P")
if kerngroupsrenamed: logger.log("%d kern groups renamed in UFO" % (len(kerngroupsrenamed)), "P")
# If a classfile was provided, change names within it also
#
if args.classfile:
logger.log("Processing classfile {}".format(args.classfile), "P")
# In order to preserve comments we use our own TreeBuilder
class MyTreeBuilder(ET.TreeBuilder):
def comment(self, data):
self.start(ET.Comment, {})
self.data(data)
self.end(ET.Comment)
# RE to match separators between glyph names (whitespace):
notGlyphnameRE = re.compile(r'(\s+)')
# Keep a list of glyphnames that were / were not changed
changed = set()
notChanged = set()
# Process one token (might be whitespace separator, glyph name, or embedded classname starting with @):
def dochange(gname, logErrors = True):
if len(gname) == 0 or gname.isspace() or gname not in csvmap or gname.startswith('@'):
# No change
return gname
try:
newgname = csvmap[gname]
changed.add(gname)
return newgname
except KeyError:
if logErrors: notChanged.add(gname)
return gname
doc = ET.parse(args.classfile, parser=ET.XMLParser(target=MyTreeBuilder()))
for e in doc.iter(None):
if e.tag in ('class', 'property'):
if 'exts' in e.attrib:
logger.log("{} '{}' has 'exts' attribute which may need editing".format(e.tag.title(), e.get('name')), "W")
# Rather than just split() the text, we'll use re and thus try to preserve whitespace
e.text = ''.join([dochange(x) for x in notGlyphnameRE.split(e.text)])
elif e.tag is ET.Comment:
# Go ahead and look for glyph names in comment text but don't flag as error
e.text = ''.join([dochange(x, False) for x in notGlyphnameRE.split(e.text)])
# and process the tail as this might be valid part of class or property
e.tail = ''.join([dochange(x) for x in notGlyphnameRE.split(e.tail)])
if len(changed):
# Something in classes changed so rewrite it... saving backup
(dn,fn) = os.path.split(args.classfile)
dn = os.path.join(dn, args.paramsobj.sets['main']['backupdir'])
if not os.path.isdir(dn):
os.makedirs(dn)
# Work out backup name based on existing backups
backupname = os.path.join(dn,fn)
nums = [int(re.search(r'\.(\d+)~$',n).group(1)) for n in glob(backupname + ".*~")]
backupname += ".{}~".format(max(nums) + 1 if nums else 1)
logger.log("Backing up input classfile to {}".format(backupname), "P")
# Move the original file to backupname
os.rename(args.classfile, backupname)
# Write the output file
doc.write(args.classfile)
if len(notChanged):
logger.log("{} glyphs renamed, {} NOT renamed in {}: {}".format(len(changed), len(notChanged), args.classfile, ' '.join(notChanged)), "W")
else:
logger.log("All {} glyphs renamed in {}".format(len(changed), args.classfile), "P")
return font
def mergeglyphs(mergefrom, mergeto): # Merge any "moving" anchors (i.e., those starting with '_') into the glyph we're keeping
# Assumption: we are merging one or more component references to just one component; deleting the others
for a in mergefrom['anchor']:
aname = a.element.get('name')
if aname.startswith('_'):
# We want to copy this anchor to the glyph being kept:
for i, a2 in enumerate(mergeto['anchor']):
if a2.element.get('name') == aname:
# Overwrite existing anchor of same name
mergeto['anchor'][i] = a
break
else:
# Append anchor to glyph
mergeto['anchor'].append(a)
def gettempname(f):
''' return a temporary glyph name that, when passed to function f(), returns true'''
# Initialize function attribute for use as counter
if not hasattr(gettempname, "counter"): gettempname.counter = 0
while True:
name = "tempglyph%d" % gettempname.counter
gettempname.counter += 1
if f(name): return name
def glyphsub(m): # Function passed to re.sub() when updating display strings
global csvmap
gname = m.group(1)
return '/' + csvmap[gname] if gname in csvmap else m.group(0)
def cmd() : execute("UFO",doit,argspec)
if __name__ == "__main__": cmd()
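# An illustrative input sketch (contents assumed, matching the documented
# "oldname,newname" CSV format):
#
#   Name,NewName          <- header row, ignored
#   uni0041.alt,A.alt     <- simple rename
#   A.alt,uni0041.alt     <- circular renames resolved via temporary names
#   a.comp,a.base         <- with --mergecomps, merged into an existing glyph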
| 49.197279 | 154 | 0.565715 | 3,309 | 28,928 | 4.93291 | 0.170746 | 0.025914 | 0.012743 | 0.009435 | 0.278135 | 0.215157 | 0.18042 | 0.149482 | 0.13141 | 0.091834 | 0 | 0.004554 | 0.339533 | 28,928 | 587 | 155 | 49.28109 | 0.849785 | 0.203471 | 0 | 0.263158 | 0 | 0.004577 | 0.145512 | 0.025612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016018 | false | 0.036613 | 0.011442 | 0 | 0.04119 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff915cb80b4b2c60c7c3e4a9792e6477f9f90875 | 272 | py | Python | apiclients/client.py | Crossing-Minds/reco-api-benchmarks | f740072a23e88270632a1acda468854e457d5086 | [
"MIT"
] | 3 | 2021-07-21T04:56:10.000Z | 2022-01-24T21:14:45.000Z | apiclients/client.py | Crossing-Minds/reco-api-benchmarks | f740072a23e88270632a1acda468854e457d5086 | [
"MIT"
] | null | null | null | apiclients/client.py | Crossing-Minds/reco-api-benchmarks | f740072a23e88270632a1acda468854e457d5086 | [
"MIT"
] | 1 | 2021-07-21T04:56:11.000Z | 2021-07-21T04:56:11.000Z | from xminds.api.client import CrossingMindsApiClient
from .config import ENVS_HOST
class ApiClientInternal(CrossingMindsApiClient):
ENVS_HOST = ENVS_HOST
def __init__(self, **kwargs):
host = self.ENVS_HOST
super().__init__(host=host, **kwargs)
| 22.666667 | 52 | 0.731618 | 31 | 272 | 6.032258 | 0.516129 | 0.171123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180147 | 272 | 11 | 53 | 24.727273 | 0.838565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff92ec13ffa6d364097db1db2973673b5c2e828a | 4,763 | py | Python | rebiber/arxiv.py | EntilZha/rebiber | 88c9fbef12cae73e2ed6f67ee6f32f39040f8299 | [
"MIT"
] | null | null | null | rebiber/arxiv.py | EntilZha/rebiber | 88c9fbef12cae73e2ed6f67ee6f32f39040f8299 | [
"MIT"
] | null | null | null | rebiber/arxiv.py | EntilZha/rebiber | 88c9fbef12cae73e2ed6f67ee6f32f39040f8299 | [
"MIT"
] | null | null | null | import json
import os
import pickle
from typing import Dict
import bibtexparser
import typer
from functional import pseq
from rich.console import Console
from rebiber.bib2json import normalize_title
console = Console()
app = typer.Typer()
def construct_paper_db(bib_list_file, start_dir=""):
with open(bib_list_file) as f:
filenames = f.readlines()
console.log(f"Loading bibs for {len(filenames)} conferences")
entries = []
original_entries = {}
for file in filenames:
with open(start_dir + file.strip()) as f:
conf_entries = json.load(f)
for title, str_entry in conf_entries.items():
entries.append((title, str_entry))
original_entries[title] = str_entry
console.log(f"Loaded {len(entries)} entries")
console.log("Parsing entries")
bib_db = pseq(entries).smap(parse_entry).dict()
return bib_db, original_entries
def parse_entry(title: str, entry: Dict):
parser = bibtexparser.bparser.BibTexParser(interpolate_strings=False)
return title, parser.parse("".join(entry)).entries[0]
def load_bibfile(bib_path: str):
parser = bibtexparser.bparser.BibTexParser(
interpolate_strings=False, ignore_nonstandard_types=False
)
with open(bib_path) as f:
contents = f.read()
return parser.parse(contents)
def load_or_build_db(bib_list: str, start_dir: str, force: bool = False):
if force or not os.path.exists("/tmp/papers_bib.pickle"):
bib_db, original_entries = construct_paper_db(bib_list, start_dir=start_dir)
console.log("Caching papers")
with open("/tmp/papers_bib.pickle", "wb") as f:
pickle.dump({"bib_db": bib_db, "original_entries": original_entries}, f)
else:
console.log("Loading cached papers")
with open("/tmp/papers_bib.pickle", "rb") as f:
cached = pickle.load(f)
bib_db = cached["bib_db"]
original_entries = cached["original_entries"]
return bib_db, original_entries
@app.command()
def unarxiv(
user_bib_path: str,
bib_list: str = "rebiber/bib_list.txt",
filepath: str = "rebiber/",
):
console.log("Loading bibliography database")
bib_db, original_entries = load_or_build_db(bib_list, start_dir=filepath)
console.log("Loading user bibliography")
bibliography = load_bibfile(user_bib_path)
console.log(f"Read bibliography: {len(bibliography.entries)} Entries")
writer = bibtexparser.bwriter.BibTexWriter()
for entry in bibliography.entries:
if entry["ENTRYTYPE"] == "article":
if "archiveprefix" in entry or "arxiv" in entry.get("url", ""):
entry_title = normalize_title(entry["title"])
if entry_title in bib_db:
new_entry = "".join(original_entries[entry_title])
console.print("[bold red]Original entry:[/bold red]")
user_entry = "".join(writer._entry_to_bibtex(entry))
console.print(f"{user_entry}")
console.print("[bold green]New Entry:[/bold green]")
console.print(f"{new_entry}")
console.print(
"[bold yellow]-------------------------------------------[/bold yellow]"
)
@app.command()
def doi(
user_bib_path: str,
bib_list: str = "rebiber/bib_list.txt",
filepath: str = "rebiber/",
):
console.log("Loading bibliography database")
bib_db, original_entries = load_or_build_db(bib_list, start_dir=filepath)
console.log("Loading user bibliography")
user_bibliography = load_bibfile(user_bib_path)
console.log(f"Read bibliography: {len(user_bibliography.entries)} Entries")
writer = bibtexparser.bwriter.BibTexWriter()
for user_entry in user_bibliography.entries:
entry_title = normalize_title(user_entry["title"])
if "doi" not in user_entry:
if entry_title in bib_db:
db_entry = bib_db[entry_title]
if "doi" in db_entry:
text_user_entry = "".join(writer._entry_to_bibtex(user_entry))
console.print("[bold red]Entry missing DOI:[/bold red]")
console.print(f"{text_user_entry}")
text_db_entry = "".join(original_entries[entry_title])
console.print("[bold green]Entry with DOI:[/bold green]")
console.print(f"DOI: {db_entry['doi']}")
console.print(f"{text_db_entry}")
console.print(
"[bold yellow]-----------------------------------------------------------------[/bold yellow]"
)
if __name__ == "__main__":
app()
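# A usage sketch (file names assumed): the two Typer commands can be invoked as
#
#   python -m rebiber.arxiv unarxiv my_refs.bib
#   python -m rebiber.arxiv doi my_refs.bib --bib-list rebiber/bib_list.txt
#
# Typer derives option names (--bib-list, --filepath) from the parameters above.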
| 35.022059 | 118 | 0.617258 | 574 | 4,763 | 4.893728 | 0.195122 | 0.06942 | 0.032396 | 0.04984 | 0.466358 | 0.395158 | 0.374511 | 0.259879 | 0.212887 | 0.177287 | 0 | 0.000559 | 0.248793 | 4,763 | 135 | 119 | 35.281481 | 0.784516 | 0 | 0 | 0.226415 | 0 | 0 | 0.202603 | 0.054377 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.084906 | 0 | 0.179245 | 0.103774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff960735fdb34528546506bb607e3faee741a1e4 | 927 | py | Python | tests/schema/parser/test_parse_string.py | george-fry/statham-schema | 19aa64de8750001cbc24f2775e0684f2298f840f | [
"MIT"
] | 23 | 2020-06-25T15:55:29.000Z | 2022-03-31T16:51:40.000Z | tests/schema/parser/test_parse_string.py | george-fry/statham-schema | 19aa64de8750001cbc24f2775e0684f2298f840f | [
"MIT"
] | 25 | 2020-02-29T15:32:35.000Z | 2022-03-03T17:22:45.000Z | tests/schema/parser/test_parse_string.py | george-fry/statham-schema | 19aa64de8750001cbc24f2775e0684f2298f840f | [
"MIT"
] | 5 | 2020-10-18T19:14:32.000Z | 2022-03-09T10:40:41.000Z | from typing import Any, Dict
import pytest
from statham.schema.elements import Element, String
from statham.schema.parser import parse_element
@pytest.mark.parametrize(
"schema,expected",
[
pytest.param({"type": "string"}, String(), id="with-no-keywords"),
pytest.param(
{
"type": "string",
"default": "sample",
"format": "my_format",
"pattern": ".*",
"minLength": 1,
"maxLength": 3,
},
String(
default="sample",
format="my_format",
pattern=".*",
minLength=1,
maxLength=3,
),
id="with-all-keywords",
),
],
)
def test_parse_string_produces_expected_element(
schema: Dict[str, Any], expected: Element
):
assert parse_element(schema) == expected
| 25.054054 | 74 | 0.499461 | 82 | 927 | 5.536585 | 0.45122 | 0.048458 | 0.07489 | 0.092511 | 0.264317 | 0.264317 | 0.264317 | 0.264317 | 0.264317 | 0.264317 | 0 | 0.006873 | 0.372168 | 927 | 36 | 75 | 25.75 | 0.773196 | 0 | 0 | 0.0625 | 0 | 0 | 0.151025 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.03125 | false | 0 | 0.125 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff968ae04edbff3d50de711811fb00ace4d24f74 | 789 | py | Python | linkedList/445 add two numbers||.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | linkedList/445 add two numbers||.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | linkedList/445 add two numbers||.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | # Definition for singly-linked list.
# Build the result list from tail to head (least- to most-significant digit)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
x1, x2 = 0, 0
while l1 != None:
x1 = x1 * 10 + l1.val
l1 = l1.next
while l2 != None:
x2 = x2 * 10 + l2.val
l2 = l2.next
x = x1 + x2
head = ListNode(x)
if x == 0:
return head
else:
while x:
                a = x % 10  # peel off the least-significant digit
head.next, head.next.next = ListNode(a), head.next
x = x // 10
return head.next
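
# A short driver sketch (helper and values assumed, not from the original):
# builds 7->2->4->3 and 5->6->4, i.e. 7243 + 564 = 7807, and prints the digits.
def _build(nums):
    head = ListNode(nums[0])
    node = head
    for v in nums[1:]:
        node.next = ListNode(v)
        node = node.next
    return head

if __name__ == '__main__':
    res = Solution().addTwoNumbers(_build([7, 2, 4, 3]), _build([5, 6, 4]))
    digits = []
    while res:
        digits.append(str(res.val))
        res = res.next
    print(''.join(digits))  # expected: 7807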
| 22.542857 | 66 | 0.439797 | 96 | 789 | 3.572917 | 0.34375 | 0.093294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076389 | 0.452471 | 789 | 34 | 67 | 23.205882 | 0.717593 | 0.125475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff96bcbff2c0f2f668c7a304aea3d86e2f5859c9 | 5,461 | py | Python | xrdinfo/xrd_all_methods.py | JyrgenSuvalov/X-Road-scripts | 42ec7279fc1404f7e37fbc0676f55f4b83052157 | [
"MIT"
] | 11 | 2017-11-10T14:24:30.000Z | 2021-12-17T08:23:46.000Z | xrdinfo/xrd_all_methods.py | JyrgenSuvalov/X-Road-scripts | 42ec7279fc1404f7e37fbc0676f55f4b83052157 | [
"MIT"
] | 5 | 2017-11-10T11:50:46.000Z | 2021-04-14T10:28:21.000Z | xrdinfo/xrd_all_methods.py | JyrgenSuvalov/X-Road-scripts | 42ec7279fc1404f7e37fbc0676f55f4b83052157 | [
"MIT"
] | 8 | 2017-10-04T13:37:39.000Z | 2020-04-28T13:35:31.000Z | #!/usr/bin/python3
from threading import Thread, Event
import argparse
import xrdinfo
import queue
import sys
# By default return listMethods
DEFAULT_METHOD = 'listMethods'
# Default timeout for HTTP requests
DEFAULT_TIMEOUT = 5.0
# Do not use threading by default
DEFAULT_THREAD_COUNT = 1
def print_error(content):
"""Error printer."""
content = "ERROR: {}\n".format(content)
sys.stderr.write(content)
def worker(params):
while True:
# Checking periodically if it is the time to gracefully shutdown
# the worker.
try:
subsystem = params['work_queue'].get(True, 0.1)
except queue.Empty:
if params['shutdown'].is_set():
return
else:
continue
try:
if params['rest']:
for method in xrdinfo.methods_rest(
addr=params['url'], client=params['client'], producer=subsystem,
method=params['method'], timeout=params['timeout'], verify=params['verify'],
cert=params['cert']):
line = xrdinfo.identifier(method) + '\n'
# Using thread safe "write" instead of "print"
sys.stdout.write(line)
else:
for method in xrdinfo.methods(
addr=params['url'], client=params['client'], producer=subsystem,
method=params['method'], timeout=params['timeout'],
verify=params['verify'],
cert=params['cert']):
line = xrdinfo.identifier(method) + '\n'
# Using thread safe "write" instead of "print"
sys.stdout.write(line)
except Exception as e:
print_error('{}: {}'.format(type(e).__name__, e))
finally:
params['work_queue'].task_done()
def main():
parser = argparse.ArgumentParser(
description='X-Road listMethods request to all members.',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='By default peer TLS certificate is not validated.'
)
parser.add_argument(
'url', metavar='SERVER_URL',
help='URL of local Security Server accepting X-Road requests.')
parser.add_argument(
'client', metavar='CLIENT',
help='Client identifier consisting of slash separated Percent-Encoded parts (e.g. '
'"INSTANCE/MEMBER_CLASS/MEMBER_CODE/SUBSYSTEM_CODE" '
'or "INSTANCE/MEMBER_CLASS/MEMBER_CODE").')
parser.add_argument('-t', metavar='TIMEOUT', help='timeout for HTTP query', type=float)
parser.add_argument('--allowed', help='return only allowed methods', action='store_true')
parser.add_argument('--rest', help='return REST methods instead of SOAP', action='store_true')
parser.add_argument('--threads', metavar='THREADS', help='amount of threads to use', type=int)
parser.add_argument(
'--verify', metavar='CERT_PATH',
help='validate peer TLS certificate using CA certificate file.')
parser.add_argument(
'--cert', metavar='CERT_PATH', help='use TLS certificate for HTTPS requests.')
parser.add_argument('--key', metavar='KEY_PATH', help='private key for TLS certificate.')
parser.add_argument(
'--instance', metavar='INSTANCE',
help='use this instance instead of local X-Road instance.')
args = parser.parse_args()
params = {
'url': args.url,
'client': xrdinfo.identifier_parts(args.client),
'method': DEFAULT_METHOD,
'instance': None,
'timeout': DEFAULT_TIMEOUT,
'verify': False,
'cert': None,
'rest': args.rest,
'thread_cnt': DEFAULT_THREAD_COUNT,
'work_queue': queue.Queue(),
'shutdown': Event()
}
if not (len(params['client']) in (3, 4)):
print_error('Client name is incorrect: "{}"'.format(args.client))
exit(1)
if args.allowed:
params['method'] = 'allowedMethods'
if args.instance:
params['instance'] = args.instance
if args.t:
params['timeout'] = args.t
if args.verify:
params['verify'] = args.verify
if args.cert and args.key:
params['cert'] = (args.cert, args.key)
if args.threads and args.threads > 0:
params['thread_cnt'] = args.threads
shared_params = None
try:
shared_params = xrdinfo.shared_params_ss(
addr=args.url, instance=params['instance'], timeout=params['timeout'],
verify=params['verify'], cert=params['cert'])
except xrdinfo.XrdInfoError as e:
print_error('Cannot download Global Configuration: {}'.format(e))
exit(1)
# Create and start new threads
threads = []
for _ in range(params['thread_cnt']):
t = Thread(target=worker, args=(params,))
t.daemon = True
t.start()
threads.append(t)
# Populate the queue
try:
for subsystem in xrdinfo.registered_subsystems(shared_params):
params['work_queue'].put(subsystem)
except xrdinfo.XrdInfoError as e:
print_error(e)
exit(1)
# Block until all tasks in queue are done
params['work_queue'].join()
# Set shutdown event and wait until all daemon processes finish
params['shutdown'].set()
for t in threads:
t.join()
if __name__ == '__main__':
main()
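# A command-line sketch (server URL and identifiers assumed): list the methods
# of every registered subsystem through a local Security Server, 4 threads:
#
#   python3 xrd_all_methods.py https://ss.example.org \
#       INSTANCE/MEMBER_CLASS/MEMBER_CODE/SUBSYSTEM_CODE --threads 4 --allowed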
| 33.919255 | 100 | 0.600256 | 623 | 5,461 | 5.154093 | 0.284109 | 0.028029 | 0.052943 | 0.024292 | 0.216755 | 0.183121 | 0.163189 | 0.13952 | 0.13952 | 0.123326 | 0 | 0.003036 | 0.27614 | 5,461 | 160 | 101 | 34.13125 | 0.809259 | 0.081121 | 0 | 0.196721 | 0 | 0 | 0.229754 | 0.017397 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02459 | false | 0 | 0.040984 | 0 | 0.07377 | 0.040984 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff98472de29779514bd38ec9ab5d58884f067f9e | 3,013 | py | Python | web/handlers_public_api.py | PCNI/homeless-helper | 2216199da2e4b41be6eb5c287d9c1a054fec3945 | [
"MIT"
] | 3 | 2015-05-04T14:52:10.000Z | 2020-02-22T07:03:20.000Z | web/handlers_public_api.py | PCNI/homeless-helper | 2216199da2e4b41be6eb5c287d9c1a054fec3945 | [
"MIT"
] | 1 | 2015-01-12T18:25:03.000Z | 2015-01-12T18:53:33.000Z | web/handlers_public_api.py | PCNI/homeless-helper | 2216199da2e4b41be6eb5c287d9c1a054fec3945 | [
"MIT"
] | null | null | null | import os
import sys
import time
import random
import string
import json
import hashlib
import socket
import urllib
import urllib2
from pprint import pprint
import tornado.web
import handlers_base
BaseHandler = handlers_base.BaseHandler
import twilio.twiml
from HomelessHelper.config import Config
from HomelessHelper.database import DbPrimary
from HomelessHelper.system import System
from HomelessHelper.tools import Tools
from HomelessHelper.resource import Resource
from HomelessHelper.google_geo import GoogleGeo
config = Config()
## system
class SystemHealth(BaseHandler):
def get(self):
system = System(self.db, config)
self.write(system.health())
class SystemVersion(BaseHandler):
def get(self):
system = System(self.db, config)
self.write(system.version())
class ResourceNew(BaseHandler):
def post(self):
token = self.get_argument('token', None)
resource_type = self.get_argument('resource_type', None)
va_status = self.get_argument('va_status', None)
name_1 = self.get_argument('name_1', None)
name_2 = self.get_argument('name_2', None)
street_1 = self.get_argument('street_1', None)
street_2 = self.get_argument('street_2', None)
city = self.get_argument('city', None)
state = self.get_argument('state', None)
zipcode = self.get_argument('zipcode', None)
lat = self.get_argument('lat', None)
lng = self.get_argument('lng', None)
phone = self.get_argument('phone', None)
url = self.get_argument('url', None)
hours = self.get_argument('hours', None)
notes = self.get_argument('notes', None)
resource = Resource(self.db, config)
self.write(resource.public_api_new(token, resource_type, va_status, name_1, name_2, street_1, street_2, city, state, zipcode, lat, lng, phone, url, hours, notes))
class ResourceGet(BaseHandler):
def get(self):
resource_id = self.get_argument('resource_id', None)
resource = Resource(self.db, config)
self.write(resource.public_api_get(resource_id))
class ResourceAddBed(BaseHandler):
def post(self):
token = self.get_argument('token', None)
resource_id = self.get_argument('resource_id', None)
resource = Resource(self.db, config)
self.write(resource.public_api_add_bed(token, resource_id))
class ResourceDelBed(BaseHandler):
def post(self):
token = self.get_argument('token', None)
resource_id = self.get_argument('resource_id', None)
resource = Resource(self.db, config)
self.write(resource.public_api_del_bed(token, resource_id))
class ResourceUpdateBed(BaseHandler):
def post(self):
token = self.get_argument('token', None)
number = self.get_argument('number', None)
resource_id = self.get_argument('resource_id', None)
resource = Resource(self.db, config)
self.write(resource.public_api_update_bed(token, resource_id, number))
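# A hedged routing sketch (URL paths assumed; the real routing table lives
# elsewhere in the project, as does injection of self.db into BaseHandler):
#
# application = tornado.web.Application([
#     (r"/system/health", SystemHealth),
#     (r"/system/version", SystemVersion),
#     (r"/resource/new", ResourceNew),
#     (r"/resource/get", ResourceGet),
# ])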
| 33.853933 | 170 | 0.699303 | 387 | 3,013 | 5.263566 | 0.191214 | 0.082474 | 0.17673 | 0.054983 | 0.398625 | 0.376043 | 0.376043 | 0.376043 | 0.376043 | 0.376043 | 0 | 0.005354 | 0.194159 | 3,013 | 88 | 171 | 34.238636 | 0.833608 | 0.001991 | 0 | 0.297297 | 0 | 0 | 0.053262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094595 | false | 0 | 0.27027 | 0 | 0.459459 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ff9927cca6dd5850053f49e3edbc6473af7f6bc7 | 587 | py | Python | falcon/web.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 4 | 2015-11-25T09:06:44.000Z | 2019-12-11T21:35:21.000Z | falcon/web.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | null | null | null | falcon/web.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 2 | 2015-11-25T09:19:38.000Z | 2016-02-26T03:54:06.000Z | import falcon
class WebResource:
def on_get(self, req, resp):
"""Handles GET requests"""
resp.status = falcon.HTTP_200 # This is the default status
resp.body = ('Test OK!')
# falcon.API instances are callable WSGI apps
app = falcon.API()
# Resources are represented by long-lived class instances
web = WebResource()
# things will handle all requests to the '/' URL path
app.add_route('/', web)
# gunicorn -w 4 -k eventlet --threads 100 --worker-connections 100 -b localhost:1972 --log-level info --error-logfile - --log-file - --access-logfile - web:app | 32.611111 | 159 | 0.688245 | 84 | 587 | 4.77381 | 0.75 | 0.044888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029474 | 0.190801 | 587 | 18 | 159 | 32.611111 | 0.814737 | 0.609881 | 0 | 0 | 0 | 0 | 0.040909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
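# A quick smoke test once gunicorn is up (port taken from the command above):
#   curl http://localhost:1972/        # -> "Test OK!" with HTTP 200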
ff9a5956d10a9a6fb72dea22176041ce89b5315d | 8,803 | py | Python | services/Autocorrect/inc/wikipedia_correction.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | 9 | 2021-03-23T11:32:40.000Z | 2022-03-31T13:58:41.000Z | services/Autocorrect/inc/wikipedia_correction.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | null | null | null | services/Autocorrect/inc/wikipedia_correction.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | 1 | 2021-04-29T21:27:08.000Z | 2021-04-29T21:27:08.000Z | from collections import Counter
import re
from os.path import join
from config import ASSET_PATH
class WikipediaCorrection(object):
def __init__(self):
self.Vocab = [] # Vocab list
self.vocab_n = None # Vocab length
self.probs = {} # Probabilities dict
self.init_vocab()
def init_vocab(self):
""" Loads probabilities dict, vocab list and calculates its length """
with open(join(ASSET_PATH, 'wiki_en_vocab_probs.csv'), 'r') as file:
content = file.read().splitlines()
for line in content:
k, v = line.split(',')
self.probs[k] = float(v)
self.Vocab.append(k)
self.vocab_n = len(self.Vocab)
def __get_word_splits(self, word):
return [(word[:i], word[i:]) for i in range(len(word) + 1)]
def __delete_letter(self, split_l):
return [L + R[1:] for L, R in split_l if R]
def __switch_letter(self, split_l):
return [L + R[1] + R[0] + R[2:] for L, R in split_l if len(R) > 1]
def __replace_letter(self, split_l):
letters = 'abcdefghijklmnopqrstuvwxyz'
return [L + c + R[1:] for L, R in split_l for c in letters if R and c != R[0]]
def __insert_letter(self, split_l):
letters = 'abcdefghijklmnopqrstuvwxyz'
return [L + c + R[0:] for L, R in split_l for c in letters]
def edit_one_letter(self, word, allow_switches=True):
"""
Input:
            word: the string/word for which we will generate all possible words that are one edit away.
        Output:
            edit_one_set: a set of words with one possible edit (returned as a set, not a list).
"""
split_l = self.__get_word_splits(word)
all_l = self.__insert_letter(split_l) + self.__replace_letter(split_l) + self.__delete_letter(split_l)
if allow_switches:
all_l = all_l + self.__switch_letter(split_l)
edit_one_set = set(all_l)
return edit_one_set
def edit_two_letters(self, word, allow_switches=True):
"""
Input:
word: the input string/word
Output:
edit_two_set: a set of strings with all possible two edits
"""
edit_two_set = set(
[e2 for e1 in self.edit_one_letter(word, allow_switches) for e2 in self.edit_one_letter(e1, allow_switches)])
return edit_two_set
def __filter_edits(self, edits, n):
suggestions = {}
# [suggestions.append((e, self.probs[e])) for e in edits if e in self.Vocab]
filtered_edits = [e for e in edits if e in self.Vocab]
for e in filtered_edits:
suggestions[e] = self.probs[e]
counter = Counter(suggestions)
n_best = counter.most_common(n)
return n_best
def get_corrections(self, word, n=1):
"""
Input:
word: a user entered string to check for suggestions
n: number of possible word corrections you want returned in the dictionary
Output:
n_best: a list of tuples with the most probable n corrected words and their probabilities.
"""
suggestions = {}
n_best = []
word = word.lower()
# First Priority
if word in self.Vocab:
# suggestions.append((word, self.probs[word]))
return word, self.probs[word]
# 2nd priority: fewer edits, i.e. candidates one edit away
edits = self.edit_one_letter(word)
n_best = self.__filter_edits(edits, n)
if len(n_best) > 0:
return n_best[0] # Short Circuit
# 3rd Priority
edits = self.edit_two_letters(word)
n_best = self.__filter_edits(edits, n)
if len(n_best) > 0:
return n_best[0] # Short Circuit
else:
return word, 0.0 # Worst case, if nothing worked, return the word itself, prob = 0.0
if __name__ == '__main__':
print("==============================Test 1=========================================")
wikiCorrect = WikipediaCorrection()
tmp_word = "at"
tmp_edit_one_set = wikiCorrect.edit_one_letter(tmp_word)
# turn this into a list to sort it, in order to view it
tmp_edit_one_l = sorted(list(tmp_edit_one_set))
print(f"input word {tmp_word} \nedit_one_l \n{tmp_edit_one_l}\n")
print(f"The type of the returned object should be a set {type(tmp_edit_one_set)}")
print(f"Number of outputs from edit_one_letter('at') is {len(wikiCorrect.edit_one_letter('at'))}")
"""
*********** Expected Output **************
input word at
edit_one_l
['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']
The type of the returned object should be a set <class 'set'>
Number of outputs from edit_one_letter('at') is 129
"""
print("==============================Test 2=========================================")
tmp_edit_two_set = wikiCorrect.edit_two_letters("a")
tmp_edit_two_l = sorted(list(tmp_edit_two_set))
print(f"Number of strings with edit distance of two: {len(tmp_edit_two_l)}")
print(f"First 10 strings {tmp_edit_two_l[:10]}")
print(f"Last 10 strings {tmp_edit_two_l[-10:]}")
print(f"The data type of the returned object should be a set {type(tmp_edit_two_set)}")
print(f"Number of strings that are 2 edit distances from 'at' is {len(wikiCorrect.edit_two_letters('at'))}")
"""
*********** Expected Output **************
Number of strings with edit distance of two: 2654
First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']
Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']
The data type of the returned object should be a set <class 'set'>
Number of strings that are 2 edit distances from 'at' is 7154
"""
print("==============================Test 3=========================================")
vals = ['Spaziano . Florida', \
'Smith v/ Maryland', \
'SEC v. Texas Gulf Sumphur Co.', \
'Reieer v. Thompso', \
'Reed v. Pennsylvania Railroad Compan|', \
'Building Service Employees International Union Local 262 v/ Gazzam', \
'Ramspeck v. Federal Trial Exainers Conference', \
'Cowma Dairy Company v. United States', \
'Noswood v. Kirkpatrick', \
'Mongomery Building & Construction Trades Council v. Ledbetter Erection Company', \
'Southern Pacfic Company v. Gileo', \
'Colgate-Palmolive-Peft Company v. National Labor Relations Board', \
'Unitee States v. United States Smelting Refining', \
'Poizzi v. Cowles Magazies']
expected = ['Spaziano v. Florida', \
'Smith v. Maryland', \
'SEC v. Texas Gulf Sulphur Co', \
'Reider v. Thompson ', \
'Reed v. Pennsylvania Railroad Company', \
'Building Service Employees International Union Local 262 v. Gazzam', \
'ramspeck v. federal trial examiners conference', \
'Bowman Dairy Company v. United States', \
'Norwood v. Kirkpatrick', \
'Montgomery Building & Construction Trades Council v. Ledbetter Erection Company', \
'Southern Pacific Company v. Gileo', \
'Colgate-Palmolive-Peet Company v. National Labor Relations Board', \
'United States v. United States Smelting Refining', \
'Polizzi v. Cowles Magazines']
cnt = 0
for val, exp in zip(vals, expected):
words = re.findall(r'\w+', val)
# fix word by word in the given value
corrections = [wikiCorrect.get_corrections(word) for word in words]
res = ' '.join([c[0] for c in corrections])
print(res)
if res.lower() == exp.lower():  # compare case-insensitively
cnt = cnt + 1
print((cnt / len(vals)) * 100) | 47.074866 | 852 | 0.564239 | 1,182 | 8,803 | 4.051607 | 0.331641 | 0.02631 | 0.024431 | 0.013364 | 0.370432 | 0.32345 | 0.287743 | 0.284193 | 0.229275 | 0.167049 | 0 | 0.009986 | 0.271953 | 8,803 | 187 | 853 | 47.074866 | 0.737245 | 0.122004 | 0 | 0.083333 | 0 | 0.008333 | 0.321191 | 0.093313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091667 | false | 0 | 0.033333 | 0.025 | 0.233333 | 0.108333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
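One easy speed-up for the class above, sketched under the assumption that init_vocab keeps probs and Vocab in lockstep (which it does): __filter_edits tests each candidate with membership in self.Vocab, a linear scan over a list, while the probs dict already holds the same keys and gives O(1) lookups:

def __filter_edits(self, edits, n):
    # self.probs has exactly the vocabulary words as keys, so dict membership
    # replaces the O(len(Vocab)) list scan for every candidate edit
    suggestions = {e: self.probs[e] for e in edits if e in self.probs}
    return Counter(suggestions).most_common(n)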
ff9b138f03c8da437646fca20eb6180ca1f35e5b | 347 | py | Python | finorch/sessions/cit/wrapper.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | finorch/sessions/cit/wrapper.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | finorch/sessions/cit/wrapper.py | ADACS-Australia/SS2021B-DBrown | 67b93b316e6f9ab09e3bd5105edbbc71108e0723 | [
"MIT"
] | null | null | null | from pathlib import Path
import finesse
from finorch.sessions.abstract_wrapper import AbstractWrapper
class CITWrapper(AbstractWrapper):
def run(self):
with open('script.k', 'r') as script_file:
    katscript = script_file.read()
kat = finesse.Model()
kat.parse(katscript)
out = kat.run()
finesse.save(out, Path.cwd() / "data.pickle")
| 21.6875 | 61 | 0.654179 | 41 | 347 | 5.512195 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224784 | 347 | 15 | 62 | 23.133333 | 0.840149 | 0 | 0 | 0 | 0 | 0 | 0.057637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
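In normal operation finorch's session machinery drives this wrapper, but it can also be invoked directly; a sketch assuming finesse is installed, a valid script.k sits in the working directory, and AbstractWrapper imposes no further abstract hooks:

from finorch.sessions.cit.wrapper import CITWrapper

CITWrapper().run()   # parses ./script.k with finesse, writes ./data.pickle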
ff9b480b8efa2a0703a1bdaca93521ce030786a1 | 1,048 | py | Python | app/api/tokens.py | simal1717/Backend_Airbnb_237 | d155882cb1e779e648629dd41a1c2e7c83436a90 | [
"MIT"
] | null | null | null | app/api/tokens.py | simal1717/Backend_Airbnb_237 | d155882cb1e779e648629dd41a1c2e7c83436a90 | [
"MIT"
] | null | null | null | app/api/tokens.py | simal1717/Backend_Airbnb_237 | d155882cb1e779e648629dd41a1c2e7c83436a90 | [
"MIT"
] | null | null | null | from flask import jsonify, g, session
from app import db
from app.api import bp
from app.api.auth import basic_auth, token_auth
# Tokens are issued via POST (not GET): creating a token changes server state.
@bp.route('/tokens', methods=['POST'])
@basic_auth.login_required
def get_token():
token = g.current_user.get_token()
db.session.commit()
# Alternative: encode token, user_id and expiry as a JWT signed with
# current_app.config['SECRET_KEY'] instead of returning plain JSON.
payload = {'token': token, 'user_id': g.current_user.id,
'expire': g.current_user.token_expiration}
return jsonify(payload)
@bp.route('/tokens', methods=['DELETE'])
@token_auth.login_required
def revoke_token():
g.current_user.revoke_token()
db.session.commit()
return '', 204
| 29.111111 | 77 | 0.660305 | 145 | 1,048 | 4.593103 | 0.372414 | 0.148649 | 0.144144 | 0.06006 | 0.189189 | 0.189189 | 0.189189 | 0.189189 | 0.189189 | 0.189189 | 0 | 0.022145 | 0.181298 | 1,048 | 35 | 78 | 29.942857 | 0.754079 | 0.371183 | 0 | 0.111111 | 0 | 0 | 0.064915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
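For reference, a typical client exchange with these two endpoints, assuming the blueprint is registered under /api on localhost:5000 (both the prefix and the host are configured outside this file):

import requests

BASE = 'http://localhost:5000/api'   # assumed mount point and host

# POST /tokens with HTTP Basic credentials to obtain a bearer token
r = requests.post(BASE + '/tokens', auth=('user@example.com', 'password'))
token = r.json()['token']

# DELETE /tokens with the bearer token to revoke it (expects 204)
r = requests.delete(BASE + '/tokens',
                    headers={'Authorization': 'Bearer ' + token})
assert r.status_code == 204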
ff9e05c2ec37c37c723574b5d5401eb33290ab98 | 1,608 | py | Python | bsbolt/Utils/MatrixIterator.py | HarryZhang1224/BSBolt | a5df4db8497d7c0274f5f12041c87472c06ff78e | [
"MIT"
] | 10 | 2020-04-05T00:44:44.000Z | 2022-02-11T15:11:00.000Z | bsbolt/Utils/MatrixIterator.py | HarryZhang1224/BSBolt | a5df4db8497d7c0274f5f12041c87472c06ff78e | [
"MIT"
] | 7 | 2019-01-25T22:14:40.000Z | 2022-01-17T18:58:05.000Z | bsbolt/Utils/MatrixIterator.py | HarryZhang1224/BSBolt | a5df4db8497d7c0274f5f12041c87472c06ff78e | [
"MIT"
] | 5 | 2019-07-19T22:57:36.000Z | 2021-11-12T14:57:37.000Z | #! /usr/env python3
import gzip
import io
from typing import List, Tuple, Union
import numpy as np
class OpenMatrix:
""" Simple class to simple class to iterate through bsbolt matrix file
------------------------------------------------------------------------------------
input: path to matrix file
returns: matrix iteration object"""
def __init__(self, matrix: str = None):
self.header = False
if matrix.endswith(".gz"):
self.f = io.BufferedReader(gzip.open(matrix, 'rb'))
else:
self.f = open(matrix, 'r')
def __iter__(self):
with self.f as matrix:
while True:
line = matrix.readline()
if not line.strip():
break
line = self.process_line(line)
yield line
def process_line(self, line) -> Tuple[str, Union[List[str], np.ndarray]]:
converted_line = self.line_conversion(line)
if not self.header:
self.header = True
return converted_line[0], converted_line[1:]
return converted_line[0], np.asarray([self.convert_to_float(value) for value in converted_line[1:]])
@staticmethod
def line_conversion(line) -> List[str]:
if isinstance(line, bytes):
return line.decode('utf-8').replace('\n', '').split('\t')
else:
return line.replace('\n', '').split('\t')
@staticmethod
def convert_to_float(value: str) -> Union[float, None]:
try:
return float(value)
except ValueError:
return np.nan
| 31.529412 | 108 | 0.548507 | 186 | 1,608 | 4.629032 | 0.435484 | 0.075494 | 0.030197 | 0.046458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005263 | 0.291045 | 1,608 | 50 | 109 | 32.16 | 0.75 | 0.143035 | 0 | 0.108108 | 0 | 0 | 0.013981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.108108 | 0 | 0.432432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
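Usage is plain iteration: the first yielded tuple is the header (row label plus the remaining header fields) and every later tuple pairs a site with a float array in which np.nan marks unparseable values. A small sketch, with the module path and input file assumed:

import numpy as np
from bsbolt.Utils.MatrixIterator import OpenMatrix   # package path assumed

rows = iter(OpenMatrix('matrix.txt.gz'))   # hypothetical bsbolt matrix file
label, samples = next(rows)                # header row comes first
for site, values in rows:
    print(site, np.nanmean(values))        # values: np.ndarray, np.nan = missing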
ff9ee3da1f88b893679112e185450b21bd4f45aa | 1,036 | py | Python | Leetcode/Python/_1694.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | 1 | 2021-11-28T15:03:32.000Z | 2021-11-28T15:03:32.000Z | Leetcode/Python/_1694.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | null | null | null | Leetcode/Python/_1694.py | Xrenya/algorithms | aded82cacde2f4f2114241907861251e0e2e5638 | [
"MIT"
] | null | null | null | class Solution:
def reformatNumber(self, number: str) -> str:
number = number.replace('-', '')
number = number.replace(' ', '')
string = ''
while len(number) > 4:
string += number[:3] + '-'
number = number[3:]
if len(number) == 4:
string += number[:2] + '-' + number[2:4]
else:
string += number
return string
class Solution:
# Recursion
def reformatNumber(self, number: str) -> str:
number = number.replace('-', '')
number = number.replace(' ', '')
phone_str = ''
return self.solver(phone_str, number)
def solver(self, phone_str, number):
if len(number) == 2 or len(number) == 3:
return number
elif len(number) == 4:
return number[:2] + '-' + number[2:]
else:
phone_str = number[:3]
number = number[3:]
return phone_str + '-' + self.solver(phone_str, number)
| 29.6 | 67 | 0.478764 | 104 | 1,036 | 4.711538 | 0.211538 | 0.110204 | 0.155102 | 0.110204 | 0.546939 | 0.289796 | 0.289796 | 0.289796 | 0.289796 | 0.289796 | 0 | 0.021638 | 0.375483 | 1,036 | 34 | 68 | 30.470588 | 0.735703 | 0.008687 | 0 | 0.428571 | 0 | 0 | 0.007805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
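Both variants behave the same; exercising them on LeetCode 1694's own examples:

s = Solution()
print(s.reformatNumber("1-23-45 6"))   # -> 123-456
print(s.reformatNumber("123 4-567"))   # -> 123-45-67  (never leaves a lone digit)
print(s.reformatNumber("123 4-5678"))  # -> 123-456-78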
ffa0775e71e040481abc339f4098a4d9e15eabd9 | 10,989 | py | Python | devro/env/components.py | abnvar/devro | 25a9e996e73494728595d3135451c9fceb5f0324 | [
"MIT"
] | 8 | 2020-04-19T17:03:44.000Z | 2021-04-30T20:22:14.000Z | devro/env/components.py | abnvar/SLAM-devTools | 25a9e996e73494728595d3135451c9fceb5f0324 | [
"MIT"
] | 4 | 2020-04-22T17:25:18.000Z | 2020-04-29T15:43:36.000Z | devro/env/components.py | abnvar/SLAM-devTools | 25a9e996e73494728595d3135451c9fceb5f0324 | [
"MIT"
] | null | null | null | import cv2
import time
import simpy
import random
import threading
import numpy as np
from math import sin, cos
from devro.visualization import display
class DynObstacle():
def __init__(self, radius, velocity, pixelSpan, direction):
self.velocity = velocity
self.position = {"x":random.randint(0,pixelSpan),"y":random.randint(0,pixelSpan)}
self.radius = radius
self.direction = direction
def updatePosition(self, dt):
self.position = {"x": self.position["x"]+ self.velocity*cos(self.direction)*dt,
"y": self.position["y"]+ self.velocity*sin(self.direction)*dt}
def switchDirection(self):
self.velocity = -self.velocity
class Bot():
'''
Class to hold the robot properties
...
Attributes
----------
scanner : class
Adds a scanner object to the robot.
wheelDist : float
distance between the wheels center (in meters)
Methods
-------
None
'''
def __init__(self, leftMotor, rightMotor, wheelDist, scanner = None):
self.scanner = scanner
self.wheelDist = wheelDist
self.leftMotor = leftMotor
self.rightMotor = rightMotor
self.vl = 0
self.vr = 0
self.theta = 0
self.omega = 0
self.vx = 0
self.vy = 0
self.map_ = None
def setInitPos(self, x, y):
self.x = x
self.y = y
def setDestPos(self, x, y):
self.destX = x
self.destY = y
def attachSim(self, sim, envMap):
self.sim = sim
self.map_ = envMap
self.setInitPos(50, sim.pixelSpan - 50)
self.setDestPos(sim.pixelSpan - 50, 50)
self.wheelDist = int(self.wheelDist * (sim.pixelSpan/sim.distSpan))
if self.scanner is not None:
self.scanner.attachBot(self)
def updateEncoders(self, dt):
if self.leftMotor.encoder != None:
self.leftMotor.updateEncoder(dt)
if self.rightMotor.encoder != None:
self.rightMotor.updateEncoder(dt)
def collisionHandler(self, clearance):
collision = False
collDir = 0
if collision is False:
numPts = 0
for phi in range(360):
pt = (int(self.x + self.wheelDist*cos(phi*np.pi/180)/2), int(self.y + self.wheelDist*sin(phi*np.pi/180)/2))
val = self.map_[pt[1]][pt[0]]
# if black
if val < 127:
collision = True
collDir += phi
numPts += 1
collDir = collDir/numPts if numPts != 0 else 0 # average of the direction of pixels of obstacles
if abs(collDir%360 - (180/np.pi)*self.theta%360) > 90:
collision = False
return collision, collDir
def drive(self, dt):
clearance = self.wheelDist/2
collision, collDir = self.collisionHandler(clearance)
# self.sim.active = (self.x > clearance and self.y > clearance and self.x < self.sim.pixelSpan-clearance and self.y < self.sim.pixelSpan-clearance)
v = (self.vl + self.vr)/2
if collision is True:
phi = (collDir+90)*np.pi/180
self.vx = v*cos(self.theta-phi)*cos(phi)
self.vy = v*cos(self.theta-phi)*sin(phi)
else:
self.vx = v*cos(self.theta)
self.vy = v*sin(self.theta)
if (self.x + self.vx*dt > clearance) and (self.x + self.vx*dt < self.sim.pixelSpan-clearance):
self.x += self.vx*dt
if (self.y + self.vy*dt > clearance) and (self.y + self.vy*dt < self.sim.pixelSpan-clearance):
self.y += self.vy*dt
self.omega = (self.vl-self.vr)/(self.wheelDist)
self.theta += self.omega*dt
self.updateEncoders(dt)
def setVel(self, vl, vr):
self.leftMotor.setSpeed(vl)
self.rightMotor.setSpeed(vr)
self.vl = self.leftMotor.groundVelocity() * (self.sim.pixelSpan/self.sim.distSpan)
self.vr = self.rightMotor.groundVelocity() * (self.sim.pixelSpan/self.sim.distSpan)
def plotBot(self, canvas):
canvas = cv2.circle(canvas, (int(self.x), int(self.y)), self.wheelDist//2, (0,0,0), -self.wheelDist//2) # Robot
canvas = cv2.putText(canvas, 'x', (self.destX, self.destY), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2) # Dest cross
canvas = cv2.line(canvas, (int(self.x), int(self.y)), (int(self.x+self.wheelDist*cos(self.theta)), int(self.y+self.wheelDist*sin(self.theta))), (0,0,0), 2) # direction line
if self.scanner is not None:
angle = self.scanner.fieldAngle*180/np.pi
rangeRad = int(self.scanner.range_*(self.sim.pixelSpan/self.sim.distSpan))
canvas = cv2.ellipse(canvas, (int(self.x), int(self.y)), (rangeRad, rangeRad), (180/np.pi)*self.theta-angle//2, 0, angle, (255,0,0), 2) # scanner range
minRad = int(self.scanner.minDist*(self.sim.pixelSpan/self.sim.distSpan))
canvas = cv2.ellipse(canvas, (int(self.x), int(self.y)), (minRad, minRad), (180/np.pi)*self.theta-angle//2, 0, angle, (127,0,127), 2) # scanner minDist
return canvas
def updateMap(self, img):
self.map_ = img
def reset(self):
self.theta = 0
self.omega = 0
self.setVel(0, 0)
self.setInitPos(50, self.sim.pixelSpan - 50)
def scannerToWorld(self,pose, point):
""" Given a robot pose (xR, yR, thetaR) and a point (xL, yL) from a
landmark in the scanner's coordinate system, return the point's
coordinates in the world coordinate system. The point could be a
landmark, or any point measured by the scanner sensor.
Args:
pose (tuple): Robot's pose.
point (tuple): The point to transform to world coordinates.
Returns:
tuple: The transformed point to the world coordinate system.
"""
scannerPose = ( pose[0] + cos(pose[2]) * self.scanner.distToRobotCenter,
pose[1] + sin(pose[2]) * self.scanner.distToRobotCenter,
pose[2] )
x, y = point
# apply the 2D rotation for the scanner heading, then translate by the scanner pose
return (x * cos(scannerPose[2]) - y * sin(scannerPose[2]) + scannerPose[0],
x * sin(scannerPose[2]) + y * cos(scannerPose[2]) + scannerPose[1])
class Simulation(threading.Thread):
'''
The head simulation class to hold the simulation parameters.
...
Attributes
----------
pixelSpan : int
number of pixels in one row of the simulation screen (sim is always square)
distSpan : int
distance (in meters) in actual world corresponding to each row of the simulation screen
dt : int
one time step of simulation (in milliseconds)
envMap : numpy image array (pixelSpan x pixelSpan)
Map of the simulation world
bot : class
object of the Bot class
visualize : bool
whether to visualize the simulation or not
Methods
-------
begin()
starts the simpy simulation
showSimulation()
updates the environment frame on the display component
showScanner()
updates the scanner output fram on the display component
'''
def __init__(self, pixelSpan = 720, distSpan = 10, dt = 100, envMap = None, bot = None, visualize = True):
threading.Thread.__init__(self)
self.daemon = True
self.pixelSpan = pixelSpan
self.distSpan = distSpan
self.dt = dt/1000 # convert milliseconds to seconds
self.envMap = envMap
self.currMap = envMap
self.bot = bot
self.visualize = visualize
self.env = simpy.RealtimeEnvironment(strict=False)
self.active = True
self.paused = False
self.obstacles = []
self.landmarks = None
if self.visualize is True:
self.win = display.Window('Simulation', height = self.pixelSpan, dt = dt, endSimFunc = self.end, toggleSimFunc = self.toggle, scale = 0.7)
bot.attachSim(self, self.currMap)
self.stepProc = self.env.process(self.step(self.env))
def step(self, env):
while self.active:
if self.paused is False:
self.updateObstacles()
self.bot.drive(self.dt)
if self.visualize == True:
self.showEnv()
self.plotLandmarks()
yield env.timeout(self.dt)
def toggle(self):
if self.paused is True:
self.paused = False
else:
self.paused = True
def run(self):
self.env.run(until=self.stepProc)
self._is_running = False
def hold(self):
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
self.end()
def begin(self):
self.start()
def end(self):
try:
self.win.close()
except:
pass
self.active = False
import os
os._exit(0)
def addDynamicObstacles(self, qty=0, radiusRange=(10,20), maxVelocity=10):
minR, maxR = radiusRange
for x in range(qty):
self.obstacles.append(DynObstacle(radius = random.randint(minR, maxR),
velocity = random.randint(-maxVelocity, maxVelocity),
pixelSpan = self.pixelSpan,
direction = random.uniform(0, 2*np.pi)
))
def updateObstacles(self):
mask = np.asarray(self.envMap, np.uint8)
for obstacle in self.obstacles:
obstacle.updatePosition(self.dt)
if not 0 < int(obstacle.position["x"]) < self.pixelSpan or not 0 < int(obstacle.position["y"]) < self.pixelSpan:
obstacle.switchDirection()
else:
mask = cv2.circle(mask, (int(obstacle.position["x"]), int(obstacle.position["y"])), obstacle.radius, (0,0,0), -obstacle.radius)
self.currMap = mask
self.bot.updateMap(self.currMap)
def reset(self):
self.bot.reset()
self.bot.leftMotor.encoder.reset()
self.bot.rightMotor.encoder.reset()
def showEnv(self):
canvas = np.asarray(self.currMap, np.uint8)
canvas = np.stack((canvas,)*3, axis=-1)
canvas = self.bot.plotBot(canvas)
self.win.setEnvFrame(canvas)
def showScanner(self, img):
self.win.setScannerFrame(img)
def setLandmarks(self, pts):
self.landmarks = pts
def plotLandmarks(self):
if self.landmarks is not None:
mask = np.ones((self.pixelSpan, self.pixelSpan))*255
pts = np.asarray(self.landmarks).T
scaler = (self.pixelSpan/self.distSpan)
for x,y in pts:
mask = cv2.circle(mask, (int(x*scaler), int(y*scaler)), 2, (0,0,0), -2)
self.win.setSlamFrame(mask)
| 33.099398 | 183 | 0.57867 | 1,351 | 10,989 | 4.687639 | 0.19393 | 0.017685 | 0.022738 | 0.01579 | 0.16264 | 0.112743 | 0.059372 | 0.030633 | 0.030633 | 0.021791 | 0 | 0.021975 | 0.304304 | 10,989 | 331 | 184 | 33.199396 | 0.806409 | 0.153608 | 0 | 0.082126 | 0 | 0 | 0.002321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140097 | false | 0.004831 | 0.043478 | 0 | 0.21256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
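A minimal way to exercise this module; since no motor or map classes appear in this file, the StubMotor below is an illustrative stand-in whose attribute names are inferred from how Bot uses its motors, and the blank map is made up:

import numpy as np
from devro.env.components import Bot, Simulation

class StubMotor:
    """Illustrative stand-in for devro's real motor class; the attribute
    and method names are assumptions inferred from Bot's usage."""
    def __init__(self):
        self.speed = 0.0
        self.encoder = None            # Bot skips encoder updates when None
    def setSpeed(self, v):
        self.speed = v
    def groundVelocity(self):
        return self.speed              # metres per second, no slip modelled

envMap = np.ones((720, 720)) * 255     # blank world: white (255) is free space
bot = Bot(StubMotor(), StubMotor(), wheelDist=0.3)   # 0.3 m axle width
sim = Simulation(pixelSpan=720, distSpan=10, dt=100,
                 envMap=envMap, bot=bot, visualize=False)
sim.addDynamicObstacles(qty=3)
bot.setVel(0.5, 0.5)                   # drive straight at 0.5 m/s per wheel
sim.begin()                            # stepping runs in a daemon thread
sim.hold()                             # block until Ctrl-C, then shut down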
ffa6f74f43f47f17e2e67712e39ba72803e3ef7d | 4,177 | py | Python | seqsearch/search/hmmer.py | xapple/seqsearch | cf3a7691285d245829bc9c6d18354e01e631fc12 | [
"MIT"
] | null | null | null | seqsearch/search/hmmer.py | xapple/seqsearch | cf3a7691285d245829bc9c6d18354e01e631fc12 | [
"MIT"
] | null | null | null | seqsearch/search/hmmer.py | xapple/seqsearch | cf3a7691285d245829bc9c6d18354e01e631fc12 | [
"MIT"
] | 1 | 2015-01-21T14:38:46.000Z | 2015-01-21T14:38:46.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
import warnings, multiprocessing
# Internal modules #
from seqsearch.databases.pfam import pfam
from seqsearch.databases.tigrfam import tigrfam
# First party modules #
from fasta import FASTA
from autopaths.file_path import FilePath
# Third party modules #
import sh
# Warnings #
warnings.filterwarnings("ignore", "Bio.SearchIO")
warnings.filterwarnings("ignore", "BiopythonWarning")
from Bio import SearchIO
###############################################################################
class HmmQuery(object):
"""An `hmmsearch` job."""
short_name = 'hmmsearch'
long_name = 'HMMER 3.1b2 (February 2015)'
executable = 'hmmsearch'
url = 'http://hmmer.org/'
license = 'GPLv3'
dependencies = []
def __bool__(self): return bool(self.out_path)
def __repr__(self):
return '<%s object on %s>' % (self.__class__.__name__, self.query)
def __init__(self, query_path, # The input sequences
db_path = pfam.hmm_db, # The database to search
seq_type = 'prot' or 'nucl', # The seq type of the query_path file
e_value = 0.001, # The search threshold
params = None, # Add extra params for the command line
out_path = None, # Where the results will be dropped
executable = None, # If you want a specific binary give the path
cpus = None): # The number of threads to use
# Save attributes #
self.query = FASTA(query_path)
self.db = FilePath(db_path)
self.params = params if params else {}
self.e_value = e_value
self.seq_type = seq_type
self.executable = FilePath(executable)
# Cores to use #
if cpus is None: self.cpus = min(multiprocessing.cpu_count(), 32)
else: self.cpus = cpus
# Auto detect database short name #
if db_path == 'pfam': self.db = pfam.hmm_db
if db_path == 'tigrfam': self.db = tigrfam.hmm_db
# Output #
if out_path is None:
self.out_path = FilePath(self.query.prefix_path + '.hmmout')
elif out_path.endswith('/'):
self.out_path = FilePath(out_path + self.query.prefix + '.hmmout')
else:
self.out_path = FilePath(out_path)
@property
def command(self):
# Executable #
if self.executable: cmd = [self.executable.path]
else: cmd = ["hmmsearch"]
# Essentials #
cmd += ('-o', '/dev/null', # direct output to file <f>, not stdout
'--tblout', self.out_path, # parsable table of per-sequence hits
'--seed', 1, # set RNG seed to <n>
'--notextw', # unlimited ASCII text output line width
'--acc', # prefer accessions over names in output
self.db,
self.query)
# Options #
for k,v in self.params.items(): cmd += [k, v]
# Return #
# run() indexes into this, so return a list rather than a lazy map object
return list(map(str, cmd))
def run(self, cpus=None):
"""Simply run the HMM search locally."""
# Number of threads #
if cpus is None: cpus = self.cpus
# Checks #
assert self.query.exists
assert self.db.exists
# Check if query is not empty #
if self.query.count_bytes == 0:
message = "Hmm search on a file with no sequences. File at '%s'"
warnings.warn(message % self.query, RuntimeWarning)
return False
# Do it #
sh.Command(self.command[0])(['--cpu', str(cpus)] + self.command[1:])
@property
def hits(self):
if not self.out_path:
raise Exception("You can't access results from HMMER before running the algorithm.")
return SearchIO.read(self.out_path, 'hmmer3-tab')
| 37.294643 | 99 | 0.547043 | 482 | 4,177 | 4.628631 | 0.408714 | 0.037651 | 0.034514 | 0.025549 | 0.023308 | 0.023308 | 0 | 0 | 0 | 0 | 0 | 0.007554 | 0.334451 | 4,177 | 111 | 100 | 37.630631 | 0.794964 | 0.202777 | 0 | 0.028571 | 0 | 0 | 0.106156 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.085714 | false | 0 | 0.1 | 0.028571 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
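Typical use of the class above, assuming the hmmsearch binary is on PATH, the Pfam HMM database has already been fetched by seqsearch, and a hypothetical proteins.faa query file exists:

from seqsearch.search.hmmer import HmmQuery

query = HmmQuery('proteins.faa',    # hypothetical protein FASTA
                 db_path='pfam',    # short name auto-resolved to pfam.hmm_db
                 e_value=1e-5,
                 cpus=4)
query.run()
for hit in query.hits:              # Bio.SearchIO hits from the tblout file
    print(hit.id, hit.evalue)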
ffa705ad0694eff49275d52588a266e77c3b8d06 | 2,290 | py | Python | tests/test_solve.py | Sen-R/dynamic-programming | cfb6cada511dcef4b442841cf95159d8deb55616 | [
"MIT"
] | null | null | null | tests/test_solve.py | Sen-R/dynamic-programming | cfb6cada511dcef4b442841cf95159d8deb55616 | [
"MIT"
] | null | null | null | tests/test_solve.py | Sen-R/dynamic-programming | cfb6cada511dcef4b442841cf95159d8deb55616 | [
"MIT"
] | null | null | null | from typing import Dict
import pytest
from numpy.testing import assert_almost_equal
import numpy as np
from dp._types import Policy
from dp import FiniteMDP
from dp.solve import (
backup_optimal_values,
policy_evaluation,
policy_evaluation_affine_operator,
)
from .conftest import SimpleMDP, TState, TAction
gamma = 0.9
@pytest.fixture
def v() -> Dict[TState, float]:
"""Initial state values for use in tests."""
return {"A": 3.0, "B": 1.0, "C": 0.0}
@pytest.fixture
def tying_v() -> Dict[TState, float]:
"""Alternative state values with tied optimal actions for state A."""
return {"A": 2.0, "B": 20 / 9, "C": 0.0}
@pytest.fixture
def pi() -> Policy[TState, TAction]:
"""Stochastic policy for use in tests."""
pi_dict = {
"A": (("L", 0.6), ("R", 0.4)),
"B": (("L", 1.0),),
"C": (("L", 1.0),),
}
return lambda s: pi_dict[s]
class TestSolveBasicComponents:
def test_backup_optimal_values(
self, test_mdp: FiniteMDP, v: Dict[TState, float]
) -> None:
initial_v_array = np.array(list(v.values()))
updated_v = backup_optimal_values(test_mdp, initial_v_array, gamma)
expected_v = [0.725, 1.525, 0.0]
assert_almost_equal(updated_v, expected_v)
class TestPolicyEvaluationByLinearSolve:
def test_backup_policy_values_operator(
self,
test_mdp: SimpleMDP,
pi: Policy[TState, TAction],
) -> None:
A, b = policy_evaluation_affine_operator(test_mdp, pi, gamma)
# A should be gamma times the transition matrix
expected_A = gamma * np.array(
[[0.0, 0.45, 0.55], [0.75, 0.0, 0.25], [0.0, 0.0, 1.0]]
)
assert_almost_equal(A, expected_A)
# b should be a vector of expected reward per starting state
expected_b = np.array([0.1, -0.5, 0.0])
assert_almost_equal(b, expected_b)
def test_policy_evaluation(
self, test_mdp: SimpleMDP, pi: Policy[TState, TAction]
) -> None:
"""Tests exact policy evaluation solver against previously
calculated state values for this policy and MDP."""
v_known = [-0.14106313, -0.59521761, 0.0]
v = policy_evaluation(test_mdp, pi, gamma)
assert_almost_equal(list(v._v.values()), v_known)
| 30.131579 | 75 | 0.634498 | 329 | 2,290 | 4.246201 | 0.303951 | 0.01718 | 0.060845 | 0.034359 | 0.118826 | 0.091625 | 0.064424 | 0.064424 | 0.064424 | 0 | 0 | 0.046259 | 0.235371 | 2,290 | 75 | 76 | 30.533333 | 0.751571 | 0.152402 | 0 | 0.111111 | 0 | 0 | 0.006803 | 0 | 0 | 0 | 0 | 0 | 0.092593 | 1 | 0.111111 | false | 0 | 0.148148 | 0 | 0.351852 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ffa9885e7671ec6782e68cccef479cdd2f9ce49e | 8,192 | py | Python | python/paddle/fluid/tests/unittests/test_logspace.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | [
"Apache-2.0"
] | 8 | 2016-08-15T07:02:27.000Z | 2016-08-24T09:34:00.000Z | python/paddle/fluid/tests/unittests/test_logspace.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | [
"Apache-2.0"
] | 1 | 2022-01-28T07:23:22.000Z | 2022-01-28T07:23:22.000Z | python/paddle/fluid/tests/unittests/test_logspace.py | RangeKing/Paddle | 2d87300809ae75d76f5b0b457d8112cb88dc3e27 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
class TestLogspaceOpCommonCase(OpTest):
def setUp(self):
self.op_type = "logspace"
dtype = 'float32'
self.inputs = {
'Start': np.array([0]).astype(dtype),
'Stop': np.array([10]).astype(dtype),
'Num': np.array([11]).astype('int32'),
'Base': np.array([2]).astype(dtype),
}
self.attrs = {'dtype': int(paddle.float32)}
self.outputs = {'Out': np.power(2, np.arange(0, 11)).astype(dtype)}
def test_check_output(self):
self.check_output()
class TestLogspaceOpReverseCase(OpTest):
def setUp(self):
self.op_type = "logspace"
dtype = 'float32'
self.inputs = {
'Start': np.array([10]).astype(dtype),
'Stop': np.array([0]).astype(dtype),
'Num': np.array([11]).astype('int32'),
'Base': np.array([2]).astype(dtype)
}
self.attrs = {'dtype': int(paddle.float32)}
self.outputs = {'Out': np.power(2, np.arange(10, -1, -1)).astype(dtype)}
def test_check_output(self):
self.check_output()
class TestLogspaceOpNumOneCase(OpTest):
def setUp(self):
self.op_type = "logspace"
dtype = 'float32'
self.inputs = {
'Start': np.array([10]).astype(dtype),
'Stop': np.array([0]).astype(dtype),
'Num': np.array([1]).astype('int32'),
'Base': np.array([2]).astype(dtype)
}
self.attrs = {'dtype': int(paddle.float32)}
self.outputs = {'Out': np.power(2, np.array(10)).astype(dtype)}
def test_check_output(self):
self.check_output()
class TestLogspaceOpMinusBaseCase(OpTest):
def setUp(self):
self.op_type = "logspace"
dtype = 'float32'
self.inputs = {
'Start': np.array([0]).astype(dtype),
'Stop': np.array([10]).astype(dtype),
'Num': np.array([11]).astype('int32'),
'Base': np.array([-2]).astype(dtype),
}
self.attrs = {'dtype': int(paddle.float32)}
self.outputs = {'Out': np.power(-2, np.arange(0, 11)).astype(dtype)}
def test_check_output(self):
self.check_output()
class TestLogspaceOpZeroBaseCase(OpTest):
def setUp(self):
self.op_type = "logspace"
dtype = 'float32'
self.inputs = {
'Start': np.array([0]).astype(dtype),
'Stop': np.array([10]).astype(dtype),
'Num': np.array([11]).astype('int32'),
'Base': np.array([0]).astype(dtype),
}
self.attrs = {'dtype': int(paddle.float32)}
self.outputs = {'Out': np.power(0, np.arange(0, 11)).astype(dtype)}
def test_check_output(self):
self.check_output()
class TestLogspaceAPI(unittest.TestCase):
def test_variable_input1(self):
paddle.enable_static()
prog = paddle.static.Program()
with paddle.static.program_guard(prog):
start = paddle.full(shape=[1], fill_value=0, dtype='float32')
stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
num = paddle.full(shape=[1], fill_value=5, dtype='int32')
base = paddle.full(shape=[1], fill_value=2, dtype='float32')
out = paddle.logspace(start, stop, num, base, dtype='float32')
exe = paddle.static.Executor()
res = exe.run(prog, fetch_list=[out])
np_res = np.logspace(0, 10, 5, base=2, dtype='float32')
self.assertEqual((res == np_res).all(), True)
paddle.disable_static()
def test_variable_input2(self):
paddle.disable_static()
start = paddle.full(shape=[1], fill_value=0, dtype='float32')
stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
num = paddle.full(shape=[1], fill_value=5, dtype='int32')
base = paddle.full(shape=[1], fill_value=2, dtype='float32')
out = paddle.logspace(start, stop, num, base, dtype='float32')
np_res = np.logspace(0, 10, 5, base=2, dtype='float32')
self.assertEqual((out.numpy() == np_res).all(), True)
paddle.enable_static()
def test_dtype(self):
paddle.enable_static()
prog = paddle.static.Program()
with paddle.static.program_guard(prog):
out_1 = paddle.logspace(0, 10, 5, 2, dtype='float32')
out_2 = paddle.logspace(0, 10, 5, 2, dtype=np.float32)
exe = paddle.static.Executor()
res_1, res_2 = exe.run(prog, fetch_list=[out_1, out_2])
assert np.array_equal(res_1, res_2)
paddle.disable_static()
def test_name(self):
with paddle.static.program_guard(paddle.static.Program()):
out = paddle.logspace(
0, 10, 5, 2, dtype='float32', name='logspace_res')
assert 'logspace_res' in out.name
def test_imperative(self):
paddle.disable_static()
out1 = paddle.logspace(0, 10, 5, 2, dtype='float32')
np_out1 = np.logspace(0, 10, 5, base=2, dtype='float32')
out2 = paddle.logspace(0, 10, 5, 2, dtype='int32')
np_out2 = np.logspace(0, 10, 5, base=2, dtype='int32')
out3 = paddle.logspace(0, 10, 200, 2, dtype='int32')
np_out3 = np.logspace(0, 10, 200, base=2, dtype='int32')
paddle.enable_static()
self.assertEqual((out1.numpy() == np_out1).all(), True)
self.assertEqual((out2.numpy() == np_out2).all(), True)
self.assertEqual((out3.numpy() == np_out3).all(), True)
class TestLogspaceOpError(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
def test_dtype():
paddle.logspace(0, 10, 1, 2, dtype="int8")
self.assertRaises(TypeError, test_dtype)
def test_dtype1():
paddle.logspace(0, 10, 1.33, 2, dtype="int32")
self.assertRaises(TypeError, test_dtype1)
def test_start_type():
paddle.logspace([0], 10, 1, 2, dtype="float32")
self.assertRaises(TypeError, test_start_type)
def test_end_type():
paddle.logspace(0, [10], 1, 2, dtype="float32")
self.assertRaises(TypeError, test_end_type)
def test_num_type():
paddle.logspace(0, 10, [0], 2, dtype="float32")
self.assertRaises(TypeError, test_num_type)
def test_start_dtype():
start = paddle.static.data(
shape=[1], dtype="float64", name="start")
paddle.logspace(start, 10, 1, 2, dtype="float32")
self.assertRaises(ValueError, test_start_dtype)
def test_end_dtype():
end = paddle.static.data(shape=[1], dtype="float64", name="end")
paddle.logspace(0, end, 1, 2, dtype="float32")
self.assertRaises(ValueError, test_end_dtype)
def test_num_dtype():
num = paddle.static.data(
shape=[1], dtype="float32", name="step")
paddle.logspace(0, 10, num, 2, dtype="float32")
self.assertRaises(TypeError, test_num_dtype)
def test_base_dtype():
base = paddle.static.data(
shape=[1], dtype="float64", name="end")
paddle.logspace(0, 10, 1, base, dtype="float32")
self.assertRaises(ValueError, test_base_dtype)
if __name__ == "__main__":
unittest.main()
| 35.310345 | 80 | 0.586426 | 1,024 | 8,192 | 4.577148 | 0.15332 | 0.069127 | 0.042245 | 0.047152 | 0.658844 | 0.630681 | 0.593983 | 0.578195 | 0.507147 | 0.478771 | 0 | 0.049925 | 0.264038 | 8,192 | 231 | 81 | 35.463203 | 0.727484 | 0.071167 | 0 | 0.475904 | 0 | 0 | 0.063348 | 0 | 0 | 0 | 0 | 0 | 0.096386 | 1 | 0.150602 | false | 0 | 0.03012 | 0 | 0.222892 | 0.006024 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ffaa3706e807f3b2ff19ebdc4ddab83d08ac4319 | 719 | py | Python | kwis/urls.py | Navisence/kwispel | cc93f345d3b53040ef1ec4b9e022040dca376173 | [
"MIT"
] | 1 | 2016-06-14T18:42:55.000Z | 2016-06-14T18:42:55.000Z | kwis/urls.py | Navisence/kwispel | cc93f345d3b53040ef1ec4b9e022040dca376173 | [
"MIT"
] | 1 | 2020-02-12T00:33:01.000Z | 2020-02-12T00:33:01.000Z | kwis/urls.py | Navisence/kwispel | cc93f345d3b53040ef1ec4b9e022040dca376173 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^team_overview.png$', views.team_overview),
url(r'^rnd_overview.png$', views.rnd_overview),
url(r'^ranking/$', views.ranking, name='ranking'),
url(r'^ranking/overview.png$', views.ranking_overview),
url(r'^round/(?P<rnd_id>[0-9]+)/$', views.rnd_detail, name='rnd_detail'),
url(r'^round/(?P<rnd_id>[0-9]+)/result.png$', views.rnd_result),
url(r'^team/(?P<team_id>[0-9]+)/$', views.team_detail, name='team_detail'),
url(r'^team/(?P<team_id>[0-9]+)/result.png$', views.team_result),
url(r'^vote/(?P<rnd_id>[0-9]+)/(?P<team_id>[0-9]+)/$', views.vote, name='vote'),
]
| 42.294118 | 84 | 0.628651 | 118 | 719 | 3.686441 | 0.20339 | 0.091954 | 0.055172 | 0.048276 | 0.282759 | 0.264368 | 0.156322 | 0.156322 | 0 | 0 | 0 | 0.01875 | 0.109875 | 719 | 16 | 85 | 44.9375 | 0.660938 | 0 | 0 | 0 | 0 | 0 | 0.392211 | 0.272601 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
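Because every route above is named, views and templates can resolve paths with Django's reverse() instead of hard-coding them; a quick sketch (the prefix depends on where this URLconf is included):

from django.urls import reverse

reverse('index')                 # -> '/' under this URLconf's mount point
reverse('rnd_detail', args=[3])  # -> '/round/3/'
reverse('vote', args=[3, 7])     # -> '/vote/3/7/'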
ffac01add95f30509a3513e66d50a6bca1ffa573 | 6,608 | py | Python | networks/SRM_SE_GE/resnet.py | eepgxxy/Paddle_Attention_CNNs | a1e102afd03eab4c2839ed8df58ae2640edd5955 | [
"Apache-2.0"
] | 2 | 2021-06-21T08:14:25.000Z | 2021-09-08T08:24:18.000Z | networks/SRM_SE_GE/resnet.py | eepgxxy/Paddle_Attention_CNNs | a1e102afd03eab4c2839ed8df58ae2640edd5955 | [
"Apache-2.0"
] | null | null | null | networks/SRM_SE_GE/resnet.py | eepgxxy/Paddle_Attention_CNNs | a1e102afd03eab4c2839ed8df58ae2640edd5955 | [
"Apache-2.0"
] | 1 | 2021-09-08T08:25:15.000Z | 2021-09-08T08:25:15.000Z | import paddle
import paddle.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride,
                 padding=1, bias_attr=False)
class BasicBlock(nn.Layer):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, rclb_layer=None, layer_idx=1, is_ge=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2D(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2D(planes)
self.downsample = downsample
self.stride = stride
if rclb_layer is None:
self.rclb = None
else:
if is_ge:
self.rclb = rclb_layer(planes, layer_idx)
else:
self.rclb = rclb_layer(planes)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.rclb is not None:
out = self.rclb(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Layer):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, rclb_layer=None, layer_idx=1, is_ge=False):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2D(inplanes, planes, kernel_size=1, bias_attr=False)
self.bn1 = nn.BatchNorm2D(planes)
self.conv2 = nn.Conv2D(planes, planes, kernel_size=3, stride=stride,
padding=1, bias_attr=False)
self.bn2 = nn.BatchNorm2D(planes)
self.conv3 = nn.Conv2D(planes, planes * 4, kernel_size=1, bias_attr=False)
self.bn3 = nn.BatchNorm2D(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
if rclb_layer is None:
self.rclb = None
else:
if is_ge:
self.rclb = rclb_layer(planes * 4, layer_idx)
else:
self.rclb = rclb_layer(planes * 4)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.rclb is not None:
out = self.rclb(out)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Layer):
def __init__(self, block, layers, input_channels=3, num_classes=1000, recalibration_type=None):
super(ResNet, self).__init__()
self.is_ge = True if recalibration_type == 'ge' else False
if recalibration_type is None:
self.rclb_layer = None
elif recalibration_type == 'srm':
from recalibration_modules import SRMLayer as rclb_layer
self.rclb_layer = rclb_layer
elif recalibration_type == 'se':
from recalibration_modules import SELayer as rclb_layer
self.rclb_layer = rclb_layer
elif recalibration_type == 'ge':
from recalibration_modules import GELayer as rclb_layer
self.rclb_layer = rclb_layer
else:
raise NotImplementedError
self.inplanes = 64
self.conv1 = nn.Conv2D(3, 64, kernel_size=7, stride=2, padding=3, bias_attr=False)
self.bn1 = nn.BatchNorm2D(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], layer_idx=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, layer_idx=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, layer_idx=3)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, layer_idx=4)
self.avgpool = nn.AdaptiveAvgPool2D(1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
# Assigning weight_attr after construction has no effect; set the weights directly.
m.weight.set_value(paddle.normal(0., math.sqrt(2. / n), m.weight.shape))
elif isinstance(m, nn.BatchNorm2D):
m.weight.set_value(paddle.ones(m.weight.shape))
m.bias.set_value(paddle.zeros(m.bias.shape))
def _make_layer(self, block, planes, blocks, stride=1, layer_idx=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2D(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias_attr=False),
nn.BatchNorm2D(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, rclb_layer=self.rclb_layer, layer_idx=layer_idx, is_ge=self.is_ge))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, rclb_layer=self.rclb_layer, layer_idx=layer_idx, is_ge=self.is_ge))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = paddle.reshape(x, [x.shape[0], -1])
x = self.fc(x)
return x
def resnet(depth, **kwargs):
if depth == 18:
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
elif depth == 34:
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
elif depth == 50:
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
elif depth == 101:
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
elif depth == 152:
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
if __name__ == '__main__':
network = resnet(50, num_classes=10, recalibration_type='srm')
img = paddle.zeros([1, 3, 224, 224])
outs = network(img)
print(outs.shape)
| 33.543147 | 138 | 0.585805 | 861 | 6,608 | 4.350755 | 0.156794 | 0.052856 | 0.024026 | 0.018687 | 0.454084 | 0.402029 | 0.357181 | 0.313401 | 0.285905 | 0.264015 | 0 | 0.037724 | 0.297972 | 6,608 | 196 | 139 | 33.714286 | 0.769778 | 0.004237 | 0 | 0.365385 | 0 | 0 | 0.007264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.038462 | 0 | 0.166667 | 0.00641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ffafa132bf34854b4b313ea5ff73b935c4ed677f | 531 | py | Python | addons/blender-addon-fbx-bundle/platform_obj.py | V-Sekai/game-tools-V-Sekai | 74eb79e9b97bb1954e647ed2f909f4f326189cb5 | [
"MIT"
] | 2 | 2021-12-21T16:38:58.000Z | 2022-01-08T00:56:35.000Z | addons/blender-addon-fbx-bundle/platform_obj.py | V-Sekai/game-tools-V-Sekai | 74eb79e9b97bb1954e647ed2f909f4f326189cb5 | [
"MIT"
] | 1 | 2022-01-29T05:46:50.000Z | 2022-01-29T05:46:50.000Z | addons/blender-addon-fbx-bundle/platform_obj.py | V-Sekai/game-tools-V-Sekai | 74eb79e9b97bb1954e647ed2f909f4f326189cb5 | [
"MIT"
] | 1 | 2021-11-07T19:41:34.000Z | 2021-11-07T19:41:34.000Z | import bpy
import bmesh
import operator
import mathutils
import addon_utils
from . import platform
class Platform(platform.Platform):
extension = 'obj'
def __init__(self):
super().__init__()
def is_valid(self):
return True, ""
def file_export(self, path):
bpy.ops.export_scene.obj(
filepath=path,
use_selection=True,
use_mesh_modifiers=True,
use_smooth_groups=True,
use_triangles=True,
use_uvs=True,
use_materials=True,
global_scale=100,
axis_forward = 'Y',
axis_up = 'Z'
)
| 14.351351 | 34 | 0.704331 | 72 | 531 | 4.875 | 0.597222 | 0.099715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007009 | 0.193974 | 531 | 36 | 35 | 14.75 | 0.813084 | 0 | 0 | 0 | 0 | 0 | 0.009416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.24 | 0.04 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ffb52d18ba01244fe0a111aa498702d1a5db89c0 | 7,096 | py | Python | trustpayments/models/refund_create.py | TrustPayments/python-sdk | 6fde6eb8cfce270c3612a2903a845c13018c3bb9 | [
"Apache-2.0"
] | 2 | 2020-01-16T13:24:06.000Z | 2020-11-21T17:40:17.000Z | postfinancecheckout/models/refund_create.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 4 | 2019-10-14T17:33:23.000Z | 2021-10-01T14:49:11.000Z | postfinancecheckout/models/refund_create.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 2 | 2019-10-15T14:17:10.000Z | 2021-09-17T13:07:09.000Z | # coding: utf-8
import pprint
import six
from enum import Enum
class RefundCreate:
swagger_types = {
'amount': 'float',
'completion': 'int',
'external_id': 'str',
'merchant_reference': 'str',
'reductions': 'list[LineItemReductionCreate]',
'transaction': 'int',
'type': 'RefundType',
}
attribute_map = {
'amount': 'amount',
'completion': 'completion',
'external_id': 'externalId',
'merchant_reference': 'merchantReference',
'reductions': 'reductions',
'transaction': 'transaction',
'type': 'type',
}
_amount = None
_completion = None
_external_id = None
_merchant_reference = None
_reductions = None
_transaction = None
_type = None
def __init__(self, **kwargs):
self.discriminator = None
self.amount = kwargs.get('amount', None)
self.completion = kwargs.get('completion', None)
self.external_id = kwargs.get('external_id')
self.merchant_reference = kwargs.get('merchant_reference', None)
self.reductions = kwargs.get('reductions', None)
self.transaction = kwargs.get('transaction', None)
self.type = kwargs.get('type')
@property
def amount(self):
"""Gets the amount of this RefundCreate.
:return: The amount of this RefundCreate.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this RefundCreate.
:param amount: The amount of this RefundCreate.
:type: float
"""
self._amount = amount
@property
def completion(self):
"""Gets the completion of this RefundCreate.
:return: The completion of this RefundCreate.
:rtype: int
"""
return self._completion
@completion.setter
def completion(self, completion):
"""Sets the completion of this RefundCreate.
:param completion: The completion of this RefundCreate.
:type: int
"""
self._completion = completion
@property
def external_id(self):
"""Gets the external_id of this RefundCreate.
The external id helps to identify duplicate calls to the refund service. As such the external ID has to be unique per transaction.
:return: The external_id of this RefundCreate.
:rtype: str
"""
return self._external_id
@external_id.setter
def external_id(self, external_id):
"""Sets the external_id of this RefundCreate.
The external id helps to identify duplicate calls to the refund service. As such the external ID has to be unique per transaction.
:param external_id: The external_id of this RefundCreate.
:type: str
"""
if external_id is None:
raise ValueError("Invalid value for `external_id`, must not be `None`")
if external_id is not None and len(external_id) > 100:
raise ValueError("Invalid value for `external_id`, length must be less than or equal to `100`")
if external_id is not None and len(external_id) < 1:
raise ValueError("Invalid value for `external_id`, length must be greater than or equal to `1`")
self._external_id = external_id
@property
def merchant_reference(self):
"""Gets the merchant_reference of this RefundCreate.
:return: The merchant_reference of this RefundCreate.
:rtype: str
"""
return self._merchant_reference
@merchant_reference.setter
def merchant_reference(self, merchant_reference):
"""Sets the merchant_reference of this RefundCreate.
:param merchant_reference: The merchant_reference of this RefundCreate.
:type: str
"""
if merchant_reference is not None and len(merchant_reference) > 100:
raise ValueError("Invalid value for `merchant_reference`, length must be less than or equal to `100`")
self._merchant_reference = merchant_reference
@property
def reductions(self):
"""Gets the reductions of this RefundCreate.
:return: The reductions of this RefundCreate.
:rtype: list[LineItemReductionCreate]
"""
return self._reductions
@reductions.setter
def reductions(self, reductions):
"""Sets the reductions of this RefundCreate.
:param reductions: The reductions of this RefundCreate.
:type: list[LineItemReductionCreate]
"""
self._reductions = reductions
@property
def transaction(self):
"""Gets the transaction of this RefundCreate.
:return: The transaction of this RefundCreate.
:rtype: int
"""
return self._transaction
@transaction.setter
def transaction(self, transaction):
"""Sets the transaction of this RefundCreate.
:param transaction: The transaction of this RefundCreate.
:type: int
"""
self._transaction = transaction
@property
def type(self):
"""Gets the type of this RefundCreate.
:return: The type of this RefundCreate.
:rtype: RefundType
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RefundCreate.
:param type: The type of this RefundCreate.
:type: RefundType
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(RefundCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RefundCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 26.980989 | 195 | 0.584977 | 757 | 7,096 | 5.346103 | 0.136063 | 0.071658 | 0.124537 | 0.039783 | 0.419817 | 0.252039 | 0.180381 | 0.124537 | 0.124537 | 0.111688 | 0 | 0.003765 | 0.32624 | 7,096 | 262 | 196 | 27.083969 | 0.842711 | 0.268461 | 0 | 0.07563 | 0 | 0 | 0.146629 | 0.010734 | 0 | 0 | 0 | 0 | 0 | 1 | 0.168067 | false | 0 | 0.02521 | 0.02521 | 0.386555 | 0.016807 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
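A short construction example for the generated model above. The import path follows the max_stars repo layout, the field values are placeholders, and the RefundType member name is an assumption about the enum shipped alongside this model:

from trustpayments.models import RefundCreate, RefundType  # paths assumed

refund = RefundCreate(
    external_id='refund-0001',   # required; unique per transaction
    transaction=12345,           # placeholder transaction id
    type=RefundType.MERCHANT_INITIATED_ONLINE,  # enum member assumed
    amount=19.90,
    merchant_reference='order-42',
)
print(refund.to_dict())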