hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eeb92568acbeb085fdd7abb46fd384c3584021b6 | 110 | py | Python | contests/aizu/itp1/1d.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/aizu/itp1/1d.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/aizu/itp1/1d.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | S = int(input())
s = S % 60
S = S // 60
m = S % 60
S = S // 60
h = S
print(':'.join(map(str, [h, m, s])))
| 9.166667 | 36 | 0.409091 | 24 | 110 | 1.875 | 0.416667 | 0.266667 | 0.266667 | 0.222222 | 0.311111 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 0.309091 | 110 | 11 | 37 | 10 | 0.486842 | 0 | 0 | 0.285714 | 0 | 0 | 0.009091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
eedbcd017382ab06e7a203a5c6bf5b288d0c424d | 3,367 | py | Python | Audit_MLE_functions.py | LeonardMK/Microeconometrics | 66fcfca32e97f777504f90420cf297b14525f7e5 | [
"MIT"
] | null | null | null | Audit_MLE_functions.py | LeonardMK/Microeconometrics | 66fcfca32e97f777504f90420cf297b14525f7e5 | [
"MIT"
] | null | null | null | Audit_MLE_functions.py | LeonardMK/Microeconometrics | 66fcfca32e97f777504f90420cf297b14525f7e5 | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import pandas as pd
import scipy as sp
import numpy as np
# In[198]:
# Implementation of objective function 1
def objfun1(theta, tstar, Ts, months, years, ds):
logps = np.repeat(0, len(Ts))
logps = logps.astype("double")
for i in range(len(Ts)):
T = Ts[i].astype("int")
d = ds[i]
if not np.isnan(months[i]):
t = months[i].astype("int")
logps[i] = np.log(prob_1st_monthly(theta, tstar, t, T, d))
for tau in range(t + 1, T + 1):
logps[i] = logps[i] + np.log(1 - prob_1st_monthly(theta, tstar, tau, T, d))
elif not np.isnan(years[i]):
t = years[i].astype("int")
logps[i] = np.log(prob_1st_yearly(theta, tstar, t, 1, d))
for tau in range(t * 12 + 1, T + 1):
logps[i] = logps[i] + np.log(1 - prob_1st_monthly(theta, tstar, tau, T, d))
else:
for tau in range(-22, T + 1):
logps[i] = logps[i] + np.log(1 - prob_1st_monthly(theta, tstar, tau, T, d))
return -np.sum(logps)
# In[3]:
def prob_1st_monthly(theta, tstar, t, T, d):
temp = theta[0] * (t >= tstar) + theta[1] * (d==11) + theta[2] * (d==24) + theta[3] * (d==29) + theta[4] * t
return (np.exp(temp) / (1 + np.exp(temp)))
def prob_1st_yearly(theta, tstar, t, T, d):
temp = 1
for m in range(12):
temp = temp * (1 - prob_1st_monthly(theta, tstar, (t - 1) * 12 + m + 1, T, d))
return (1 - temp)
# In[202]:
# Implementation of objective function 2
def objfun2(theta, tstar, Ts, months, years, ds):
logps = np.repeat(0, len(Ts))
logps = logps.astype("double")
for i in range(len(Ts)):
T = Ts[i].astype("int")
d = ds[i]
if not np.isnan(months[i]):
t = months[i].astype("int")
logps[i] = np.log(prob_2nd_monthly(theta, tstar, t, T, d))
for tau in range(t + 1, T + 1):
logps[i] = logps[i] + np.log(1 - prob_2nd_monthly(theta, tstar, tau, T, d))
elif not np.isnan(years[i]):
t = years[i].astype("int")
logps[i] = np.log(prob_2nd_yearly(theta, tstar, t, 1, d))
for tau in range((t * 12 + 1), T + 1):
logps[i] = logps[i] + np.log(1 - prob_2nd_monthly(theta, tstar, tau, T, d))
else:
# HARDCODED CUTOFF DATE HERE
for tau in range(-22, T + 1):
logps[i] = logps[i] + np.log(1 - prob_2nd_monthly(theta, tstar, tau, T, d))
return (-np.sum(logps))
# In[205]:
# Define prob2 functions
def prob_2nd_monthly(theta, tstar, t, T, d):
temp = theta[0] * (t >= tstar) + theta[1] * (d==11) + theta[2] * (d==24) + theta[3] * (d==29) + theta[4] * t + theta[5] * t * t
return (np.exp(temp) / (1 + np.exp(temp)))
def prob_2nd_yearly(theta, tstar, t, T, d):
temp = 1
for m in range(12):
temp = temp * (1 - prob_2nd_monthly(theta, tstar, (t-1)*12 + m + 1, T, d))
return (1 - temp)
| 25.315789 | 131 | 0.468072 | 492 | 3,367 | 3.138211 | 0.150407 | 0.11658 | 0.132124 | 0.071244 | 0.859456 | 0.859456 | 0.851684 | 0.846503 | 0.837435 | 0.829663 | 0 | 0.049383 | 0.374517 | 3,367 | 132 | 132 | 25.507576 | 0.683761 | 0.054054 | 0 | 0.644068 | 0 | 0 | 0.009458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.050847 | 0 | 0.254237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
eee181aaca84ee63e5adc9b9fd3510519fad6ce5 | 797 | py | Python | lib/webtest/__init__.py | zenlambda/aeta | 3781ac916be069a1d01eaa8b2a42375b689a82fe | [
"Apache-2.0"
] | 1 | 2015-07-22T15:58:06.000Z | 2015-07-22T15:58:06.000Z | lib/webtest/__init__.py | agostodev/agar | 66b7937a35ae93717d5e9683c7dc7c80c4bcc5d6 | [
"MIT"
] | 1 | 2016-04-19T13:03:17.000Z | 2016-04-19T13:03:17.000Z | lib/webtest/__init__.py | agostodev/agar | 66b7937a35ae93717d5e9683c7dc7c80c4bcc5d6 | [
"MIT"
] | null | null | null | # (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org)
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Routines for testing WSGI applications.
Most interesting is app
"""
from webtest.app import TestApp
from webtest.app import TestRequest
from webtest.app import TestResponse
from webtest.app import Form
from webtest.app import Field
from webtest.app import AppError
from webtest.app import Select
from webtest.app import Radio
from webtest.app import Checkbox
from webtest.app import Text
from webtest.app import Textarea
from webtest.app import Hidden
from webtest.app import Submit
from webtest.app import Upload
from webtest.ext import casperjs
from webtest.sel import SeleniumApp
from webtest.sel import selenium
| 26.566667 | 58 | 0.811794 | 119 | 797 | 5.436975 | 0.428571 | 0.289026 | 0.302937 | 0.432767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005764 | 0.129235 | 797 | 29 | 59 | 27.482759 | 0.926513 | 0.288582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e10b21743a763af52fcc098588293bb65b2d49bf | 2,283 | py | Python | tests/test_mqtt.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | null | null | null | tests/test_mqtt.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | null | null | null | tests/test_mqtt.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | 2 | 2022-02-04T15:29:37.000Z | 2022-02-05T16:56:33.000Z | import unittest
from mqttassistant.dispatch import Signal
from mqttassistant.mqtt import Mqtt
from .test import Callback
class MqttTest(unittest.IsolatedAsyncioTestCase):
async def test_topic_signal_connect(self):
signal = Signal()
mqtt = Mqtt(topic_signal=signal)
with self.assertLogs('Mqtt', level='DEBUG') as cm:
await signal.connect('sensor', subject='sensor/state', callback=Callback())
self.assertEqual(mqtt.subscribed_topics, {'sensor/state'})
self.assertEqual(cm.output, [
'DEBUG:Mqtt:topic_subscribe: sensor/state',
])
# connecting again does not add another item
await signal.connect('sensor', subject='sensor/state', callback=Callback())
self.assertEqual(mqtt.subscribed_topics, {'sensor/state'})
self.assertEqual(cm.output, [
'DEBUG:Mqtt:topic_subscribe: sensor/state',
])
# connecting another topic does
await signal.connect('another', subject='another/state', callback=Callback())
self.assertEqual(mqtt.subscribed_topics, {'sensor/state', 'another/state'})
self.assertEqual(cm.output, [
'DEBUG:Mqtt:topic_subscribe: sensor/state',
'DEBUG:Mqtt:topic_subscribe: another/state',
])
async def test_topic_signal_disconnect(self):
signal = Signal()
mqtt = Mqtt(topic_signal=signal)
await signal.connect('sensor', subject='sensor/state', callback=Callback())
await signal.connect('another', subject='another/state', callback=Callback())
with self.assertLogs('Mqtt', level='DEBUG') as cm:
await signal.disconnect('sensor', subject='sensor/state')
self.assertEqual(mqtt.subscribed_topics, {'another/state'})
self.assertEqual(cm.output, [
'DEBUG:Mqtt:topic_unsubscribe: sensor/state',
])
# disconnecting again does not change anything
await signal.disconnect('sensor', subject='sensor/state')
self.assertEqual(mqtt.subscribed_topics, {'another/state'})
self.assertEqual(cm.output, [
'DEBUG:Mqtt:topic_unsubscribe: sensor/state',
])
| 47.5625 | 89 | 0.629435 | 233 | 2,283 | 6.085837 | 0.188841 | 0.100846 | 0.098731 | 0.084626 | 0.801834 | 0.769394 | 0.769394 | 0.769394 | 0.711566 | 0.59591 | 0 | 0 | 0.251862 | 2,283 | 47 | 90 | 48.574468 | 0.830211 | 0.051248 | 0 | 0.780488 | 0 | 0 | 0.216466 | 0.076781 | 0 | 0 | 0 | 0 | 0.292683 | 1 | 0 | false | 0 | 0.097561 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
01489eba904532dd22ea3beae51ddac2e5c5413e | 47 | py | Python | src/mean.py | bdavies3/Calculator | ea524e141e19d8e6894b55d9ee72f07c3005f914 | [
"MIT"
] | null | null | null | src/mean.py | bdavies3/Calculator | ea524e141e19d8e6894b55d9ee72f07c3005f914 | [
"MIT"
] | null | null | null | src/mean.py | bdavies3/Calculator | ea524e141e19d8e6894b55d9ee72f07c3005f914 | [
"MIT"
] | null | null | null | def mean(data):
mean = data
return mean | 15.666667 | 15 | 0.617021 | 7 | 47 | 4.142857 | 0.571429 | 0.551724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.297872 | 47 | 3 | 16 | 15.666667 | 0.878788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
01723e88074d0db7e53e189ef8810f4e3ac05a87 | 20 | py | Python | models/gcn/__init__.py | NIRVANALAN/Centroid_GCN | e93ec415d769cc3b1bbf737056097e8cbe65ded5 | [
"MIT"
] | 3 | 2020-11-12T07:00:20.000Z | 2021-07-12T02:56:41.000Z | models/gcn/__init__.py | NIRVANALAN/Centroid_GCN | e93ec415d769cc3b1bbf737056097e8cbe65ded5 | [
"MIT"
] | null | null | null | models/gcn/__init__.py | NIRVANALAN/Centroid_GCN | e93ec415d769cc3b1bbf737056097e8cbe65ded5 | [
"MIT"
] | null | null | null | from .gcn import GCN | 20 | 20 | 0.8 | 4 | 20 | 4 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 20 | 1 | 20 | 20 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0179a8e536fec66e7140011357ec2be8bce19d97 | 173 | py | Python | dz/models/__init__.py | ivandeex/dz | 15a010f99f9cf3e6b2f9bcba6eb52bed2dfc13a9 | [
"MIT"
] | 2 | 2016-09-15T20:38:12.000Z | 2016-11-01T05:40:13.000Z | dz/models/__init__.py | ivandeex/dz | 15a010f99f9cf3e6b2f9bcba6eb52bed2dfc13a9 | [
"MIT"
] | null | null | null | dz/models/__init__.py | ivandeex/dz | 15a010f99f9cf3e6b2f9bcba6eb52bed2dfc13a9 | [
"MIT"
] | null | null | null | from .crawl import Crawl # NOQA
from .schedule import Schedule # NOQA
from .news import News, NewsText # NOQA
from .tip import Tip # NOQA
from .user import User # NOQA
| 28.833333 | 40 | 0.728324 | 26 | 173 | 4.846154 | 0.346154 | 0.253968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208092 | 173 | 5 | 41 | 34.6 | 0.919708 | 0.138728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6dfead19e62f25c67a3eeb477bb2cc736f2a1d1a | 180 | py | Python | routes/index.py | theevann/notebook-progress-tracker | 0fb82f64d3b5157a88aef3e1a2b392ad1b426cff | [
"Apache-2.0"
] | 3 | 2020-08-12T01:52:48.000Z | 2021-02-24T15:03:32.000Z | routes/index.py | theevann/notebook-progress-tracker | 0fb82f64d3b5157a88aef3e1a2b392ad1b426cff | [
"Apache-2.0"
] | null | null | null | routes/index.py | theevann/notebook-progress-tracker | 0fb82f64d3b5157a88aef3e1a2b392ad1b426cff | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, render_template
index_bp = Blueprint('index', __name__)
@index_bp.route('/', methods=["GET"])
def index():
return render_template("index.html")
| 20 | 44 | 0.722222 | 23 | 180 | 5.304348 | 0.652174 | 0.229508 | 0.311475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122222 | 180 | 8 | 45 | 22.5 | 0.772152 | 0 | 0 | 0 | 0 | 0 | 0.105556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0.2 | 0.6 | 0.4 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
098dddba8b55fd89e9998bff0d4bcdb29b2c55e9 | 185 | py | Python | pcachefs/__init__.py | pcram-techcyte/pcachefs | bc7fd93b41beb59b44d5e946ccd755c7f64ff059 | [
"Apache-2.0"
] | 38 | 2016-07-21T18:10:03.000Z | 2022-02-11T20:37:44.000Z | pcachefs/__init__.py | pcram-techcyte/pcachefs | bc7fd93b41beb59b44d5e946ccd755c7f64ff059 | [
"Apache-2.0"
] | 4 | 2015-09-22T14:07:10.000Z | 2018-10-13T17:53:39.000Z | pcachefs/__init__.py | ibizaman/pcachefs | dce69058037db3f336c475bb39abb2d526efb759 | [
"Apache-2.0"
] | 10 | 2016-02-01T02:50:44.000Z | 2020-07-22T17:45:14.000Z | """
pcachefs package.
"""
from pcachefs import main
from pcachefs import FuseStat
from pcachefs import PersistentCacheFs
from pcachefs import Cacher
from pcachefs import UnderlyingFs
| 16.818182 | 38 | 0.821622 | 22 | 185 | 6.909091 | 0.409091 | 0.394737 | 0.592105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140541 | 185 | 10 | 39 | 18.5 | 0.955975 | 0.091892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
09afdb7e45de2d3f0c4c531667b9378dbb5a83cf | 241 | py | Python | telegram_bot_api/schemas/VenueSchema.py | IsVir/telegram-bot-api | 927e96452ad0c62ebae71304f13e5d34121b2ca9 | [
"MIT"
] | null | null | null | telegram_bot_api/schemas/VenueSchema.py | IsVir/telegram-bot-api | 927e96452ad0c62ebae71304f13e5d34121b2ca9 | [
"MIT"
] | null | null | null | telegram_bot_api/schemas/VenueSchema.py | IsVir/telegram-bot-api | 927e96452ad0c62ebae71304f13e5d34121b2ca9 | [
"MIT"
] | null | null | null | from marshmallow import Schema, fields
class VenueSchema(Schema):
location = fields.Nested('LocationSchema', required=True)
title = fields.Str(required=True)
address = fields.Str(required=True)
foursquare_id = fields.Str()
| 26.777778 | 61 | 0.73444 | 28 | 241 | 6.285714 | 0.607143 | 0.204545 | 0.193182 | 0.238636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157676 | 241 | 8 | 62 | 30.125 | 0.866995 | 0 | 0 | 0 | 0 | 0 | 0.058091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
09cd5395b877dc546d43958f529d9a908a8f4af8 | 2,166 | py | Python | epytope/Data/pssms/tepitopepan/mat/DRB1_0473_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_0473_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/tepitopepan/mat/DRB1_0473_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | DRB1_0473_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.98558, 'I': -0.014418, 'H': -999.0, 'K': -999.0, 'M': -0.014418, 'L': -0.014418, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.98558, 'V': -0.014418, 'Y': -0.98558}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -0.075565, 'D': -0.1121, 'G': -1.8011, 'F': 0.15754, 'I': 0.85132, 'H': 0.20012, 'K': -1.3997, 'M': 1.2634, 'L': 0.73828, 'N': 0.040241, 'Q': 0.27883, 'P': -1.3951, 'S': -0.06627, 'R': -1.7559, 'T': -0.14728, 'W': -0.36967, 'V': -0.23509, 'Y': -0.81731}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.3667, 'D': -1.0959, 'G': -1.4568, 'F': -1.087, 'I': -0.091608, 'H': -1.374, 'K': -2.3347, 'M': -1.076, 'L': -1.0665, 'N': 1.2755, 'Q': -1.4785, 'P': -0.00027712, 'S': 0.98481, 'R': -2.3124, 'T': 1.8559, 'W': -0.99151, 'V': 0.87952, 'Y': -1.4636}, 6: {'A': 0.0, 'E': -0.46807, 'D': -0.95217, 'G': -1.1581, 'F': -0.07187, 'I': 0.34318, 'H': 0.0010486, 'K': -0.60849, 'M': 0.88878, 'L': 0.76029, 'N': 0.61498, 'Q': 0.033941, 'P': -0.66805, 'S': 0.072632, 'R': -0.55275, 'T': 0.12046, 'W': -0.44125, 'V': 0.063351, 'Y': -0.25209}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -1.4451, 'D': -1.4784, 
'G': -0.85127, 'F': -0.85196, 'I': -0.24326, 'H': 0.1277, 'K': -0.34019, 'M': -0.25881, 'L': -0.8896, 'N': -1.236, 'Q': 0.51458, 'P': -1.2269, 'S': 0.71804, 'R': -0.92794, 'T': -1.109, 'W': -0.94394, 'V': -0.63235, 'Y': -0.86857}} | 2,166 | 2,166 | 0.395199 | 525 | 2,166 | 1.626667 | 0.201905 | 0.114754 | 0.028103 | 0.037471 | 0.21897 | 0.142857 | 0.142857 | 0.142857 | 0.133489 | 0.133489 | 0 | 0.374862 | 0.162512 | 2,166 | 1 | 2,166 | 2,166 | 0.095921 | 0 | 0 | 0 | 0 | 0 | 0.078911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
09dfeddf63b649c8b2d03e46ebb6a5a2e80e21a0 | 241 | py | Python | weather/admin.py | dhruvil410/Django-Weather-Web-App | 3f70bdb94d9aa7f387c04ab70b0ed8d07f15ec95 | [
"MIT"
] | 1 | 2021-01-04T17:10:00.000Z | 2021-01-04T17:10:00.000Z | weather/admin.py | dhruvil410/Django-Weather-Web-App | 3f70bdb94d9aa7f387c04ab70b0ed8d07f15ec95 | [
"MIT"
] | null | null | null | weather/admin.py | dhruvil410/Django-Weather-Web-App | 3f70bdb94d9aa7f387c04ab70b0ed8d07f15ec95 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import city,country,hourly_forecast_log,daily_forecast_log
admin.site.register(city)
admin.site.register(country)
admin.site.register(hourly_forecast_log)
admin.site.register(daily_forecast_log) | 34.428571 | 71 | 0.863071 | 36 | 241 | 5.555556 | 0.388889 | 0.22 | 0.34 | 0.2 | 0.28 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049793 | 241 | 7 | 72 | 34.428571 | 0.873362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
61ffae73c56e26af033b0652bf777f29712fc545 | 45 | py | Python | standard-lib-date.py | jepster/python_basics_learning_scripts | 863170e86c5a375b4f1455b4c87c2d6a9727a7f8 | [
"MIT"
] | null | null | null | standard-lib-date.py | jepster/python_basics_learning_scripts | 863170e86c5a375b4f1455b4c87c2d6a9727a7f8 | [
"MIT"
] | null | null | null | standard-lib-date.py | jepster/python_basics_learning_scripts | 863170e86c5a375b4f1455b4c87c2d6a9727a7f8 | [
"MIT"
] | null | null | null | import datetime
print(datetime.date.today()) | 15 | 28 | 0.8 | 6 | 45 | 6 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 45 | 3 | 28 | 15 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
11107a1247bc1206dabcad5fc80a2fa23917c7fa | 109 | py | Python | webplane/__main__.py | joaompinto/webplane | 87c4132421ac249a4945dbc5fe43b3b904cd3286 | [
"MIT"
] | null | null | null | webplane/__main__.py | joaompinto/webplane | 87c4132421ac249a4945dbc5fe43b3b904cd3286 | [
"MIT"
] | null | null | null | webplane/__main__.py | joaompinto/webplane | 87c4132421ac249a4945dbc5fe43b3b904cd3286 | [
"MIT"
] | 1 | 2020-04-20T11:27:52.000Z | 2020-04-20T11:27:52.000Z | import appframe
from webplane.version import version
appframe.main(__name__, __file__, "webplane", version) | 21.8 | 54 | 0.816514 | 13 | 109 | 6.230769 | 0.615385 | 0.37037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.100917 | 109 | 5 | 54 | 21.8 | 0.826531 | 0 | 0 | 0 | 0 | 0 | 0.072727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1165614e9890302ddfed04edde7bf1c4a239ea28 | 17,517 | py | Python | project_automation/commands/utils.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | null | null | null | project_automation/commands/utils.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | 2 | 2021-01-17T16:04:03.000Z | 2021-08-13T13:00:49.000Z | project_automation/commands/utils.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | null | null | null | import sys
from typing import NoReturn
from project_automation.settings import SHELL_COLORS
from project_automation.utils import execute_command, execute_command2
class WindowsInstallationPackage:
"""
Windows package installer shortcut.
It allows users to install or give information to install packages/programs on the Windows operating system.
Attributes
----------
windows_download_link : str
link to download Windows installer of the given package or program
standard_command : str
command to install package/program via standard shell
winget_command : str
command to install package/program via Winget, https://docs.microsoft.com/en-us/windows/package-manager/winget/
scoop_command : str
command to install package/program via scoop, https://scoop.sh/
choco_command : str
command to install package/program via choco, https://chocolatey.org/
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
def __init__(self,
windows_download_link: str = None,
standard_command: str = None,
winget_command: str = None,
scoop_command: str = None,
choco_command: str = None,
update_package_manager: bool = True) -> NoReturn:
"""
Constructor and initializer.
Parameters
----------
windows_download_link : str
link to download Windows installer of the given package or program
standard_command : str
command to install package/program via standard shell
winget_command : str
command to install package/program via Winget, https://docs.microsoft.com/en-us/windows/package-manager/winget/
scoop_command : str
command to install package/program via scoop, https://scoop.sh/
choco_command : str
command to install package/program via choco, https://chocolatey.org/
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
self.windows_download_link = windows_download_link
self.standard_command = standard_command
self.winget_command = winget_command
self.scoop_command = scoop_command
self.choco_command = choco_command
self.update_package_manager = update_package_manager
def install(self, allow_install: bool) -> NoReturn:
"""
Install the needed package/program.
Parameters
----------
allow_install : bool
True if you want to automatically install the required package, False otherwise
If the value of this parameter is False, it displays all the possibilities to install the required package
"""
code_winget, _, _ = execute_command("winget --version")
code_scoop, _, _ = execute_command("scoop help")
code_choco, _, _ = execute_command("choco --version")
if allow_install:
if self.scoop_command is not None and code_scoop == 0:
execute_command2("scoop bucket add extras")
if self.update_package_manager:
execute_command2("scoop update")
execute_command2("scoop update *")
execute_command2(self.scoop_command)
elif self.choco_command is not None and code_choco == 0:
if self.update_package_manager:
execute_command2("choco upgrade chocolatey")
execute_command2("choco outdated")
execute_command2(self.choco_command)
elif self.winget_command is not None and code_winget == 0:
execute_command2(self.winget_command)
elif self.standard_command is not None:
execute_command2(self.standard_command)
elif self.windows_download_link is not None:
print(
f"Download the file at this link : {SHELL_COLORS['underline']}{self.windows_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
sys.exit(1)
else:
print(
f"{SHELL_COLORS['red']}You cannot install this package or it isn't referenced here ...{SHELL_COLORS['endcolor']}")
sys.exit(1)
else:
if self.standard_command is not None and self.scoop_command is None and self.winget_command is None and self.windows_download_link is None and self.choco_command is None:
print(
f"{SHELL_COLORS['red']}You are no way to install this package ...{SHELL_COLORS['endcolor']}")
else:
print("You can install from multiple ways :")
if self.windows_download_link is not None:
print(
f"\t- Download the file at this link : {SHELL_COLORS['underline']}{self.windows_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
if self.standard_command is not None:
print(
f"\t- Launch the following command : {self.standard_command}")
if self.winget_command is not None and code_winget == 0:
print(
f"\t- Launch the following command : {self.winget_command}")
if self.scoop_command is not None and code_scoop == 0:
print(
f"\t- Launch the following command : {self.scoop_command}")
if self.choco_command is not None and code_choco == 0:
print(
f"\t- Launch the following command : {self.choco_command}")
class MacOSInstallationPackage:
"""
MacOS package installer shortcut.
It allows users to install or give information to install packages/programs on the Mac operating system.
Attributes
----------
macos_download_link : str
link to download MacOS installer of the given package or program
standard_command : str
command to install package/program via standard shell
brew_command : str
command to install package/program via Homebrew, https://brew.sh/
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
def __init__(self,
macos_download_link: str = None,
standard_command: str = None,
brew_command: str = None,
update_package_manager: bool = True) -> NoReturn:
"""
Constructor and initializer.
Parameters
----------
macos_download_link : str
link to download MacOS installer of the given package or program
standard_command : str
command to install package/program via standard shell
brew_command : str
command to install package/program via Homebrew, https://brew.sh/
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
self.macos_download_link = macos_download_link
self.standard_command = standard_command
self.brew_command = brew_command
self.update_package_manager = update_package_manager
def install(self, allow_install: bool) -> NoReturn:
"""
Install the needed package/program.
Parameters
----------
allow_install : bool
True if you want to automatically install the required package, False otherwise
If the value of this parameter is False, it displays all the possibilities to install the required package
"""
code_brew, _, _ = execute_command("brew --version")
if allow_install:
if self.brew_command is not None:
if code_brew != 0:
execute_command2(
"/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)\"")
if self.update_package_manager:
execute_command2("brew update")
execute_command2("brew upgrade")
execute_command2(self.brew_command)
elif self.standard_command is not None:
execute_command2(self.standard_command)
elif self.macos_download_link is not None:
print(
f"Download the file at this link : {SHELL_COLORS['underline']}{self.macos_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
sys.exit(1)
else:
print(
f"{SHELL_COLORS['red']}You cannot install this package or it isn't referenced here ...{SHELL_COLORS['endcolor']}")
sys.exit(1)
else:
if self.macos_download_link is None and self.brew_command is None and self.standard_command is not None:
print(
f"{SHELL_COLORS['red']}You are no way to install this package ...{SHELL_COLORS['endcolor']}")
else:
print("You can install from multiple ways :")
if self.macos_download_link is not None:
print(
f"\t- Download the file at this link : {SHELL_COLORS['underline']}{self.macos_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
if self.standard_command is not None:
print(
f"\t- Launch the following command : {self.standard_command}")
if self.brew_command is not None:
if code_brew != 0:
execute_command2(
"/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)\"")
print(
f"\t- Launch the following command : {self.brew_command}")
class GNULinuxDistributionInstallationPackage:
"""
GNU/Linux package installer shortcut.
It allows users to install or give information to install packages/programs on the GNU/Linux operating system.
Attributes
----------
linux_download_link : str
link to download Linux installer of the given package or program
standard_command : str
command to install package/program via standard shell
apt_command : str
command to install package/program via APT, for Debian-based distrib
dnf_command : str
command to install package/program via DNF, for RedHat-based, CentOS-based, and Fedora-based distrib
yum_command : str
command to install package/program via YUM, for RedHat-based, CentOS-based, and Fedora-based old distrib
pacman_command : str
command to install package/program via Pacman, for ArchLinux-based distrib
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
def __init__(self,
linux_download_link: str = None,
standard_command: str = None,
apt_command: str = None,
dnf_command: str = None,
yum_command: str = None,
pacman_command: str = None,
update_package_manager: bool = True) -> NoReturn:
"""
Constructor and initializer.
Parameters
----------
linux_download_link : str
link to download Linux installer of the given package or program
standard_command : str
command to install package/program via standard shell
apt_command : str
command to install package/program via APT, for Debian-based distrib
dnf_command : str
command to install package/program via DNF, for RedHat-based, CentOS-based, and Fedora-based distrib
yum_command : str
command to install package/program via YUM, for RedHat-based, CentOS-based, and Fedora-based old distrib
pacman_command : str
command to install package/program via Pacman, for ArchLinux-based distrib
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
self.linux_download_link = linux_download_link
self.standard_command = standard_command
self.apt_command = apt_command
self.dnf_command = dnf_command
self.yum_command = yum_command
self.pacman_command = pacman_command
self.update_package_manager = update_package_manager
def install(self, allow_install: bool) -> NoReturn:
"""
Install the needed package/program.
Parameters
----------
allow_install : bool
True if you want to automatically install the required package, False otherwise
If the value of this parameter is False, it displays all the possibilities to install the required package
"""
code_aptget, _, _ = execute_command("apt-get --help")
code_dnf, _, _ = execute_command("dnf --help")
code_yum, _, _ = execute_command("yum help")
code_pacman, _, _ = execute_command("pacman -S --help")
if allow_install:
if self.apt_command is not None and code_aptget == 0:
if self.update_package_manager:
execute_command2("sudo apt-get update")
execute_command2("sudo apt-get upgrade")
execute_command2(self.apt_command)
elif self.dnf_command is not None and code_dnf == 0:
if self.update_package_manager:
execute_command2("sudo dnf upgrade")
execute_command2(self.dnf_command)
elif self.yum_command is not None and code_yum == 0:
if self.update_package_manager:
execute_command2("sudo yum update")
execute_command2("sudo yum upgrade")
execute_command2(self.yum_command)
elif self.pacman_command is not None and code_pacman == 0:
if self.update_package_manager:
execute_command2("pacman -Syu")
execute_command2(self.pacman_command)
elif self.standard_command is not None:
execute_command2(self.standard_command)
elif self.linux_download_link is not None:
print(
f"Download the file at this link : {SHELL_COLORS['underline']}{self.linux_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
sys.exit(1)
else:
print(
f"{SHELL_COLORS['red']}No command match with your Linux distribution or it isn't referenced here ...{SHELL_COLORS['endcolor']}")
sys.exit(1)
print(
f"{SHELL_COLORS['warning']}Try to search on Intenet for your distribution ;){SHELL_COLORS['endcolor']}")
sys.exit(1)
else:
if self.standard_command is not None and self.linux_download_link is None and self.apt_command is None and self.dnf_command is None and self.yum_command is None and self.pacman_command is None:
print(
f"{SHELL_COLORS['red']}You are no way to install this package or ...{SHELL_COLORS['endcolor']}")
else:
print("You can install from multiple ways :")
if self.linux_download_link is not None:
print(
f"\t- Download the file at this link : {SHELL_COLORS['underline']}{self.linux_download_link}{SHELL_COLORS['endcolor']} and put the path in your {SHELL_COLORS['bold']}PATH{SHELL_COLORS['endcolor']} environment variable")
if self.standard_command is not None:
print(
f"\t- Launch the following command : {self.standard_command}")
if self.apt_command is not None and code_aptget == 0:
print(
f"\t- Launch the following command : {self.apt_command}")
elif self.dnf_command is not None and code_dnf == 0:
print(
f"\t- Launch the following command : {self.dnf_command}")
elif self.yum_command is not None and code_yum == 0:
print(
f"\t- Launch the following command : {self.yum_command}")
elif self.pacman_command is not None and code_pacman == 0:
print(
f"\t- Launch the following command : {self.pacman_command}")
| 51.369501 | 245 | 0.618199 | 2,060 | 17,517 | 5.085922 | 0.081068 | 0.040947 | 0.02663 | 0.038179 | 0.853775 | 0.837263 | 0.820655 | 0.812828 | 0.773218 | 0.73418 | 0 | 0.004128 | 0.308557 | 17,517 | 340 | 246 | 51.520588 | 0.860882 | 0.296398 | 0 | 0.544041 | 0 | 0.046632 | 0.264724 | 0.110504 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031088 | false | 0 | 0.020725 | 0 | 0.067358 | 0.139896 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
fec1732d41fe7ee5aa2278ade22770c88bc3b627 | 179 | py | Python | devel/apps/ik/utils/compressor.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | devel/apps/ik/utils/compressor.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | devel/apps/ik/utils/compressor.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.utils.encoding import force_text
def ik_cachekey(key):
return 'ik_compressor.{}.{}'.format(settings.APP_VERSION, force_text(key)) | 35.8 | 78 | 0.787709 | 26 | 179 | 5.230769 | 0.692308 | 0.147059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094972 | 179 | 5 | 78 | 35.8 | 0.839506 | 0 | 0 | 0 | 0 | 0 | 0.105556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
28a65f14057afd2bf756df084ded171a7d945b24 | 1,387 | py | Python | crypto/Fermat/solve.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | 5 | 2019-10-04T07:20:37.000Z | 2021-06-15T21:34:07.000Z | crypto/Fermat/solve.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | null | null | null | crypto/Fermat/solve.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | null | null | null | from pwn import *
from gmpy2 import lcm,iroot,invert
def fermat(n, verbose=True):
a = iroot(n,2)[0]+1
b = a*a - n
while not iroot(b,2)[1]:
a = a + 1
b = a*a - n
b = iroot(b,2)[0]
p = a + b
q = a - b
return p, q
n = 0x6216bfea994a31f3a352e8f162b1b6896025ba91188a458daa0aa758d4ecb595089aaa379a8b3c2c1bde708e6bbb0fa99dce996b8d9f259e319c881e41bc8635d348c6004325dae4d3a6bfe78e62499f819cd9bd74686943c7cbe9b68372bb43dc375341bae69120ee763cb282ddf0f117a150aa3c862bdad372401220caa3a1fb1dd6c369d4d5dbd78f15f40e0bbebb6f3fa123a5756d10fb62e49f7c73aa171b007a281de6910dfc67aae5a691c3329a5c64700b0b54ceaeaa95639c6030925f190f587f53ee9d718e0e7dfa2b059b1a6a701620b058498cd2c2ebeac76153150b8886fcfc99d35d10139f9d364c7393c70181569e2269d5ff214d5e6a253
c = 0x725f17256760e05a02359447947aeb7d83fa7350408dc1abb1ba03ff9e1c15167171c6f493620005ac5e2c641912e5c09cbeea9fed542bf7dbd8016bf3ce5758bf83f1d941b8926992816912e47a62344c4d0ca068a071933c98300a073930be59f051cda8bc8fc69e61a490090a95c18380877c73b230b4a65ecb676bcfa0a7b2d8ce61a6857b822bea9304686e6393f03040489926e940e1f098fdd9ceb74d54e972625141bc7fb322c68bb00f27c030286c1d1e71e861f70ff08c17d5461261ee44614c6fb94e5490d86669f6b9af6fd6a141cb0c39ac692e7d51f3ab28bd71b3b5502c5be6ebbec6622065ce60be64247e7cd2298c746716fc686981f7
p, q = fermat(n)
d = invert(65537,lcm(p-1,q-1))
m = pow(c,d,n)
print hex(m)[2:].decode('hex')
| 63.045455 | 517 | 0.868061 | 78 | 1,387 | 15.435897 | 0.410256 | 0.004983 | 0.004983 | 0.006645 | 0.008306 | 0 | 0 | 0 | 0 | 0 | 0 | 0.529827 | 0.081471 | 1,387 | 21 | 518 | 66.047619 | 0.415228 | 0 | 0 | 0.111111 | 0 | 0 | 0.002163 | 0 | 0 | 1 | 0.739005 | 0 | 0 | 0 | null | null | 0 | 0.111111 | null | null | 0.055556 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
28bb4287d7247950608efa0619ae7ba536b107e1 | 321 | py | Python | pykotor/resource/formats/gff/__init__.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-02-21T15:17:28.000Z | 2022-02-21T15:17:28.000Z | pykotor/resource/formats/gff/__init__.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-03-12T16:06:23.000Z | 2022-03-12T16:06:23.000Z | pykotor/resource/formats/gff/__init__.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | null | null | null | from pykotor.resource.formats.gff.data import GFF, GFFList, GFFStruct, GFFFieldType, GFFContent
from pykotor.resource.formats.gff.io_binary import GFFBinaryReader, GFFBinaryWriter
from pykotor.resource.formats.gff.io_xml import GFFXMLReader, GFFXMLWriter
from pykotor.resource.formats.gff.auto import write_gff, load_gff
| 64.2 | 95 | 0.856698 | 43 | 321 | 6.302326 | 0.488372 | 0.162362 | 0.280443 | 0.383764 | 0.442804 | 0.228782 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071651 | 321 | 4 | 96 | 80.25 | 0.909396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e90efc63f66bcf28c22b43a597645366f740e53f | 6,085 | py | Python | datasets/tods_datasets.py | 1326899446/tods | 2bf27fab2d8bab80ec222beb8f615800d77a01a4 | [
"Apache-2.0"
] | 544 | 2020-09-21T06:02:33.000Z | 2022-03-27T07:16:32.000Z | datasets/tods_datasets.py | 1326899446/tods | 2bf27fab2d8bab80ec222beb8f615800d77a01a4 | [
"Apache-2.0"
] | 35 | 2020-09-21T06:33:13.000Z | 2022-03-11T14:20:21.000Z | datasets/tods_datasets.py | 1326899446/tods | 2bf27fab2d8bab80ec222beb8f615800d77a01a4 | [
"Apache-2.0"
] | 86 | 2020-09-21T16:44:33.000Z | 2022-03-11T18:20:22.000Z | import os
import pandas as pd
from tods_dataset_base import TODS_dataset
from shutil import copyfile
class kpi_dataset(TODS_dataset):
resources = [
# ("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
# ("https://github.com/datamllab/tods/blob/master/datasets/anomaly/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv", None),
# ("https://github.com/NetManAIOps/KPI-Anomaly-Detection/blob/master/Preliminary_dataset/train.csv", None),
("https://hegsns.github.io/tods_datasets/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv", None), # it needs md5 to check if local learningData.csv is the same with online.
("https://hegsns.github.io/tods_datasets/kpi/TRAIN/dataset_TRAIN/datasetDoc.json", None),
# needs a server to store the dataset.
# ("https://raw.githubusercontent.com/datamllab/tods/master/datasets/anomaly/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv", None), # it needs md5 to check if local learningData.csv is the same with online.
]
training_file = 'learningData.csv'
testing_file = 'testingData.csv'
ground_truth_index = 3
_repr_indent = 4
# def __init__(self, root, train, transform=None, target_transform=None, download=True):
# super().__init__(root, train, transform=None, target_transform=None, download=True)
def process(self) -> None:
print('Processing...')
os.makedirs(self.processed_folder, exist_ok=True)
os.makedirs(os.path.join(self.processed_folder, 'tables'), exist_ok=True)
training_set_fname = os.path.join(self.raw_folder, 'learningData.csv')
self.training_set_dataframe = pd.read_csv(training_set_fname)
testing_set_fname = os.path.join(self.raw_folder, 'learningData.csv') # temperarily same with training set
self.testing_set_dataframe = pd.read_csv(testing_set_fname)
self.process_dataframe()
self.training_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.training_file))
self.testing_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.testing_file))
copyfile(os.path.join(self.raw_folder, 'datasetDoc.json'), os.path.join(self.processed_folder, 'datasetDoc.json'))
print('Done!')
class yahoo_dataset(TODS_dataset):
resources = [
# ("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
# ("https://github.com/datamllab/tods/blob/master/datasets/anomaly/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv", None),
# ("https://github.com/NetManAIOps/KPI-Anomaly-Detection/blob/master/Preliminary_dataset/train.csv", None),
("https://hegsns.github.io/tods_datasets/yahoo_sub_5/TRAIN/dataset_TRAIN/tables/learningData.csv", None), # it needs md5 to check if local learningData.csv is the same with online.
("https://hegsns.github.io/tods_datasets/yahoo_sub_5/TRAIN/dataset_TRAIN/datasetDoc.json", None),
# needs a server to store the dataset.
# ("https://raw.githubusercontent.com/datamllab/tods/master/datasets/anomaly/kpi/TRAIN/dataset_TRAIN/tables/learningData.csv", None), # it needs md5 to check if local learningData.csv is the same with online.
]
training_file = 'learningData.csv'
testing_file = 'testingData.csv'
ground_truth_index = 7
_repr_indent = 4
def process(self) -> None:
print('Processing...')
os.makedirs(self.processed_folder, exist_ok=True)
os.makedirs(os.path.join(self.processed_folder, 'tables'), exist_ok=True)
training_set_fname = os.path.join(self.raw_folder, 'learningData.csv')
self.training_set_dataframe = pd.read_csv(training_set_fname)
testing_set_fname = os.path.join(self.raw_folder, 'learningData.csv') # temperarily same with training set
self.testing_set_dataframe = pd.read_csv(testing_set_fname)
self.process_dataframe()
self.training_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.training_file))
self.testing_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.testing_file))
copyfile(os.path.join(self.raw_folder, 'datasetDoc.json'), os.path.join(self.processed_folder, 'datasetDoc.json'))
print('Done!')
class NAB_dataset(TODS_dataset):
resources = [
("https://hegsns.github.io/tods_datasets/NAB/realTweets/labeled_Twitter_volume_AMZN.csv", None),
# it needs md5 to check if local learningData.csv is the same with online.
("https://hegsns.github.io/tods_datasets/NAB/realTweets/labeled_Twitter_volume_AMZN.json", None),
# needs a server to store the dataset.
]
training_file = 'learningData.csv'
testing_file = 'testingData.csv'
ground_truth_index = 2
_repr_indent = 4
def process(self) -> None:
print('Processing...')
os.makedirs(self.processed_folder, exist_ok=True)
os.makedirs(os.path.join(self.processed_folder, 'tables'), exist_ok=True)
training_set_fname = os.path.join(self.raw_folder, 'labeled_Twitter_volume_AMZN.csv')
self.training_set_dataframe = pd.read_csv(training_set_fname)
testing_set_fname = os.path.join(self.raw_folder, 'labeled_Twitter_volume_AMZN.csv') # temperarily same with training set
self.testing_set_dataframe = pd.read_csv(testing_set_fname)
self.process_dataframe()
self.training_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.training_file))
self.testing_set_dataframe.to_csv(os.path.join(self.processed_folder, 'tables', self.testing_file))
copyfile(os.path.join(self.raw_folder, 'labeled_Twitter_volume_AMZN.json'),
os.path.join(self.processed_folder, 'datasetDoc.json'))
print('Done!')
# kpi_dataset(root='./datasets', train=True, transform='binarize')
# yahoo_dataset(root='./datasets', train=True, transform='binarize')
# NAB_dataset(root='./datasets', train=True, transform='binarize')
| 52.008547 | 216 | 0.721446 | 814 | 6,085 | 5.179361 | 0.133907 | 0.029886 | 0.04981 | 0.069734 | 0.959203 | 0.959203 | 0.959203 | 0.927182 | 0.927182 | 0.893264 | 0 | 0.007947 | 0.152177 | 6,085 | 116 | 217 | 52.456897 | 0.809265 | 0.308299 | 0 | 0.676056 | 0 | 0.028169 | 0.226816 | 0.022467 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.056338 | 0 | 0.352113 | 0.084507 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3a604e6df47b1fb935e39664cfc80c3ca61f5a30 | 130 | py | Python | app/helpers.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | null | null | null | app/helpers.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | 206 | 2020-05-23T18:44:20.000Z | 2022-03-31T19:11:25.000Z | app/helpers.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | null | null | null | from flask import flash as flask_flash
def flash(message: str, category: str = 'info'): flask_flash(message, category=category)
| 26 | 88 | 0.769231 | 19 | 130 | 5.157895 | 0.526316 | 0.204082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.130769 | 130 | 4 | 89 | 32.5 | 0.867257 | 0 | 0 | 0 | 0 | 0 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0.5 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3aa561e3fd33810511e8c86e5a2fe06a7acd7fda | 37,832 | py | Python | instances/passenger_demand/pas-20210421-2109-int8e-1/67.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int8e-1/67.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int8e-1/67.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 1910
passenger_arriving = (
(2, 5, 3, 4, 1, 0, 8, 3, 2, 5, 0, 0), # 0
(2, 3, 9, 2, 2, 0, 3, 7, 5, 0, 2, 0), # 1
(1, 3, 3, 3, 0, 0, 4, 3, 4, 0, 0, 0), # 2
(4, 5, 2, 3, 0, 0, 4, 3, 6, 1, 2, 0), # 3
(2, 3, 3, 6, 1, 0, 4, 3, 3, 3, 2, 0), # 4
(1, 8, 1, 4, 1, 0, 2, 4, 6, 3, 2, 0), # 5
(1, 4, 7, 2, 5, 0, 5, 6, 6, 3, 3, 0), # 6
(1, 4, 7, 2, 0, 0, 6, 3, 2, 1, 2, 0), # 7
(1, 4, 4, 2, 1, 0, 2, 6, 1, 1, 1, 0), # 8
(1, 5, 4, 1, 1, 0, 4, 3, 5, 3, 1, 0), # 9
(2, 5, 4, 2, 1, 0, 1, 1, 1, 2, 2, 0), # 10
(2, 2, 4, 2, 4, 0, 3, 7, 1, 3, 2, 0), # 11
(2, 7, 4, 2, 2, 0, 1, 11, 8, 4, 3, 0), # 12
(1, 4, 5, 3, 0, 0, 4, 2, 2, 5, 0, 0), # 13
(1, 2, 5, 3, 2, 0, 2, 5, 5, 2, 3, 0), # 14
(4, 4, 4, 3, 0, 0, 6, 8, 4, 5, 0, 0), # 15
(4, 7, 8, 1, 1, 0, 6, 3, 4, 2, 1, 0), # 16
(2, 3, 4, 2, 3, 0, 3, 6, 3, 4, 1, 0), # 17
(4, 5, 8, 3, 2, 0, 3, 2, 2, 2, 3, 0), # 18
(8, 7, 5, 1, 2, 0, 3, 6, 3, 2, 3, 0), # 19
(2, 4, 11, 0, 1, 0, 5, 6, 2, 4, 0, 0), # 20
(1, 5, 4, 1, 1, 0, 5, 8, 4, 3, 0, 0), # 21
(1, 6, 5, 0, 1, 0, 6, 3, 2, 3, 1, 0), # 22
(3, 9, 3, 4, 0, 0, 7, 8, 4, 5, 1, 0), # 23
(2, 5, 6, 4, 0, 0, 8, 3, 5, 3, 1, 0), # 24
(3, 8, 4, 1, 1, 0, 3, 5, 1, 1, 1, 0), # 25
(1, 5, 4, 5, 0, 0, 9, 4, 4, 1, 0, 0), # 26
(4, 8, 4, 3, 5, 0, 2, 1, 0, 2, 2, 0), # 27
(5, 8, 2, 0, 1, 0, 3, 1, 0, 6, 1, 0), # 28
(0, 3, 1, 1, 3, 0, 5, 6, 0, 3, 4, 0), # 29
(5, 7, 7, 2, 0, 0, 4, 6, 2, 5, 2, 0), # 30
(4, 4, 8, 1, 1, 0, 0, 3, 5, 5, 5, 0), # 31
(4, 3, 7, 2, 0, 0, 3, 5, 7, 3, 0, 0), # 32
(3, 5, 1, 5, 1, 0, 3, 7, 3, 5, 3, 0), # 33
(1, 7, 9, 3, 0, 0, 2, 6, 5, 4, 2, 0), # 34
(1, 4, 4, 3, 0, 0, 3, 7, 4, 1, 4, 0), # 35
(2, 3, 2, 1, 0, 0, 3, 4, 0, 1, 1, 0), # 36
(0, 2, 7, 3, 1, 0, 2, 9, 2, 1, 0, 0), # 37
(3, 7, 6, 0, 1, 0, 4, 5, 3, 3, 1, 0), # 38
(6, 7, 4, 1, 1, 0, 1, 7, 5, 2, 1, 0), # 39
(3, 3, 10, 1, 0, 0, 2, 5, 0, 2, 4, 0), # 40
(3, 2, 6, 3, 2, 0, 7, 11, 3, 2, 2, 0), # 41
(3, 3, 4, 2, 3, 0, 3, 6, 3, 2, 1, 0), # 42
(0, 5, 3, 4, 2, 0, 2, 2, 1, 0, 1, 0), # 43
(8, 4, 2, 2, 1, 0, 2, 7, 3, 7, 1, 0), # 44
(2, 4, 2, 1, 0, 0, 3, 6, 5, 6, 0, 0), # 45
(2, 3, 5, 1, 1, 0, 3, 5, 3, 3, 1, 0), # 46
(1, 3, 7, 0, 2, 0, 5, 9, 5, 1, 2, 0), # 47
(2, 5, 5, 2, 2, 0, 0, 3, 1, 3, 2, 0), # 48
(2, 12, 6, 4, 1, 0, 3, 7, 12, 3, 1, 0), # 49
(2, 3, 5, 1, 0, 0, 2, 3, 2, 3, 0, 0), # 50
(1, 9, 2, 1, 1, 0, 3, 4, 0, 1, 4, 0), # 51
(4, 5, 7, 3, 1, 0, 4, 5, 3, 2, 2, 0), # 52
(3, 6, 5, 5, 2, 0, 3, 4, 2, 3, 0, 0), # 53
(6, 9, 3, 2, 0, 0, 2, 4, 5, 3, 2, 0), # 54
(2, 9, 7, 6, 2, 0, 1, 5, 3, 1, 1, 0), # 55
(4, 8, 4, 4, 1, 0, 1, 6, 3, 5, 0, 0), # 56
(3, 8, 6, 3, 2, 0, 3, 4, 4, 5, 0, 0), # 57
(5, 5, 4, 3, 3, 0, 5, 7, 5, 1, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(2.1197212467076385, 5.437168560606061, 6.39538881748072, 5.069021739130434, 5.714423076923077, 3.805434782608696), # 0
(2.13961760803824, 5.497633278970259, 6.429932430019996, 5.097251509661836, 5.757253205128205, 3.8041377113526575), # 1
(2.159286781777387, 5.5572011223344555, 6.4636560982576405, 5.124859903381643, 5.7991794871794875, 3.802800966183575), # 2
(2.178712071992976, 5.615807812500001, 6.496535186375322, 5.151823369565217, 5.840163461538463, 3.80142472826087), # 3
(2.1978767827529024, 5.673389071268239, 6.528545058554698, 5.178118357487923, 5.880166666666668, 3.800009178743961), # 4
(2.216764218125061, 5.729880620440517, 6.559661078977435, 5.203721316425121, 5.919150641025641, 3.7985544987922704), # 5
(2.235357682177349, 5.785218181818182, 6.5898586118251945, 5.2286086956521745, 5.957076923076923, 3.7970608695652177), # 6
(2.253640478977661, 5.839337477202581, 6.6191130212796345, 5.252756944444445, 5.9939070512820525, 3.7955284722222227), # 7
(2.2715959125938934, 5.892174228395061, 6.6473996715224235, 5.2761425120772945, 6.029602564102564, 3.7939574879227056), # 8
(2.289207287093942, 5.94366415719697, 6.67469392673522, 5.298741847826087, 6.064125, 3.7923480978260873), # 9
(2.306457906545703, 5.993742985409653, 6.700971151099686, 5.320531400966185, 6.097435897435898, 3.790700483091787), # 10
(2.3233310750170717, 6.042346434834457, 6.726206708797486, 5.341487620772948, 6.129496794871795, 3.7890148248792275), # 11
(2.339810096575944, 6.089410227272727, 6.750375964010283, 5.361586956521739, 6.160269230769231, 3.787291304347826), # 12
(2.355878275290215, 6.134870084525815, 6.7734542809197364, 5.380805857487923, 6.189714743589745, 3.7855301026570047), # 13
(2.371518915227783, 6.178661728395063, 6.795417023707511, 5.399120772946859, 6.2177948717948714, 3.7837314009661833), # 14
(2.3867153204565406, 6.220720880681817, 6.816239556555269, 5.416508152173913, 6.244471153846154, 3.781895380434783), # 15
(2.401450795044386, 6.2609832631874305, 6.835897243644673, 5.432944444444445, 6.269705128205128, 3.7800222222222226), # 16
(2.415708643059214, 6.299384597713243, 6.854365449157384, 5.448406099033817, 6.293458333333334, 3.778112107487923), # 17
(2.4294721685689202, 6.335860606060606, 6.871619537275065, 5.462869565217392, 6.315692307692309, 3.7761652173913043), # 18
(2.4427246756414007, 6.370347010030864, 6.887634872179378, 5.476311292270531, 6.33636858974359, 3.7741817330917877), # 19
(2.455449468344552, 6.402779531425364, 6.902386818051984, 5.4887077294686, 6.355448717948718, 3.772161835748792), # 20
(2.467629850746269, 6.433093892045453, 6.915850739074552, 5.500035326086957, 6.37289423076923, 3.7701057065217394), # 21
(2.479249126914447, 6.461225813692481, 6.9280019994287345, 5.510270531400966, 6.388666666666666, 3.7680135265700487), # 22
(2.4902906009169836, 6.48711101816779, 6.938815963296202, 5.519389794685991, 6.402727564102564, 3.765885477053141), # 23
(2.5007375768217734, 6.510685227272727, 6.948267994858611, 5.527369565217391, 6.415038461538462, 3.763721739130435), # 24
(2.5105733586967123, 6.531884162808642, 6.95633345829763, 5.534186292270532, 6.425560897435897, 3.761522493961353), # 25
(2.5197812506096966, 6.550643546576879, 6.962987717794916, 5.539816425120772, 6.43425641025641, 3.759287922705314), # 26
(2.5283445566286216, 6.566899100378787, 6.968206137532133, 5.544236413043479, 6.44108653846154, 3.7570182065217397), # 27
(2.5362465808213837, 6.580586546015713, 6.971964081690946, 5.54742270531401, 6.44601282051282, 3.7547135265700486), # 28
(2.5434706272558776, 6.591641605289002, 6.974236914453013, 5.54935175120773, 6.448996794871795, 3.752374064009662), # 29
(2.5500000000000003, 6.6000000000000005, 6.9750000000000005, 5.550000000000001, 6.45, 3.75), # 30
(2.5561096227621487, 6.606943039772727, 6.974427958937198, 5.549882924836602, 6.449634929078015, 3.7467010078294187), # 31
(2.562087340153453, 6.613794318181819, 6.972728019323672, 5.549533986928104, 6.448547517730496, 3.7416198067632855), # 32
(2.5679358375959076, 6.620552982954546, 6.96992445652174, 5.548956617647059, 6.446749468085106, 3.734806146926536), # 33
(2.573657800511509, 6.627218181818183, 6.96604154589372, 5.548154248366014, 6.444252482269504, 3.7263097784441115), # 34
(2.5792559143222507, 6.633789062499999, 6.961103562801933, 5.547130310457517, 6.441068262411348, 3.7161804514409464), # 35
(2.584732864450128, 6.640264772727274, 6.955134782608695, 5.545888235294118, 6.437208510638299, 3.7044679160419793), # 36
(2.5900913363171356, 6.646644460227273, 6.9481594806763285, 5.544431454248366, 6.432684929078014, 3.691221922372147), # 37
(2.5953340153452684, 6.652927272727273, 6.94020193236715, 5.54276339869281, 6.427509219858156, 3.676492220556388), # 38
(2.600463586956522, 6.6591123579545455, 6.931286413043478, 5.5408875, 6.421693085106383, 3.66032856071964), # 39
(2.60548273657289, 6.665198863636364, 6.9214371980676335, 5.538807189542484, 6.415248226950354, 3.6427806929868396), # 40
(2.6103941496163685, 6.671185937499999, 6.910678562801933, 5.536525898692811, 6.408186347517731, 3.623898367482926), # 41
(2.6152005115089514, 6.677072727272729, 6.899034782608696, 5.534047058823529, 6.400519148936171, 3.6037313343328337), # 42
(2.6199045076726346, 6.682858380681818, 6.8865301328502415, 5.53137410130719, 6.392258333333333, 3.5823293436615025), # 43
(2.624508823529412, 6.688542045454546, 6.8731888888888895, 5.52851045751634, 6.38341560283688, 3.5597421455938694), # 44
(2.6290161445012785, 6.694122869318182, 6.859035326086958, 5.525459558823529, 6.374002659574469, 3.5360194902548727), # 45
(2.6334291560102305, 6.699600000000001, 6.844093719806764, 5.522224836601307, 6.36403120567376, 3.511211127769449), # 46
(2.637750543478261, 6.7049725852272735, 6.828388345410628, 5.5188097222222225, 6.3535129432624124, 3.4853668082625355), # 47
(2.641982992327366, 6.710239772727274, 6.811943478260869, 5.515217647058823, 6.342459574468085, 3.4585362818590712), # 48
(2.6461291879795397, 6.7154007102272715, 6.794783393719808, 5.511452042483661, 6.33088280141844, 3.430769298683991), # 49
(2.6501918158567777, 6.720454545454544, 6.776932367149759, 5.507516339869282, 6.318794326241135, 3.4021156088622355), # 50
(2.6541735613810746, 6.725400426136364, 6.758414673913044, 5.503413970588236, 6.3062058510638295, 3.3726249625187408), # 51
(2.6580771099744247, 6.7302375, 6.73925458937198, 5.499148366013072, 6.293129078014185, 3.3423471097784443), # 52
(2.6619051470588238, 6.734964914772728, 6.719476388888889, 5.49472295751634, 6.279575709219859, 3.3113318007662835), # 53
(2.6656603580562663, 6.739581818181818, 6.699104347826086, 5.490141176470589, 6.265557446808511, 3.2796287856071964), # 54
(2.6693454283887466, 6.7440873579545455, 6.6781627415458935, 5.485406454248366, 6.251085992907802, 3.2472878144261204), # 55
(2.6729630434782607, 6.748480681818181, 6.6566758454106285, 5.4805222222222225, 6.236173049645391, 3.214358637347993), # 56
(2.6765158887468035, 6.7527609375000015, 6.634667934782609, 5.475491911764706, 6.220830319148936, 3.180891004497751), # 57
(2.6800066496163684, 6.756927272727272, 6.612163285024154, 5.470318954248366, 6.205069503546099, 3.1469346660003334), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
# Cumulative passenger-arrival counts per timestep (rows 0-59). Each column is
# monotone nondecreasing down the table, and row 59 duplicates row 58 (no new
# arrivals in the final step). Columns presumably follow the same 12-slot stop
# layout as the companion rate tables below -- TODO confirm against consumer.
passenger_arriving_acc = (
(2, 5, 3, 4, 1, 0, 8, 3, 2, 5, 0, 0), # 0
(4, 8, 12, 6, 3, 0, 11, 10, 7, 5, 2, 0), # 1
(5, 11, 15, 9, 3, 0, 15, 13, 11, 5, 2, 0), # 2
(9, 16, 17, 12, 3, 0, 19, 16, 17, 6, 4, 0), # 3
(11, 19, 20, 18, 4, 0, 23, 19, 20, 9, 6, 0), # 4
(12, 27, 21, 22, 5, 0, 25, 23, 26, 12, 8, 0), # 5
(13, 31, 28, 24, 10, 0, 30, 29, 32, 15, 11, 0), # 6
(14, 35, 35, 26, 10, 0, 36, 32, 34, 16, 13, 0), # 7
(15, 39, 39, 28, 11, 0, 38, 38, 35, 17, 14, 0), # 8
(16, 44, 43, 29, 12, 0, 42, 41, 40, 20, 15, 0), # 9
(18, 49, 47, 31, 13, 0, 43, 42, 41, 22, 17, 0), # 10
(20, 51, 51, 33, 17, 0, 46, 49, 42, 25, 19, 0), # 11
(22, 58, 55, 35, 19, 0, 47, 60, 50, 29, 22, 0), # 12
(23, 62, 60, 38, 19, 0, 51, 62, 52, 34, 22, 0), # 13
(24, 64, 65, 41, 21, 0, 53, 67, 57, 36, 25, 0), # 14
(28, 68, 69, 44, 21, 0, 59, 75, 61, 41, 25, 0), # 15
(32, 75, 77, 45, 22, 0, 65, 78, 65, 43, 26, 0), # 16
(34, 78, 81, 47, 25, 0, 68, 84, 68, 47, 27, 0), # 17
(38, 83, 89, 50, 27, 0, 71, 86, 70, 49, 30, 0), # 18
(46, 90, 94, 51, 29, 0, 74, 92, 73, 51, 33, 0), # 19
(48, 94, 105, 51, 30, 0, 79, 98, 75, 55, 33, 0), # 20
(49, 99, 109, 52, 31, 0, 84, 106, 79, 58, 33, 0), # 21
(50, 105, 114, 52, 32, 0, 90, 109, 81, 61, 34, 0), # 22
(53, 114, 117, 56, 32, 0, 97, 117, 85, 66, 35, 0), # 23
(55, 119, 123, 60, 32, 0, 105, 120, 90, 69, 36, 0), # 24
(58, 127, 127, 61, 33, 0, 108, 125, 91, 70, 37, 0), # 25
(59, 132, 131, 66, 33, 0, 117, 129, 95, 71, 37, 0), # 26
(63, 140, 135, 69, 38, 0, 119, 130, 95, 73, 39, 0), # 27
(68, 148, 137, 69, 39, 0, 122, 131, 95, 79, 40, 0), # 28
(68, 151, 138, 70, 42, 0, 127, 137, 95, 82, 44, 0), # 29
(73, 158, 145, 72, 42, 0, 131, 143, 97, 87, 46, 0), # 30
(77, 162, 153, 73, 43, 0, 131, 146, 102, 92, 51, 0), # 31
(81, 165, 160, 75, 43, 0, 134, 151, 109, 95, 51, 0), # 32
(84, 170, 161, 80, 44, 0, 137, 158, 112, 100, 54, 0), # 33
(85, 177, 170, 83, 44, 0, 139, 164, 117, 104, 56, 0), # 34
(86, 181, 174, 86, 44, 0, 142, 171, 121, 105, 60, 0), # 35
(88, 184, 176, 87, 44, 0, 145, 175, 121, 106, 61, 0), # 36
(88, 186, 183, 90, 45, 0, 147, 184, 123, 107, 61, 0), # 37
(91, 193, 189, 90, 46, 0, 151, 189, 126, 110, 62, 0), # 38
(97, 200, 193, 91, 47, 0, 152, 196, 131, 112, 63, 0), # 39
(100, 203, 203, 92, 47, 0, 154, 201, 131, 114, 67, 0), # 40
(103, 205, 209, 95, 49, 0, 161, 212, 134, 116, 69, 0), # 41
(106, 208, 213, 97, 52, 0, 164, 218, 137, 118, 70, 0), # 42
(106, 213, 216, 101, 54, 0, 166, 220, 138, 118, 71, 0), # 43
(114, 217, 218, 103, 55, 0, 168, 227, 141, 125, 72, 0), # 44
(116, 221, 220, 104, 55, 0, 171, 233, 146, 131, 72, 0), # 45
(118, 224, 225, 105, 56, 0, 174, 238, 149, 134, 73, 0), # 46
(119, 227, 232, 105, 58, 0, 179, 247, 154, 135, 75, 0), # 47
(121, 232, 237, 107, 60, 0, 179, 250, 155, 138, 77, 0), # 48
(123, 244, 243, 111, 61, 0, 182, 257, 167, 141, 78, 0), # 49
(125, 247, 248, 112, 61, 0, 184, 260, 169, 144, 78, 0), # 50
(126, 256, 250, 113, 62, 0, 187, 264, 169, 145, 82, 0), # 51
(130, 261, 257, 116, 63, 0, 191, 269, 172, 147, 84, 0), # 52
(133, 267, 262, 121, 65, 0, 194, 273, 174, 150, 84, 0), # 53
(139, 276, 265, 123, 65, 0, 196, 277, 179, 153, 86, 0), # 54
(141, 285, 272, 129, 67, 0, 197, 282, 182, 154, 87, 0), # 55
(145, 293, 276, 133, 68, 0, 198, 288, 185, 159, 87, 0), # 56
(148, 301, 282, 136, 70, 0, 201, 292, 189, 164, 87, 0), # 57
(153, 306, 286, 139, 73, 0, 206, 299, 194, 165, 89, 0), # 58
(153, 306, 286, 139, 73, 0, 206, 299, 194, 165, 89, 0), # 59
)
passenger_arriving_rate = (
(2.1197212467076385, 4.349734848484848, 3.837233290488432, 2.0276086956521735, 1.1428846153846153, 0.0, 3.805434782608696, 4.571538461538461, 3.0414130434782605, 2.5581555269922878, 1.087433712121212, 0.0), # 0
(2.13961760803824, 4.398106623176207, 3.8579594580119974, 2.038900603864734, 1.1514506410256409, 0.0, 3.8041377113526575, 4.6058025641025635, 3.0583509057971017, 2.5719729720079982, 1.0995266557940517, 0.0), # 1
(2.159286781777387, 4.445760897867564, 3.8781936589545842, 2.049943961352657, 1.1598358974358973, 0.0, 3.802800966183575, 4.639343589743589, 3.0749159420289858, 2.585462439303056, 1.111440224466891, 0.0), # 2
(2.178712071992976, 4.49264625, 3.897921111825193, 2.0607293478260864, 1.1680326923076925, 0.0, 3.80142472826087, 4.67213076923077, 3.09109402173913, 2.5986140745501287, 1.1231615625, 0.0), # 3
(2.1978767827529024, 4.53871125701459, 3.9171270351328187, 2.071247342995169, 1.1760333333333335, 0.0, 3.800009178743961, 4.704133333333334, 3.106871014492754, 2.611418023421879, 1.1346778142536476, 0.0), # 4
(2.216764218125061, 4.583904496352414, 3.9357966473864607, 2.0814885265700482, 1.183830128205128, 0.0, 3.7985544987922704, 4.735320512820512, 3.1222327898550724, 2.623864431590974, 1.1459761240881035, 0.0), # 5
(2.235357682177349, 4.628174545454545, 3.9539151670951167, 2.0914434782608695, 1.1914153846153845, 0.0, 3.7970608695652177, 4.765661538461538, 3.1371652173913045, 2.635943444730078, 1.1570436363636363, 0.0), # 6
(2.253640478977661, 4.671469981762065, 3.9714678127677807, 2.1011027777777778, 1.1987814102564105, 0.0, 3.7955284722222227, 4.795125641025642, 3.151654166666667, 2.647645208511854, 1.1678674954405162, 0.0), # 7
(2.2715959125938934, 4.7137393827160485, 3.988439802913454, 2.1104570048309177, 1.2059205128205128, 0.0, 3.7939574879227056, 4.823682051282051, 3.1656855072463768, 2.658959868608969, 1.1784348456790121, 0.0), # 8
(2.289207287093942, 4.754931325757576, 4.004816356041132, 2.119496739130435, 1.2128249999999998, 0.0, 3.7923480978260873, 4.851299999999999, 3.1792451086956524, 2.6698775706940876, 1.188732831439394, 0.0), # 9
(2.306457906545703, 4.794994388327722, 4.020582690659811, 2.1282125603864737, 1.2194871794871796, 0.0, 3.790700483091787, 4.877948717948718, 3.1923188405797105, 2.680388460439874, 1.1987485970819305, 0.0), # 10
(2.3233310750170717, 4.833877147867565, 4.035724025278491, 2.1365950483091787, 1.2258993589743588, 0.0, 3.7890148248792275, 4.903597435897435, 3.2048925724637685, 2.690482683518994, 1.2084692869668912, 0.0), # 11
(2.339810096575944, 4.8715281818181815, 4.050225578406169, 2.1446347826086956, 1.2320538461538462, 0.0, 3.787291304347826, 4.928215384615385, 3.2169521739130436, 2.700150385604113, 1.2178820454545454, 0.0), # 12
(2.355878275290215, 4.907896067620651, 4.0640725685518415, 2.152322342995169, 1.237942948717949, 0.0, 3.7855301026570047, 4.951771794871796, 3.2284835144927535, 2.7093817123678945, 1.2269740169051628, 0.0), # 13
(2.371518915227783, 4.94292938271605, 4.077250214224507, 2.1596483091787437, 1.2435589743589741, 0.0, 3.7837314009661833, 4.9742358974358964, 3.2394724637681156, 2.7181668094830043, 1.2357323456790126, 0.0), # 14
(2.3867153204565406, 4.976576704545454, 4.089743733933161, 2.166603260869565, 1.2488942307692308, 0.0, 3.781895380434783, 4.995576923076923, 3.2499048913043476, 2.726495822622107, 1.2441441761363634, 0.0), # 15
(2.401450795044386, 5.008786610549944, 4.101538346186804, 2.1731777777777777, 1.2539410256410255, 0.0, 3.7800222222222226, 5.015764102564102, 3.2597666666666667, 2.734358897457869, 1.252196652637486, 0.0), # 16
(2.415708643059214, 5.039507678170594, 4.11261926949443, 2.1793624396135267, 1.2586916666666665, 0.0, 3.778112107487923, 5.034766666666666, 3.2690436594202903, 2.7417461796629534, 1.2598769195426485, 0.0), # 17
(2.4294721685689202, 5.068688484848485, 4.122971722365039, 2.185147826086957, 1.2631384615384618, 0.0, 3.7761652173913043, 5.052553846153847, 3.277721739130435, 2.7486478149100257, 1.2671721212121212, 0.0), # 18
(2.4427246756414007, 5.096277608024691, 4.132580923307627, 2.190524516908212, 1.267273717948718, 0.0, 3.7741817330917877, 5.069094871794872, 3.2857867753623187, 2.7550539488717507, 1.2740694020061727, 0.0), # 19
(2.455449468344552, 5.122223625140291, 4.141432090831191, 2.1954830917874397, 1.2710897435897435, 0.0, 3.772161835748792, 5.084358974358974, 3.29322463768116, 2.7609547272207933, 1.2805559062850727, 0.0), # 20
(2.467629850746269, 5.146475113636362, 4.149510443444731, 2.2000141304347824, 1.2745788461538459, 0.0, 3.7701057065217394, 5.0983153846153835, 3.300021195652174, 2.76634029562982, 1.2866187784090906, 0.0), # 21
(2.479249126914447, 5.168980650953984, 4.156801199657241, 2.2041082125603864, 1.277733333333333, 0.0, 3.7680135265700487, 5.110933333333332, 3.3061623188405798, 2.7712007997714934, 1.292245162738496, 0.0), # 22
(2.4902906009169836, 5.1896888145342315, 4.163289577977721, 2.207755917874396, 1.2805455128205128, 0.0, 3.765885477053141, 5.122182051282051, 3.3116338768115945, 2.7755263853184804, 1.2974222036335579, 0.0), # 23
(2.5007375768217734, 5.208548181818181, 4.168960796915166, 2.210947826086956, 1.2830076923076923, 0.0, 3.763721739130435, 5.132030769230769, 3.3164217391304347, 2.779307197943444, 1.3021370454545453, 0.0), # 24
(2.5105733586967123, 5.225507330246913, 4.173800074978578, 2.213674516908213, 1.2851121794871794, 0.0, 3.761522493961353, 5.1404487179487175, 3.3205117753623195, 2.7825333833190515, 1.3063768325617282, 0.0), # 25
(2.5197812506096966, 5.240514837261503, 4.177792630676949, 2.2159265700483086, 1.2868512820512819, 0.0, 3.759287922705314, 5.147405128205127, 3.3238898550724634, 2.785195087117966, 1.3101287093153757, 0.0), # 26
(2.5283445566286216, 5.2535192803030295, 4.180923682519279, 2.2176945652173914, 1.2882173076923078, 0.0, 3.7570182065217397, 5.152869230769231, 3.3265418478260873, 2.787282455012853, 1.3133798200757574, 0.0), # 27
(2.5362465808213837, 5.26446923681257, 4.183178449014568, 2.2189690821256036, 1.289202564102564, 0.0, 3.7547135265700486, 5.156810256410256, 3.328453623188406, 2.7887856326763782, 1.3161173092031424, 0.0), # 28
(2.5434706272558776, 5.273313284231201, 4.184542148671808, 2.219740700483092, 1.289799358974359, 0.0, 3.752374064009662, 5.159197435897436, 3.329611050724638, 2.789694765781205, 1.3183283210578003, 0.0), # 29
(2.5500000000000003, 5.28, 4.1850000000000005, 2.22, 1.29, 0.0, 3.75, 5.16, 3.3300000000000005, 2.79, 1.32, 0.0), # 30
(2.5561096227621487, 5.285554431818181, 4.184656775362319, 2.219953169934641, 1.2899269858156028, 0.0, 3.7467010078294187, 5.159707943262411, 3.3299297549019613, 2.789771183574879, 1.3213886079545452, 0.0), # 31
(2.562087340153453, 5.2910354545454545, 4.183636811594202, 2.2198135947712414, 1.2897095035460993, 0.0, 3.7416198067632855, 5.158838014184397, 3.329720392156862, 2.7890912077294683, 1.3227588636363636, 0.0), # 32
(2.5679358375959076, 5.296442386363637, 4.181954673913044, 2.2195826470588234, 1.2893498936170211, 0.0, 3.734806146926536, 5.1573995744680845, 3.3293739705882355, 2.787969782608696, 1.3241105965909092, 0.0), # 33
(2.573657800511509, 5.3017745454545455, 4.179624927536232, 2.2192616993464056, 1.2888504964539007, 0.0, 3.7263097784441115, 5.155401985815603, 3.3288925490196086, 2.786416618357488, 1.3254436363636364, 0.0), # 34
(2.5792559143222507, 5.307031249999999, 4.176662137681159, 2.2188521241830066, 1.2882136524822696, 0.0, 3.7161804514409464, 5.152854609929078, 3.32827818627451, 2.784441425120773, 1.3267578124999997, 0.0), # 35
(2.584732864450128, 5.312211818181819, 4.173080869565217, 2.218355294117647, 1.2874417021276596, 0.0, 3.7044679160419793, 5.1497668085106385, 3.3275329411764707, 2.782053913043478, 1.3280529545454547, 0.0), # 36
(2.5900913363171356, 5.317315568181819, 4.168895688405797, 2.2177725816993465, 1.2865369858156026, 0.0, 3.691221922372147, 5.14614794326241, 3.32665887254902, 2.779263792270531, 1.3293288920454547, 0.0), # 37
(2.5953340153452684, 5.322341818181818, 4.16412115942029, 2.2171053594771237, 1.2855018439716313, 0.0, 3.676492220556388, 5.142007375886525, 3.325658039215686, 2.7760807729468597, 1.3305854545454545, 0.0), # 38
(2.600463586956522, 5.327289886363636, 4.158771847826086, 2.216355, 1.2843386170212765, 0.0, 3.66032856071964, 5.137354468085106, 3.3245325, 2.772514565217391, 1.331822471590909, 0.0), # 39
(2.60548273657289, 5.332159090909091, 4.1528623188405795, 2.2155228758169936, 1.2830496453900706, 0.0, 3.6427806929868396, 5.132198581560282, 3.3232843137254906, 2.768574879227053, 1.3330397727272727, 0.0), # 40
(2.6103941496163685, 5.336948749999999, 4.14640713768116, 2.214610359477124, 1.281637269503546, 0.0, 3.623898367482926, 5.126549078014184, 3.3219155392156865, 2.7642714251207727, 1.3342371874999996, 0.0), # 41
(2.6152005115089514, 5.3416581818181825, 4.1394208695652175, 2.2136188235294116, 1.280103829787234, 0.0, 3.6037313343328337, 5.120415319148936, 3.3204282352941177, 2.7596139130434785, 1.3354145454545456, 0.0), # 42
(2.6199045076726346, 5.346286704545454, 4.131918079710145, 2.2125496405228757, 1.2784516666666665, 0.0, 3.5823293436615025, 5.113806666666666, 3.3188244607843136, 2.754612053140096, 1.3365716761363635, 0.0), # 43
(2.624508823529412, 5.350833636363636, 4.123913333333333, 2.211404183006536, 1.2766831205673759, 0.0, 3.5597421455938694, 5.1067324822695035, 3.317106274509804, 2.7492755555555557, 1.337708409090909, 0.0), # 44
(2.6290161445012785, 5.355298295454545, 4.115421195652175, 2.2101838235294116, 1.2748005319148936, 0.0, 3.5360194902548727, 5.0992021276595745, 3.3152757352941173, 2.743614130434783, 1.3388245738636362, 0.0), # 45
(2.6334291560102305, 5.35968, 4.106456231884058, 2.2088899346405224, 1.2728062411347518, 0.0, 3.511211127769449, 5.091224964539007, 3.313334901960784, 2.7376374879227057, 1.33992, 0.0), # 46
(2.637750543478261, 5.363978068181818, 4.0970330072463765, 2.207523888888889, 1.2707025886524823, 0.0, 3.4853668082625355, 5.082810354609929, 3.3112858333333333, 2.7313553381642506, 1.3409945170454545, 0.0), # 47
(2.641982992327366, 5.368191818181819, 4.087166086956522, 2.2060870588235293, 1.268491914893617, 0.0, 3.4585362818590712, 5.073967659574468, 3.309130588235294, 2.7247773913043476, 1.3420479545454547, 0.0), # 48
(2.6461291879795397, 5.3723205681818165, 4.076870036231885, 2.2045808169934644, 1.2661765602836879, 0.0, 3.430769298683991, 5.064706241134751, 3.306871225490197, 2.7179133574879226, 1.3430801420454541, 0.0), # 49
(2.6501918158567777, 5.376363636363634, 4.066159420289855, 2.2030065359477127, 1.263758865248227, 0.0, 3.4021156088622355, 5.055035460992908, 3.3045098039215692, 2.7107729468599033, 1.3440909090909086, 0.0), # 50
(2.6541735613810746, 5.3803203409090905, 4.055048804347826, 2.2013655882352943, 1.2612411702127657, 0.0, 3.3726249625187408, 5.044964680851063, 3.3020483823529414, 2.7033658695652174, 1.3450800852272726, 0.0), # 51
(2.6580771099744247, 5.384189999999999, 4.043552753623188, 2.1996593464052285, 1.258625815602837, 0.0, 3.3423471097784443, 5.034503262411348, 3.2994890196078432, 2.695701835748792, 1.3460474999999998, 0.0), # 52
(2.6619051470588238, 5.387971931818182, 4.031685833333333, 2.197889183006536, 1.2559151418439718, 0.0, 3.3113318007662835, 5.023660567375887, 3.296833774509804, 2.6877905555555555, 1.3469929829545455, 0.0), # 53
(2.6656603580562663, 5.391665454545453, 4.019462608695652, 2.1960564705882355, 1.2531114893617021, 0.0, 3.2796287856071964, 5.0124459574468085, 3.2940847058823532, 2.6796417391304344, 1.3479163636363634, 0.0), # 54
(2.6693454283887466, 5.395269886363636, 4.006897644927536, 2.1941625816993464, 1.2502171985815602, 0.0, 3.2472878144261204, 5.000868794326241, 3.29124387254902, 2.6712650966183573, 1.348817471590909, 0.0), # 55
(2.6729630434782607, 5.398784545454545, 3.994005507246377, 2.1922088888888887, 1.247234609929078, 0.0, 3.214358637347993, 4.988938439716312, 3.2883133333333334, 2.662670338164251, 1.3496961363636362, 0.0), # 56
(2.6765158887468035, 5.402208750000001, 3.980800760869565, 2.1901967647058824, 1.2441660638297871, 0.0, 3.180891004497751, 4.9766642553191485, 3.285295147058824, 2.6538671739130435, 1.3505521875000002, 0.0), # 57
(2.6800066496163684, 5.405541818181817, 3.967297971014492, 2.188127581699346, 1.2410139007092198, 0.0, 3.1469346660003334, 4.964055602836879, 3.2821913725490197, 2.6448653140096616, 1.3513854545454542, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
# Per-timestep alighting probabilities (rows 0-59; every row is identical, so
# the distribution is constant over time). Each 12-tuple: columns 0 and 6 are
# 0, columns 5 and 11 are 1, all others 1/6. Presumably two route directions
# of six stops each, with certain alighting at each terminus and none at each
# origin -- TODO confirm column semantics against the simulator.
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# Initial entropy (seeding material); presumably consumed by
# numpy.random.SeedSequence, per the URL in the note above -- TODO confirm.
entropy = 258194110137029475889902652135037600173
# Indices of the SeedSequence children ("spawn keys") that seed this
# scenario's generators -- verify against the code that reads this tuple.
child_seed_index = (
1, # 0
66, # 1
)
| 112.931343 | 216 | 0.728695 | 5,147 | 37,832 | 5.353993 | 0.210025 | 0.313532 | 0.248213 | 0.470298 | 0.333128 | 0.329354 | 0.328918 | 0.328918 | 0.328338 | 0.328338 | 0 | 0.818717 | 0.119317 | 37,832 | 334 | 217 | 113.269461 | 0.008374 | 0.03201 | 0 | 0.202532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.015823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
aae8d279b0569ba27edd589cbd62b86ac1b99c6a | 72 | py | Python | manual/models/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | manual/models/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | manual/models/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from .deployment_models import *
from .manual_migration_models import *
| 24 | 38 | 0.833333 | 9 | 72 | 6.333333 | 0.666667 | 0.421053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 72 | 2 | 39 | 36 | 0.890625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
aaf946043f8671de972d1b04ac1434fe19d0c6a9 | 2,229 | py | Python | stripstream/pddl/logic/formulas.py | nishadg246/stripstream-ivan-nishad | 9a275ae5836ee289cd09cbe6bc0ff6fd4a381135 | [
"MIT"
] | null | null | null | stripstream/pddl/logic/formulas.py | nishadg246/stripstream-ivan-nishad | 9a275ae5836ee289cd09cbe6bc0ff6fd4a381135 | [
"MIT"
] | null | null | null | stripstream/pddl/logic/formulas.py | nishadg246/stripstream-ivan-nishad | 9a275ae5836ee289cd09cbe6bc0ff6fd4a381135 | [
"MIT"
] | null | null | null | from stripstream.utils import flatten
# TODO - check if the quantified variables are used in children
class Formula(object):
    """Logical formula abstract class.

    Abstract base for PDDL logical formulas. Subclasses implement the
    traversal/transformation hooks below (which raise NotImplementedError
    here); ``Condition`` and ``Effect`` are the mixin interfaces that mark
    where a formula may legally appear.
    """
    # NOTE: the original placed this docstring text at the END of the class
    # body, where it is a no-op expression and never becomes __doc__.
    # TODO - check if the quantified variables are used in children

    def is_valid_condition(self):
        # Legal condition: this node and every subformula implement Condition.
        return isinstance(self, Condition) and all(f.is_valid_condition() for f in self.get_formulas())
    def is_valid_effect(self):
        # Legal effect: this node and every subformula implement Effect.
        return isinstance(self, Effect) and all(f.is_valid_effect() for f in self.get_formulas())
    def get_atoms(self): raise NotImplementedError()
    def get_literals(self): raise NotImplementedError() # NOTE - to_dnf
    def get_formulas(self): raise NotImplementedError()
    #def normalize(self): raise NotImplementedError() # TODO - normalize by combining operators: i.e. And(And(...), ...)
    def de_morgan(self, sign=True): raise NotImplementedError()
    def simplify(self): return self  # default: already simplified
    def get_objects(self):
        # Concrete objects are the arguments of every atom in the formula.
        return set(flatten(atom.args for atom in self.get_atoms()))
    def get_parameters(self):
        # Free parameters gathered from every atom in the formula.
        return set(flatten(atom.get_parameters() for atom in self.get_atoms()))
    def get_quantified(self): raise NotImplementedError()
    #def invert(self): raise NotImplementedError() # TODO - invert a formula
    def propositional(self, constants): raise NotImplementedError()
    def dequantify(self, constants): raise NotImplementedError()
    def instantiate(self, parameter_map): raise NotImplementedError()
    def clone(self): return self.instantiate({})  # copy via empty substitution
    def substitute(self, atom, subformula): raise NotImplementedError()
    def pddl(self): raise NotImplementedError()
    def __repr__(self):
        # Dispatch dynamically so a subclass overriding pddl() is reflected in
        # repr(). The original `__repr__ = pddl` statically bound the base
        # function, which always raises NotImplementedError, forcing every
        # subclass to re-bind __repr__ itself.
        return self.pddl()
##################################################
class Condition():
    """Legal condition component interface.

    Mixin implemented by formulas that may appear as conditions. The
    ``atoms`` argument is presumably the set of currently-true atoms and
    ``constants`` the available objects -- confirm against callers.
    """
    # NOTE: the original placed this docstring text at the END of the class
    # body, where it is a no-op expression and never becomes __doc__.
    def holds(self, atoms, constants): raise NotImplementedError()
    def positive_supporters(self, atoms, constants): raise NotImplementedError()
    def negative_supporters(self, atoms, constants): raise NotImplementedError()
    #def relaxed_holds(self, atoms, constants): # TODO
    #  raise NotImplementedError()
class Effect():
    """Legal effect component interface.

    Mixin implemented by formulas that may appear as effects; ``add`` and
    ``delete`` report the atoms the effect asserts and retracts, given the
    current ``atoms`` and available ``constants`` -- confirm against callers.
    """
    # NOTE: the original placed this docstring text at the END of the class
    # body, where it is a no-op expression and never becomes __doc__.
    def add(self, atoms, constants): raise NotImplementedError()
    def delete(self, atoms, constants): raise NotImplementedError()
    #def relaxed_add(self, atoms, constants): # TODO
    #  raise NotImplementedError()
c94639c740e8273b2153f837694ba43282f14e13 | 47 | py | Python | selective_search/__init__.py | wahid18benz/selective_search | e928ecbb8e6f64adca3fb00d9b283c4720fb227b | [
"MIT"
] | 34 | 2019-10-06T18:47:22.000Z | 2022-03-24T19:22:53.000Z | selective_search/__init__.py | wahid18benz/selective_search | e928ecbb8e6f64adca3fb00d9b283c4720fb227b | [
"MIT"
] | 5 | 2020-05-10T06:55:49.000Z | 2022-02-09T02:15:50.000Z | selective_search/__init__.py | wahid18benz/selective_search | e928ecbb8e6f64adca3fb00d9b283c4720fb227b | [
"MIT"
] | 15 | 2020-02-03T06:05:15.000Z | 2022-02-08T11:14:07.000Z | from .core import selective_search, box_filter
| 23.5 | 46 | 0.851064 | 7 | 47 | 5.428571 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106383 | 47 | 1 | 47 | 47 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c98608844e164b723d652d3e9e4f29a259d37dbe | 90 | py | Python | py_greet/version.py | templ-project/python-app | 7a5f21df4774f4787963dfa0c9dc491a4cc9473c | [
"MIT"
] | null | null | null | py_greet/version.py | templ-project/python-app | 7a5f21df4774f4787963dfa0c9dc491a4cc9473c | [
"MIT"
] | 11 | 2019-08-13T06:03:43.000Z | 2020-07-05T18:08:13.000Z | py_greet/version.py | templ-project/python-app | 7a5f21df4774f4787963dfa0c9dc491a4cc9473c | [
"MIT"
] | null | null | null | """version handler"""
def get_static_version():
    """Return the hard-coded package version string."""
    static_version = '0.0.1'
    return static_version
| 12.857143 | 25 | 0.633333 | 12 | 90 | 4.583333 | 0.666667 | 0.472727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039474 | 0.155556 | 90 | 6 | 26 | 15 | 0.684211 | 0.333333 | 0 | 0 | 0 | 0 | 0.102041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
a309f2fc37f4601f4d6248321db2f5008d45d10e | 66 | py | Python | py_tdlib/constructors/test_call_empty.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/test_call_empty.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/test_call_empty.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class testCallEmpty(Method):
    """Parameterless API object; adds no members beyond ``Method``."""
| 11 | 28 | 0.772727 | 8 | 66 | 6.375 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151515 | 66 | 5 | 29 | 13.2 | 0.910714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
a3297a2335da9046f6a3b59d54204d2e520eb6fe | 3,517 | py | Python | pushno/schemas/pushoverschema.py | keans/pushno | 4ddab55121234e0e29321e3b31ed570496274091 | [
"MIT"
] | 1 | 2021-07-16T06:30:29.000Z | 2021-07-16T06:30:29.000Z | pushno/schemas/pushoverschema.py | keans/pushno | 4ddab55121234e0e29321e3b31ed570496274091 | [
"MIT"
] | 1 | 2021-05-03T15:04:18.000Z | 2021-05-03T16:55:51.000Z | pushno/schemas/pushoverschema.py | keans/pushno | 4ddab55121234e0e29321e3b31ed570496274091 | [
"MIT"
] | null | null | null | from pushno.consts.pushoverconsts import LOWEST_PRIORITY, LOW_PRIORITY, \
NORMAL_PRIORITY, HIGH_PRIORIRY, EMERGENCY_PRIORITY, \
PUSHOVER_SOUND, BIKE_SOUND, BUGLE_SOUND, \
CASHREGISTER_SOUND, CLASSICAL_SOUND, COSMIC_SOUND, \
FALLING_SOUND, GAMELAN_SOUND, INCOMING_SOUND, \
INTERMISSION_SOUND, MAGIC_SOUND, MECHANICAL_SOUND, \
PIANOBAR_SOUND, SIREN_SOUND, SPACEALARM_SOUND, \
TUGBOAT_SOUND, ALIEN_SOUND, CLIMB_SOUND, \
PERSISTENT_SOUND, ECHO_SOUND, UPDOWN_SOUND, NONE_SOUND
# Validation schema for a PushOver "send message" request body.
pushover_message_schema = {
    # ----- required -----
    # Application API token: 30 alphanumeric characters.
    "token": {
        "type": "string",
        "required": True,
        "empty": False,
        "minlength": 30,
        "maxlength": 30,
        "regex": r"^[A-Za-z0-9]+$"
    },
    # User/group key: 30 alphanumeric characters.
    "user": {
        "type": "string",
        "required": True,
        "empty": False,
        "minlength": 30,
        "maxlength": 30,
        "regex": r"^[A-Za-z0-9]+$"
    },
    "message": {
        "type": "string",
        "required": True,
        "empty": False,
        "minlength": 1,
        "maxlength": 1024,
    },
    # ----- optional -----
    "attachment": {
        # TODO: needs improved checking
        "type": "string",
        "required": False,
        "empty": False,
    },
    "device": {
        "type": "string",
        "required": False,
        "empty": False,
        "minlength": 1,
        "maxlength": 25,
        "regex": r"^[A-Za-z0-9_-]+$"
    },
    "title": {
        "type": "string",
        "required": False,
        "empty": False,
        "minlength": 1,
        "maxlength": 250,
    },
    "url": {
        "type": "string",
        "required": False,
        "empty": False,
        "minlength": 1,
        "maxlength": 512,
        # Accept "<scheme>://<host>" where host is either a dotted domain
        # name or an IPv4 address, optionally followed by a port and a path.
        # (Fixed: the original pattern contained a stray apostrophe after
        # the TLD class -- r"[a-z]{2,10}'" -- which required a literal "'"
        # after the domain, so no domain-based URL could ever match.)
        "regex": (
            r"^[a-z]+://([^/:]+\.[a-z]{2,10}|"
            r"([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$"
        )
    },
    "url_title": {
        "type": "string",
        "required": False,
        "empty": False,
        "minlength": 1,
        "maxlength": 100,
    },
    "priority": {
        "type": "string",
        "required": False,
        "empty": False,
        "allowed": [
            LOWEST_PRIORITY, LOW_PRIORITY, NORMAL_PRIORITY,
            HIGH_PRIORIRY, EMERGENCY_PRIORITY
        ]
    },
    "sound": {
        "type": "string",
        "required": False,
        "empty": False,
        "allowed": [
            PUSHOVER_SOUND, BIKE_SOUND, BUGLE_SOUND,
            CASHREGISTER_SOUND, CLASSICAL_SOUND, COSMIC_SOUND,
            FALLING_SOUND, GAMELAN_SOUND, INCOMING_SOUND,
            INTERMISSION_SOUND, MAGIC_SOUND, MECHANICAL_SOUND,
            PIANOBAR_SOUND, SIREN_SOUND, SPACEALARM_SOUND,
            TUGBOAT_SOUND, ALIEN_SOUND, CLIMB_SOUND,
            PERSISTENT_SOUND, ECHO_SOUND, UPDOWN_SOUND, NONE_SOUND
        ]
    },
}
# Validation schema for a PushOver user/group validation request body.
pushover_validation_message_schema = {
    # ----- required -----
    # Application API token: 30 alphanumeric characters.
    "token": dict(
        type="string",
        required=True,
        empty=False,
        minlength=30,
        maxlength=30,
        regex=r"^[A-Za-z0-9]+$",
    ),
    # User/group key: 30 alphanumeric characters.
    "user": dict(
        type="string",
        required=True,
        empty=False,
        minlength=30,
        maxlength=30,
        regex=r"^[A-Za-z0-9]+$",
    ),
    # ----- optional -----
    "device": dict(
        type="string",
        required=False,
        empty=False,
        minlength=1,
        maxlength=25,
        regex=r"^[A-Za-z0-9_-]+$",
    ),
}
| 26.443609 | 73 | 0.499005 | 326 | 3,517 | 5.193252 | 0.236196 | 0.076787 | 0.138216 | 0.108683 | 0.904903 | 0.898996 | 0.879504 | 0.808033 | 0.808033 | 0.777318 | 0 | 0.027438 | 0.326415 | 3,517 | 132 | 74 | 26.643939 | 0.68721 | 0.048052 | 0 | 0.586777 | 0 | 0.008264 | 0.230838 | 0.023353 | 0 | 0 | 0 | 0.007576 | 0 | 1 | 0 | false | 0 | 0.008264 | 0 | 0.008264 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a336763ba0e434aa7ca66539c19b8e4f7e1faca9 | 137 | py | Python | numereval/__init__.py | parmarsuraj99/numereval | 77fedbb4d985a0ae63f5c7b7510ca3e5ae1daff1 | [
"MIT"
] | 24 | 2020-12-25T19:17:29.000Z | 2021-09-11T23:32:40.000Z | numereval/__init__.py | marianotir/numereval | 77fedbb4d985a0ae63f5c7b7510ca3e5ae1daff1 | [
"MIT"
] | 2 | 2021-01-21T03:19:52.000Z | 2021-04-15T05:41:16.000Z | numereval/__init__.py | marianotir/numereval | 77fedbb4d985a0ae63f5c7b7510ca3e5ae1daff1 | [
"MIT"
] | 3 | 2020-12-23T19:41:34.000Z | 2021-08-12T18:46:17.000Z | from numereval.numereval import evaluate, diagnostics
from numereval.signalseval import run_analytics
from numereval.scores import score
| 34.25 | 53 | 0.875912 | 17 | 137 | 7 | 0.588235 | 0.327731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094891 | 137 | 3 | 54 | 45.666667 | 0.959677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a338776b727dfce65deee7128d5ba86f94631b39 | 254 | py | Python | bitmovin_api_sdk/encoding/inputs/gcs/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/inputs/gcs/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/inputs/gcs/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.encoding.inputs.gcs.gcs_api import GcsApi
from bitmovin_api_sdk.encoding.inputs.gcs.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.gcs.gcs_input_list_query_params import GcsInputListQueryParams
| 63.5 | 100 | 0.901575 | 37 | 254 | 5.864865 | 0.432432 | 0.165899 | 0.207373 | 0.248848 | 0.511521 | 0.511521 | 0.511521 | 0.35023 | 0 | 0 | 0 | 0 | 0.047244 | 254 | 3 | 101 | 84.666667 | 0.896694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a34c958e5a5af5e1a6b00dc0ae85835c198ca885 | 126 | py | Python | qmctorch/wavefunction/jastrows/distance/__init__.py | NLESC-JCER/QMCTorch | c56472cd3e9cc59f2e01a880e674b7270d2cdc2b | [
"Apache-2.0"
] | 16 | 2020-06-26T17:43:38.000Z | 2022-03-03T14:16:02.000Z | qmctorch/wavefunction/jastrows/distance/__init__.py | NLESC-JCER/QMCTorch | c56472cd3e9cc59f2e01a880e674b7270d2cdc2b | [
"Apache-2.0"
] | 57 | 2020-05-01T07:13:49.000Z | 2021-07-13T19:51:55.000Z | qmctorch/wavefunction/jastrows/distance/__init__.py | NLESC-JCER/QMCTorch | c56472cd3e9cc59f2e01a880e674b7270d2cdc2b | [
"Apache-2.0"
] | 3 | 2020-07-30T09:56:04.000Z | 2021-08-12T02:55:45.000Z | from .electron_electron_distance import ElectronElectronDistance
from .electron_nuclei_distance import ElectronNucleiDistance
| 42 | 64 | 0.920635 | 12 | 126 | 9.333333 | 0.583333 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063492 | 126 | 2 | 65 | 63 | 0.949153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a37ac93458b58071dd71f131efd1ea470c8d1573 | 43 | py | Python | torchOnVideo/datasets/SPMCS/super_resolution/__init__.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | 2 | 2021-03-19T08:05:06.000Z | 2021-05-22T21:54:10.000Z | torchOnVideo/datasets/SPMCS/super_resolution/__init__.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | null | null | null | torchOnVideo/datasets/SPMCS/super_resolution/__init__.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | null | null | null | from .test_iseebetter import TestISeeBetter | 43 | 43 | 0.906977 | 5 | 43 | 7.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 1 | 43 | 43 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6e7aa8ee8e1965202f2c932d3b768ecf8bb76e53 | 41 | py | Python | urwid_combobox/__init__.py | rbistolfi/urwid-combobox | a85d6e8ec92d0ef9e22829b612f0b0dc30e2c23c | [
"MIT"
] | 3 | 2016-02-25T22:46:18.000Z | 2020-09-23T11:41:20.000Z | urwid_combobox/__init__.py | rbistolfi/urwid-combobox | a85d6e8ec92d0ef9e22829b612f0b0dc30e2c23c | [
"MIT"
] | null | null | null | urwid_combobox/__init__.py | rbistolfi/urwid-combobox | a85d6e8ec92d0ef9e22829b612f0b0dc30e2c23c | [
"MIT"
] | null | null | null | # coding: utf8
from .combobox import *
| 8.2 | 23 | 0.682927 | 5 | 41 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0.219512 | 41 | 4 | 24 | 10.25 | 0.84375 | 0.292683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6ec971acd7c02cd6357b800de201b526a148df6b | 161 | py | Python | jiminy/envs/vnc_core_env/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | 3 | 2020-03-16T13:50:40.000Z | 2021-06-09T05:26:13.000Z | jiminy/envs/vnc_core_env/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | jiminy/envs/vnc_core_env/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | from jiminy.envs.vnc_core_env.vnc_core_env import GymCoreEnv, GymCoreSyncEnv
from jiminy.envs.vnc_core_env.translator import AtariTranslator, CartPoleTranslator
| 53.666667 | 83 | 0.888199 | 22 | 161 | 6.227273 | 0.545455 | 0.153285 | 0.218978 | 0.248175 | 0.350365 | 0.350365 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062112 | 161 | 2 | 84 | 80.5 | 0.907285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
6ede1b52f76e23eab248f6ec30cf185642db01ed | 110 | py | Python | bitmovin_api_sdk/encoding/encodings/muxings/webm/drm/cenc/customdata/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/encodings/muxings/webm/drm/cenc/customdata/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/encodings/muxings/webm/drm/cenc/customdata/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.encoding.encodings.muxings.webm.drm.cenc.customdata.customdata_api import CustomdataApi
| 55 | 109 | 0.890909 | 15 | 110 | 6.333333 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036364 | 110 | 1 | 110 | 110 | 0.896226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
42d6d03bc3644a52236824b73de0b5990c4de445 | 15,690 | py | Python | launchdarkly_api/models/__init__.py | launchdarkly/api-client-python | b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | [
"Apache-2.0"
] | 6 | 2020-02-06T20:17:25.000Z | 2021-12-28T20:13:34.000Z | launchdarkly_api/models/__init__.py | launchdarkly/api-client-python | b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | [
"Apache-2.0"
] | 7 | 2019-02-18T21:51:47.000Z | 2021-09-03T17:49:33.000Z | launchdarkly_api/models/__init__.py | launchdarkly/api-client-python | b72bd94fb65ac57bd95df5767aebcdaff50e5cb6 | [
"Apache-2.0"
] | 6 | 2019-08-02T16:10:31.000Z | 2021-05-23T17:47:03.000Z | # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from launchdarkly_api.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from launchdarkly_api.model.access_denied_reason_rep import AccessDeniedReasonRep
from launchdarkly_api.model.access_denied_rep import AccessDeniedRep
from launchdarkly_api.model.access_rep import AccessRep
from launchdarkly_api.model.access_token_post import AccessTokenPost
from launchdarkly_api.model.action_input_rep import ActionInputRep
from launchdarkly_api.model.action_output_rep import ActionOutputRep
from launchdarkly_api.model.all_variations_summary import AllVariationsSummary
from launchdarkly_api.model.approval_condition_input_rep import ApprovalConditionInputRep
from launchdarkly_api.model.approval_condition_output_rep import ApprovalConditionOutputRep
from launchdarkly_api.model.approval_settings import ApprovalSettings
from launchdarkly_api.model.audit_log_entry_listing_rep import AuditLogEntryListingRep
from launchdarkly_api.model.audit_log_entry_listing_rep_collection import AuditLogEntryListingRepCollection
from launchdarkly_api.model.audit_log_entry_rep import AuditLogEntryRep
from launchdarkly_api.model.authorized_app_data_rep import AuthorizedAppDataRep
from launchdarkly_api.model.big_segment_target import BigSegmentTarget
from launchdarkly_api.model.branch_collection_rep import BranchCollectionRep
from launchdarkly_api.model.branch_rep import BranchRep
from launchdarkly_api.model.clause import Clause
from launchdarkly_api.model.client_side_availability import ClientSideAvailability
from launchdarkly_api.model.client_side_availability_post import ClientSideAvailabilityPost
from launchdarkly_api.model.condition_base_output_rep import ConditionBaseOutputRep
from launchdarkly_api.model.condition_input_rep import ConditionInputRep
from launchdarkly_api.model.condition_output_rep import ConditionOutputRep
from launchdarkly_api.model.confidence_interval_rep import ConfidenceIntervalRep
from launchdarkly_api.model.conflict import Conflict
from launchdarkly_api.model.conflict_output_rep import ConflictOutputRep
from launchdarkly_api.model.copied_from_env import CopiedFromEnv
from launchdarkly_api.model.create_copy_flag_config_approval_request_request import CreateCopyFlagConfigApprovalRequestRequest
from launchdarkly_api.model.create_flag_config_approval_request_request import CreateFlagConfigApprovalRequestRequest
from launchdarkly_api.model.custom_properties import CustomProperties
from launchdarkly_api.model.custom_property import CustomProperty
from launchdarkly_api.model.custom_role import CustomRole
from launchdarkly_api.model.custom_role_post import CustomRolePost
from launchdarkly_api.model.custom_role_post_data import CustomRolePostData
from launchdarkly_api.model.custom_roles import CustomRoles
from launchdarkly_api.model.custom_workflow_input_rep import CustomWorkflowInputRep
from launchdarkly_api.model.custom_workflow_meta import CustomWorkflowMeta
from launchdarkly_api.model.custom_workflow_output_rep import CustomWorkflowOutputRep
from launchdarkly_api.model.custom_workflow_stage_meta import CustomWorkflowStageMeta
from launchdarkly_api.model.custom_workflows_listing_output_rep import CustomWorkflowsListingOutputRep
from launchdarkly_api.model.default_client_side_availability_post import DefaultClientSideAvailabilityPost
from launchdarkly_api.model.defaults import Defaults
from launchdarkly_api.model.dependent_flag import DependentFlag
from launchdarkly_api.model.dependent_flag_environment import DependentFlagEnvironment
from launchdarkly_api.model.dependent_flags_by_environment import DependentFlagsByEnvironment
from launchdarkly_api.model.derived_attribute import DerivedAttribute
from launchdarkly_api.model.destination import Destination
from launchdarkly_api.model.destination_post import DestinationPost
from launchdarkly_api.model.destinations import Destinations
from launchdarkly_api.model.environment import Environment
from launchdarkly_api.model.environment_post import EnvironmentPost
from launchdarkly_api.model.execution_output_rep import ExecutionOutputRep
from launchdarkly_api.model.experiment_allocation_rep import ExperimentAllocationRep
from launchdarkly_api.model.experiment_enabled_period_rep import ExperimentEnabledPeriodRep
from launchdarkly_api.model.experiment_environment_setting_rep import ExperimentEnvironmentSettingRep
from launchdarkly_api.model.experiment_info_rep import ExperimentInfoRep
from launchdarkly_api.model.experiment_metadata_rep import ExperimentMetadataRep
from launchdarkly_api.model.experiment_rep import ExperimentRep
from launchdarkly_api.model.experiment_results_rep import ExperimentResultsRep
from launchdarkly_api.model.experiment_stats_rep import ExperimentStatsRep
from launchdarkly_api.model.experiment_time_series_slice import ExperimentTimeSeriesSlice
from launchdarkly_api.model.experiment_time_series_variation_slice import ExperimentTimeSeriesVariationSlice
from launchdarkly_api.model.experiment_time_series_variation_slices import ExperimentTimeSeriesVariationSlices
from launchdarkly_api.model.experiment_totals_rep import ExperimentTotalsRep
from launchdarkly_api.model.expiring_user_target_error import ExpiringUserTargetError
from launchdarkly_api.model.expiring_user_target_get_response import ExpiringUserTargetGetResponse
from launchdarkly_api.model.expiring_user_target_item import ExpiringUserTargetItem
from launchdarkly_api.model.expiring_user_target_patch_response import ExpiringUserTargetPatchResponse
from launchdarkly_api.model.extinction import Extinction
from launchdarkly_api.model.extinction_collection_rep import ExtinctionCollectionRep
from launchdarkly_api.model.extinction_list_post import ExtinctionListPost
from launchdarkly_api.model.feature_flag import FeatureFlag
from launchdarkly_api.model.feature_flag_body import FeatureFlagBody
from launchdarkly_api.model.feature_flag_config import FeatureFlagConfig
from launchdarkly_api.model.feature_flag_scheduled_change import FeatureFlagScheduledChange
from launchdarkly_api.model.feature_flag_scheduled_changes import FeatureFlagScheduledChanges
from launchdarkly_api.model.feature_flag_status import FeatureFlagStatus
from launchdarkly_api.model.feature_flag_status_across_environments import FeatureFlagStatusAcrossEnvironments
from launchdarkly_api.model.feature_flag_statuses import FeatureFlagStatuses
from launchdarkly_api.model.feature_flags import FeatureFlags
from launchdarkly_api.model.flag_config_approval_request_response import FlagConfigApprovalRequestResponse
from launchdarkly_api.model.flag_config_approval_requests_response import FlagConfigApprovalRequestsResponse
from launchdarkly_api.model.flag_copy_config_environment import FlagCopyConfigEnvironment
from launchdarkly_api.model.flag_copy_config_post import FlagCopyConfigPost
from launchdarkly_api.model.flag_global_attributes_rep import FlagGlobalAttributesRep
from launchdarkly_api.model.flag_listing_rep import FlagListingRep
from launchdarkly_api.model.flag_scheduled_changes_input import FlagScheduledChangesInput
from launchdarkly_api.model.flag_status_rep import FlagStatusRep
from launchdarkly_api.model.flag_summary import FlagSummary
from launchdarkly_api.model.forbidden_error_rep import ForbiddenErrorRep
from launchdarkly_api.model.form_variable_config import FormVariableConfig
from launchdarkly_api.model.hunk_rep import HunkRep
from launchdarkly_api.model.instruction import Instruction
from launchdarkly_api.model.instructions import Instructions
from launchdarkly_api.model.integration_metadata import IntegrationMetadata
from launchdarkly_api.model.integration_status import IntegrationStatus
from launchdarkly_api.model.invalid_request_error_rep import InvalidRequestErrorRep
from launchdarkly_api.model.ip_list import IpList
from launchdarkly_api.model.json_patch import JSONPatch
from launchdarkly_api.model.last_seen_metadata import LastSeenMetadata
from launchdarkly_api.model.link import Link
from launchdarkly_api.model.member import Member
from launchdarkly_api.model.member_data_rep import MemberDataRep
from launchdarkly_api.model.member_permission_grant_summary_rep import MemberPermissionGrantSummaryRep
from launchdarkly_api.model.member_summary_rep import MemberSummaryRep
from launchdarkly_api.model.member_team_summary_rep import MemberTeamSummaryRep
from launchdarkly_api.model.members import Members
from launchdarkly_api.model.method_not_allowed_error_rep import MethodNotAllowedErrorRep
from launchdarkly_api.model.metric_collection_rep import MetricCollectionRep
from launchdarkly_api.model.metric_listing_rep import MetricListingRep
from launchdarkly_api.model.metric_post import MetricPost
from launchdarkly_api.model.metric_rep import MetricRep
from launchdarkly_api.model.metric_seen import MetricSeen
from launchdarkly_api.model.modification import Modification
from launchdarkly_api.model.multi_environment_dependent_flag import MultiEnvironmentDependentFlag
from launchdarkly_api.model.multi_environment_dependent_flags import MultiEnvironmentDependentFlags
from launchdarkly_api.model.new_member_form import NewMemberForm
from launchdarkly_api.model.new_member_form_list_post import NewMemberFormListPost
from launchdarkly_api.model.not_found_error_rep import NotFoundErrorRep
from launchdarkly_api.model.parent_resource_rep import ParentResourceRep
from launchdarkly_api.model.patch_failed_error_rep import PatchFailedErrorRep
from launchdarkly_api.model.patch_operation import PatchOperation
from launchdarkly_api.model.patch_segment_instruction import PatchSegmentInstruction
from launchdarkly_api.model.patch_segment_request import PatchSegmentRequest
from launchdarkly_api.model.patch_with_comment import PatchWithComment
from launchdarkly_api.model.permission_grant_collection_rep import PermissionGrantCollectionRep
from launchdarkly_api.model.permission_grant_input import PermissionGrantInput
from launchdarkly_api.model.permission_grant_rep import PermissionGrantRep
from launchdarkly_api.model.post_approval_request_apply_request import PostApprovalRequestApplyRequest
from launchdarkly_api.model.post_approval_request_review_request import PostApprovalRequestReviewRequest
from launchdarkly_api.model.post_flag_scheduled_changes_input import PostFlagScheduledChangesInput
from launchdarkly_api.model.prerequisite import Prerequisite
from launchdarkly_api.model.project import Project
from launchdarkly_api.model.project_listing_rep import ProjectListingRep
from launchdarkly_api.model.project_post import ProjectPost
from launchdarkly_api.model.projects import Projects
from launchdarkly_api.model.pub_nub_detail_rep import PubNubDetailRep
from launchdarkly_api.model.put_branch import PutBranch
from launchdarkly_api.model.rate_limited_error_rep import RateLimitedErrorRep
from launchdarkly_api.model.reference_rep import ReferenceRep
from launchdarkly_api.model.relay_auto_config_collection_rep import RelayAutoConfigCollectionRep
from launchdarkly_api.model.relay_auto_config_post import RelayAutoConfigPost
from launchdarkly_api.model.relay_auto_config_rep import RelayAutoConfigRep
from launchdarkly_api.model.repository_collection_rep import RepositoryCollectionRep
from launchdarkly_api.model.repository_post import RepositoryPost
from launchdarkly_api.model.repository_rep import RepositoryRep
from launchdarkly_api.model.resource_access import ResourceAccess
from launchdarkly_api.model.resource_id_response import ResourceIDResponse
from launchdarkly_api.model.review_output_rep import ReviewOutputRep
from launchdarkly_api.model.review_response import ReviewResponse
from launchdarkly_api.model.rollout import Rollout
from launchdarkly_api.model.root_response import RootResponse
from launchdarkly_api.model.rule import Rule
from launchdarkly_api.model.schedule_condition_input_rep import ScheduleConditionInputRep
from launchdarkly_api.model.schedule_condition_output_rep import ScheduleConditionOutputRep
from launchdarkly_api.model.sdk_list_rep import SdkListRep
from launchdarkly_api.model.sdk_version_list_rep import SdkVersionListRep
from launchdarkly_api.model.sdk_version_rep import SdkVersionRep
from launchdarkly_api.model.segment_body import SegmentBody
from launchdarkly_api.model.segment_metadata import SegmentMetadata
from launchdarkly_api.model.segment_user_list import SegmentUserList
from launchdarkly_api.model.segment_user_state import SegmentUserState
from launchdarkly_api.model.series_list_rep import SeriesListRep
from launchdarkly_api.model.series_metadata_rep import SeriesMetadataRep
from launchdarkly_api.model.series_time_slice_rep import SeriesTimeSliceRep
from launchdarkly_api.model.source_flag import SourceFlag
from launchdarkly_api.model.stage_input_rep import StageInputRep
from launchdarkly_api.model.stage_output_rep import StageOutputRep
from launchdarkly_api.model.statement import Statement
from launchdarkly_api.model.statement_post import StatementPost
from launchdarkly_api.model.statement_post_data import StatementPostData
from launchdarkly_api.model.statement_post_list import StatementPostList
from launchdarkly_api.model.statement_rep import StatementRep
from launchdarkly_api.model.statistic_collection_rep import StatisticCollectionRep
from launchdarkly_api.model.statistic_rep import StatisticRep
from launchdarkly_api.model.statistics_root import StatisticsRoot
from launchdarkly_api.model.status_conflict_error_rep import StatusConflictErrorRep
from launchdarkly_api.model.subject_data_rep import SubjectDataRep
from launchdarkly_api.model.target import Target
from launchdarkly_api.model.target_resource_rep import TargetResourceRep
from launchdarkly_api.model.team_collection_rep import TeamCollectionRep
from launchdarkly_api.model.team_patch_input import TeamPatchInput
from launchdarkly_api.model.team_post_input import TeamPostInput
from launchdarkly_api.model.team_rep import TeamRep
from launchdarkly_api.model.title_rep import TitleRep
from launchdarkly_api.model.token import Token
from launchdarkly_api.model.token_data_rep import TokenDataRep
from launchdarkly_api.model.tokens import Tokens
from launchdarkly_api.model.unauthorized_error_rep import UnauthorizedErrorRep
from launchdarkly_api.model.url_matchers import UrlMatchers
from launchdarkly_api.model.url_post import UrlPost
from launchdarkly_api.model.user import User
from launchdarkly_api.model.user_attribute_names_rep import UserAttributeNamesRep
from launchdarkly_api.model.user_flag_setting import UserFlagSetting
from launchdarkly_api.model.user_flag_settings import UserFlagSettings
from launchdarkly_api.model.user_record import UserRecord
from launchdarkly_api.model.user_record_rep import UserRecordRep
from launchdarkly_api.model.user_segment import UserSegment
from launchdarkly_api.model.user_segment_rule import UserSegmentRule
from launchdarkly_api.model.user_segments import UserSegments
from launchdarkly_api.model.users import Users
from launchdarkly_api.model.value_put import ValuePut
from launchdarkly_api.model.variation import Variation
from launchdarkly_api.model.variation_or_rollout_rep import VariationOrRolloutRep
from launchdarkly_api.model.variation_summary import VariationSummary
from launchdarkly_api.model.versions_rep import VersionsRep
from launchdarkly_api.model.webhook import Webhook
from launchdarkly_api.model.webhook_post import WebhookPost
from launchdarkly_api.model.webhooks import Webhooks
from launchdarkly_api.model.weighted_variation import WeightedVariation
| 70.675676 | 126 | 0.914149 | 1,931 | 15,690 | 7.111341 | 0.205075 | 0.245849 | 0.291946 | 0.368774 | 0.421133 | 0.194728 | 0.096271 | 0.014565 | 0.006845 | 0 | 0 | 0.000068 | 0.057808 | 15,690 | 221 | 127 | 70.995475 | 0.928837 | 0.022753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6e2b3fc7095935c7e6a5f4b2d065f3d1018fe9ef | 254 | py | Python | wagtail_draftail_snippet/utils.py | TMFRook/wagtail-draftail-snippet | 1d8d76655b8d544e75884e168f1193b1bfaf02e2 | [
"BSD-3-Clause"
] | 15 | 2020-01-17T20:38:53.000Z | 2022-03-08T10:02:09.000Z | wagtail_draftail_snippet/utils.py | TMFRook/wagtail-draftail-snippet | 1d8d76655b8d544e75884e168f1193b1bfaf02e2 | [
"BSD-3-Clause"
] | 11 | 2020-02-13T14:02:19.000Z | 2022-03-08T10:51:09.000Z | wagtail_draftail_snippet/utils.py | TMFRook/wagtail-draftail-snippet | 1d8d76655b8d544e75884e168f1193b1bfaf02e2 | [
"BSD-3-Clause"
] | 5 | 2020-03-03T14:09:15.000Z | 2021-12-13T22:56:53.000Z |
def get_snippet_link_frontend_template(app_name, model_name):
return "%s/%s_snippet_link.html" % (app_name, model_name)
def get_snippet_embed_frontend_template(app_name, model_name):
return "%s/%s_snippet_embed.html" % (app_name, model_name)
| 28.222222 | 62 | 0.779528 | 40 | 254 | 4.45 | 0.325 | 0.157303 | 0.269663 | 0.359551 | 0.752809 | 0.52809 | 0.52809 | 0.52809 | 0.52809 | 0.52809 | 0 | 0 | 0.110236 | 254 | 8 | 63 | 31.75 | 0.787611 | 0 | 0 | 0 | 0 | 0 | 0.186508 | 0.186508 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
6e4af99e3bd03ffb38916b63875360a0695892d9 | 57 | py | Python | examples/expr_as_instr.py | naim1992/grep_mister_python | 17e4aefc10d221c2b566c01f775b6d77106b84ef | [
"PSF-2.0"
] | 26 | 2018-09-09T17:09:56.000Z | 2021-10-01T12:51:15.000Z | examples/expr_as_instr.py | naim1992/grep_mister_python | 17e4aefc10d221c2b566c01f775b6d77106b84ef | [
"PSF-2.0"
] | 85 | 2018-02-14T10:28:19.000Z | 2021-12-16T17:38:47.000Z | examples/expr_as_instr.py | naim1992/grep_mister_python | 17e4aefc10d221c2b566c01f775b6d77106b84ef | [
"PSF-2.0"
] | 26 | 2018-02-08T11:17:51.000Z | 2021-12-16T17:43:19.000Z |
def f():
""" -> int """
42 + 2
return 42
| 6.333333 | 18 | 0.315789 | 7 | 57 | 2.571429 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0.473684 | 57 | 8 | 19 | 7.125 | 0.433333 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
284436f09e7cbda6df391de2a9940776fbd19ba2 | 35 | py | Python | python/packages/isce3/math/__init__.py | isce3-testing/isce3-circleci-poc | ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3 | [
"Apache-2.0"
] | null | null | null | python/packages/isce3/math/__init__.py | isce3-testing/isce3-circleci-poc | ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3 | [
"Apache-2.0"
] | 1 | 2021-12-23T00:00:31.000Z | 2021-12-23T00:00:31.000Z | python/packages/isce3/math/__init__.py | isce3-testing/isce3-circleci-poc | ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3 | [
"Apache-2.0"
] | 1 | 2021-12-02T21:10:11.000Z | 2021-12-02T21:10:11.000Z | from isce3.ext.isce3.math import *
| 17.5 | 34 | 0.771429 | 6 | 35 | 4.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0.114286 | 35 | 1 | 35 | 35 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2847c7436e0bf87f9f8337019a0fe2f7c39a9f16 | 120 | py | Python | cs_131b/3_week/example_code/sys.py | kimberleejohnson/python-study | 5dc08007a1bc18c91e32879a0e9d5cad1bd1cdd3 | [
"MIT"
] | null | null | null | cs_131b/3_week/example_code/sys.py | kimberleejohnson/python-study | 5dc08007a1bc18c91e32879a0e9d5cad1bd1cdd3 | [
"MIT"
] | null | null | null | cs_131b/3_week/example_code/sys.py | kimberleejohnson/python-study | 5dc08007a1bc18c91e32879a0e9d5cad1bd1cdd3 | [
"MIT"
] | null | null | null | # Program demonstrates how sys works
import sys
print("These",len(sys.argv),"arguments were passed:",''.join(sys.argv)) | 40 | 71 | 0.741667 | 18 | 120 | 4.944444 | 0.777778 | 0.157303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091667 | 120 | 3 | 71 | 40 | 0.816514 | 0.283333 | 0 | 0 | 0 | 0 | 0.321429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0.5 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 6 |
2869a7fd9f03e57acb60657e91805ecc6083a771 | 179 | py | Python | backend/app/controllers/index.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | backend/app/controllers/index.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | backend/app/controllers/index.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | from .. import app
from flask import render_template
from ..misc.cur_user import cur_user
@app.route("/")
def index():
return render_template("index.html", user=cur_user())
| 19.888889 | 57 | 0.731844 | 27 | 179 | 4.666667 | 0.518519 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134078 | 179 | 8 | 58 | 22.375 | 0.812903 | 0 | 0 | 0 | 0 | 0 | 0.061453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.5 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
2892dec2cc4a57477ac4fa46253cb7eb5b5f07e7 | 149 | py | Python | lib/game/events.py | vyahello/racing-game | cf042ba8040327a1ae0c26bd8bcf8d0d9987e7dd | [
"Apache-2.0"
] | null | null | null | lib/game/events.py | vyahello/racing-game | cf042ba8040327a1ae0c26bd8bcf8d0d9987e7dd | [
"Apache-2.0"
] | null | null | null | lib/game/events.py | vyahello/racing-game | cf042ba8040327a1ae0c26bd8bcf8d0d9987e7dd | [
"Apache-2.0"
] | null | null | null | from typing import List
from pygame.event import get, Event
def events() -> List[Event]:
"""Returns a list of game events."""
return get()
| 18.625 | 40 | 0.671141 | 22 | 149 | 4.545455 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208054 | 149 | 7 | 41 | 21.285714 | 0.847458 | 0.201342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.5 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
95a4f048e0f36fc47b3bfdc0f837d5131f2452ca | 29,769 | py | Python | tests/%test_prov.py | KWR-Water/hgc | cf6f92c28e3f787a653c3e7f4b58ccc33fe36cbf | [
"MIT"
] | 2 | 2019-10-22T13:07:53.000Z | 2020-09-25T10:30:25.000Z | tests/%test_prov.py | KWR-Water/hgc | cf6f92c28e3f787a653c3e7f4b58ccc33fe36cbf | [
"MIT"
] | 4 | 2019-10-22T10:51:46.000Z | 2021-02-03T09:58:48.000Z | tests/%test_prov.py | KWR-Water/hgc | cf6f92c28e3f787a653c3e7f4b58ccc33fe36cbf | [
"MIT"
] | 1 | 2019-10-18T08:29:54.000Z | 2019-10-18T08:29:54.000Z | # -*- coding: utf-8 -*-
"""
Reading data for WB, PRO,
for kennisimpulse project
to read data from province, water companies, and any other sources
Created on Sun Jul 26 21:55:57 2020
@author: Xin Tian
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
from hgc import ner
from hgc import io
import tests
# import xlsxwriter
def test_province():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+'/provincie_data_long_preprocessed.csv'
df_temp = pd.read_csv(WD, encoding='ISO-8859-1', header=None)
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 25].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'stacked',
'shape': 'stacked',
'slice_header': [1, slice(1, None)],
'slice_data': [slice(1, n_row), slice(1, None)],
'map_header': {
**io.default_map_header(),
'MeetpuntId': 'LocationID',
'parameter':'Feature',
'eenheid': 'Unit',
'waarde': 'Value',
'Opgegeven bemonstering datum': 'Datetime',
'Monsternummer': 'SampleID', # "SampleID" already exists as header, but contains wrong date. Use "Sample number" as "SampleID"
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+r'/provincie_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_prov')
df2.to_excel(writer, sheet_name='df_prov')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKZUID():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
df_temp = pd.read_csv(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
df2.to_excel(writer, sheet_name='KIWK_Zuid')
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK_Zuid')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKVenloschol():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l atrazine-D5':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK Venloschol')
df2.to_excel(writer, sheet_name='KIWK Venloschol')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKRoerdalslenk():
WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK Roerdalslenk')
df2.to_excel(writer, sheet_name='KIWK Roerdalslenk')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKHeelBeegden():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Heel Beegden_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Heel Beegden_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWKHeelBeegden')
df2.to_excel(writer, sheet_name='KIWKHeelBeegden')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_WBGR():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WBGR_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1', sheet_name='Resultaten')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 6].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 11].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Resultaten',
'shape': 'stacked',
'slice_header': [1, slice(1, 12)],
'slice_data': [slice(1, n_row), slice(1, 12)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter':'Feature',
'Eenheid': 'Unit',
'Resultaat': 'Value', # Gerapporteerde waarde, right?!
'Datum': 'Datetime',
'Beschrijving': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WBGR_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_WBGR')
df2.to_excel(writer, sheet_name='WBGR')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_WMD():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WMD_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1', sheet_name='Resultaten WMD')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 6].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 11].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Resultaten WMD',
'shape': 'stacked',
'slice_header': [1, slice(1, 12)],
'slice_data': [slice(1, n_row), slice(1, 12)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter':'Feature',
'Eenheid': 'Unit',
'Resultaat': 'Value', # Gerapporteerde waarde, right?!
'Datum': 'Datetime',
'Beschrijving': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Kennisimpuls kwaliteitsdata_WMD_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_WMD')
df2.to_excel(writer, sheet_name='WMD')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_BOexport_bewerkt():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/BOexport_bewerkt_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 12].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'BOexport_bewerkt',
'shape': 'stacked',
'slice_header': [1, slice(1, 41)],
'slice_data': [slice(2, n_row), slice(1, 41)],
'map_header': {
**io.default_map_header(),
'sampling.point': 'LocationID',
'component':'Feature',
'eenheid': 'Unit',
'value.result': 'Value', # Gerapporteerde waarde, right?!
'sampled.date': 'Datetime',
'sample.id': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/BOexport_bewerkt_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_BOexport')
df2.to_excel(writer, sheet_name='BOexport')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
test_BOexport_bewerkt()
def test_LIMS_Ruw_2017_2019():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/LIMS_Ruw_2017_2019_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 6].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 9].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export Worksheet',
'shape': 'stacked',
'slice_header': [1, slice(1, 10)],
'slice_data': [slice(1, n_row), slice(1, 10)],
'map_header': {
**io.default_map_header(),
'POINTDESCR': 'LocationID',
'ANALYTE':'Feature',
'UNITS': 'Unit',
'FINAL': 'Value', # Gerapporteerde waarde, right?!
'SAMPDATE': 'Datetime',
'TESTNO': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/LIMS_Ruw_2017_2019_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_LIMS_Ruw_2017_2019')
df2.to_excel(writer, sheet_name='LIMS_Ruw_2017_2019')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_Oasen():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/preprocessed/Oasen_preprocessed.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 8].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 9].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'data2009-2019',
'shape': 'stacked',
'slice_header': [1, slice(1, 14)],
'slice_data': [slice(1, n_row), slice(1, 14)],
'map_header': {
**io.default_map_header(),
'Monsterpuntcode': 'LocationID',
'Omschrijving (Parameter)':'Feature',
'Eenheid (Parameter)': 'Unit',
'Waarde numeriek': 'Value', # Gerapporteerde waarde, right?!
'Monsternamedatum': 'Datetime',
'Naam': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l paraoxon':'µg/l', 'µg/l C6H5OH': 'µg/l','mg/l Na-lauryl-SO4':'mg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Oasen_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_Oasen')
df2.to_excel(writer, sheet_name='Oasen')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_VitensMacro():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/preprocessed/Vitens_PP_WP_Macro_2009_2020.xlsx'
df_temp = pd.read_excel(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[1, slice(8, None)].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[2, slice(8, None)].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if not not feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if not not unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Sheet1',
'shape': 'wide',
'slice_header': [1, slice(1, 8)],
'slice_feature': [1, slice(8, 25)],
'slice_unit': [2, slice(8, 25)],
'slice_data': [slice(3, n_row), slice(1, 14)],
'map_header': {
**io.default_map_header(),
# 'Monsterpuntcode': 'LocationID',
# 'Omschrijving (Parameter)':'Feature',
# 'Eenheid (Parameter)': 'Unit',
# 'Waarde numeriek': 'Value', # Gerapporteerde waarde, right?!
'Datum': 'Datetime',
'Naam': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/VitensMacro.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_VitenMacro')
df2.to_excel(writer, sheet_name='VitensMacro')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_VitensOMIVE():
    """Preprocess the Vitens OMIVE data set and export HGC-formatted results.

    Reads the raw wide-format excel sheet, maps feature and unit names via
    the NER helpers, imports the data through the io helper module and writes
    both the stacked HGC frame and a map/unmap bookkeeping sheet to disk.
    Paths are hard-coded to the author's local machine.
    """
    # WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
    # WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
    WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/preprocessed/Vitens_PP_WP_OMIVE_2009_2020.xlsx'
    # NOTE: pandas.read_excel does not accept an ``encoding`` keyword (the
    # parameter was removed from the pandas API); the excel engine decodes
    # the workbook itself.
    df_temp = pd.read_excel(WD, header=None)
    # define the nrow here; None means read until the last data row
    n_row = None
    feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(
        entity_orig=list(df_temp.iloc[1, slice(8, 819)].dropna()))
    unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(
        entity_orig=list(df_temp.iloc[2, slice(8, 819)].dropna()))
    # create a df to record what has been mapped and what has not.
    # Dict views are materialized to lists so DataFrame construction works
    # across pandas versions.
    df_map = pd.DataFrame(
        (list(feature_map.keys()), list(feature_map.values()),
         list(unit_map.keys()), list(unit_map.values())),
        index=['Feature', 'Mapped feature', 'Unit', 'Mapped unit']).transpose()
    if feature_unmapped:  # plain truthiness instead of 'not not'
        df_map = df_map.join(
            pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(
            pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
    dct2_arguments = {
        'file_path': WD,
        'sheet_name': 'Sheet1',
        'shape': 'wide',
        'slice_header': [1, slice(1, 8)],
        'slice_feature': [1, slice(8, None)],
        'slice_unit': [2, slice(8, None)],
        'slice_data': [slice(3, n_row), slice(1, None)],
        'map_header': {
            **io.default_map_header(),
            # 'Monsterpuntcode': 'LocationID',
            # 'Omschrijving (Parameter)':'Feature',
            # 'Eenheid (Parameter)': 'Unit',
            # 'Waarde numeriek': 'Value', # Gerapporteerde waarde, right?!
            'Datum': 'Datetime',
            'Naam': 'SampleID',  # Analyse !?
        },
        'map_features': {**feature_map, 'pH(1)': 'pH'},
        'map_units': {**unit_map},
    }
    df2 = io.import_file(**dct2_arguments)[0]
    df2_hgc = io.stack_to_hgc(df2)
    # with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
    with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/VitensOMIVE.xlsx') as writer:
        df2_hgc.to_excel(writer, sheet_name='hgc_VitenOMIVE')
        # df2.to_excel(writer, sheet_name='VitensOMIVE')
        df_map.to_excel(writer, sheet_name='mapAndUnmap')
    df2.to_csv(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/VitensOMIVE_ref.csv')
| 54.125455 | 171 | 0.661729 | 3,925 | 29,769 | 4.784459 | 0.06242 | 0.038341 | 0.06257 | 0.095106 | 0.931786 | 0.927206 | 0.912349 | 0.901326 | 0.89025 | 0.888386 | 0 | 0.033063 | 0.193288 | 29,769 | 550 | 172 | 54.125455 | 0.748824 | 0.179549 | 0 | 0.651765 | 0 | 0.023529 | 0.276928 | 0.109316 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028235 | false | 0 | 0.047059 | 0 | 0.075294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
95ee08951d69d572d4fa270be9994a3fa06ccbb3 | 2,107 | py | Python | test/test_twitter_worker.py | ambalytics/amba-analysis-worker-discussion | a218dd562cc73eb2d4c2ad76b3855865fe86b284 | [
"MIT"
] | null | null | null | test/test_twitter_worker.py | ambalytics/amba-analysis-worker-discussion | a218dd562cc73eb2d4c2ad76b3855865fe86b284 | [
"MIT"
] | null | null | null | test/test_twitter_worker.py | ambalytics/amba-analysis-worker-discussion | a218dd562cc73eb2d4c2ad76b3855865fe86b284 | [
"MIT"
] | null | null | null | from src import twitter_worker
import unittest
class TestTwitterWorker(unittest.TestCase):
    """Unit tests for the scoring/normalization helpers in twitter_worker."""

    def test_normalize_sentiment_value(self):
        # Sentiment scores in [-1, 1] map onto a 0-10 scale.
        worker = twitter_worker.TwitterWorker
        cases = ((1, 10), (0.5, 9), (0.2, 7), (0, 5),
                 (-0.2, 2), (-0.5, 1), (-1, 0))
        for raw, expected in cases:
            self.assertEqual(worker.normalize_sentiment_value(raw), expected)

    def test_normalize_abstract_value(self):
        worker = twitter_worker.TwitterWorker
        cases = ((0.95, 3), (0.85, 5), (0.55, 10), (0.25, 3), (0, 1))
        for raw, expected in cases:
            self.assertEqual(worker.normalize_abstract_value(raw), expected)

    def test_score_length(self):
        # Longer tweets earn higher length scores.
        for length, expected in ((40, 3), (70, 6), (120, 10)):
            self.assertEqual(twitter_worker.score_length(length), expected)

    def test_score_type(self):
        # Original tweets score highest; retweets lowest.
        for tweet_type, expected in (('quoted', 0.6), ('replied_to', 0.7),
                                     ('retweeted', 0.1), ('tweet', 1)):
            self.assertEqual(twitter_worker.score_type(tweet_type), expected)

    def test_score_time(self):
        # Recency score decays with the age of the tweet (in days).
        for age, expected in ((1, 30), (7, 20),
                              (30, 12.52130303491482), (365, 1)):
            self.assertEqual(twitter_worker.score_time(age), expected)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 49 | 89 | 0.758424 | 264 | 2,107 | 5.75 | 0.193182 | 0.205534 | 0.333333 | 0.424242 | 0.814888 | 0.811594 | 0.530303 | 0.527009 | 0.515152 | 0.30303 | 0 | 0.046129 | 0.135738 | 2,107 | 42 | 90 | 50.166667 | 0.787479 | 0 | 0 | 0 | 0 | 0 | 0.018035 | 0 | 0 | 0 | 0 | 0 | 0.69697 | 1 | 0.151515 | false | 0 | 0.060606 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
95fef8420ffb0a292835194c90582fa5b4f50499 | 1,191 | py | Python | src/support/tests/strip_vlan_enable/test_bcastF_mcastT_ucastF.py | Paulche/vfd | aff17c97d7ef35fb1f4bda5a4f6ab2f2266bdacc | [
"Apache-2.0"
] | 71 | 2016-04-14T20:21:48.000Z | 2021-11-27T20:01:28.000Z | src/support/tests/strip_vlan_enable/test_bcastF_mcastT_ucastF.py | Paulche/vfd | aff17c97d7ef35fb1f4bda5a4f6ab2f2266bdacc | [
"Apache-2.0"
] | 62 | 2016-06-03T18:04:32.000Z | 2018-09-07T21:13:27.000Z | src/support/tests/strip_vlan_enable/test_bcastF_mcastT_ucastF.py | Paulche/vfd | aff17c97d7ef35fb1f4bda5a4f6ab2f2266bdacc | [
"Apache-2.0"
] | 35 | 2016-04-14T17:12:46.000Z | 2021-10-13T03:34:11.000Z | from tests import packet
import tests.helper_functions as hf
import pytest
@pytest.mark.valid_vlan_bcastF_mcastT_ucastF
def test_bcast_valid_vlan(vf2_valid_vlan):
    """Broadcast frame tagged with the VF's valid VLAN must arrive untagged.

    With VLAN stripping enabled, the sniffer (filtered for the 802.1Q
    ethertype 0x8100) should never observe a tagged frame on receive.

    @param vf2_valid_vlan: fixture supplying the VLAN id configured on VF2
    """
    hf.read_sample_data('TARGET_VF2')
    pkt = hf.build_packet(dmac=hf.config['bcast_mac'],
                          valid_vlan=int(vf2_valid_vlan))
    # Capture only frames that still carry an 802.1Q tag.
    inst = packet.sniff_packets(hf.config['iface'], timeout=8,
                                filters=[{'layer': 'ether',
                                          'config': {'type': '0x8100'}}])
    pkt.send_pkt(tx_port=hf.config['iface'], count=1)
    pkts = packet.load_sniff_packets(inst)
    # VLAN tag of the first sniffed packet, or None when nothing was captured
    # (replaces the original for/break loop).
    vlan = next((p.pktgen.strip_vlan('vlan') for p in pkts), None)
    # PEP 8: compare with None by identity, not equality.
    assert vlan is None
@pytest.mark.invalid_vlan_bcastF_mcastT_ucastF
def test_bcast_invalid_vlan(vf2_invalid_vlan):
    """Broadcast frame tagged with an invalid VLAN must never arrive tagged.

    As with the valid-VLAN case, no 802.1Q-tagged (0x8100) frame should be
    observed on the receive side.

    @param vf2_invalid_vlan: fixture supplying a VLAN id NOT configured on VF2
    """
    hf.read_sample_data('TARGET_VF2')
    pkt = hf.build_packet(dmac=hf.config['bcast_mac'],
                          valid_vlan=int(vf2_invalid_vlan))
    # Capture only frames that still carry an 802.1Q tag.
    inst = packet.sniff_packets(hf.config['iface'], timeout=8,
                                filters=[{'layer': 'ether',
                                          'config': {'type': '0x8100'}}])
    pkt.send_pkt(tx_port=hf.config['iface'], count=1)
    pkts = packet.load_sniff_packets(inst)
    # VLAN tag of the first sniffed packet, or None when nothing was captured
    # (replaces the original for/break loop).
    vlan = next((p.pktgen.strip_vlan('vlan') for p in pkts), None)
    # PEP 8: compare with None by identity, not equality.
    assert vlan is None
| 39.7 | 122 | 0.70529 | 178 | 1,191 | 4.455056 | 0.308989 | 0.068096 | 0.065574 | 0.055486 | 0.81715 | 0.81715 | 0.81715 | 0.7314 | 0.7314 | 0.7314 | 0 | 0.019822 | 0.152813 | 1,191 | 29 | 123 | 41.068966 | 0.766105 | 0 | 0 | 0.666667 | 0 | 0 | 0.099076 | 0 | 0 | 0 | 0.010076 | 0 | 0.074074 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
25050e6f271f7228184afbf60d40b921b1fe2322 | 123 | py | Python | setup/fusion/scripts/Comp/avalon/creator.py | bumpybox/core | 5a24640484f19e48dc12682dae979adc6d41dc0b | [
"MIT"
] | 168 | 2017-06-23T15:50:43.000Z | 2022-02-27T10:48:45.000Z | setup/fusion/scripts/Comp/avalon/creator.py | bumpybox/core | 5a24640484f19e48dc12682dae979adc6d41dc0b | [
"MIT"
] | 366 | 2017-06-22T08:38:45.000Z | 2021-06-19T07:29:06.000Z | setup/fusion/scripts/Comp/avalon/creator.py | bumpybox/core | 5a24640484f19e48dc12682dae979adc6d41dc0b | [
"MIT"
] | 42 | 2017-06-23T15:27:26.000Z | 2021-09-29T17:28:18.000Z | import avalon.api
import avalon.fusion
import avalon.tools.creator as tool
# Register the Fusion host integration with Avalon, then open the Creator
# tool UI.
avalon.api.install(avalon.fusion)
tool.show()
| 15.375 | 35 | 0.804878 | 19 | 123 | 5.210526 | 0.526316 | 0.363636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 123 | 7 | 36 | 17.571429 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
252f880b1cf16c4df9600bce4b0ea35a518f2e68 | 34 | py | Python | playground/numba_play/__init__.py | drkostas/DSE512-playground | 1e47ae2878cc9f3f00fdbd81626189657d642061 | [
"MIT"
] | null | null | null | playground/numba_play/__init__.py | drkostas/DSE512-playground | 1e47ae2878cc9f3f00fdbd81626189657d642061 | [
"MIT"
] | 17 | 2021-02-15T01:43:46.000Z | 2021-05-04T02:32:32.000Z | playground/numba_play/__init__.py | drkostas/DSE512-playground | 1e47ae2878cc9f3f00fdbd81626189657d642061 | [
"MIT"
] | null | null | null | from .numba_play import NumbaPlay
| 17 | 33 | 0.852941 | 5 | 34 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
255cfbf94bcc360e89067bb6d86a507159532ea5 | 47 | py | Python | gcdetection/__init__.py | Justin900429/GC-Detection | 3869cad8a36dc67380d5b3509e6d7fab2980f367 | [
"MIT"
] | 4 | 2021-02-20T09:49:52.000Z | 2021-02-24T06:56:54.000Z | gcdetection/__init__.py | Justin900429/object_detection | 3869cad8a36dc67380d5b3509e6d7fab2980f367 | [
"MIT"
] | null | null | null | gcdetection/__init__.py | Justin900429/object_detection | 3869cad8a36dc67380d5b3509e6d7fab2980f367 | [
"MIT"
] | 1 | 2021-02-20T13:51:03.000Z | 2021-02-20T13:51:03.000Z | from .gc_detection import Detection, Interface
| 23.5 | 46 | 0.851064 | 6 | 47 | 6.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106383 | 47 | 1 | 47 | 47 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c2bf9584cd6b32f60c0bc38e5fe0fe7d3eade5f7 | 43,195 | py | Python | foxlink/graphs.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | null | null | null | foxlink/graphs.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | null | null | null | foxlink/graphs.py | lamsoa729/FoXlink | 3c061b02968cdab1def752d5c145a6df4615504b | [
"BSD-3-Clause"
] | 2 | 2019-06-18T16:48:03.000Z | 2019-06-20T23:50:02.000Z | #!/usr/bin/env python
"""@package docstring
File: graphs.py
Author: Adam Lamson
Email: adam.lamson@colorado.edu
Description: File containing modular graphing functions for Fokker-Planck data.
"""
import numpy as np
import matplotlib as mpl
from matplotlib.lines import Line2D
from matplotlib.patches import (Circle, RegularPolygon, FancyArrowPatch,
ArrowStyle)
# import matplotlib.pyplot as plt
def convert_size_units(d, ax, reference='y'):
    """
    Convert a length in data units to a length in points.

    Parameters
    ----------
    d: float
        Length in data units of the respective reference-axis
    ax: matplotlib axis
        The axis which is used to extract the relevant transformation
        data (data limits and size must not change afterwards)
    reference: string
        The axis that is taken as a reference for the data width.
        Possible values: 'x' and 'y'. Defaults to 'y'.

    Returns
    -------
    float
        Length in points

    Raises
    ------
    ValueError
        If ``reference`` is neither 'x' nor 'y'.
    """
    fig = ax.get_figure()
    if reference == 'x':
        length = fig.bbox_inches.width * ax.get_position().width
        value_range = np.diff(ax.get_xlim())[0]
    elif reference == 'y':
        length = fig.bbox_inches.height * ax.get_position().height
        value_range = np.diff(ax.get_ylim())[0]
    else:
        # Previously an unknown reference fell through to an opaque
        # NameError; fail fast with a clear message instead.
        raise ValueError(
            "reference must be 'x' or 'y', got {!r}".format(reference))
    # Convert length from inches to points (72 points per inch)
    length *= 72
    # Scale the data-unit length by points-per-data-unit
    return d * (length / value_range)
def xlink_end_pos(r_vec, u_vec, s):
    """!Compute the lab-frame position of a crosslink end on a rod.

    @param r_vec: Position vector of the rod's center
    @param u_vec: Orientation unit vector of the rod
    @param s: Signed arc-length of the xlink end measured from the rod center
    @return: Position vector of the xlink end in the system frame
    """
    # Displace from the rod center along its axis by s
    offset = s * u_vec
    return r_vec + offset
def get_max_min_ends(r_i, r_j, u_i, u_j, L_i, L_j):
    """!Collect the extreme end-position values of two rods along one axis.

    For each rod, both the plus end (+L/2) and minus end (-L/2) are
    considered, and the maximum and minimum over all frames are recorded.

    @param r_i: Array of rod i center positions
    @param r_j: Array of rod j center positions
    @param u_i: Array of rod i orientation unit vectors
    @param u_j: Array of rod j orientation unit vectors
    @param L_i: Length of rod i
    @param L_j: Length of rod j
    @return: List [max(+i), min(+i), max(-i), min(-i),
                   max(+j), min(+j), max(-j), min(-j)]
    """
    extrema = []
    for r_vec, u_vec, length in ((r_i, u_i, L_i), (r_j, u_j, L_j)):
        # half: +L/2 for the plus end, -L/2 for the minus end
        for half in (0.5 * length, -0.5 * length):
            tips = half * u_vec + r_vec
            extrema.append(np.amax(tips))
            extrema.append(np.amin(tips))
    return extrema
class LineDataUnits(Line2D):
    """!Class that rescales a 2D matplotlib line to have the proper width and
    length with respect to axis unit values.
    """

    def __init__(self, *args, **kwargs):
        # Stash the requested linewidth (in data units) before Line2D's
        # constructor consumes the 'linewidth' keyword as point units.
        _lw_data = kwargs.pop("linewidth", 1)
        super().__init__(*args, **kwargs)
        self._lw_data = _lw_data

    def _get_lw(self):
        # Translate the stored data-unit width into display points.
        if self.axes is not None:
            # Points per display pixel: 72 points per inch / figure dpi.
            ppd = 72. / self.axes.figure.dpi
            trans = self.axes.transData.transform
            # Transform a vertical span of _lw_data data units into pixels,
            # then into points; [1] selects the y component.
            return ((trans((1, self._lw_data)) - trans((0, 0))) * ppd)[1]
        else:
            # Not attached to an axes yet; fall back to a 1-point width.
            return 1

    def _set_lw(self, lw):
        self._lw_data = lw

    # Shadow Line2D's private ``_linewidth`` attribute with a property so
    # each draw recomputes the point width from the current data transform.
    _linewidth = property(_get_lw, _set_lw)
def draw_rod(ax, r_vec, u_vec, L, rod_diam, color='tab:green', tip_color='b'):
    """!Render a rod as a thick line with a colored circle at its plus end.

    @param ax: Matplotlib axis object
    @param r_vec: Position vector of rod's center
    @param u_vec: Orientation unit vector of rod
    @param L: Length of rod
    @param rod_diam: Diameter of rod
    @param color: Color of rod body
    @param tip_color: Color of plus end of rod
    @return: None
    """
    half = .5 * L
    # End points of the rod in the (x, y) drawing plane
    minus_end = (r_vec[1] - half * u_vec[1], r_vec[2] - half * u_vec[2])
    plus_end = (r_vec[1] + half * u_vec[1], r_vec[2] + half * u_vec[2])
    # Body drawn with a width given in data units so it scales with the axes
    body = LineDataUnits((minus_end[0], plus_end[0]),
                         (minus_end[1], plus_end[1]),
                         linewidth=rod_diam, solid_capstyle='round',
                         color=color, clip_on=False)
    # Plus-end marker drawn above the body (zorder=3)
    tip = Circle(plus_end, .5 * rod_diam, color=tip_color, zorder=3)
    ax.add_patch(tip)
    ax.add_line(body)
def draw_xlink(ax, e_i, e_j, lw=10, color='k', alpha=.5):
    """!Render a crosslink density as a translucent line between two rod ends.

    @param ax: Matplotlib axis object
    @param e_i: End of xlink on rod i
    @param e_j: End of xlink on rod j
    @param lw: Width of line representing xlink density
    @param color: Color of line representing xlink density
    @param alpha: Transparency of line representing xlink density
    @return: None
    """
    xs = (e_i[1], e_j[1])
    ys = (e_i[2], e_j[2])
    density_line = LineDataUnits(xs, ys, linewidth=lw, color=color,
                                 clip_on=False, alpha=alpha)
    ax.add_line(density_line)
def draw_moment_rod(ax, r_vec, u_vec, L, rod_diam,
                    mu00, mu10, mu20, num_max=50):
    """!Draw a diagramitic representation of a rod and moments of xlink end
    density on rod.

    @param ax: Matplotlib axis object
    @param r_vec: Position vector of rod's center
    @param u_vec: Orientation unit vector of rod
    @param L: Length of rod
    @param rod_diam: Diameter of rod
    @param mu00: Zeroth moment of xlink density (represented as polygon color)
    @param mu10: First moment of xlink density corresponding to average end
    position on rod (represented by position of polygon)
    @param mu20: Second moment of xlink density corresponding to variance of
    end position on rod with respect to rod center (used to calculate sigma)
    @param num_max: Maximum number of xlinks to set standard colormap
    @return: ScalarMappable (colorbar source) normalized by num_max
    """
    # Shared color scale so motor number is comparable across rods/frames
    cb = mpl.cm.ScalarMappable(
        mpl.colors.Normalize(0, num_max), 'viridis')
    draw_rod(ax, r_vec, u_vec, L, rod_diam)
    # Mean motor-end position along the rod; guard against division by zero
    scaled_mu10 = mu10 / mu00 if mu00 else 0
    # Pentagon placed at the mean end position, colored by motor number
    mu10_loc = RegularPolygon((r_vec[1] + scaled_mu10 * u_vec[1],
                               r_vec[2] + scaled_mu10 * u_vec[2]),
                              5, rod_diam, color=cb.to_rgba(mu00), zorder=4)
    # Variance of end position about the mean; zeroed for negligible mu00
    variance = (mu20 / mu00) - (scaled_mu10**2) if mu00 > 1e-3 else 0.
    sigma_dist = np.sqrt(variance) if variance >= 1e-3 else 0.
    # mu20_ellipse = Ellipse((r_vec[1], r_vec[2]), mu20_dist*2., rod_diam,
    # angle=np.arctan(u_vec[2]/u_vec[1]), zorder=4, fill=False)
    # One-standard-deviation bar centered on the mean end position
    sigma_bar = FancyArrowPatch(
        (r_vec[1] + (scaled_mu10 - sigma_dist) * u_vec[1],
         r_vec[2] + (scaled_mu10 - sigma_dist) * u_vec[2]),
        (r_vec[1] + (scaled_mu10 + sigma_dist) * u_vec[1],
         r_vec[2] + (scaled_mu10 + sigma_dist) * u_vec[2]),
        arrowstyle=ArrowStyle('|-|',
                              widthA=convert_size_units(.5 * rod_diam, ax),
                              widthB=convert_size_units(.5 * rod_diam, ax)),
        zorder=3)
    ax.add_patch(sigma_bar)
    ax.add_patch(mu10_loc)
    return cb
def graph_vs_time(ax, time, y, n=-1, color='b', fillstyle='full'):
    """!Plot a quantity against time up to frame n as unconnected markers.

    @param ax: Matplotlib axis object to draw on
    @param time: Array of time values
    @param y: Array of values to plot against time
    @param n: Index of the last frame to include (default: all but last)
    @param color: Marker color
    @param fillstyle: Marker fill style
    @return: List of Line2D artists created by the plot call
    """
    return ax.plot(time[:n], y[:n], c=color, marker='o',
                   fillstyle=fillstyle, linestyle='')
def graph_xl_dens(ax, psi, s_i, s_j, **kwargs):
    """!Plot one time point of the crosslinker density field as a colormesh.

    @param ax: Matplotlib axis object to draw on
    @param psi: 2D crosslinker density over rod arc-length coordinates
    @param s_i: Arc-length coordinates along rod i
    @param s_j: Arc-length coordinates along rod j
    @param **kwargs: Optional 'max_dens_val' pins the color scale maximum
    @return: QuadMesh artist created by pcolormesh
    """
    s_i = np.asarray(s_i)
    s_j = np.asarray(s_j)
    # Transpose so s_i maps to the x-axis and s_j to the y-axis
    density = np.asarray(psi).T
    if "max_dens_val" in kwargs:
        # Fixed color range keeps frames of a movie comparable
        return ax.pcolormesh(s_i, s_j, density, vmin=0,
                             vmax=kwargs["max_dens_val"])
    return ax.pcolormesh(s_i, s_j, density)
def graph_2d_rod_diagram(ax, anal, n=-1):
    """!Draw a 2D diagram of both rods for one time frame.

    The drawing mode depends on which attributes the analysis object
    carries: an opening-angle-only run (``phi_arr``), a center-separation
    run (``R_arr``), or a full run with explicit positions and orientations
    (``R1_pos``/``R1_vec``/``R2_pos``/``R2_vec``).

    @param ax: Matplotlib axis object to draw on
    @param anal: Analysis object holding rod state arrays and parameters
    @param n: Index of the time frame to draw (default: last frame)
    @return: None
    """
    params = anal._params
    L_i = params["L1"]
    L_j = params["L2"]
    lw = params['rod_diameter']
    if hasattr(anal, 'phi_arr') and not hasattr(anal, 'R1_vec'):
        # Angle-only run: both rods pivot at the origin, opened by phi
        hphi = anal.phi_arr[n] * .5
        line1 = LineDataUnits((0, L_i * np.cos(hphi)),
                              (0, L_i * np.sin(hphi)),
                              linewidth=lw, solid_capstyle='round',
                              color='tab:green', clip_on=False)
        line2 = LineDataUnits((0, L_j * np.cos(hphi)),
                              (0, -L_j * np.sin(hphi)),
                              linewidth=lw, solid_capstyle='round',
                              color='tab:purple', clip_on=False)
        ax.add_line(line1)
        ax.add_line(line2)
    elif hasattr(anal, 'R_arr'):
        # Separation-only run: parallel horizontal rods offset by vector r
        r = anal.R_arr[n, :]
        line1 = LineDataUnits((-.5 * L_i, .5 * L_i),
                              (0, 0),
                              linewidth=lw, solid_capstyle='round',
                              color='tab:green', clip_on=False)
        line2 = LineDataUnits((-.5 * L_i + r[0], .5 * L_i + r[0]),
                              (r[1], r[1]),
                              linewidth=lw, solid_capstyle='round',
                              color='tab:purple', clip_on=False)
        ax.add_line(line1)
        ax.add_line(line2)
    else:
        # Full run: draw each rod from its center position and orientation
        r_i_arr = anal.R1_pos
        r_j_arr = anal.R2_pos
        u_i_arr = anal.R1_vec
        u_j_arr = anal.R2_vec
        draw_rod(ax, r_i_arr[n], u_i_arr[n], L_i, lw, color='tab:green')
        draw_rod(ax, r_j_arr[n], u_j_arr[n], L_j, lw, color='tab:purple')
        # if anal.OT1_pos is not None:
        #     ot1 = Circle((anal.OT1_pos[n, 1], anal.OT1_pos[n, 2]),
        #                  3 * lw, color='y', alpha=.5)
        #     mtip1 = Circle((-.5 * L_i * u1[1] + r_i[1], -.5 * L_i * u1[2] + r_i[2]),
        #                    lw, color='b', zorder=4)
        #     ax.add_patch(ot1)
        #     ax.add_patch(mtip1)
        # if anal.OT2_pos is not None:
        #     ot2 = Circle((anal.OT2_pos[n, 1], anal.OT2_pos[n, 2]),
        #                  3 * lw, color='y', alpha=.5)
        #     mtip2 = Circle((-.5 * L_j * u2[1] + r_j[1], -.5 * L_j * u2[2] + r_j[2]),
        #                    lw, color='b', zorder=4)
        #     ax.add_patch(ot2)
        #     ax.add_patch(mtip2)
        # Get all extreme positions of tips in the first dimension to maintain
        # consistent graphing size
        x_ends = get_max_min_ends(
            r_i_arr[:, 1], r_j_arr[:, 1], u_i_arr[:, 1], u_j_arr[:, 1], L_i, L_j)
        # Get all extreme positions of tips in the second dimension to maintain
        # consistent graphing size
        y_ends = get_max_min_ends(
            r_i_arr[:, 2], r_j_arr[:, 2], u_i_arr[:, 2], u_j_arr[:, 2], L_i, L_j)
        # Pad the limits by 25% outward (shrink 25% toward zero otherwise)
        max_x = max(x_ends + y_ends)
        max_x = max_x * 1.25 if max_x > 0 else .75 * max_x
        min_x = min(x_ends + y_ends)
        min_x = min_x * 1.25 if min_x < 0 else .75 * min_x
        # Make a square box always
        max_y = max_x
        min_y = min_x
        ax.set_xlim(min_x, max_x)
        ax.set_ylim(min_y, max_y)
        ax.set_xlabel(r'x (nm)')
        ax.set_ylabel(r'y (nm)')
    # labels = ["fil$_i$", "fil$_j$", "Plus-end"]
    # if anal.OT1_pos is not None or anal.OT2_pos is not None:
    #     labels += ["Optical trap", "Bead"]
    # ax.legend(labels, loc="upper right")
def graph_2d_rod_pde_diagram(ax, anal, n=-1, scale=50):
    """!Draw the rod diagram with a coarse-grained overlay of xlink densities.

    @param ax: Matplotlib axis object to draw on
    @param anal: PDE analysis object with rod state and xlink distribution
    @param n: Index of the time frame to draw (default: last frame)
    @param scale: Multiplier converting coarse bin density to line opacity
    @return: None
    """
    graph_2d_rod_diagram(ax, anal, n)

    L_i = anal._params["L1"]
    L_j = anal._params["L2"]
    rod_diam = anal._params['rod_diameter']
    r_i = anal.R1_pos[n]
    r_j = anal.R2_pos[n]
    u_i = anal.R1_vec[n]
    u_j = anal.R2_vec[n]
    xl_distr = anal.xl_distr[:, :, n]
    # Number of coarse bins along each rod (one bin per rod diameter)
    N, M = int(L_i / rod_diam) + 1, int(L_j / rod_diam) + 1
    # Fine solution-grid points per coarse bin along each rod
    a, b = int(xl_distr.shape[0] / N), int(xl_distr.shape[1] / M)
    # s_i = anal.s_i.reshape(N, a).mean(axis=1)
    # s_j = anal.s_j.reshape(M, b).mean(axis=1)
    # Coarse arc-length coordinates along each rod, spaced by rod_diam
    s_i = np.arange(-.5 * (L_i + rod_diam), .5 * (L_i + rod_diam), rod_diam)
    s_j = np.arange(-.5 * (L_j + rod_diam), .5 * (L_j + rod_diam), rod_diam)
    # Sum the fine-grid density into an (N, M) coarse array
    xl_distr_coarse = xl_distr[:a * N, :b * M].reshape(N, a, M, b)
    xl_distr_coarse = xl_distr_coarse.sum(axis=(1, 3))
    for index, val in np.ndenumerate(xl_distr_coarse):
        e_i = xlink_end_pos(r_i, u_i, s_i[index[0]])
        e_j = xlink_end_pos(r_j, u_j, s_j[index[1]])
        # print(e_i, e_j)
        # Line opacity encodes the bin's mean density, clipped to [0, 1]
        draw_xlink(ax, e_i, e_j, color='r',
                   alpha=np.clip(val * scale / (a * b), 0, 1))
def graph_2d_rod_moment_diagram(ax, anal, n=-1):
    """!Draw both rods annotated with their xlink density moments.

    @param ax: Matplotlib axis object to draw on
    @param anal: Moment-expansion analysis object with rod state and moments
    @param n: Index of the time frame to draw (default: last frame)
    @return: ScalarMappable for the motor-number colorbar
    """
    params = anal._params
    L_i = params["L1"]
    L_j = params["L2"]
    rod_diam = params['rod_diameter']
    r_i_arr = anal.R1_pos
    r_j_arr = anal.R2_pos
    u_i_arr = anal.R1_vec
    u_j_arr = anal.R2_vec
    # Normalize colors to the run-wide maximum motor number
    mu00_max = np.amax(anal.mu00)

    # Get all extreme positions of tips in the first dimension to maintain
    # consistent graphing size
    x_ends = get_max_min_ends(
        r_i_arr[:, 1], r_j_arr[:, 1], u_i_arr[:, 1], u_j_arr[:, 1], L_i, L_j)
    # Get all extreme positions of tips in the second dimension to maintain
    # consistent graphing size
    y_ends = get_max_min_ends(
        r_i_arr[:, 2], r_j_arr[:, 2], u_i_arr[:, 2], u_j_arr[:, 2], L_i, L_j)
    # Pad the limits by 25% outward (shrink 25% toward zero otherwise)
    max_x = max(x_ends + y_ends)
    max_x = max_x * 1.25 if max_x > 0 else .75 * max_x
    min_x = min(x_ends + y_ends)
    min_x = min_x * 1.25 if min_x < 0 else .75 * min_x
    # Make a square box always
    max_y = max_x
    min_y = min_x
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_y, max_y)
    ax.set_xlabel(r'x (nm)')
    ax.set_ylabel(r'y (nm)')
    # Both rods share the same num_max, so either returned mappable is a
    # valid colorbar source (the second assignment overwrites the first).
    cb = draw_moment_rod(ax, r_i_arr[n], u_i_arr[n], L_i, rod_diam,
                         anal.mu00[n], anal.mu10[n], anal.mu20[n],
                         num_max=mu00_max)
    cb = draw_moment_rod(ax, r_j_arr[n], u_j_arr[n], L_j, rod_diam,
                         anal.mu00[n], anal.mu01[n], anal.mu02[n],
                         num_max=mu00_max)
    # labels = ["fil$_i$", "fil$_j$", "Plus-end", r"$\mu^{{10}}$", r"$\mu^{{20}}$"]
    # if anal.OT1_pos is not None or anal.OT2_pos is not None:
    #     labels += ["Optical trap", "Bead"]
    # ax.legend(labels, loc="upper right")
    return cb
def me_graph_min_data_2d(fig, axarr, n, me_anal):
    """!Graph the rod moment diagram and reconstructed xlink density for one
    frame (minimal two-panel layout).

    @param fig: Matplotlib figure object
    @param axarr: Two axes: [rod diagram, density colormesh]
    @param n: Index of the time frame to graph
    @param me_anal: Moment-expansion analysis object
    @return: List of artists (for animation blitting)
    """
    # Clean up if lines on axis object to speed up movie making
    if not me_anal.init_flag:
        for ax in axarr.flatten():
            ax.clear()
        for artist in fig.gca().lines + fig.gca().collections:
            artist.remove()
            del artist
    cb = graph_2d_rod_moment_diagram(axarr[0], me_anal, n)
    cb1 = graph_xl_dens(axarr[1],
                        me_anal.xl_distr[:, :, n],
                        me_anal.s_i,
                        me_anal.s_j,
                        max_dens_val=me_anal.max_dens_val)
    axarr[1].set_xlabel(
        'Head distance from \n center of fil$_i$ $s_i$ (nm)')
    axarr[1].set_ylabel(
        'Head distance from \n center of fil$_j$ $s_j$ (nm)')
    if me_anal.init_flag:
        # One-time setup: square aspect and colorbars only on the first frame
        axarr[0].set_aspect(1.0)
        axarr[1].set_aspect(1.0)
        cbar = fig.colorbar(cb, ax=axarr[0])
        cbar.set_label(
            r'Motor number $\langle N_{i,j} \rangle$')
        cbar1 = fig.colorbar(cb1, ax=axarr[1])
        cbar1.set_label(
            r'Reconstructed motor density $\psi_{i,j}$')
        me_anal.init_flag = False
    # Time stamp in the upper-left corner of the rod diagram
    axarr[0].text(.05, .90, "Time = {:.2f} sec".format(me_anal.time[n]),
                  horizontalalignment='left',
                  verticalalignment='bottom',
                  transform=axarr[0].transAxes)
    return fig.gca().lines + fig.gca().collections
def me_graph_all_data_2d(fig, axarr, n, me_anal):
    """!Graph the rod diagram plus all scalar time series of a
    moment-expansion run (six-panel layout).

    @param fig: Matplotlib figure object
    @param axarr: Six axes: [rod diagram, separation, angle, zeroth moment,
                  first moments, second moments]
    @param n: Index of the time frame to graph
    @param me_anal: Moment-expansion analysis object
    @return: List of artists (for animation blitting)
    """
    # Clean up if lines on axis object to speed up movie making
    if not me_anal.init_flag:
        for ax in axarr.flatten():
            ax.clear()
        for artist in fig.gca().lines + fig.gca().collections:
            artist.remove()
            del artist
    axarr[1].set_xlabel(r'Time (sec)')
    axarr[1].set_ylabel('Distance between fils \n centers of mass (nm)')
    axarr[1].set_xlim(left=0, right=me_anal.time[-1])
    axarr[1].set_ylim(np.amin(me_anal.dR_arr),
                      np.amax(me_anal.dR_arr))
    axarr[2].set_xlabel(r'Time (sec)')
    axarr[2].set_ylabel('Angle between fil \n orientation vectors (rad)')
    axarr[2].set_xlim(left=0, right=me_anal.time[-1])
    # nanmin/nanmax: the angle array may contain NaNs
    axarr[2].set_ylim(np.nanmin(me_anal.phi_arr),
                      np.nanmax(me_anal.phi_arr))
    axarr[3].set_xlabel(r'Time (sec)')
    axarr[3].set_ylabel(r'Motor number')
    axarr[3].set_xlim(left=0, right=me_anal.time[-1])
    axarr[3].set_ylim(np.amin(me_anal.mu00),
                      np.amax(me_anal.mu00))
    p_n = np.stack((me_anal.mu10, me_anal.mu01))
    axarr[4].set_xlabel(r'Time (sec)')
    axarr[4].set_ylabel(r'First moments (nm)')
    axarr[4].set_xlim(left=0, right=me_anal.time[-1])
    axarr[4].set_ylim(np.amin(p_n), np.amax(p_n))
    # mu_kl = me_anal._h5_data['/xl_data/second_moments'][...]
    mu_kl = np.stack((me_anal.mu11, me_anal.mu20, me_anal.mu02))
    axarr[5].set_xlabel(r'Time (sec)')
    axarr[5].set_ylabel(r'Second moments (nm$^2$)')
    axarr[5].set_xlim(left=0, right=me_anal.time[-1])
    axarr[5].set_ylim(np.amin(mu_kl), np.amax(mu_kl))
    # Draw rods
    if me_anal.graph_type == 'min':
        graph_2d_rod_diagram(axarr[0], me_anal, n)
    else:
        cb = graph_2d_rod_moment_diagram(axarr[0], me_anal, n)
    if me_anal.init_flag:
        # One-time setup: square aspect and (optionally) the colorbar
        axarr[0].set_aspect(1.0)
        if me_anal.graph_type == 'all':
            fig.colorbar(cb, ax=axarr[0])
        me_anal.init_flag = False
    # Graph rod center separations
    graph_vs_time(axarr[1], me_anal.time, me_anal.dR_arr, n)
    # Graph angle between rod orientations
    graph_vs_time(axarr[2], me_anal.time, me_anal.phi_arr, n)
    # Graph zeroth moment aka number of crosslinkers
    graph_vs_time(axarr[3], me_anal.time, me_anal.mu00, n)
    # Graph first moments of crosslink distribution
    graph_vs_time(axarr[4], me_anal.time, me_anal.mu10, n,
                  color='tab:green')
    graph_vs_time(axarr[4], me_anal.time, me_anal.mu01, n,
                  color='tab:purple')
    # Graph second moments of crosslinker distribution
    graph_vs_time(axarr[5], me_anal.time, me_anal.mu11, n,
                  color='b')
    graph_vs_time(axarr[5], me_anal.time, me_anal.mu20, n,
                  color='tab:green')
    graph_vs_time(axarr[5], me_anal.time, me_anal.mu02, n,
                  color='tab:purple')
    # # Effective moment graphing
    # ## Zeroth moment
    # graph_vs_time(axarr[3], me_anal.time,
    #               me_anal.mu_kl_eff[:, 0], n, fillstyle='none')
    # ## First moments
    # graph_vs_time(axarr[4], me_anal.time, me_anal.mu_kl_eff[:, 1], n,
    #               color='tab:green', fillstyle='none')
    # graph_vs_time(axarr[4], me_anal.time, me_anal.mu_kl_eff[:, 2], n,
    #               color='tab:purple', fillstyle='none')
    # ## Second moments
    # graph_vs_time(axarr[5], me_anal.time, me_anal.mu_kl_eff[:, 3], n,
    #               color='b', fillstyle='none')
    # graph_vs_time(axarr[5], me_anal.time, me_anal.mu_kl_eff[:, 4], n,
    #               color='tab:green', fillstyle='none')
    # graph_vs_time(axarr[5], me_anal.time, me_anal.mu_kl_eff[:, 5], n,
    #               color='tab:purple', fillstyle='none')
    # Legend information
    axarr[1].legend([r"$\Delta$R({:.2f}) = {:.1f} nm".format(
        me_anal.time[n], me_anal.dR_arr[n])])
    axarr[2].legend([r"$\phi$({:.2f}) = {:.1f} rad".format(
        me_anal.time[n], me_anal.phi_arr[n])])
    axarr[3].legend([r"N({:.2f})={:.1f}".format(
        me_anal.time[n], me_anal.mu00[n])])
    axarr[4].legend([r"$\mu^{{1,0}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
                                                              me_anal.mu10[n]),
                     r"$\mu^{{0,1}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
                                                              me_anal.mu01[n])])
    axarr[5].legend([r"$\mu^{{1,1}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
                                                              me_anal.mu11[n]),
                     r"$\mu^{{2,0}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
                                                              me_anal.mu20[n]),
                     r"$\mu^{{0,2}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
                                                              me_anal.mu02[n])])
    return fig.gca().lines + fig.gca().collections
def me_graph_distr_data_2d(fig, axarr, n, me_anal):
    """!Graph the rod diagram, reconstructed xlink density, and moment time
    series of a moment-expansion run (nine-panel layout).

    @param fig: Matplotlib figure object
    @param axarr: Axes array; indices used: 0 rod diagram, 1 density,
                  2 motor number, 3 separation, 4 angle, 5 first moments,
                  8 second moments
    @param n: Index of the time frame to graph
    @param me_anal: Moment-expansion analysis object
    @return: List of artists (for animation blitting)
    """
    # Clean up if lines on axis object to speed up movie making
    if not me_anal.init_flag:
        for ax in axarr.flatten():
            ax.clear()
        for artist in fig.gca().lines + fig.gca().collections:
            artist.remove()
            del artist
    # Draw rods
    graph_2d_rod_diagram(axarr[0], me_anal, n)
    cb1 = graph_xl_dens(axarr[1],
                        me_anal.xl_distr[:, :, n],
                        me_anal.s_i,
                        me_anal.s_j,
                        max_dens_val=me_anal.max_dens_val)
    # Graph rod center separations
    axarr[3].set_xlabel(r'Time (sec)')
    axarr[3].set_ylabel('Distance between fils \n centers of mass (nm)')
    axarr[3].set_xlim(left=0, right=me_anal.time[-1])
    axarr[3].set_ylim(np.amin(me_anal.dR_arr),
                      np.amax(me_anal.dR_arr))
    graph_vs_time(axarr[3], me_anal.time, me_anal.dR_arr, n)
    # Graph angle between rod orientations
    axarr[4].set_xlabel(r'Time (sec)')
    axarr[4].set_ylabel('Angle between fil \n orientation vectors (rad)')
    axarr[4].set_xlim(left=0, right=me_anal.time[-1])
    # nanmin/nanmax: the angle array may contain NaNs
    axarr[4].set_ylim(np.nanmin(me_anal.phi_arr),
                      np.nanmax(me_anal.phi_arr))
    graph_vs_time(axarr[4], me_anal.time, me_anal.phi_arr, n)
    # Graph zeroth moment aka number of crosslinkers
    axarr[2].set_xlabel(r'Time (sec)')
    axarr[2].set_ylabel(r'Motor number')
    axarr[2].set_xlim(left=0, right=me_anal.time[-1])
    axarr[2].set_ylim(np.amin(me_anal.mu00),
                      np.amax(me_anal.mu00))
    graph_vs_time(axarr[2], me_anal.time, me_anal.mu00, n)
    if me_anal._params['ODE_type'] == 'zrl_bvg':
        # Overlay effective moments (open markers) for the bvg closure
        graph_vs_time(axarr[2], me_anal.time, me_anal.mu_kl_eff[:, 0],
                      n, fillstyle='none')
    # Graph first moments of crosslink distribution
    p_n = np.stack((me_anal.mu10, me_anal.mu01))
    axarr[5].set_xlabel(r'Time (sec)')
    axarr[5].set_ylabel(r'First moments (nm)')
    axarr[5].set_xlim(left=0, right=me_anal.time[-1])
    axarr[5].set_ylim(np.amin(p_n), np.amax(p_n))
    graph_vs_time(axarr[5], me_anal.time, me_anal.mu10, n,
                  color='tab:green')
    graph_vs_time(axarr[5], me_anal.time, me_anal.mu01, n,
                  color='tab:purple')
    if me_anal._params['ODE_type'] == 'zrl_bvg':
        graph_vs_time(axarr[5], me_anal.time, me_anal.mu_kl_eff[:, 1], n,
                      color='tab:green', fillstyle='none')
        graph_vs_time(axarr[5], me_anal.time, me_anal.mu_kl_eff[:, 2], n,
                      color='tab:purple', fillstyle='none')
    # Graph second moments of crosslinker distribution
    mu_kl = np.stack((me_anal.mu11, me_anal.mu20, me_anal.mu02))
    axarr[8].set_xlabel(r'Time (sec)')
    axarr[8].set_ylabel(r'Second moments (nm$^2$)')
    axarr[8].set_xlim(left=0, right=me_anal.time[-1])
    axarr[8].set_ylim(np.amin(mu_kl), np.amax(mu_kl))
    graph_vs_time(axarr[8], me_anal.time, me_anal.mu11, n,
                  color='b')
    graph_vs_time(axarr[8], me_anal.time, me_anal.mu20, n,
                  color='tab:green')
    graph_vs_time(axarr[8], me_anal.time, me_anal.mu02, n,
                  color='tab:purple')
    if me_anal._params['ODE_type'] == 'zrl_bvg':
        graph_vs_time(axarr[8], me_anal.time, me_anal.mu_kl_eff[:, 3], n,
                      color='b', fillstyle='none')
        graph_vs_time(axarr[8], me_anal.time, me_anal.mu_kl_eff[:, 4], n,
                      color='tab:green', fillstyle='none')
        graph_vs_time(axarr[8], me_anal.time, me_anal.mu_kl_eff[:, 5], n,
                      color='tab:purple', fillstyle='none')
    if me_anal.init_flag:
        # One-time setup: square aspect and the density colorbar
        axarr[0].set_aspect(1.0)
        axarr[1].set_aspect(1.0)
        fig.colorbar(cb1, ax=axarr[1])
        me_anal.init_flag = False
    # Legend information (currently disabled)
    # axarr[1].legend([r"$\Delta$R({:.2f}) = {:.1f} nm".format(
    #     me_anal.time[n], me_anal.dR_arr[n])])
    # axarr[2].legend([r"$\phi$({:.2f}) = {:.1f} rad".format(
    #     me_anal.time[n], me_anal.phi_arr[n])])
    # axarr[3].legend([r"N({:.2f})={:.1f}".format(
    #     me_anal.time[n], me_anal.mu00[n])])
    # axarr[4].legend([r"$\mu^{{1,0}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
    #                                                           me_anal.mu10[n]),
    #                  r"$\mu^{{0,1}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
    #                                                           me_anal.mu01[n])])
    # axarr[5].legend([r"$\mu^{{1,1}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
    #                                                           me_anal.mu11[n]),
    #                  r"$\mu^{{2,0}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
    #                                                           me_anal.mu20[n]),
    #                  r"$\mu^{{0,2}}$({:.2f}) = {:.1f}".format(me_anal.time[n],
    #                                                           me_anal.mu02[n])])
    return fig.gca().lines + fig.gca().collections
def pde_graph_all_data_2d(fig, axarr, n, pde_anal):
# Clean up if lines
if not pde_anal.init_flag:
for ax in axarr.flatten():
ax.clear()
for artist in fig.gca().lines + fig.gca().collections:
artist.remove()
del artist
# Init axis labels and ranges
axarr[1].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[1].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
axarr[2].set_xlabel(r'Time (sec)')
axarr[2].set_ylabel(r'Motor number')
axarr[2].set_xlim(left=0, right=pde_anal.time[-1])
axarr[2].set_ylim(np.amin(pde_anal.mu00), np.amax(pde_anal.mu00))
axarr[3].set_xlabel(r'Time (sec)')
axarr[3].set_ylabel(r'Motor force (pN)')
axarr[3].set_xlim(left=0, right=pde_anal.time[-1])
axarr[3].set_ylim(np.amin(pde_anal.force_arr), np.amax(pde_anal.force_arr))
axarr[4].set_xlabel(r'Time (sec)')
axarr[4].set_ylabel(r'Motor torque (pN*nm)')
axarr[4].set_xlim(left=0, right=pde_anal.time[-1])
axarr[4].set_ylim(np.amin(pde_anal.torque_arr),
np.amax(pde_anal.torque_arr))
axarr[5].set_xlabel(r'Time (sec)')
axarr[5].set_ylabel(r'First moments (nm)')
axarr[5].set_xlim(left=0, right=pde_anal.time[-1])
axarr[5].set_ylim(min(np.amin(pde_anal.mu10), np.amin(pde_anal.mu01)),
max(np.amax(pde_anal.mu10), np.amax(pde_anal.mu01)))
axarr[6].set_xlabel(r'Time (sec)')
axarr[6].set_ylabel('Distance between fils \n centers of mass (nm)')
axarr[6].set_xlim(left=0, right=pde_anal.time[-1])
axarr[6].set_ylim(np.amin(pde_anal.dR_arr),
np.amax(pde_anal.dR_arr))
axarr[7].set_xlabel(r'Time (sec)')
axarr[7].set_ylabel('Angle between fil \n orientation vectors (rad)')
axarr[7].set_xlim(left=0, right=pde_anal.time[-1])
axarr[7].set_ylim(np.nanmin(pde_anal.phi_arr),
np.nanmax(pde_anal.phi_arr))
axarr[8].set_xlabel(r'Time (sec)')
axarr[8].set_ylabel(r'Second moments (nm$^2$)')
axarr[8].set_xlim(left=0, right=pde_anal.time[-1])
axarr[8].set_ylim(min(np.amin(pde_anal.mu11),
np.amin(pde_anal.mu20),
np.amin(pde_anal.mu02)),
max(np.amax(pde_anal.mu11),
np.amax(pde_anal.mu20),
np.amax(pde_anal.mu02)))
# Draw rods
graph_2d_rod_diagram(axarr[0], pde_anal, n)
# Make crosslinker density plot
c = graph_xl_dens(axarr[1],
pde_anal.xl_distr[:, :, n],
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
if pde_anal.init_flag:
axarr[0].set_aspect(1.0)
axarr[1].set_aspect(1.0)
fig.colorbar(c, ax=axarr[1])
pde_anal.init_flag = False
# Graph zeroth moment aka number of crosslinkers
graph_vs_time(axarr[2], pde_anal.time, pde_anal.mu00, n)
# Graph forces
graph_vs_time(axarr[3], pde_anal.time, pde_anal.force_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[3], pde_anal.time, pde_anal.force_arr[:, 1], n,
color='tab:purple')
# Graph torques
graph_vs_time(axarr[4], pde_anal.time, pde_anal.torque_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[4], pde_anal.time, pde_anal.torque_arr[:, 1], n,
color='tab:purple')
# Graph first moments of crosslink distribution
graph_vs_time(axarr[5], pde_anal.time, pde_anal.mu10, n,
color='tab:green')
graph_vs_time(axarr[5], pde_anal.time, pde_anal.mu01, n,
color='tab:purple')
# Graph rod center separations
graph_vs_time(axarr[6], pde_anal.time, pde_anal.dR_arr, n)
# Graph angle between rod orientations
graph_vs_time(axarr[7], pde_anal.time, pde_anal.phi_arr, n)
# Graph second moments of crosslinker distribution
graph_vs_time(axarr[8], pde_anal.time, pde_anal.mu11, n,
color='b')
graph_vs_time(axarr[8], pde_anal.time, pde_anal.mu20, n,
color='tab:green')
graph_vs_time(axarr[8], pde_anal.time, pde_anal.mu02, n,
color='tab:purple')
# Legend information
axarr[2].legend(["N({:.2f}) = {:.1f}".format(
pde_anal.time[n], pde_anal.mu00[n])])
axarr[3].legend([r"F$_i$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.force_arr[n, 0]),
r"F$_j$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.force_arr[n, 1])])
axarr[4].legend([r"$T_i$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.torque_arr[n, 0]),
r"$T_j$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.torque_arr[n, 1])])
axarr[5].legend([r"$\mu^{{1,0}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu10[n]),
r"$\mu^{{0,1}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu01[n])])
axarr[6].legend([r"$\Delta$R({:.2f}) = {:.1f} nm".format(
pde_anal.time[n], pde_anal.dR_arr[n])])
axarr[7].legend([r"$\phi$({:.2f}) = {:.1f} rad".format(
pde_anal.time[n], pde_anal.phi_arr[n])])
axarr[8].legend([r"$\mu^{{1,1}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu11[n]),
r"$\mu^{{2,0}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu20[n]),
r"$\mu^{{0,2}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu02[n])])
return fig.gca().lines + fig.gca().collections
def pde_graph_moment_data_2d(fig, axarr, n, pde_anal):
# Clean up if lines
if not pde_anal.init_flag:
for ax in axarr.flatten():
ax.clear()
for artist in fig.gca().lines + fig.gca().collections:
artist.remove()
del artist
# Init axis labels and ranges
axarr[1].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[1].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
axarr[2].set_xlabel(r'Time (sec)')
axarr[2].set_ylabel(r'Motor number')
axarr[2].set_xlim(left=0, right=pde_anal.time[-1])
axarr[2].set_ylim(np.amin(pde_anal.mu00),
np.amax(pde_anal.mu00))
axarr[3].set_xlabel(r'Time (sec)')
axarr[3].set_ylabel(r'First moments (nm)')
axarr[3].set_xlim(left=0, right=pde_anal.time[-1])
axarr[3].set_ylim(min(np.amin(pde_anal.mu10), np.amin(pde_anal.mu01)),
max(np.amax(pde_anal.mu10), np.amax(pde_anal.mu01)))
axarr[4].set_xlabel(r'Time (sec)')
axarr[4].set_ylabel(r'Second moments (nm$^2$)')
axarr[4].set_xlim(left=0, right=pde_anal.time[-1])
axarr[4].set_ylim(min(np.amin(pde_anal.mu11),
np.amin(pde_anal.mu20),
np.amin(pde_anal.mu02)),
max(np.amax(pde_anal.mu11),
np.amax(pde_anal.mu20),
np.amax(pde_anal.mu02)))
axarr[5].set_xlabel(r'Time (sec)')
axarr[5].set_ylabel(r'Motor force (pN)')
axarr[5].set_xlim(left=0, right=pde_anal.time[-1])
axarr[5].set_ylim(np.amin(pde_anal.force_arr),
np.amax(pde_anal.force_arr))
axarr[6].set_xlabel(r'Time (sec)')
axarr[6].set_ylabel(r'Motor torque (pN$\cdot$nm)')
axarr[6].set_xlim(left=0, right=pde_anal.time[-1])
axarr[6].set_ylim(np.amin(pde_anal.torque_arr),
np.amax(pde_anal.torque_arr))
# Draw rods
graph_2d_rod_diagram(axarr[0], pde_anal, n)
# Make crosslinker density plot
c = graph_xl_dens(axarr[1],
pde_anal.xl_distr[:, :, n],
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
if pde_anal.init_flag:
axarr[0].set_aspect(1.0)
axarr[1].set_aspect(1.0)
fig.colorbar(c, ax=axarr[1])
pde_anal.init_flag = False
# Graph zeroth moment aka number of crosslinkers
graph_vs_time(axarr[2], pde_anal.time, pde_anal.mu00, n)
# Graph first moments of crosslink distribution
graph_vs_time(axarr[3], pde_anal.time, pde_anal.mu10, n,
color='tab:green')
graph_vs_time(axarr[3], pde_anal.time, pde_anal.mu01, n,
color='tab:purple')
# Graph second moments of crosslinker distribution
graph_vs_time(axarr[4], pde_anal.time, pde_anal.mu11, n,
color='b')
graph_vs_time(axarr[4], pde_anal.time, pde_anal.mu20, n,
color='tab:green')
graph_vs_time(axarr[4], pde_anal.time, pde_anal.mu02, n,
color='tab:purple')
# Graph forces
graph_vs_time(axarr[5], pde_anal.time, pde_anal.force_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[5], pde_anal.time, pde_anal.force_arr[:, 1], n,
color='tab:purple')
# Graph torques
graph_vs_time(axarr[6], pde_anal.time, pde_anal.torque_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[6], pde_anal.time, pde_anal.torque_arr[:, 1], n,
color='tab:purple')
# Legend information
axarr[2].legend([r"N({:.2f}) = {:.1f}".format(
pde_anal.time[n], pde_anal.mu00[n])])
axarr[3].legend([r"$\mu^{{1,0}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu10[n]),
r"$\mu^{{0,1}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu01[n])])
axarr[4].legend([r"$\mu^{{1,1}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu11[n]),
r"$\mu^{{2,0}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu20[n]),
r"$\mu^{{0,2}}$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.mu02[n])])
axarr[5].legend([r"F$_i$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.force_arr[n, 0]),
r"F$_j$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.force_arr[n, 1])])
axarr[6].legend([r"$T_i$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.torque_arr[n, 0]),
r"$T_j$({:.2f}) = {:.1f}".format(pde_anal.time[n],
pde_anal.torque_arr[n, 1])])
return fig.gca().lines + fig.gca().collections
def pde_graph_mts_xlink_distr_2d(fig, axarr, n, pde_anal):
# Clean up if lines
if not pde_anal.init_flag:
for ax in axarr.flatten():
ax.clear()
for artist in fig.gca().lines + fig.gca().collections:
artist.remove()
del artist
# Draw rods
graph_2d_rod_pde_diagram(axarr[0], pde_anal, n,
scale=1. / (pde_anal.max_dens_val))
# Make density plot
c = graph_xl_dens(axarr[1],
pde_anal.xl_distr[:, :, n],
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
axarr[1].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[1].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
if pde_anal.init_flag:
axarr[0].set_aspect(1.0)
axarr[1].set_aspect(1.0)
fig.colorbar(c, ax=axarr[1])
pde_anal.init_flag = False
axarr[0].text(.05, .90, "Time = {:.2f} sec".format(pde_anal.time[n]),
horizontalalignment='left',
verticalalignment='bottom',
transform=axarr[0].transAxes)
# pde_anal.time[n])], facecolor='inherit')
return fig.gca().lines + fig.gca().collections
def pde_graph_stationary_runs_2d(fig, axarr, n, pde_anal):
# Clean up if lines
if not pde_anal.init_flag:
for ax in axarr.flatten():
ax.clear()
for artist in fig.gca().lines + fig.gca().collections:
artist.remove()
del artist
# Draw rods
graph_2d_rod_diagram(axarr[0], pde_anal, n)
# Make density plot
c = graph_xl_dens(axarr[1],
pde_anal.xl_distr[:, :, n],
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
axarr[1].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[1].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
axarr[2].set_xlabel(r'Time (sec)')
axarr[2].set_ylabel(r'Motor number')
axarr[2].set_xlim(left=0, right=pde_anal.time[-1])
axarr[2].set_ylim(np.amin(pde_anal.mu00),
np.amax(pde_anal.mu00))
axarr[3].set_xlabel(r'Time (sec)')
axarr[3].set_ylabel(r'Motor force (pN)')
axarr[3].set_xlim(left=0, right=pde_anal.time[-1])
axarr[3].set_ylim(np.amin(pde_anal.force_arr),
np.amax(pde_anal.force_arr))
axarr[4].set_xlabel(r'Time (sec)')
axarr[4].set_ylabel(r'Motor torque (pN$\cdotnm)')
axarr[4].set_xlim(left=0, right=pde_anal.time[-1])
axarr[4].set_ylim(np.amin(pde_anal.torque_arr),
np.amax(pde_anal.torque_arr))
if pde_anal.init_flag:
axarr[0].set_aspect(1.0)
axarr[1].set_aspect(1.0)
fig.colorbar(c, ax=axarr[1])
pde_anal.init_flag = False
graph_vs_time(axarr[2], pde_anal.time, pde_anal.mu00, n)
graph_vs_time(axarr[3], pde_anal.time, pde_anal.force_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[3], pde_anal.time, pde_anal.force_arr[:, 1], n,
color='tab:purple')
graph_vs_time(axarr[4], pde_anal.time, pde_anal.torque_arr[:, 0], n,
color='tab:green')
graph_vs_time(axarr[4], pde_anal.time, pde_anal.torque_arr[:, 1], n,
color='tab:purple')
return fig.gca().lines + fig.gca().collections
######################################
# Crosslinker distribution moments #
######################################
def pde_graph_recreate_xlink_distr_2d(fig, axarr, n, pde_anal):
# Clean up if lines
if not pde_anal.init_flag:
for ax in axarr.flatten():
ax.clear()
for artist in fig.gca().lines + fig.gca().collections:
artist.remove()
del artist
# Draw rods
graph_2d_rod_pde_diagram(axarr[0], pde_anal, n,
scale=1. / (pde_anal.max_dens_val))
# Make a function of a recreated distribution
# Make density plot
cb1 = graph_xl_dens(axarr[1],
pde_anal.xl_distr[:, :, n],
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
# Make recreation of distribution
xl_distr_rec_func = pde_anal.create_distr_approx_func()
s_j_grid, s_i_grid = np.meshgrid(pde_anal.s_j, pde_anal.s_i)
xl_distr_rec = xl_distr_rec_func(s_i_grid, s_j_grid, n)
cb2 = graph_xl_dens(axarr[2],
xl_distr_rec,
pde_anal.s_i,
pde_anal.s_j,
max_dens_val=pde_anal.max_dens_val)
axarr[1].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[1].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
axarr[2].set_xlabel(
'Head distance from \n center of fil$_i$ $s_i$ (nm)')
axarr[2].set_ylabel(
'Head distance from \n center of fil$_j$ $s_j$ (nm)')
if pde_anal.init_flag:
fig.colorbar(cb1, ax=axarr[1])
fig.colorbar(cb2, ax=axarr[2])
axarr[0].set_aspect(1.0)
axarr[1].set_aspect(1.0)
axarr[2].set_aspect(1.0)
pde_anal.init_flag = False
axarr[0].text(.05, .95, "Time = {:.2f} sec".format(pde_anal.time[n]),
horizontalalignment='left',
verticalalignment='bottom',
transform=axarr[0].transAxes)
# pde_anal.time[n])], facecolor='inherit')
return fig.gca().lines + fig.gca().collections
| 40.406922 | 86 | 0.555342 | 6,586 | 43,195 | 3.420437 | 0.064683 | 0.065877 | 0.033205 | 0.039064 | 0.794247 | 0.765837 | 0.740045 | 0.717228 | 0.705065 | 0.690682 | 0 | 0.033253 | 0.293344 | 43,195 | 1,068 | 87 | 40.444757 | 0.704757 | 0.201829 | 0 | 0.6 | 0 | 0 | 0.092702 | 0.009315 | 0 | 0 | 0 | 0.021536 | 0 | 1 | 0.032353 | false | 0 | 0.005882 | 0 | 0.066176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c2c7d434e033a6804caefa0f02cdcf1508ad3291 | 149 | py | Python | server/Location/admin.py | adamA113/servize | 89933c3864d997188ec79ad690b37f51bca54aa3 | [
"MIT"
] | null | null | null | server/Location/admin.py | adamA113/servize | 89933c3864d997188ec79ad690b37f51bca54aa3 | [
"MIT"
] | null | null | null | server/Location/admin.py | adamA113/servize | 89933c3864d997188ec79ad690b37f51bca54aa3 | [
"MIT"
] | 2 | 2020-12-26T09:50:17.000Z | 2020-12-26T09:52:45.000Z | from django.contrib import admin
from Location.models import City
# Register your models here.
admin.site.register(City)
# admin.site.register(Area) | 24.833333 | 32 | 0.805369 | 22 | 149 | 5.454545 | 0.590909 | 0.15 | 0.283333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107383 | 149 | 6 | 33 | 24.833333 | 0.902256 | 0.348993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6c15db0a3d84fb945ac8100bc99b080aae3a50ac | 32,337 | py | Python | azure-iot-sdk-python/provisioning_service_client/tests/client_ut.py | wadooddaoud/Gast_Iot_Sensor_Development | 3d923cc0632e380da7f0e960d74df934735b8fea | [
"MIT"
] | null | null | null | azure-iot-sdk-python/provisioning_service_client/tests/client_ut.py | wadooddaoud/Gast_Iot_Sensor_Development | 3d923cc0632e380da7f0e960d74df934735b8fea | [
"MIT"
] | null | null | null | azure-iot-sdk-python/provisioning_service_client/tests/client_ut.py | wadooddaoud/Gast_Iot_Sensor_Development | 3d923cc0632e380da7f0e960d74df934735b8fea | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import copy
import unittest
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
from msrest.pipeline import ClientRawResponse
import context
from provisioningserviceclient.utils.sastoken import SasTokenFactory
from provisioningserviceclient.client import ProvisioningServiceClient, \
BulkEnrollmentOperation, BulkEnrollmentOperationResult, ProvisioningServiceError, \
_is_successful, _copy_and_unwrap_bulkop
from provisioningserviceclient.models import IndividualEnrollment, EnrollmentGroup, \
DeviceRegistrationState, AttestationMechanism, DeviceRegistrationState
from provisioningserviceclient import QuerySpecification, Query
from provisioningserviceclient.serviceswagger import DeviceProvisioningServiceServiceRuntimeClient
from provisioningserviceclient.serviceswagger.operations import DeviceEnrollmentOperations, \
DeviceEnrollmentGroupOperations, RegistrationStateOperations
import provisioningserviceclient.serviceswagger.models as genmodels
SAS = "dummy_token"
RESP_MSG = "message"
REG_ID = "reg-id"
SUCCESS = 200
SUCCESS_DEL = 204
FAIL = 400
UNEXPECTED_FAIL = 793
def dummy(arg1, arg2):
pass
def create_raw_response(body, status, message):
resp = Response(status, message)
return ClientRawResponse(body, resp)
def create_PSED_Exception(status, message):
resp = Response(status, message)
return genmodels.ProvisioningServiceErrorDetailsException(dummy, resp)
class Response(object):
def __init__(self, status_code, message):
self.status_code = status_code
self.reason = message
def raise_for_status(self):
pass
class TestCreationProvisioningServiceClient(unittest.TestCase):
def test_create_w_params(self):
psc = ProvisioningServiceClient("test-uri.azure-devices-provisioning.net", \
"provisioningserviceowner", "dGVzdGluZyBhIHNhc3Rva2Vu")
self.assertEqual(psc.host_name, "test-uri.azure-devices-provisioning.net")
self.assertEqual(psc.shared_access_key_name, "provisioningserviceowner")
self.assertEqual(psc.shared_access_key, "dGVzdGluZyBhIHNhc3Rva2Vu")
def test_basic_cs(self):
cs = "HostName=test-uri.azure-devices-provisioning.net;SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu"
psc = ProvisioningServiceClient.create_from_connection_string(cs)
self.assertEqual(psc.host_name, "test-uri.azure-devices-provisioning.net")
self.assertEqual(psc.shared_access_key_name, "provisioningserviceowner")
self.assertEqual(psc.shared_access_key, "dGVzdGluZyBhIHNhc3Rva2Vu")
def test_reordered_cs_args(self):
cs = "SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu;HostName=test-uri.azure-devices-provisioning.net;SharedAccessKeyName=provisioningserviceowner"
psc = ProvisioningServiceClient.create_from_connection_string(cs)
self.assertEqual(psc.host_name, "test-uri.azure-devices-provisioning.net")
self.assertEqual(psc.shared_access_key_name, "provisioningserviceowner")
self.assertEqual(psc.shared_access_key, "dGVzdGluZyBhIHNhc3Rva2Vu")
def test_fail_too_many_cs_args(self):
#ExtraVal additional cs val
cs = "ExtraVal=testingValue;HostName=test-uri.azure-devices-provisioning.net;SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu"
with self.assertRaises(ValueError):
psc = ProvisioningServiceClient.create_from_connection_string(cs)
def test_fail_missing_cs_args(self):
#HostName is missing
cs = "SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu"
with self.assertRaises(ValueError):
psc = ProvisioningServiceClient.create_from_connection_string(cs)
def test_fail_replaced_cs_args(self):
#ExtraVal replaces HostName in cs
cs = "ExtraVal=testingValue;SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu"
with self.assertRaises(ValueError):
psc = ProvisioningServiceClient.create_from_connection_string(cs)
def test_fail_duplicate_cs_args(self):
#SharedAccessKeyName defined twice
cs = "SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu;SharedAccessKeyName=duplicatevalue"
with self.assertRaises(UnboundLocalError):
psc = ProvisioningServiceClient.create_from_connection_string(cs)
def test_fail_invalid_cs(self):
cs = "not_a_connection_string"
with self.assertRaises(ValueError):
psc = ProvisioningServiceClient.create_from_connection_string(cs)
class TestValidProvisioningServiceClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
cs = "HostName=test-uri.azure-devices-provisioning.net;SharedAccessKeyName=provisioningserviceowner;SharedAccessKey=dGVzdGluZyBhIHNhc3Rva2Vu"
cls.psc = ProvisioningServiceClient.create_from_connection_string(cs)
def expected_headers(self):
headers = {}
headers["Authorization"] = SAS
return headers
class TestProvisioningServiceClientWithIndividualEnrollment(TestValidProvisioningServiceClient):
def setUp(self):
tpm_am = AttestationMechanism.create_with_tpm("my-ek")
self.ie = IndividualEnrollment.create("reg-id", tpm_am)
self.ret_ie = copy.deepcopy(self.ie._internal)
self.ret_ie.created_updated_time_utc = 1000
self.ret_ie.last_updated_time_utc = 1000
@mock.patch.object(DeviceEnrollmentOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_ie_success(self, mock_sas, mock_create):
mock_create.return_value = create_raw_response(self.ret_ie, SUCCESS, RESP_MSG)
ret = self.psc.create_or_update(self.ie)
self.assertIs(ret._internal, self.ret_ie)
self.assertIsInstance(ret, IndividualEnrollment)
mock_create.assert_called_with(self.ie.registration_id, self.ie._internal, self.ie.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_ie_fail(self, mock_sas, mock_create):
mock_create.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.create_or_update(self.ie)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_create.assert_called_with(self.ie.registration_id, self.ie._internal, self.ie.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_ie_service_exception(self, mock_sas, mock_create):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_create.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.create_or_update(self.ie)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_create.assert_called_with(self.ie.registration_id, self.ie._internal, self.ie.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_individual_enrollment(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(self.ret_ie, SUCCESS, RESP_MSG)
ret = self.psc.get_individual_enrollment(self.ie.registration_id)
self.assertIs(ret._internal, self.ret_ie)
self.assertIsInstance(ret, IndividualEnrollment)
mock_get.assert_called_with(self.ie.registration_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_individual_enrollment_fail(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_individual_enrollment(self.ie.registration_id)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_get.assert_called_with(self.ie.registration_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_individual_enrollment_service_exception(self, mock_sas, mock_get):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_get.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_individual_enrollment(self.ie.registration_id)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_get.assert_called_with(self.ie.registration_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_individual_enrollment_by_param_w_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_individual_enrollment_by_param(self.ie.registration_id, self.ie.etag)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.ie.registration_id, self.ie.etag, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_individual_enrollment_by_param_no_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_individual_enrollment_by_param(self.ie.registration_id)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.ie.registration_id, None , self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_individual_enrollment_by_param_fail(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_individual_enrollment_by_param(self.ie.registration_id, self.ie.etag)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_delete.assert_called_with(self.ie.registration_id, self.ie.etag, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_individual_enrollment_by_param_service_exception(self, mock_sas, mock_delete):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_delete.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_individual_enrollment_by_param(self.ie.registration_id, self.ie.etag)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_delete.assert_called_with(self.ie.registration_id, self.ie.etag, self.expected_headers(), True)
@mock.patch.object(ProvisioningServiceClient, 'delete_individual_enrollment_by_param')
def test_delete_individual_enrollment(self, mock_psc_delete):
self.psc.delete(self.ie)
mock_psc_delete.assert_called_with(self.ie.registration_id, self.ie.etag)
class TestProvisioningServiceClientWithEnrollmentGroup(TestValidProvisioningServiceClient):
def setUp(self):
x509_am = AttestationMechanism.create_with_x509_signing_certs("test-cert")
self.eg = EnrollmentGroup.create("grp-id", x509_am)
self.ret_eg = copy.deepcopy(self.eg._internal)
self.ret_eg.created_updated_time_utc = 1000
self.ret_eg.last_updated_time_utc = 1000
@mock.patch.object(DeviceEnrollmentGroupOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_eg(self, mock_sas, mock_create):
mock_create.return_value = create_raw_response(self.ret_eg, SUCCESS, RESP_MSG)
ret = self.psc.create_or_update(self.eg)
self.assertIs(ret._internal, self.ret_eg)
self.assertIsInstance(ret, EnrollmentGroup)
mock_create.assert_called_with(self.eg.enrollment_group_id, self.eg._internal, self.eg.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_eg_fail(self, mock_sas, mock_create):
mock_create.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.create_or_update(self.eg)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_create.assert_called_with(self.eg.enrollment_group_id, self.eg._internal, self.eg.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'create_or_update')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_create_or_update_eg_service_exception(self, mock_sas, mock_create):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_create.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.create_or_update(self.eg)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_create.assert_called_with(self.eg.enrollment_group_id, self.eg._internal, self.eg.etag, \
self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_enrollment_group(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(self.ret_eg, SUCCESS, RESP_MSG)
ret = self.psc.get_enrollment_group(self.eg.enrollment_group_id)
self.assertIs(ret._internal, self.ret_eg)
self.assertIsInstance(ret, EnrollmentGroup)
mock_get.assert_called_with(self.eg.enrollment_group_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_enrollment_group_fail(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_enrollment_group(self.eg.enrollment_group_id)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_get.assert_called_with(self.eg.enrollment_group_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'get')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_enrollment_group_service_exception(self, mock_sas, mock_get):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_get.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_enrollment_group(self.eg.enrollment_group_id)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_get.assert_called_with(self.eg.enrollment_group_id, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_enrollment_group_by_param_w_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_enrollment_group_by_param(self.eg.enrollment_group_id, self.eg.etag)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.eg.enrollment_group_id, self.eg.etag, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_enrollment_group_by_param_no_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_enrollment_group_by_param(self.eg.enrollment_group_id)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.eg.enrollment_group_id, None , self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_enrollment_group_by_param_fail(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_enrollment_group_by_param(self.eg.enrollment_group_id, self.eg.etag)
e = cm.exception
self.assertEqual(RESP_MSG, str(e))
self.assertIsNone(e.cause)
mock_delete.assert_called_with(self.eg.enrollment_group_id, self.eg.etag, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentGroupOperations, 'delete')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_enrollment_group_by_param_service_exception(self, mock_sas, mock_delete):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_delete.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_enrollment_group_by_param(self.eg.enrollment_group_id, self.eg.etag)
e = cm.exception
self.assertEqual(self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL), str(e))
self.assertIs(e.cause, mock_ex)
mock_delete.assert_called_with(self.eg.enrollment_group_id, self.eg.etag, self.expected_headers(), True)
@mock.patch.object(ProvisioningServiceClient, 'delete_enrollment_group_by_param')
def test_delete_enrollment_group(self, mock_psc_delete):
self.psc.delete(self.eg)
mock_psc_delete.assert_called_with(self.eg.enrollment_group_id, self.eg.etag)
class TestProvisioningServiceClientWithRegistrationState(TestValidProvisioningServiceClient):
def setUp(self):
self.drs = DeviceRegistrationState(genmodels.DeviceRegistrationState("reg-id", "assigned"))
self.ret_drs = copy.deepcopy(self.drs._internal)
self.ret_drs.created_updated_time_utc = 1000
self.ret_drs.last_updated_time_utc = 1000
@mock.patch.object(RegistrationStateOperations, 'get_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_registration_state(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(self.ret_drs, SUCCESS, RESP_MSG)
ret = self.psc.get_registration_state(self.drs.registration_id)
self.assertIs(ret._internal, self.ret_drs)
self.assertIsInstance(ret, DeviceRegistrationState)
mock_get.assert_called_with(self.drs.registration_id, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'get_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_registration_state_fail(self, mock_sas, mock_get):
mock_get.return_value = create_raw_response(self.ret_drs, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_registration_state(self.drs.registration_id)
e = cm.exception
self.assertEqual(str(e), RESP_MSG)
self.assertIsNone(e.cause)
mock_get.assert_called_with(self.drs.registration_id, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'get_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_get_registration_state_service_fail(self, mock_sas, mock_get):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_get.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.get_registration_state(self.drs.registration_id)
e = cm.exception
self.assertEqual(str(e), self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL))
self.assertIs(e.cause, mock_ex)
mock_get.assert_called_with(self.drs.registration_id, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'delete_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_registration_state_by_param_w_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_registration_state_by_param(self.drs.registration_id, self.drs.etag)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.drs.registration_id, self.drs.etag, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'delete_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_registration_state_by_param_no_etag(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, SUCCESS_DEL, RESP_MSG)
ret = self.psc.delete_registration_state_by_param(self.drs.registration_id)
self.assertIsNone(ret)
mock_delete.assert_called_with(self.drs.registration_id, None, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'delete_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_registration_state_fail(self, mock_sas, mock_delete):
mock_delete.return_value = create_raw_response(None, FAIL, RESP_MSG)
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_registration_state_by_param(self.drs.registration_id, self.drs.etag)
e = cm.exception
self.assertEqual(str(e), RESP_MSG)
self.assertIsNone(e.cause)
mock_delete.assert_called_with(self.drs.registration_id, self.drs.etag, self.expected_headers(), True)
@mock.patch.object(RegistrationStateOperations, 'delete_registration_state')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_delete_registration_state_service_exception(self, mock_sas, mock_delete):
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_delete.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.delete_registration_state_by_param(self.drs.registration_id, self.drs.etag)
e = cm.exception
self.assertEqual(str(e), self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL))
self.assertIs(e.cause, mock_ex)
mock_delete.assert_called_with(self.drs.registration_id, self.drs.etag, self.expected_headers(), True)
class TestProvisioningServiceClientBulkOperation(TestValidProvisioningServiceClient):
def setUp(self):
enrollments = []
for i in range(5):
att = AttestationMechanism.create_with_tpm("test-ek")
enrollments.append(IndividualEnrollment.create("reg-id" + str(i), att))
self.bulkop = BulkEnrollmentOperation("create", enrollments)
internal = []
for enrollment in self.bulkop.enrollments:
internal.append(enrollment._internal)
self.internal_bulkop = BulkEnrollmentOperation("create", internal)
self.bulkop_resp = BulkEnrollmentOperationResult(True)
@mock.patch.object(DeviceEnrollmentOperations, 'bulk_operation')
@mock.patch('provisioningserviceclient.client._copy_and_unwrap_bulkop')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_run_bulk_operation_op_success(self, mock_sas, mock_unwrap, mock_bulk_op):
mock_bulk_op.return_value = create_raw_response(self.bulkop_resp, SUCCESS, RESP_MSG)
mock_unwrap.return_value = self.internal_bulkop
ret = self.psc.run_bulk_operation(self.bulkop)
self.assertEqual(ret, self.bulkop_resp)
self.assertIsInstance(ret, BulkEnrollmentOperationResult)
mock_bulk_op.assert_called_with(self.internal_bulkop, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'bulk_operation')
@mock.patch('provisioningserviceclient.client._copy_and_unwrap_bulkop')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_run_bulk_operation_op_fail(self, mock_sas, mock_unwrap, mock_bulk_op):
self.bulkop_resp.is_successful = False
mock_bulk_op.return_value = create_raw_response(self.bulkop_resp, SUCCESS, RESP_MSG)
mock_unwrap.return_value = self.internal_bulkop
ret = self.psc.run_bulk_operation(self.bulkop)
self.assertEqual(ret, self.bulkop_resp)
self.assertIsInstance(ret, BulkEnrollmentOperationResult)
mock_bulk_op.assert_called_with(self.internal_bulkop, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'bulk_operation')
@mock.patch('provisioningserviceclient.client._copy_and_unwrap_bulkop')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_run_bulk_operation_fail_response(self, mock_sas, mock_unwrap, mock_bulk_op):
mock_bulk_op.return_value = create_raw_response(None, FAIL, RESP_MSG)
mock_unwrap.return_value = self.internal_bulkop
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.run_bulk_operation(self.bulkop)
e = cm.exception
self.assertEqual(str(e), RESP_MSG)
self.assertIsNone(e.cause)
mock_bulk_op.assert_called_with(self.internal_bulkop, self.expected_headers(), True)
@mock.patch.object(DeviceEnrollmentOperations, 'bulk_operation')
@mock.patch('provisioningserviceclient.client._copy_and_unwrap_bulkop')
@mock.patch.object(SasTokenFactory, 'generate_sastoken', return_value=SAS)
def test_run_bulk_operation_service_exception(self, mock_sas, mock_unwrap, mock_bulk_op):
mock_unwrap.return_value = self.internal_bulkop
mock_ex = create_PSED_Exception(UNEXPECTED_FAIL, RESP_MSG)
mock_bulk_op.side_effect = mock_ex
with self.assertRaises(ProvisioningServiceError) as cm:
ret = self.psc.run_bulk_operation(self.bulkop)
e = cm.exception
self.assertEqual(str(e), self.psc.err_msg_unexpected.format(UNEXPECTED_FAIL))
self.assertIs(e.cause, mock_ex)
mock_bulk_op.assert_called_with(self.internal_bulkop, self.expected_headers(), True)
class TestProvisioningServiceClientOtherOperations(TestValidProvisioningServiceClient):
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_individual_enrollment_query_default_page(self, mock_query):
qs = QuerySpecification("*")
ret = self.psc.create_individual_enrollment_query(qs)
mock_query.assert_called_with(qs, self.psc._runtime_client.device_enrollment.query, \
self.psc._sastoken_factory, None)
self.assertIs(ret, mock_query.return_value)
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_individual_enrollment_query_custom_page(self, mock_query):
qs = QuerySpecification("*")
page_size = 50
ret = self.psc.create_individual_enrollment_query(qs, page_size)
mock_query.assert_called_with(qs, self.psc._runtime_client.device_enrollment.query, \
self.psc._sastoken_factory, page_size)
self.assertIs(ret, mock_query.return_value)
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_enrollment_group_query_default_page(self, mock_query):
qs = QuerySpecification("*")
ret = self.psc.create_enrollment_group_query(qs)
mock_query.assert_called_with(qs, self.psc._runtime_client.device_enrollment_group.query, \
self.psc._sastoken_factory, None)
self.assertIs(ret, mock_query.return_value)
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_enrollment_group_query_custom_page(self, mock_query):
qs = QuerySpecification("*")
page_size = 50
ret = self.psc.create_enrollment_group_query(qs, page_size)
mock_query.assert_called_with(qs, self.psc._runtime_client.device_enrollment_group.query, \
self.psc._sastoken_factory, page_size)
self.assertIs(ret, mock_query.return_value)
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_registration_state_query_default_page(self, mock_query):
id = REG_ID
ret = self.psc.create_registration_state_query(id)
mock_query.assert_called_with(id, self.psc._runtime_client.registration_state.query_registration_state, \
self.psc._sastoken_factory, None)
self.assertIs(ret, mock_query.return_value)
@mock.patch('provisioningserviceclient.client.Query', autospec=True)
def test_create_registration_state_query_custom_page(self, mock_query):
id = REG_ID
page_size = 50
ret = self.psc.create_registration_state_query(id, page_size)
mock_query.assert_called_with(id, self.psc._runtime_client.registration_state.query_registration_state, \
self.psc._sastoken_factory, page_size)
self.assertIs(ret, mock_query.return_value)
class TestProvisioningServiceCleintWithBadInputs(TestValidProvisioningServiceClient):
def test_create_or_update_wrong_obj_fail(self):
with self.assertRaises(TypeError):
self.psc.create_or_update(object())
def test_delete_wrong_obj_fail(self):
with self.assertRaises(TypeError):
self.psc.delete(object())
class TestHelperFunctions(unittest.TestCase):
def test_is_successful(self):
for i in range(999):
ret = _is_successful(i)
if i == 200 or i == 204:
self.assertTrue(ret)
else:
self.assertFalse(ret)
def test_copy_and_unwrap_bulkop(self):
enrollments = []
for i in range(5):
att = AttestationMechanism.create_with_tpm("test-ek")
enrollments.append(IndividualEnrollment.create("reg-id" + str(i), att))
bulkop = BulkEnrollmentOperation("create", enrollments)
res = _copy_and_unwrap_bulkop(bulkop)
for i in range(len(res.enrollments)):
self.assertIs(res.enrollments[i], bulkop.enrollments[i]._internal)
if __name__ == '__main__':
unittest.main()
| 52.838235 | 171 | 0.753378 | 3,905 | 32,337 | 5.919078 | 0.063252 | 0.028814 | 0.041533 | 0.028554 | 0.838929 | 0.827723 | 0.824998 | 0.811283 | 0.793632 | 0.785931 | 0 | 0.003147 | 0.154807 | 32,337 | 611 | 172 | 52.924714 | 0.84256 | 0.007855 | 0 | 0.652695 | 0 | 0.007984 | 0.088137 | 0.060109 | 0 | 0 | 0 | 0 | 0.275449 | 1 | 0.123753 | false | 0.003992 | 0.025948 | 0 | 0.175649 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6c1aa6ecf7cbb11aac2896042941b073a0b4f92d | 6,901 | py | Python | metrics/dine_ndt_metrics.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | 1 | 2022-03-29T03:09:52.000Z | 2022-03-29T03:09:52.000Z | metrics/dine_ndt_metrics.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | null | null | null | metrics/dine_ndt_metrics.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import backend as K
import logging
import numpy as np
import math
logger = logging.getLogger("logger")
class DINE_NDT_Metrics(tf.keras.metrics.Metric):
def __init__(self, writer, name='', **kwargs):
super(DINE_NDT_Metrics, self).__init__(name=name, **kwargs)
self.writer = writer
self.metric_pool = [DV(name='dv_xy_{}'.format(name)),
DV(name='dv_y_{}'.format(name)),
DI(name='di_{}'.format(name)),
DI_bits(name='di_bits')]
def update_state(self, t_y, t_xy, **kwargs):
self.metric_pool[0].update_state(t_xy[0], t_xy[1])
self.metric_pool[1].update_state(t_y[0], t_y[1])
self.metric_pool[2].update_state(t_y[0], t_y[1], t_xy[0], t_xy[1])
self.metric_pool[3].update_state(t_y[0], t_y[1], t_xy[0], t_xy[1])
def result(self):
return [metric.result() for metric in self.metric_pool]
def reset_states(self):
for metric in self.metric_pool:
metric.reset_states()
return
def log_metrics(self, epoch, model_name):
# log to tensorboard
with self.writer.as_default():
for metric in self.metric_pool:
tf.summary.scalar(metric.name, metric.result(), epoch)
# print to terminal
msg = ["{} Epoch: {:05d}\t".format(self.name, epoch)]
for metric in self.metric_pool:
if np.isnan(metric.result()):
raise ValueError("NaN appeared in metric {}".format(metric.name))
msg.append("{:s} {:3.6f}\t".format(metric.name, float(metric.result())))
msg.append(model_name)
logger.info("\t".join(msg))
class MINE_NDT_Metrics(tf.keras.metrics.Metric):
def __init__(self, writer, name='', **kwargs):
super(MINE_NDT_Metrics, self).__init__(name=name, **kwargs)
self.writer = writer
self.metric_pool = [DV(name='dv_{}'.format(name))]
def update_state(self, t, t_, **kwargs):
self.metric_pool[0].update_state(t, t_)
def result(self):
return [metric.result() for metric in self.metric_pool]
def reset_states(self):
for metric in self.metric_pool:
metric.reset_states()
return
def log_metrics(self, epoch, model_name):
# log to tensorboard
with self.writer.as_default():
for metric in self.metric_pool:
tf.summary.scalar(metric.name, metric.result(), epoch)
# print to terminal
msg = ["{} Epoch: {:05d}\t".format(self.name, epoch)]
for metric in self.metric_pool:
if np.isnan(metric.result()):
raise ValueError("NaN appeared in metric {}".format(metric.name))
msg.append("{:s} {:3.6f}\t".format(metric.name, float(metric.result())))
msg.append(model_name)
logger.info("\t".join(msg))
class DV(tf.keras.metrics.Metric): # estimated DV loss calcaultion metric class
def __init__(self, name='dv_loss', **kwargs):
super(DV, self).__init__(name=name, **kwargs)
self.T = self.add_weight(name='t', initializer='zeros')
self.exp_T_bar = self.add_weight(name='exp_t_bar', initializer='zeros')
self.global_counter = self.add_weight(name='n', initializer='zeros')
self.global_counter_ref = self.add_weight(name='n_ref', initializer='zeros')
def update_state(self, T, T_bar, **kwargs):
self.T.assign(self.T + tf.reduce_sum(T))
self.exp_T_bar.assign(self.exp_T_bar + tf.reduce_sum(T_bar))
self.global_counter.assign(self.global_counter + tf.cast(tf.reduce_prod(T.shape[:-1]), dtype=tf.float32))
self.global_counter_ref.assign(self.global_counter_ref + tf.cast(tf.reduce_prod(T_bar.shape[:-1]), dtype=tf.float32))
def result(self):
loss = self.T / self.global_counter - K.log(self.exp_T_bar / self.global_counter_ref)
return loss
class DI(tf.keras.metrics.Metric): # estimated DI calcaultion metric class
def __init__(self, name='dv_loss', **kwargs):
super(DI, self).__init__(name=name, **kwargs)
self.c_T = self.add_weight(name='c_t', initializer='zeros')
self.c_exp_T_bar = self.add_weight(name='c_exp_t_bar', initializer='zeros')
self.xc_T = self.add_weight(name='xc_t', initializer='zeros')
self.xc_exp_T_bar = self.add_weight(name='xc_exp_t_bar', initializer='zeros')
self.global_counter = self.add_weight(name='n', initializer='zeros')
self.global_counter_ref = self.add_weight(name='n_ref', initializer='zeros')
def update_state(self, c_T, c_T_bar, xc_T, xc_T_bar, **kwargs):
self.c_T.assign(self.c_T + tf.reduce_sum(c_T))
self.c_exp_T_bar.assign(self.c_exp_T_bar + tf.reduce_sum(c_T_bar))
self.xc_T.assign(self.xc_T + tf.reduce_sum(xc_T))
self.xc_exp_T_bar.assign(self.xc_exp_T_bar + tf.reduce_sum(xc_T_bar))
self.global_counter.assign(self.global_counter + c_T.shape[0]*c_T.shape[1])
self.global_counter_ref.assign(self.global_counter_ref + c_T_bar.shape[0]*c_T_bar.shape[1]*c_T_bar.shape[2])
def result(self):
loss_y = self.c_T / self.global_counter - K.log(self.c_exp_T_bar / self.global_counter_ref)
loss_xy = self.xc_T / self.global_counter - K.log(self.xc_exp_T_bar / self.global_counter_ref)
return loss_xy - loss_y
class DI_bits(tf.keras.metrics.Metric): # estimated DI calcaultion metric class in bits
def __init__(self, name='dv_loss', **kwargs):
super(DI_bits, self).__init__(name=name, **kwargs)
self.c_T = self.add_weight(name='c_t', initializer='zeros')
self.c_exp_T_bar = self.add_weight(name='c_exp_t_bar', initializer='zeros')
self.xc_T = self.add_weight(name='xc_t', initializer='zeros')
self.xc_exp_T_bar = self.add_weight(name='xc_exp_t_bar', initializer='zeros')
self.global_counter = self.add_weight(name='n', initializer='zeros')
self.global_counter_ref = self.add_weight(name='n_ref', initializer='zeros')
def update_state(self, c_T, c_T_bar, xc_T, xc_T_bar, **kwargs):
self.c_T.assign(self.c_T + tf.reduce_sum(c_T))
self.c_exp_T_bar.assign(self.c_exp_T_bar + tf.reduce_sum(c_T_bar))
self.xc_T.assign(self.xc_T + tf.reduce_sum(xc_T))
self.xc_exp_T_bar.assign(self.xc_exp_T_bar + tf.reduce_sum(xc_T_bar))
self.global_counter.assign(self.global_counter + c_T.shape[0]*c_T.shape[1])
self.global_counter_ref.assign(self.global_counter_ref + c_T_bar.shape[0]*c_T_bar.shape[1]*c_T_bar.shape[2])
def result(self):
loss_y = self.c_T / self.global_counter - K.log(self.c_exp_T_bar / self.global_counter_ref)
loss_xy = self.xc_T / self.global_counter - K.log(self.xc_exp_T_bar / self.global_counter_ref)
return (loss_xy - loss_y)/math.log(2) | 45.701987 | 125 | 0.655847 | 1,088 | 6,901 | 3.858456 | 0.087316 | 0.040019 | 0.113387 | 0.064793 | 0.912577 | 0.882325 | 0.857551 | 0.845641 | 0.826108 | 0.758695 | 0 | 0.007618 | 0.20113 | 6,901 | 151 | 126 | 45.701987 | 0.753855 | 0.028981 | 0 | 0.66087 | 0 | 0 | 0.051539 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165217 | false | 0 | 0.043478 | 0.017391 | 0.313043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6c27de781049f910986e16c5a5723a30f969a4b2 | 33 | py | Python | backend/api/db/schemas/users.py | skluthe/Yacht | d9ba4185c318e128d2fc6ceb7c0111927f571dbd | [
"MIT"
] | null | null | null | backend/api/db/schemas/users.py | skluthe/Yacht | d9ba4185c318e128d2fc6ceb7c0111927f571dbd | [
"MIT"
] | null | null | null | backend/api/db/schemas/users.py | skluthe/Yacht | d9ba4185c318e128d2fc6ceb7c0111927f571dbd | [
"MIT"
] | null | null | null | from fastapi_users import models
| 16.5 | 32 | 0.878788 | 5 | 33 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6c368b661be57c9485e7c28e223830033ce3223d | 6,526 | py | Python | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/opencv_apps/cfg/FaceRecognitionConfig.py | QianheYu/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T03:31:15.000Z | 2022-03-11T03:31:15.000Z | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/opencv_apps/cfg/FaceRecognitionConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/opencv_apps/cfg/FaceRecognitionConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | ## *********************************************************
##
## File autogenerated for the opencv_apps package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Method to recognize faces', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'model_method', 'edit_method': "{'enum_description': 'Method to recognize faces', 'enum': [{'srcline': 41, 'description': 'eigen', 'srcfile': '/home/xtark/ros_ws/src/third_packages/opencv_apps/cfg/FaceRecognition.cfg', 'cconsttype': 'const char * const', 'value': 'eigen', 'ctype': 'std::string', 'type': 'str', 'name': 'eigen'}, {'srcline': 42, 'description': 'fisher', 'srcfile': '/home/xtark/ros_ws/src/third_packages/opencv_apps/cfg/FaceRecognition.cfg', 'cconsttype': 'const char * const', 'value': 'fisher', 'ctype': 'std::string', 'type': 'str', 'name': 'fisher'}, {'srcline': 43, 'description': 'LBPH', 'srcfile': '/home/xtark/ros_ws/src/third_packages/opencv_apps/cfg/FaceRecognition.cfg', 'cconsttype': 'const char * const', 'value': 'LBPH', 'ctype': 'std::string', 'type': 'str', 'name': 'LBPH'}]}", 'default': 'eigen', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Use saved data', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'use_saved_data', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Save train data', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': 
'/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'save_train_data', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Save directory for train data', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'data_dir', 'edit_method': '', 'default': '~/.ros/opencv_apps/face_data', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Width of training face image', 'max': 500, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'face_model_width', 'edit_method': '', 'default': 190, 'level': 0, 'min': 30, 'type': 'int'}, {'srcline': 290, 'description': 'Height of training face image', 'max': 500, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'face_model_height', 'edit_method': '', 'default': 90, 'level': 0, 'min': 30, 'type': 'int'}, {'srcline': 290, 'description': 'Padding ratio of each face', 'max': 2.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'face_padding', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Number of components for face recognizer model', 'max': 100, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'model_num_components', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Threshold for face recognizer model', 'max': 
10000.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'model_threshold', 'edit_method': '', 'default': 8000.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Radius parameter used only for LBPH model', 'max': 10, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'lbph_radius', 'edit_method': '', 'default': 1, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 290, 'description': 'Neighbors parameter used only for LBPH model', 'max': 30, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'lbph_neighbors', 'edit_method': '', 'default': 8, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 290, 'description': 'grid_x parameter used only for LBPH model', 'max': 30, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'lbph_grid_x', 'edit_method': '', 'default': 8, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 290, 'description': 'grid_y parameter used only for LBPH model', 'max': 30, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'lbph_grid_y', 'edit_method': '', 'default': 8, 'level': 0, 'min': 1, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
FaceRecognition_eigen = 'eigen'
FaceRecognition_fisher = 'fisher'
FaceRecognition_LBPH = 'LBPH'
| 163.15 | 5,555 | 0.665952 | 825 | 6,526 | 5.141818 | 0.166061 | 0.067893 | 0.042904 | 0.066007 | 0.711457 | 0.674682 | 0.657001 | 0.649458 | 0.632013 | 0.604668 | 0 | 0.024932 | 0.096537 | 6,526 | 39 | 5,556 | 167.333333 | 0.694539 | 0.060374 | 0 | 0 | 1 | 0.789474 | 0.683522 | 0.257241 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
665fa45057e1b19d20422413670b61257951e59e | 70 | py | Python | chalicelib/util/__init__.py | la-mar/ecs-cluster-management | 334ca02c7a4d86aa77f2a5bccd4fd48db8620c87 | [
"MIT"
] | null | null | null | chalicelib/util/__init__.py | la-mar/ecs-cluster-management | 334ca02c7a4d86aa77f2a5bccd4fd48db8620c87 | [
"MIT"
] | null | null | null | chalicelib/util/__init__.py | la-mar/ecs-cluster-management | 334ca02c7a4d86aa77f2a5bccd4fd48db8620c87 | [
"MIT"
] | null | null | null | # flake8: noqa
from util.botoutil import *
from util.dt import utcnow
| 17.5 | 27 | 0.771429 | 11 | 70 | 4.909091 | 0.727273 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0.157143 | 70 | 3 | 28 | 23.333333 | 0.898305 | 0.171429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
66755202fbb6508a1318a40efea2f6ad9766b361 | 7,341 | py | Python | tests/api_resources/test_customer.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-05-29T08:57:58.000Z | 2022-02-19T07:09:25.000Z | tests/api_resources/test_customer.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2021-05-31T10:18:36.000Z | 2022-01-25T11:39:03.000Z | tests/api_resources/test_customer.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-05-29T13:27:10.000Z | 2021-05-29T13:27:10.000Z | from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "cus_123"
TEST_SUB_ID = "sub_123"
TEST_SOURCE_ID = "ba_123"
TEST_TAX_ID_ID = "txi_123"
TEST_TRANSACTION_ID = "cbtxn_123"
class TestCustomer(object):
    """Exercise the CRUD surface of ``stripe.Customer``."""

    async def test_is_listable(self, request_mock):
        """Listing hits GET /v1/customers and yields Customer objects."""
        customers = await stripe.Customer.list()
        request_mock.assert_requested("get", "/v1/customers")
        assert isinstance(customers.data, list)
        assert isinstance(customers.data[0], stripe.Customer)

    async def test_is_retrievable(self, request_mock):
        """Retrieval hits GET /v1/customers/{id}."""
        customer = await stripe.Customer.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/customers/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(customer, stripe.Customer)

    async def test_is_creatable(self, request_mock):
        """Creation hits POST /v1/customers."""
        customer = await stripe.Customer.create()
        request_mock.assert_requested("post", "/v1/customers")
        assert isinstance(customer, stripe.Customer)

    async def test_is_saveable(self, request_mock):
        """Saving a mutated customer hits POST /v1/customers/{id}."""
        customer = await stripe.Customer.retrieve(TEST_RESOURCE_ID)
        customer.metadata["key"] = "value"
        await customer.save()
        request_mock.assert_requested(
            "post", "/v1/customers/%s" % TEST_RESOURCE_ID
        )

    async def test_is_modifiable(self, request_mock):
        """Class-level modify hits POST /v1/customers/{id}."""
        customer = await stripe.Customer.modify(
            TEST_RESOURCE_ID, metadata={"key": "value"}
        )
        request_mock.assert_requested(
            "post", "/v1/customers/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(customer, stripe.Customer)

    async def test_is_deletable(self, request_mock):
        """Instance delete hits DELETE /v1/customers/{id} and flags deletion."""
        customer = await stripe.Customer.retrieve(TEST_RESOURCE_ID)
        await customer.delete()
        request_mock.assert_requested(
            "delete", "/v1/customers/%s" % TEST_RESOURCE_ID
        )
        assert customer.deleted is True

    async def test_can_delete(self, request_mock):
        """Class-level delete hits DELETE /v1/customers/{id}."""
        customer = await stripe.Customer.delete(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "delete", "/v1/customers/%s" % TEST_RESOURCE_ID
        )
        assert customer.deleted is True

    async def test_can_delete_discount(self, request_mock):
        """Instance discount removal hits DELETE /v1/customers/{id}/discount."""
        customer = await stripe.Customer.retrieve(TEST_RESOURCE_ID)
        await customer.delete_discount()
        request_mock.assert_requested(
            "delete", "/v1/customers/%s/discount" % TEST_RESOURCE_ID
        )

    async def test_can_delete_discount_class_method(self, request_mock):
        """Class-level discount removal hits DELETE /v1/customers/{id}/discount."""
        await stripe.Customer.delete_discount(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "delete", "/v1/customers/%s/discount" % TEST_RESOURCE_ID
        )
class TestCustomerSources(object):
    """Exercise the customer payment-source sub-resource endpoints."""

    async def test_is_creatable(self, request_mock):
        """Source creation hits POST /v1/customers/{id}/sources."""
        await stripe.Customer.create_source(TEST_RESOURCE_ID, source="btok_123")
        request_mock.assert_requested(
            "post", "/v1/customers/%s/sources" % TEST_RESOURCE_ID
        )

    async def test_is_retrievable(self, request_mock):
        """Source retrieval hits GET /v1/customers/{id}/sources/{source}."""
        await stripe.Customer.retrieve_source(TEST_RESOURCE_ID, TEST_SOURCE_ID)
        request_mock.assert_requested(
            "get",
            "/v1/customers/%s/sources/%s" % (TEST_RESOURCE_ID, TEST_SOURCE_ID),
        )

    async def test_is_modifiable(self, request_mock):
        """Source update hits POST /v1/customers/{id}/sources/{source}."""
        await stripe.Customer.modify_source(
            TEST_RESOURCE_ID, TEST_SOURCE_ID, metadata={"foo": "bar"}
        )
        request_mock.assert_requested(
            "post",
            "/v1/customers/%s/sources/%s" % (TEST_RESOURCE_ID, TEST_SOURCE_ID),
        )

    async def test_is_deletable(self, request_mock):
        """Source removal hits DELETE /v1/customers/{id}/sources/{source}."""
        await stripe.Customer.delete_source(TEST_RESOURCE_ID, TEST_SOURCE_ID)
        request_mock.assert_requested(
            "delete",
            "/v1/customers/%s/sources/%s" % (TEST_RESOURCE_ID, TEST_SOURCE_ID),
        )

    async def test_is_listable(self, request_mock):
        """Source listing hits GET /v1/customers/{id}/sources."""
        sources = await stripe.Customer.list_sources(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/customers/%s/sources" % TEST_RESOURCE_ID
        )
        assert isinstance(sources.data, list)
class TestCustomerTaxIds(object):
    """Exercise the customer tax-id sub-resource endpoints."""

    async def test_is_creatable(self, request_mock):
        """Tax-id creation hits POST /v1/customers/{id}/tax_ids."""
        tax_id = await stripe.Customer.create_tax_id(
            TEST_RESOURCE_ID, type="eu_vat", value="11111"
        )
        request_mock.assert_requested(
            "post", "/v1/customers/%s/tax_ids" % TEST_RESOURCE_ID
        )
        assert isinstance(tax_id, stripe.TaxId)

    async def test_is_retrievable(self, request_mock):
        """Tax-id retrieval hits GET /v1/customers/{id}/tax_ids/{tax_id}."""
        await stripe.Customer.retrieve_tax_id(TEST_RESOURCE_ID, TEST_TAX_ID_ID)
        request_mock.assert_requested(
            "get",
            "/v1/customers/%s/tax_ids/%s" % (TEST_RESOURCE_ID, TEST_TAX_ID_ID),
        )

    async def test_is_deletable(self, request_mock):
        """Tax-id removal hits DELETE /v1/customers/{id}/tax_ids/{tax_id}."""
        await stripe.Customer.delete_tax_id(TEST_RESOURCE_ID, TEST_TAX_ID_ID)
        request_mock.assert_requested(
            "delete",
            "/v1/customers/%s/tax_ids/%s" % (TEST_RESOURCE_ID, TEST_TAX_ID_ID),
        )

    async def test_is_listable(self, request_mock):
        """Tax-id listing hits GET /v1/customers/{id}/tax_ids."""
        tax_ids = await stripe.Customer.list_tax_ids(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/customers/%s/tax_ids" % TEST_RESOURCE_ID
        )
        assert isinstance(tax_ids.data, list)
class TestCustomerTransactions(object):
    """Exercise the customer balance-transaction sub-resource endpoints."""

    async def test_is_creatable(self, request_mock):
        """Balance-transaction creation hits POST .../balance_transactions."""
        txn = await stripe.Customer.create_balance_transaction(
            TEST_RESOURCE_ID, amount=1234, currency="usd"
        )
        request_mock.assert_requested(
            "post", "/v1/customers/%s/balance_transactions" % TEST_RESOURCE_ID
        )
        assert isinstance(txn, stripe.CustomerBalanceTransaction)

    async def test_is_retrievable(self, request_mock):
        """Balance-transaction retrieval hits GET .../balance_transactions/{id}."""
        await stripe.Customer.retrieve_balance_transaction(
            TEST_RESOURCE_ID, TEST_TRANSACTION_ID
        )
        request_mock.assert_requested(
            "get",
            "/v1/customers/%s/balance_transactions/%s"
            % (TEST_RESOURCE_ID, TEST_TRANSACTION_ID),
        )

    async def test_is_listable(self, request_mock):
        """Balance-transaction listing hits GET .../balance_transactions."""
        txns = await stripe.Customer.list_balance_transactions(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/customers/%s/balance_transactions" % TEST_RESOURCE_ID
        )
        assert isinstance(txns.data, list)
class TestCustomerPaymentMethods(object):
    """Exercise the customer payment-methods listing endpoints."""

    async def test_is_listable(self, request_mock):
        """Class-level listing hits GET /v1/customers/{id}/payment_methods."""
        await stripe.Customer.list_payment_methods(TEST_RESOURCE_ID, type="card")
        request_mock.assert_requested(
            "get", "/v1/customers/%s/payment_methods" % TEST_RESOURCE_ID
        )

    async def test_is_listable_on_object(self, request_mock):
        """Instance-level listing returns a ListObject."""
        customer = await stripe.Customer.retrieve(
            TEST_RESOURCE_ID
        )
        # NOTE(review): passing TEST_RESOURCE_ID again on the instance call
        # looks redundant — confirm the instance variant really expects the
        # customer id as a positional argument.
        methods = await customer.list_payment_methods(TEST_RESOURCE_ID, type="card")
        request_mock.assert_requested(
            "get", "/v1/customers/%s/payment_methods" % TEST_RESOURCE_ID
        )
        assert isinstance(methods, stripe.ListObject)
| 37.454082 | 85 | 0.675521 | 866 | 7,341 | 5.387991 | 0.106236 | 0.108444 | 0.132019 | 0.128161 | 0.865409 | 0.837977 | 0.8024 | 0.736391 | 0.704029 | 0.61916 | 0 | 0.009015 | 0.229397 | 7,341 | 195 | 86 | 37.646154 | 0.815803 | 0 | 0 | 0.432099 | 0 | 0 | 0.101757 | 0.062526 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0 | false | 0 | 0.018519 | 0 | 0.049383 | 0.006173 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
66aac9943e9c3c111b39568b5e50475b96174662 | 42 | py | Python | hearthstone-deck-classifier/Classifiers/__init__.py | viktorstaikov/hearthstone-deck-classifier | 78fa6641cfa4b081b61ea04125c296f5e40eb733 | [
"MIT"
] | null | null | null | hearthstone-deck-classifier/Classifiers/__init__.py | viktorstaikov/hearthstone-deck-classifier | 78fa6641cfa4b081b61ea04125c296f5e40eb733 | [
"MIT"
] | null | null | null | hearthstone-deck-classifier/Classifiers/__init__.py | viktorstaikov/hearthstone-deck-classifier | 78fa6641cfa4b081b61ea04125c296f5e40eb733 | [
"MIT"
] | null | null | null | from NaiveBayes import *
from kNN import * | 21 | 24 | 0.785714 | 6 | 42 | 5.5 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 42 | 2 | 25 | 21 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
66d128e379932eb656f30d362a21ae7a3ce3a9f2 | 24 | py | Python | kalliope/neurons/sleep/__init__.py | G10DRAS/kalliope | 4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d | [
"MIT"
] | null | null | null | kalliope/neurons/sleep/__init__.py | G10DRAS/kalliope | 4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d | [
"MIT"
] | null | null | null | kalliope/neurons/sleep/__init__.py | G10DRAS/kalliope | 4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d | [
"MIT"
] | null | null | null | from sleep import Sleep
| 12 | 23 | 0.833333 | 4 | 24 | 5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dd3676639f63d3ec0393084c48bc46011423b12d | 14,650 | py | Python | tests/test_api_objects.py | cyberjacob/pymonzo | 056fd74f559cac28ddfa60597d8604a5ab1033ea | [
"MIT"
] | null | null | null | tests/test_api_objects.py | cyberjacob/pymonzo | 056fd74f559cac28ddfa60597d8604a5ab1033ea | [
"MIT"
] | null | null | null | tests/test_api_objects.py | cyberjacob/pymonzo | 056fd74f559cac28ddfa60597d8604a5ab1033ea | [
"MIT"
] | 1 | 2021-12-22T09:58:10.000Z | 2021-12-22T09:58:10.000Z | # -*- coding: utf-8 -*-
"""
Test 'pymonzo.api_objects' file
"""
from __future__ import unicode_literals
from datetime import datetime
import pytest
from dateutil.parser import parse as parse_date
from pymonzo import api_objects, MonzoAPI
from pymonzo.api_objects import MonzoAccount, MonzoPot
from pymonzo.utils import CommonMixin
class TestMonzoObject:
    """Tests for the base `api_objects.MonzoObject` class."""
    klass = api_objects.MonzoObject
    data = {
        'foo': 'foo',
        'bar': 'bar',
    }

    @pytest.fixture(scope='session')
    def instance(self):
        """Return an initialized instance of the class under test."""
        return self.klass(data=self.data)

    def test_class_inheritance(self, instance):
        """The instance inherits from MonzoObject and CommonMixin."""
        assert isinstance(instance, api_objects.MonzoObject)
        assert isinstance(instance, CommonMixin)

    def test_class_properties(self, instance):
        """The base class declares no required keys."""
        assert self.klass._required_keys == []
        assert instance._required_keys == []

    def test_class_initialization(self, instance):
        """`__init__` stores the raw payload and exposes keys as attributes."""
        assert instance._raw_data == self.data
        assert instance.foo == 'foo'
        assert instance.bar == 'bar'

    def test_class_lack_of_required_keys(self, mocker):
        """`__init__` raises ValueError when a required key is missing."""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=self.data)
class TestMonzoAccount:
    """Tests for the `api_objects.MonzoAccount` class."""
    klass = api_objects.MonzoAccount

    @pytest.fixture(scope='session')
    def data(self, accounts_api_response):
        """Return the payload used to initialize the object."""
        return accounts_api_response['accounts'][0]

    @pytest.fixture(scope='session')
    def instance(self, data):
        """Return an initialized instance of the class under test."""
        return self.klass(data)

    def test_class_inheritance(self, instance):
        """The instance inherits from MonzoAccount and MonzoObject."""
        assert isinstance(instance, api_objects.MonzoAccount)
        assert isinstance(instance, api_objects.MonzoObject)

    def test_class_properties(self, instance):
        """The class requires the id, description and created keys."""
        expected_keys = ['id', 'description', 'created']
        assert self.klass._required_keys == expected_keys
        assert instance._required_keys == expected_keys

    def test_class_initialization(self, instance, data):
        """`__init__` keeps the raw payload and parses the created date."""
        expected_data = data.copy()
        assert instance._raw_data == data
        del instance._raw_data
        expected_data['created'] = parse_date(expected_data['created'])
        # Compare only public attributes against the expected payload
        instance_vars = {
            key: value
            for key, value in vars(instance).items()
            if not key.startswith('_')
        }
        assert instance_vars == expected_data
        assert isinstance(instance.created, datetime)

    def test_class_lack_of_required_keys(self, mocker, data):
        """`__init__` raises ValueError when a required key is missing."""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=data)
class TestMonzoPot:
    """
    Test `api_objects.MonzoPot` class
    """
    klass = api_objects.MonzoPot
    @pytest.fixture
    def mocked_monzo(self, mocker):
        """Helper fixture that returns a mocked `MonzoAPI` instance"""
        # Patch the OAuth session and token persistence so no network or
        # disk access happens during construction.
        mocker.patch('pymonzo.monzo_api.OAuth2Session')
        mocker.patch('pymonzo.monzo_api.MonzoAPI._save_token_on_disk')
        client_id = 'explicit_client_id'
        client_secret = 'explicit_client_secret'
        auth_code = 'explicit_auth_code'
        monzo = MonzoAPI(
            client_id=client_id,
            client_secret=client_secret,
            auth_code=auth_code,
        )
        return monzo
    @pytest.fixture(scope='session')
    def data(self, pots_api_response):
        """Simple fixture that returns data used to initialize the object"""
        return pots_api_response['pots'][0]
    @pytest.fixture(scope='session')
    def instance(self, data):
        """Simple fixture that returns an initialized object"""
        return self.klass(data)
    def test_class_inheritance(self, instance):
        """Test class inheritance"""
        assert isinstance(instance, api_objects.MonzoPot)
        assert isinstance(instance, api_objects.MonzoObject)
    def test_class_properties(self, instance):
        """Test class properties"""
        expected_keys = [
            'id', 'name', 'created', 'style', 'balance', 'currency', 'updated', 'deleted'
        ]
        assert self.klass._required_keys == expected_keys
        assert instance._required_keys == expected_keys
    def test_class_initialization(self, instance, data):
        """Test class `__init__` method"""
        expected_data = data.copy()
        assert instance._raw_data == data
        del instance._raw_data
        expected_data['created'] = parse_date(expected_data['created'])
        orig_instance_vars = vars(instance)
        instance_vars = orig_instance_vars.copy()
        # Don't inspect private variables
        for k in orig_instance_vars.keys():
            if k.startswith('_'):
                instance_vars.pop(k)
        assert instance_vars == expected_data
        assert isinstance(instance.created, datetime)
    def test_class_lack_of_required_keys(self, mocker, data):
        """Test class `__init__` method when data lack one of required keys"""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=data)
    def test_class_deposit_method(self, mocker, mocked_monzo,
                                  pots_api_response, accounts_api_response):
        """Test class `deposit` method"""
        mocked_get_response = mocker.patch(
            'pymonzo.monzo_api.MonzoAPI._get_response',
        )
        mocked_get_response.return_value.json.return_value = pots_api_response['pots'][0]
        accounts_json = accounts_api_response['accounts']
        pots_json = pots_api_response['pots']
        mocked_monzo._cached_accounts = [
            MonzoAccount(data=account, context=mocked_monzo) for account in accounts_json
        ]
        mocked_monzo._cached_pots = [
            MonzoPot(data=pot, context=mocked_monzo) for pot in pots_json
        ]
        pot = mocked_monzo.pots()[0]
        # NOTE(review): `expected_result` aliases the same object as `pot`,
        # so the final equality assertion below is vacuously true — it can
        # never fail regardless of what `deposit()` does. Consider comparing
        # `pot.balance` against an independent expected value instead.
        expected_result = pot
        expected_result.balance = 50000
        result = pot.deposit(37655, mocked_monzo._cached_accounts[0], "abc")
        mocked_get_response.assert_called_once_with(
            method='put',
            endpoint='/pots/'+mocked_monzo._cached_pots[0].id+'/deposit',
            body={
                'source_account_id': mocked_monzo._cached_accounts[0].id,
                'amount': 37655,
                'dedupe_id': "abc",
            },
        )
        assert result is None
        assert pot == expected_result
    def test_class_withdraw_method(self, mocker, mocked_monzo,
                                   pots_api_response, accounts_api_response):
        """Test class `withdraw` method"""
        mocked_get_response = mocker.patch(
            'pymonzo.monzo_api.MonzoAPI._get_response',
        )
        mocked_get_response.return_value.json.return_value = pots_api_response['pots'][0]
        accounts_json = accounts_api_response['accounts']
        pots_json = pots_api_response['pots']
        mocked_monzo._cached_accounts = [
            MonzoAccount(data=account, context=mocked_monzo) for account in accounts_json
        ]
        mocked_monzo._cached_pots = [
            MonzoPot(data=pot, context=mocked_monzo) for pot in pots_json
        ]
        pot = mocked_monzo.pots()[0]
        # NOTE(review): same aliasing issue as in the deposit test above —
        # `pot == expected_result` compares the object to itself.
        expected_result = pot
        expected_result.balance = 2500
        result = pot.withdraw(9845, mocked_monzo._cached_accounts[0], "abc")
        mocked_get_response.assert_called_once_with(
            method='put',
            endpoint='/pots/'+mocked_monzo._cached_pots[0].id+'/withdraw',
            body={
                'destination_account_id': mocked_monzo._cached_accounts[0].id,
                'amount': 9845,
                'dedupe_id': "abc",
            },
        )
        assert result is None
        assert pot == expected_result
class TestMonzoBalance:
    """Tests for the `api_objects.MonzoBalance` class."""
    klass = api_objects.MonzoBalance

    @pytest.fixture(scope='session')
    def data(self, balance_api_response):
        """Return the payload used to initialize the object."""
        return balance_api_response

    @pytest.fixture(scope='session')
    def instance(self, data):
        """Return an initialized instance of the class under test."""
        return self.klass(data)

    def test_class_inheritance(self, instance):
        """The instance inherits from MonzoBalance and MonzoObject."""
        assert isinstance(instance, api_objects.MonzoBalance)
        assert isinstance(instance, api_objects.MonzoObject)

    def test_class_properties(self, instance):
        """The class requires the balance, currency and spend_today keys."""
        expected_keys = ['balance', 'currency', 'spend_today']
        assert self.klass._required_keys == expected_keys
        assert instance._required_keys == expected_keys

    def test_class_initialization(self, instance, data):
        """`__init__` keeps the raw payload and exposes keys as attributes."""
        expected_data = data.copy()
        assert instance._raw_data == expected_data
        del instance._raw_data
        # Compare only public attributes against the expected payload
        instance_vars = {
            key: value
            for key, value in vars(instance).items()
            if not key.startswith('_')
        }
        assert instance_vars == expected_data

    def test_class_lack_of_required_keys(self, mocker, data):
        """`__init__` raises ValueError when a required key is missing."""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=data)
class TestMonzoTransaction:
    """Tests for the `api_objects.MonzoTransaction` class."""
    klass = api_objects.MonzoTransaction

    @pytest.fixture(scope='session')
    def data(self, transaction_api_response):
        """Return the payload used to initialize the object."""
        return transaction_api_response['transaction']

    @pytest.fixture(scope='session')
    def instance(self, data):
        """Return an initialized instance of the class under test."""
        return self.klass(data)

    def test_class_inheritance(self, instance):
        """The instance inherits from MonzoTransaction and MonzoObject."""
        assert isinstance(instance, api_objects.MonzoTransaction)
        assert isinstance(instance, api_objects.MonzoObject)

    def test_class_properties(self, instance):
        """The class requires the documented transaction keys."""
        expected_keys = [
            'account_balance', 'amount', 'created', 'currency', 'description',
            'id', 'merchant', 'metadata', 'notes', 'is_load',
        ]
        assert self.klass._required_keys == expected_keys
        assert instance._required_keys == expected_keys

    def test_class_initialization(self, instance, data):
        """`__init__` parses dates and wraps the merchant payload."""
        expected_data = data.copy()
        assert instance._raw_data == expected_data
        del instance._raw_data
        expected_data['created'] = parse_date(expected_data['created'])
        expected_data['settled'] = parse_date(expected_data['settled'])
        expected_data['merchant'] = api_objects.MonzoMerchant(
            data=expected_data['merchant']
        )
        # Compare only public attributes against the expected payload
        instance_vars = {
            key: value
            for key, value in vars(instance).items()
            if not key.startswith('_')
        }
        assert instance_vars == expected_data
        assert isinstance(instance.created, datetime)
        assert isinstance(instance.settled, datetime)
        assert isinstance(instance.merchant, api_objects.MonzoMerchant)

    def test_class_lack_of_required_keys(self, mocker, data):
        """`__init__` raises ValueError when a required key is missing."""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=data)
class TestMonzoMerchant:
    """Tests for the `api_objects.MonzoMerchant` class."""
    klass = api_objects.MonzoMerchant

    @pytest.fixture(scope='session')
    def data(self, transaction_api_response):
        """Return the payload used to initialize the object."""
        return transaction_api_response['transaction']['merchant']

    @pytest.fixture(scope='session')
    def instance(self, data):
        """Return an initialized instance of the class under test."""
        return self.klass(data)

    def test_class_inheritance(self, instance):
        """The instance inherits from MonzoMerchant and MonzoObject."""
        assert isinstance(instance, api_objects.MonzoMerchant)
        assert isinstance(instance, api_objects.MonzoObject)

    def test_class_properties(self, instance):
        """The class requires the documented merchant keys."""
        expected_keys = [
            'address', 'created', 'group_id', 'id',
            'logo', 'emoji', 'name', 'category',
        ]
        assert self.klass._required_keys == expected_keys
        assert instance._required_keys == expected_keys

    def test_class_initialization(self, instance, data):
        """`__init__` keeps the raw payload and parses the created date."""
        expected_data = data.copy()
        assert instance._raw_data == expected_data
        del instance._raw_data
        expected_data['created'] = parse_date(expected_data['created'])
        # Compare only public attributes against the expected payload
        instance_vars = {
            key: value
            for key, value in vars(instance).items()
            if not key.startswith('_')
        }
        assert instance_vars == expected_data
        assert isinstance(instance.created, datetime)

    def test_class_lack_of_required_keys(self, mocker, data):
        """`__init__` raises ValueError when a required key is missing."""
        mocker.patch.multiple(self.klass, _required_keys='baz')
        with pytest.raises(ValueError):
            self.klass(data=data)
| 34.069767 | 89 | 0.651263 | 1,634 | 14,650 | 5.55814 | 0.097307 | 0.051531 | 0.034354 | 0.030059 | 0.800044 | 0.788373 | 0.783418 | 0.762497 | 0.762497 | 0.742788 | 0 | 0.003717 | 0.247031 | 14,650 | 429 | 90 | 34.149184 | 0.819599 | 0.134608 | 0 | 0.634328 | 0 | 0 | 0.066532 | 0.016229 | 0 | 0 | 0 | 0 | 0.182836 | 1 | 0.141791 | false | 0 | 0.026119 | 0 | 0.261194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
dd4c7f81606f7a7524c654b5d977498faf26cc9a | 271 | py | Python | aestudos/bestudos/views.py | DirceuAlmei/repositorio | aae7fd4a6841b0fd4b7ac05dc13d5a426ca35899 | [
"MIT"
] | null | null | null | aestudos/bestudos/views.py | DirceuAlmei/repositorio | aae7fd4a6841b0fd4b7ac05dc13d5a426ca35899 | [
"MIT"
] | null | null | null | aestudos/bestudos/views.py | DirceuAlmei/repositorio | aae7fd4a6841b0fd4b7ac05dc13d5a426ca35899 | [
"MIT"
] | null | null | null | from urllib import request
from django.shortcuts import render
from .import views
# Create your views here.
def EstudosView(request):
return render(request, 'bestudos/estudos.html')
def ProjetoView(request):
return render(request, 'bestudos/projetocordel.html')
| 27.1 | 57 | 0.782288 | 34 | 271 | 6.235294 | 0.558824 | 0.122642 | 0.179245 | 0.245283 | 0.320755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129151 | 271 | 9 | 58 | 30.111111 | 0.898305 | 0.084871 | 0 | 0 | 0 | 0 | 0.195122 | 0.195122 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0.428571 | 0.285714 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
06d5eae9b8c563878e8b0c13b29c421da900c307 | 17,752 | py | Python | pedidos/tests.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 1 | 2019-02-13T11:01:25.000Z | 2019-02-13T11:01:25.000Z | pedidos/tests.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 619 | 2018-11-26T06:11:05.000Z | 2022-03-31T22:56:13.000Z | pedidos/tests.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 1 | 2020-03-12T16:34:13.000Z | 2020-03-12T16:34:13.000Z | import base64 # for decoding base64 image
from io import BytesIO
from django.contrib.auth.models import User, Group
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.test import TestCase, RequestFactory, Client
from django.urls import reverse
from core.models import UserProfile
from pedidos.models import Pedido
from pedidos.views import pedidos_list, pedido_add_item, pedido_aberto, pedido_checkout, pedido_details, \
pedido_export_pdf, pedido_delivery_term_pdf, pedido_delivery_term_with_order_pdf, pedidos_list_separacao, \
pedidos_list_separados, pedido_separacao
from products.models import Produto, CustomCoeficiente, CustomCoeficienteItens
class PedidosTestCase(TestCase):
    def setUp(self):
        """Build the shared fixtures: users/groups, a product with a custom
        coefficient for the partner, user profiles and an open order."""
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        self.client = Client()
        # Manager user (member of the 'Gerente' group)
        self.user_gerente = User.objects.create_user(username='jacob', email='jacob@…', password='top_secret')
        self.group_gerente = Group.objects.create(name='Gerente')
        self.group_gerente.user_set.add(self.user_gerente)
        # Partner user (member of the 'Parceiro' group)
        self.user_parceiro = User.objects.create_user(username='joe', email='joe@…', password='top_secret')
        self.group_parceiro = Group.objects.create(name='Parceiro')
        self.group_parceiro.user_set.add(self.user_parceiro)
        # Second partner user — used for the "not owner" permission tests
        self.user_parceiro2 = User.objects.create_user(username='robert', email='robert@…', password='top_secret')
        self.group_parceiro.user_set.add(self.user_parceiro2)
        # Product with pricing inputs used to derive the partner price
        self.product = Produto.objects.create(codigo='TYL-1080',
                                              descricao='Tubo de Laser Yong Li - 80w - R3',
                                              pago_na_china=880,
                                              reminmbi=6.84,
                                              dolar_cotado=3.89,
                                              impostos_na_china=0,
                                              porcentagem_importacao=0.52,
                                              coeficiente=0.50)
        # Partner-specific coefficient override for the product above
        self.custom_coeficiente = CustomCoeficiente.objects.create(parceiro=self.user_parceiro)
        self.custom_coeficiente_item = CustomCoeficienteItens.objects.create(parceiro=self.custom_coeficiente,
                                                                             produto=self.product,
                                                                             coeficiente=0.10)
        # User profiles with a minimal in-memory 1x1 GIF as the avatar
        image_thumb = '''
        R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
        '''.strip()
        self.image = InMemoryUploadedFile(
            BytesIO(base64.b64decode(image_thumb)),  # use io.BytesIO
            field_name='tempfile',
            name='tempfile.png',
            content_type='image/png',
            size=len(image_thumb),
            charset='utf-8',
        )
        self.user_profile_parceiro = UserProfile.objects.create(user=self.user_parceiro, avatar=str(self.image))
        self.user_profile_gerente = UserProfile.objects.create(user=self.user_gerente, avatar=str(self.image))
        # Open order owned by the partner, reused by most tests below
        self.pedido_aberto = Pedido.objects.create(parceiro=self.user_parceiro)
def test_pedido_add_item(self):
item = self.product
request = self.factory.post(reverse('pedido_add_item', kwargs={'codigo': item.codigo}))
request.user = self.user_parceiro
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
response = pedido_add_item(request, codigo=item.codigo)
self.assertEqual(response.status_code, 302)
def test_pedido_aberto_view_parceiro(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
request = self.factory.get(reverse('pedido_aberto'))
request.user = self.user_parceiro
response = pedido_aberto(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(pedido.get_status_display(), 'Aberto')
def test_pedidos_list_view_anonimo(self):
self.client.logout()
response = self.client.get(reverse('pedidos_list'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/pedido/list/',
status_code=302, target_status_code=200)
def test_pedidos_list_view_gerente(self):
request = self.factory.get(reverse('pedidos_list'))
request.user = self.user_gerente
response = pedidos_list(request)
self.assertEqual(response.status_code, 200)
def test_pedidos_list_view_parceiro(self):
request = self.factory.get(reverse('pedidos_list'))
request.user = self.user_parceiro
response = pedidos_list(request)
self.assertEqual(response.status_code, 200)
def test_pedidos_para_separar_list_view_anonimo(self):
self.client.logout()
response = self.client.get(reverse('pedidos_list_separacao'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/pedido/list/separacao/',
status_code=302, target_status_code=200)
def test_pedidos_para_separar_list_view_parceiro(self):
request = self.factory.get(reverse('pedidos_list_separacao'))
request.user = self.user_parceiro
response = pedidos_list_separacao(request)
self.assertEqual(response.status_code, 302)
def test_pedidos_para_separar_list_view_gerente(self):
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedidos_list_separacao'))
request.user = self.user_gerente
response = pedidos_list_separacao(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(pedido.get_status_display(), 'Aberto')
# Parceiro adiciona item ao pedido
self.test_pedido_add_item()
# Parceiro faz checkout
self.test_pedido_checkout()
# Testa se pedido alterou status para 'Enviado'
self.assertEqual(pedido.get_status_display(), 'Enviado')
response = pedidos_list_separacao(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'R$ 1.319,70')
def test_pedidos_separados_list_view_anonimo(self):
self.client.logout()
response = self.client.get(reverse('pedidos_list_separados'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/pedido/list/separados/',
status_code=302, target_status_code=200)
def test_pedidos_separados_list_view_parceiro(self):
request = self.factory.get(reverse('pedidos_list_separados'))
request.user = self.user_parceiro
response = pedidos_list_separados(request)
self.assertEqual(response.status_code, 302)
def test_pedidos_separados_list_view_gerente(self):
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedidos_list_separados'))
request.user = self.user_gerente
response = pedidos_list_separados(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(pedido.get_status_display(), 'Aberto')
# Parceiro adiciona item ao pedido
self.test_pedido_add_item()
# Parceiro faz checkout
self.test_pedido_checkout()
# Testa se pedido alterou status para 'Enviado'
self.assertEqual(pedido.get_status_display(), 'Enviado')
# Marca pedido como separado
pedido.separado = True
pedido.save()
response = pedidos_list_separados(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'R$ 1.319,70')
def test_pedido_em_separacao_view_anonimo(self):
pedido = self.pedido_aberto
self.client.logout()
response = self.client.get(reverse('pedido_separacao', kwargs={'pk': pedido.pk}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, f'/accounts/login/?next=/pedido/separacao/{pedido.pk}/',
status_code=302, target_status_code=200)
def test_pedido_em_separacao_view_parceiro(self):
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedido_separacao', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro
response = pedido_separacao(request, self.pedido_aberto.pk)
self.assertEqual(response.status_code, 302)
def test_pedido_em_separacao_view_gerente(self):
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedido_separacao', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
response = pedido_separacao(request, self.pedido_aberto.pk)
self.assertEqual(response.status_code, 200)
def test_pedido_checkout(self):
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedido_checkout', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
self.assertEqual(pedido.status, 0)
response = pedido_checkout(request, pedido.pk)
self.assertEqual(response.status_code, 200)
pedido.refresh_from_db()
self.assertEqual(pedido.status, 1)
def test_pedido_checkout_not_owner(self):
"""
Testa checkout quando o usuário não é dono do pedido em aberto.
:return: Deve retornar status_code = 302 e redirecionar para dashboard.
"""
pedido = self.pedido_aberto
request = self.factory.get(reverse('pedido_checkout', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
self.assertEqual(pedido.status, 0)
response = pedido_checkout(request, pedido.pk)
self.assertEqual(response.status_code, 302)
pedido.refresh_from_db()
self.assertEqual(pedido.status, 0)
def test_pedido_detais_view_by_owner(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_details', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro
response = pedido_details(request, pedido.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(pedido.get_status_display(), 'Enviado')
def test_pedido_detais_view_not_owner(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_details', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro2
response = pedido_details(request, pedido.pk)
self.assertEqual(response.status_code, 302)
def test_pedido_detais_view_as_gerente(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_details', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
response = pedido_details(request, pedido.pk)
self.assertEqual(response.status_code, 200)
def test_pedido_export_pdf_anonimo(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
self.client.logout()
response = self.client.get(reverse('pedido_export_pdf', kwargs={'pk': pedido.pk}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response,
f'/accounts/login/?next=/pedido/export/pdf/{pedido.pk}/',
status_code=302,
target_status_code=200)
def test_pedido_export_pdf_as_gerente(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
response = pedido_export_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 200)
def test_pedido_export_pdf_not_owner(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro2
response = pedido_export_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 302)
def test_pedido_export_delivery_term_pdf_anonimo(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
self.client.logout()
response = self.client.get(reverse('pedido_export_delivery_term_pdf', kwargs={'pk': pedido.pk}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response,
f'/accounts/login/?next=/pedido/export/pdf/deliveryterm/{pedido.pk}/',
status_code=302,
target_status_code=200)
def test_pedido_export_delivery_term_pdf_as_gerente(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_delivery_term_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
response = pedido_delivery_term_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 200)
def test_pedido_export_delivery_term_pdf_not_owner(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_delivery_term_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro2
response = pedido_delivery_term_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 302)
def test_pedido_export_complete_pdf_anonimo(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
self.client.logout()
response = self.client.get(reverse('pedido_export_complete_pdf', kwargs={'pk': pedido.pk}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response,
f'/accounts/login/?next=/pedido/export/pdf/completo/{pedido.pk}/',
status_code=302,
target_status_code=200)
def test_pedido_export_complete_pdf_as_gerente(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_complete_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_gerente
response = pedido_delivery_term_with_order_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 200)
def test_pedido_export_complete_pdf_not_owner(self):
pedido = self.pedido_aberto
self.assertEqual(pedido.pedidoitem_set.values().count(), 0)
self.test_pedido_add_item()
self.assertEqual(pedido.pedidoitem_set.values().count(), 1)
self.test_pedido_checkout()
request = self.factory.get(reverse('pedido_export_complete_pdf', kwargs={'pk': pedido.pk}))
request.user = self.user_parceiro2
response = pedido_delivery_term_with_order_pdf(request, pedido.pk)
self.assertEqual(response.status_code, 302)
| 41.769412 | 114 | 0.667192 | 2,041 | 17,752 | 5.5561 | 0.097011 | 0.087302 | 0.066667 | 0.07672 | 0.819665 | 0.796825 | 0.768254 | 0.75194 | 0.732187 | 0.720899 | 0 | 0.016233 | 0.229608 | 17,752 | 424 | 115 | 41.867925 | 0.812299 | 0.032954 | 0 | 0.669935 | 0 | 0 | 0.076056 | 0.041883 | 0 | 0 | 0 | 0 | 0.245098 | 1 | 0.094771 | false | 0.009804 | 0.039216 | 0 | 0.137255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
06f191fa713641dbb2fb2a3266208c851846c21a | 22,882 | py | Python | boto3_type_annotations_with_docs/boto3_type_annotations/es/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/es/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/es/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Dict
from botocore.paginate import Paginator
class DescribeReservedElasticsearchInstanceOfferings(Paginator):
def paginate(self, ReservedElasticsearchInstanceOfferingId: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`ElasticsearchService.Client.describe_reserved_elasticsearch_instance_offerings`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeReservedElasticsearchInstanceOfferings>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ReservedElasticsearchInstanceOfferingId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ReservedElasticsearchInstanceOfferings': [
{
'ReservedElasticsearchInstanceOfferingId': 'string',
'ElasticsearchInstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'PaymentOption': 'ALL_UPFRONT'|'PARTIAL_UPFRONT'|'NO_UPFRONT',
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
**Response Structure**
- *(dict) --*
Container for results from ``DescribeReservedElasticsearchInstanceOfferings``
- **ReservedElasticsearchInstanceOfferings** *(list) --*
List of reserved Elasticsearch instance offerings
- *(dict) --*
Details of a reserved Elasticsearch instance offering.
- **ReservedElasticsearchInstanceOfferingId** *(string) --*
The Elasticsearch reserved instance offering identifier.
- **ElasticsearchInstanceType** *(string) --*
The Elasticsearch instance type offered by the reserved instance offering.
- **Duration** *(integer) --*
The duration, in seconds, for which the offering will reserve the Elasticsearch instance.
- **FixedPrice** *(float) --*
The upfront fixed charge you will pay to purchase the specific reserved Elasticsearch instance offering.
- **UsagePrice** *(float) --*
The rate you are charged for each hour the domain that is using the offering is running.
- **CurrencyCode** *(string) --*
The currency code for the reserved Elasticsearch instance offering.
- **PaymentOption** *(string) --*
Payment option for the reserved Elasticsearch instance offering
- **RecurringCharges** *(list) --*
The charge to your account regardless of whether you are creating any domains using the instance offering.
- *(dict) --*
Contains the specific price and frequency of a recurring charges for a reserved Elasticsearch instance, or for a reserved Elasticsearch instance offering.
- **RecurringChargeAmount** *(float) --*
The monetary amount of the recurring charge.
- **RecurringChargeFrequency** *(string) --*
The frequency of the recurring charge.
:type ReservedElasticsearchInstanceOfferingId: string
:param ReservedElasticsearchInstanceOfferingId:
The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeReservedElasticsearchInstances(Paginator):
def paginate(self, ReservedElasticsearchInstanceId: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`ElasticsearchService.Client.describe_reserved_elasticsearch_instances`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeReservedElasticsearchInstances>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ReservedElasticsearchInstanceId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ReservedElasticsearchInstances': [
{
'ReservationName': 'string',
'ReservedElasticsearchInstanceId': 'string',
'ReservedElasticsearchInstanceOfferingId': 'string',
'ElasticsearchInstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'StartTime': datetime(2015, 1, 1),
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ElasticsearchInstanceCount': 123,
'State': 'string',
'PaymentOption': 'ALL_UPFRONT'|'PARTIAL_UPFRONT'|'NO_UPFRONT',
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
**Response Structure**
- *(dict) --*
Container for results from ``DescribeReservedElasticsearchInstances``
- **ReservedElasticsearchInstances** *(list) --*
List of reserved Elasticsearch instances.
- *(dict) --*
Details of a reserved Elasticsearch instance.
- **ReservationName** *(string) --*
The customer-specified identifier to track this reservation.
- **ReservedElasticsearchInstanceId** *(string) --*
The unique identifier for the reservation.
- **ReservedElasticsearchInstanceOfferingId** *(string) --*
The offering identifier.
- **ElasticsearchInstanceType** *(string) --*
The Elasticsearch instance type offered by the reserved instance offering.
- **StartTime** *(datetime) --*
The time the reservation started.
- **Duration** *(integer) --*
The duration, in seconds, for which the Elasticsearch instance is reserved.
- **FixedPrice** *(float) --*
The upfront fixed charge you will paid to purchase the specific reserved Elasticsearch instance offering.
- **UsagePrice** *(float) --*
The rate you are charged for each hour for the domain that is using this reserved instance.
- **CurrencyCode** *(string) --*
The currency code for the reserved Elasticsearch instance offering.
- **ElasticsearchInstanceCount** *(integer) --*
The number of Elasticsearch instances that have been reserved.
- **State** *(string) --*
The state of the reserved Elasticsearch instance.
- **PaymentOption** *(string) --*
The payment option as defined in the reserved Elasticsearch instance offering.
- **RecurringCharges** *(list) --*
The charge to your account regardless of whether you are creating any domains using the instance offering.
- *(dict) --*
Contains the specific price and frequency of a recurring charges for a reserved Elasticsearch instance, or for a reserved Elasticsearch instance offering.
- **RecurringChargeAmount** *(float) --*
The monetary amount of the recurring charge.
- **RecurringChargeFrequency** *(string) --*
The frequency of the recurring charge.
:type ReservedElasticsearchInstanceId: string
:param ReservedElasticsearchInstanceId:
The reserved instance identifier filter value. Use this parameter to show only the reservation that matches the specified reserved Elasticsearch instance ID.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetUpgradeHistory(Paginator):
def paginate(self, DomainName: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`ElasticsearchService.Client.get_upgrade_history`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/GetUpgradeHistory>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DomainName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'UpgradeHistories': [
{
'UpgradeName': 'string',
'StartTimestamp': datetime(2015, 1, 1),
'UpgradeStatus': 'IN_PROGRESS'|'SUCCEEDED'|'SUCCEEDED_WITH_ISSUES'|'FAILED',
'StepsList': [
{
'UpgradeStep': 'PRE_UPGRADE_CHECK'|'SNAPSHOT'|'UPGRADE',
'UpgradeStepStatus': 'IN_PROGRESS'|'SUCCEEDED'|'SUCCEEDED_WITH_ISSUES'|'FAILED',
'Issues': [
'string',
],
'ProgressPercent': 123.0
},
]
},
],
}
**Response Structure**
- *(dict) --*
Container for response returned by `` GetUpgradeHistory `` operation.
- **UpgradeHistories** *(list) --*
A list of `` UpgradeHistory `` objects corresponding to each Upgrade or Upgrade Eligibility Check performed on a domain returned as part of `` GetUpgradeHistoryResponse `` object.
- *(dict) --*
History of the last 10 Upgrades and Upgrade Eligibility Checks.
- **UpgradeName** *(string) --*
A string that describes the update briefly
- **StartTimestamp** *(datetime) --*
UTC Timestamp at which the Upgrade API call was made in "yyyy-MM-ddTHH:mm:ssZ" format.
- **UpgradeStatus** *(string) --*
The overall status of the update. The status can take one of the following values:
* In Progress
* Succeeded
* Succeeded with Issues
* Failed
- **StepsList** *(list) --*
A list of `` UpgradeStepItem `` s representing information about each step performed as pard of a specific Upgrade or Upgrade Eligibility Check.
- *(dict) --*
Represents a single step of the Upgrade or Upgrade Eligibility Check workflow.
- **UpgradeStep** *(string) --*
Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check does through:
* PreUpgradeCheck
* Snapshot
* Upgrade
- **UpgradeStepStatus** *(string) --*
The status of a particular step during an upgrade. The status can take one of the following values:
* In Progress
* Succeeded
* Succeeded with Issues
* Failed
- **Issues** *(list) --*
A list of strings containing detailed information about the errors encountered in a particular step.
- *(string) --*
- **ProgressPercent** *(float) --*
The Floating point value representing progress percentage of a particular step.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListElasticsearchInstanceTypes(Paginator):
def paginate(self, ElasticsearchVersion: str, DomainName: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`ElasticsearchService.Client.list_elasticsearch_instance_types`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListElasticsearchInstanceTypes>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ElasticsearchVersion='string',
DomainName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ElasticsearchInstanceTypes': [
'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
],
}
**Response Structure**
- *(dict) --*
Container for the parameters returned by `` ListElasticsearchInstanceTypes `` operation.
- **ElasticsearchInstanceTypes** *(list) --*
List of instance types supported by Amazon Elasticsearch service for given `` ElasticsearchVersion ``
- *(string) --*
:type ElasticsearchVersion: string
:param ElasticsearchVersion: **[REQUIRED]**
Version of Elasticsearch for which list of supported elasticsearch instance types are needed.
:type DomainName: string
:param DomainName:
DomainName represents the name of the Domain that we are trying to modify. This should be present only if we are querying for list of available Elasticsearch instance types when modifying existing domain.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListElasticsearchVersions(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`ElasticsearchService.Client.list_elasticsearch_versions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListElasticsearchVersions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ElasticsearchVersions': [
'string',
],
}
**Response Structure**
- *(dict) --*
Container for the parameters for response received from `` ListElasticsearchVersions `` operation.
- **ElasticsearchVersions** *(list) --*
List of supported elastic search versions.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
| 59.900524 | 1,110 | 0.58924 | 2,014 | 22,882 | 6.67577 | 0.16137 | 0.033916 | 0.034511 | 0.0119 | 0.703012 | 0.67765 | 0.659874 | 0.646634 | 0.631462 | 0.619338 | 0 | 0.020267 | 0.312123 | 22,882 | 381 | 1,111 | 60.057743 | 0.833926 | 0.836378 | 0 | 0.294118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.294118 | false | 0.294118 | 0.117647 | 0 | 0.705882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
662232304288b85cfea8157445920bb2f850f851 | 51,750 | py | Python | meta-iotqa/lib/oeqa/runtime/programming/nodejs/rest_apis.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 36 | 2017-02-20T04:04:28.000Z | 2022-02-17T05:36:33.000Z | meta-iotqa/lib/oeqa/runtime/programming/nodejs/rest_apis.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 284 | 2017-02-06T08:51:52.000Z | 2021-11-03T16:52:16.000Z | meta-iotqa/lib/oeqa/runtime/programming/nodejs/rest_apis.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 65 | 2017-02-03T12:36:16.000Z | 2021-02-18T11:00:46.000Z | # -*- coding:utf8 -*-
__author__ = 'qiuzhong'
__version__ = '0.0.1'
import os
import subprocess
import sys
import shutil
from oeqa.oetest import oeRuntimeTest
class RESTAPITest(oeRuntimeTest):
'''
The test case checks whether the REST APIs works well.
@class RESTAPITest
'''
rest_api = 'restapis'
files_dir = None
rest_api_dir = None
target_rest_api_dir = '/tmp/%s' % rest_api
nodeunit_zip = None
rest_api_js_files = {
'api_system': 'nodeunit_test_api_system.js',
'api_oic_d': 'nodeunit_test_api_oic_d.js',
'api_oic_p': 'nodeunit_test_api_oic_p.js',
'api_oic_res': 'nodeunit_test_api_oic_res.js'
}
@classmethod
def all_files_exists(cls):
'''
See wether all the files exists.
:return:
@fn all_files_exists
@param cls
@return
'''
for test_file in cls.rest_api_js_files.values():
if not os.path.exists(os.path.join(os.path.dirname(__file__),
'files', cls.rest_api,
test_file)):
return False
return True
@classmethod
def node_module_path(cls):
'''
Install the module path via npm
@fn node_module_path
@param cls
@return
'''
path_module_path = '/tmp/node_modules/path'
if os.path.exists(path_module_path):
shutil.rmtree(path_module_path)
proc = subprocess.Popen(['npm', 'install', 'path'], cwd='/tmp/')
proc.wait()
if proc and proc.returncode == 0 and os.path.exists(path_module_path):
oldscp = cls.tc.target.connection.scp[:]
cls.tc.target.connection.scp.insert(1, '-r')
cls.tc.target.run('cd /tmp; mkdir node_modules;')
cls.tc.target.copy_to(
path_module_path,
'/tmp/node_modules'
)
cls.tc.target.connection.scp[:] = oldscp
else:
print ('Install node module path failed')
sys.exit(1)
@classmethod
def setUpClass(cls):
'''
Copy all the JavaScript files to the target system.
@fn setUpClass
@param cls
@return
'''
cls.files_dir = os.path.join(os.path.dirname(__file__), 'files')
cls.rest_api_dir = os.path.join(os.path.dirname(__file__),
'files', cls.rest_api).rstrip('/')
cls.tc.target.run('rm -fr %s' % cls.target_rest_api_dir)
cls.tc.target.run('rm -f %s.tar' % cls.target_rest_api_dir)
if os.path.exists('%s.tar' % cls.rest_api_dir):
os.remove('%s.tar' % cls.rest_api_dir)
# compress restapi directory and copy it to target device.
proc = None
if cls.all_files_exists():
proc = subprocess.Popen(
['tar', '-cf', '%s.tar' % cls.rest_api, cls.rest_api],
cwd = cls.files_dir)
proc.wait()
if proc and proc.returncode == 0 and \
os.path.exists('%s.tar' % cls.rest_api_dir):
cls.tc.target.copy_to(
os.path.join(
os.path.dirname(__file__),
'files',
'%s.tar' % cls.rest_api),
'%s.tar' % cls.target_rest_api_dir)
cls.tc.target.run('cd /tmp/; ' \
'tar -xf %s.tar -C %s/' % (
cls.target_rest_api_dir,
os.path.dirname(cls.target_rest_api_dir))
)
# Install and copy the node module path to device
cls.node_module_path()
#Start the server iot-rest-api-server
cls.tc.target.run('systemctl stop iot-rest-api-server.socket; systemctl stop iot-rest-api-server.service')
check_process_cmd = 'ps | grep "/usr/lib/node_modules/iot-rest-api" | grep -v grep | awk "{print $4}"'
(status, output) = cls.tc.target.run(check_process_cmd)
if '/usr/lib/node_modules/iot-rest-api' not in output:
cls.tc.target.run('systemctl start iot-rest-api-server.socket')
(status, output) = cls.tc.target.run('unset http_proxy; curl http://%s:8000/api/oic/d' % (cls.tc.target.ip))
(status, output) = cls.tc.target.run(check_process_cmd)
if '/usr/lib/node_modules/iot-rest-api' not in output:
print ("The iot-rest-api-server doesn't start!")
sys.exit(1)
# Download nodeunit from git hub
proc = subprocess.Popen(['wget', 'https://github.com/caolan/nodeunit/archive/master.zip'],
cwd = cls.files_dir)
proc.wait()
cls.nodeunit_zip = os.path.join(os.path.dirname(__file__),
'files',
'master.zip')
if os.path.exists(cls.nodeunit_zip):
#change nodeunit zip to tar
os.chdir(cls.files_dir)
os.system('unzip -oq %s; cd nodeunit-master;npm install;cd ..;tar -cf master.tar nodeunit-master; rm -rf nodeunit-master' %\
(cls.nodeunit_zip)
)
cls.nodeunit_tar = os.path.join(cls.files_dir, 'master.tar')
cls.tc.target.copy_to(cls.nodeunit_tar, '/tmp/master.tar')
cls.tc.target.run('cd /tmp/; ' \
'tar -xf master.tar;' \
'chmod +x /tmp/nodeunit-master/bin/nodeunit'
)
cls.tc.target.run("/usr/sbin/nft add chain inet filter rest_api { type filter hook input priority 0\; }")
cls.tc.target.run("/usr/sbin/nft add rule inet filter rest_api udp dport 5683 accept")
cls.tc.target.run('/opt/iotivity/examples/resource/c/SimpleClientServer/ocserver -o 0')
for api, api_js in cls.rest_api_js_files.items():
cls.tc.target.run('cd %s; node %s' % (cls.target_rest_api_dir, api_js) )
def test_api_system_status_code(self):
'''
Test status code of response of /api/system is 200
@fn test_api_system_status_code
@param self
@return
'''
(api_status, api_output) = self.target.run(
'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t testApiSystemStatusCode' % (
self.target_rest_api_dir,
self.target_rest_api_dir,
self.rest_api_js_files['api_system']
)
)
##
# TESTPOINT: #1, test_api_system_status_code
#
self.assertEqual(api_status, 0)
##
# TESTPOINT: #2, test_api_system_status_code
#
self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_system_has_hostname(self):
'''
Test if the response of /api/system has hostname property.
@fn test_api_system_has_hostname
@param self
@return
'''
(api_status, api_output) = self.target.run(
'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t testApiSystemHostnameNotNull' % (
self.target_rest_api_dir,
self.target_rest_api_dir,
self.rest_api_js_files['api_system']
)
)
##
# TESTPOINT: #1, test_api_system_has_hostname
#
self.assertEqual(api_status, 0)
##
# TESTPOINT: #2, test_api_system_has_hostname
#
self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_system_hostname_type(self):
'''
Test if type of hostname property in response is a string.
@fn test_api_system_hostname_type
@param self
@return
'''
(api_status, api_output) = self.target.run(
'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t testApiSystemHostnameType' % (
self.target_rest_api_dir,
self.target_rest_api_dir,
self.rest_api_js_files['api_system']
)
)
##
# TESTPOINT: #1, test_api_system_hostname_type
#
self.assertEqual(api_status, 0)
##
# TESTPOINT: #2, test_api_system_hostname_type
#
self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_system_hostname_value(self):
'''
Test if value of hostname property in response is OK.
@fn test_api_system_hostname_value
@param self
@return
'''
(api_status, api_output) = self.target.run(
'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t testApiSystemHostnameValue' % (
self.target_rest_api_dir,
self.target_rest_api_dir,
self.rest_api_js_files['api_system']
)
)
##
# TESTPOINT: #1, test_api_system_hostname_value
#
self.assertEqual(api_status, 0)
##
# TESTPOINT: #2, test_api_system_hostname_value
#
self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def _assert_api_system_case(self, case):
    '''
    Run one nodeunit case from the api_system suite on the target and
    assert that it exits with status 0 and that the final line of its
    output is the "OK:" summary.
    @fn _assert_api_system_case
    @param self
    @param case name of the nodeunit test case to select with -t
    @return
    '''
    (api_status, api_output) = self.target.run(
        'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t %s' % (
            self.target_rest_api_dir,
            self.target_rest_api_dir,
            self.rest_api_js_files['api_system'],
            case
        )
    )
    self.assertEqual(api_status, 0)
    self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_system_has_type(self):
    '''
    Test if the response of /api/system has type property.
    @fn test_api_system_has_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_type
    # TESTPOINT: #2, test_api_system_has_type
    #
    self._assert_api_system_case('testApiSystemTypeNotNull')
def test_api_system_type_type(self):
    '''
    Test if type of type property in response is a string.
    @fn test_api_system_type_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_type_type
    # TESTPOINT: #2, test_api_system_type_type
    #
    self._assert_api_system_case('testApiSystemTypeType')
def test_api_system_type_value(self):
    '''
    Test if value of type property in response is OK.
    @fn test_api_system_type_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_type_value
    # TESTPOINT: #2, test_api_system_type_value
    #
    self._assert_api_system_case('testApiSystemTypeValue')
def test_api_system_has_arch(self):
    '''
    Test if the response of /api/system has arch property.
    @fn test_api_system_has_arch
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_arch
    # TESTPOINT: #2, test_api_system_has_arch
    #
    self._assert_api_system_case('testApiSystemArchNotNull')
def test_api_system_arch_type(self):
    '''
    Test if type of arch property in response is a string.
    @fn test_api_system_arch_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_arch_type
    # TESTPOINT: #2, test_api_system_arch_type
    #
    self._assert_api_system_case('testApiSystemArchType')
def test_api_system_arch_value(self):
    '''
    Test if value of arch property in response is OK.
    @fn test_api_system_arch_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_arch_value
    # TESTPOINT: #2, test_api_system_arch_value
    #
    self._assert_api_system_case('testApiSystemArchValue')
def test_api_system_has_release(self):
    '''
    Test if the response of /api/system has release property.
    @fn test_api_system_has_release
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_release
    # TESTPOINT: #2, test_api_system_has_release
    #
    self._assert_api_system_case('testApiSystemReleaseNotNull')
def test_api_system_release_type(self):
    '''
    Test if type of release property in response is a string.
    @fn test_api_system_release_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_release_type
    # TESTPOINT: #2, test_api_system_release_type
    #
    self._assert_api_system_case('testApiSystemReleaseType')
def test_api_system_release_value(self):
    '''
    Test if value of release property in response is OK.
    @fn test_api_system_release_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_release_value
    # TESTPOINT: #2, test_api_system_release_value
    #
    self._assert_api_system_case('testApiSystemReleaseValue')
def test_api_system_has_uptime(self):
    '''
    Test if the response of /api/system has uptime property.
    @fn test_api_system_has_uptime
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_uptime
    # TESTPOINT: #2, test_api_system_has_uptime
    #
    self._assert_api_system_case('testApiSystemUptimeNotNull')
def test_api_system_uptime_type(self):
    '''
    Test if type of uptime property in response is a number.
    @fn test_api_system_uptime_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_uptime_type
    # TESTPOINT: #2, test_api_system_uptime_type
    #
    self._assert_api_system_case('testApiSystemUptimeType')
def test_api_system_has_loadavg(self):
    '''
    Test if the response of /api/system has loadavg property.
    @fn test_api_system_has_loadavg
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_loadavg
    # TESTPOINT: #2, test_api_system_has_loadavg
    #
    self._assert_api_system_case('testApiSystemLoadavgNotNull')
def test_api_system_loadavg_type(self):
    '''
    Test if type of loadavg property in response is an array.
    @fn test_api_system_loadavg_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_loadavg_type
    # TESTPOINT: #2, test_api_system_loadavg_type
    #
    self._assert_api_system_case('testApiSystemLoadavgType')
def test_api_system_has_totalmem(self):
    '''
    Test if the response of /api/system has totalmem property.
    @fn test_api_system_has_totalmem
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_totalmem
    # TESTPOINT: #2, test_api_system_has_totalmem
    #
    self._assert_api_system_case('testApiSystemTotalmemNotNull')
def test_api_system_totalmem_type(self):
    '''
    Test if type of totalmem property in response is a string.
    @fn test_api_system_totalmem_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_totalmem_type
    # TESTPOINT: #2, test_api_system_totalmem_type
    #
    self._assert_api_system_case('testApiSystemTotalmemType')
def test_api_system_totalmem_value(self):
    '''
    Test if value of totalmem property in response is OK.
    @fn test_api_system_totalmem_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_totalmem_value
    # TESTPOINT: #2, test_api_system_totalmem_value
    #
    self._assert_api_system_case('testApiSystemTotalmemValue')
def test_api_system_has_freemem(self):
    '''
    Test if the response of /api/system has freemem property.
    @fn test_api_system_has_freemem
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_freemem
    # TESTPOINT: #2, test_api_system_has_freemem
    #
    self._assert_api_system_case('testApiSystemFreememNotNull')
def test_api_system_freemem_type(self):
    '''
    Test if type of freemem property in response is a string.
    @fn test_api_system_freemem_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_freemem_type
    # TESTPOINT: #2, test_api_system_freemem_type
    #
    self._assert_api_system_case('testApiSystemFreememType')
def test_api_system_has_cpus(self):
    '''
    Test if the response of /api/system has cpus property.
    @fn test_api_system_has_cpus
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_has_cpus
    # TESTPOINT: #2, test_api_system_has_cpus
    #
    self._assert_api_system_case('testApiSystemCpusNotNull')
def test_api_system_cpus_type(self):
    '''
    Test if type of cpus property in response is an array.
    @fn test_api_system_cpus_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_cpus_type
    # TESTPOINT: #2, test_api_system_cpus_type
    #
    self._assert_api_system_case('testApiSystemCpusType')
def test_api_system_cpus_value(self):
    '''
    Test if value of cpus property in response is OK.
    @fn test_api_system_cpus_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_cpus_value
    # TESTPOINT: #2, test_api_system_cpus_value
    #
    self._assert_api_system_case('testApiSystemCpusValue')
def test_api_system_networkinterfaces_value(self):
    '''
    Test if value of networkinterfaces property in response is OK.
    @fn test_api_system_networkinterfaces_value
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_system_networkinterfaces_value
    # TESTPOINT: #2, test_api_system_networkinterfaces_value
    #
    self._assert_api_system_case('testApiSystemNetworkInterfacesValue')
def _assert_api_oic_d_case(self, case):
    '''
    Run one nodeunit case from the api_oic_d suite on the target and
    assert that it exits with status 0 and that the final line of its
    output is the "OK:" summary.
    @fn _assert_api_oic_d_case
    @param self
    @param case name of the nodeunit test case to select with -t
    @return
    '''
    (api_status, api_output) = self.target.run(
        'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t %s' % (
            self.target_rest_api_dir,
            self.target_rest_api_dir,
            self.rest_api_js_files['api_oic_d'],
            case
        )
    )
    self.assertEqual(api_status, 0)
    self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_oic_d_status_code(self):
    '''
    Test status code of response to /api/oic/d is 200
    @fn test_api_oic_d_status_code
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_status_code
    # TESTPOINT: #2, test_api_oic_d_status_code
    #
    self._assert_api_oic_d_case('testApiOicDStatusCode')
def test_api_oic_d_has_required_n(self):
    '''
    Test if the response of /api/oic/d has required property n.
    @fn test_api_oic_d_has_required_n
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_has_required_n
    # TESTPOINT: #2, test_api_oic_d_has_required_n
    #
    self._assert_api_oic_d_case('testApiOicDRequiredNNotNull')
def test_api_oic_d_required_n_type(self):
    '''
    Test if the type of n property in response is string.
    @fn test_api_oic_d_required_n_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_required_n_type
    # TESTPOINT: #2, test_api_oic_d_required_n_type
    #
    self._assert_api_oic_d_case('testApiOicDRequiredNType')
def test_api_oic_d_has_required_di(self):
    '''
    Test if the response of /api/oic/d has required property di.
    @fn test_api_oic_d_has_required_di
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_has_required_di
    # TESTPOINT: #2, test_api_oic_d_has_required_di
    #
    self._assert_api_oic_d_case('testApiOicDRequiredDiNotNull')
def test_api_oic_d_required_di_type(self):
    '''
    Test if the type of di property in response is string.
    @fn test_api_oic_d_required_di_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_required_di_type
    # TESTPOINT: #2, test_api_oic_d_required_di_type
    #
    self._assert_api_oic_d_case('testApiOicDRequiredDiType')
def test_api_oic_d_required_di_value_uuid(self):
    '''
    Test if the value of di property in response is UUID format.
    @fn test_api_oic_d_required_di_value_uuid
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_required_di_value_uuid
    # TESTPOINT: #2, test_api_oic_d_required_di_value_uuid
    #
    self._assert_api_oic_d_case('testApiOicDRequiredDiUuid')
def test_api_oic_d_has_required_icv(self):
    '''
    Test if the response of /api/oic/d has required property icv.
    @fn test_api_oic_d_has_required_icv
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_has_required_icv
    # TESTPOINT: #2, test_api_oic_d_has_required_icv
    #
    self._assert_api_oic_d_case('testApiOicDRequiredIcvNotNull')
def test_api_oic_d_required_icv_type(self):
    '''
    Test if the type of icv property in response is string.
    @fn test_api_oic_d_required_icv_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_required_icv_type
    # TESTPOINT: #2, test_api_oic_d_required_icv_type
    #
    # NOTE(review): this case name breaks the testApiOicD* pattern used by
    # every sibling test -- confirm it matches the name in the JS suite.
    self._assert_api_oic_d_case('testApiOicRequiredDIcvType')
def test_api_oic_d_optional_dmv_type(self):
    '''
    Test if the type of dmv property (if it exists) in response is string.
    @fn test_api_oic_d_optional_dmv_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_optional_dmv_type
    # TESTPOINT: #2, test_api_oic_d_optional_dmv_type
    #
    self._assert_api_oic_d_case('testApiOicDOptionalDmvType')
def test_api_oic_d_optional_dmv_value_csv(self):
    '''
    Test if the value of dmv property (if it exists) in response is csv format.
    @fn test_api_oic_d_optional_dmv_value_csv
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_d_optional_dmv_value_csv
    # TESTPOINT: #2, test_api_oic_d_optional_dmv_value_csv
    #
    self._assert_api_oic_d_case('testApiOicDOptionalDmvCsv')
def _assert_api_oic_p_case(self, case):
    '''
    Run one nodeunit case from the api_oic_p suite on the target and
    assert that it exits with status 0 and that the final line of its
    output is the "OK:" summary.
    @fn _assert_api_oic_p_case
    @param self
    @param case name of the nodeunit test case to select with -t
    @return
    '''
    (api_status, api_output) = self.target.run(
        'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t %s' % (
            self.target_rest_api_dir,
            self.target_rest_api_dir,
            self.rest_api_js_files['api_oic_p'],
            case
        )
    )
    self.assertEqual(api_status, 0)
    self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_oic_p_status_code(self):
    '''
    Test status code of /api/oic/p.
    @fn test_api_oic_p_status_code
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_status_code
    # TESTPOINT: #2, test_api_oic_p_status_code
    #
    self._assert_api_oic_p_case('testApiOicPStatusCode')
def test_api_oic_p_has_required_pi(self):
    '''
    Test if the response of /api/oic/p has required property pi.
    @fn test_api_oic_p_has_required_pi
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_has_required_pi
    # TESTPOINT: #2, test_api_oic_p_has_required_pi
    #
    self._assert_api_oic_p_case('testApiOicPRequiredPiNotNull')
def test_api_oic_p_required_pi_type(self):
    '''
    Test if the type of pi property in response is string.
    @fn test_api_oic_p_required_pi_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_required_pi_type
    # TESTPOINT: #2, test_api_oic_p_required_pi_type
    #
    self._assert_api_oic_p_case('testApiOicPRequiredPiType')
def test_api_oic_p_has_required_mnmn(self):
    '''
    Test if the response of /api/oic/p has required property mnmn.
    @fn test_api_oic_p_has_required_mnmn
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_has_required_mnmn
    # TESTPOINT: #2, test_api_oic_p_has_required_mnmn
    #
    self._assert_api_oic_p_case('testApiOicPRequiredMnmnNotNull')
def test_api_oic_p_required_mnmn_type(self):
    '''
    Test if the type of mnmn property in response is string.
    @fn test_api_oic_p_required_mnmn_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_required_mnmn_type
    # TESTPOINT: #2, test_api_oic_p_required_mnmn_type
    #
    self._assert_api_oic_p_case('testApiOicPRequiredMnmnType')
def test_api_oic_p_optional_mnml_type(self):
    '''
    Test if the type of mnml property in response is string.
    @fn test_api_oic_p_optional_mnml_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnml_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnml_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnmlType')
def test_api_oic_p_optional_mnmo_type(self):
    '''
    Test if the type of mnmo property in response is string.
    @fn test_api_oic_p_optional_mnmo_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnmo_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnmo_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnmoType')
def test_api_oic_p_optional_mndt_type(self):
    '''
    Test if the type of mndt property in response is string.
    @fn test_api_oic_p_optional_mndt_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mndt_type
    # TESTPOINT: #2, test_api_oic_p_optional_mndt_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMndtType')
def test_api_oic_p_optional_mnpv_type(self):
    '''
    Test if the type of mnpv property in response is string.
    @fn test_api_oic_p_optional_mnpv_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnpv_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnpv_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnpvType')
def test_api_oic_p_optional_mnos_type(self):
    '''
    Test if the type of mnos property in response is string.
    @fn test_api_oic_p_optional_mnos_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnos_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnos_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnosType')
def test_api_oic_p_optional_mnhw_type(self):
    '''
    Test if the type of mnhw property in response is string.
    @fn test_api_oic_p_optional_mnhw_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnhw_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnhw_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnhwType')
def test_api_oic_p_optional_mnfv_type(self):
    '''
    Test if the type of mnfv property in response is string.
    @fn test_api_oic_p_optional_mnfv_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnfv_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnfv_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnfvType')
def test_api_oic_p_optional_mnsl_type(self):
    '''
    Test if the type of mnsl property in response is string.
    @fn test_api_oic_p_optional_mnsl_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_mnsl_type
    # TESTPOINT: #2, test_api_oic_p_optional_mnsl_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalMnslType')
def test_api_oic_p_optional_st_type(self):
    '''
    Test if the type of st property in response is string.
    @fn test_api_oic_p_optional_st_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_p_optional_st_type
    # TESTPOINT: #2, test_api_oic_p_optional_st_type
    #
    self._assert_api_oic_p_case('testApiOicPOptionalStType')
def _assert_api_oic_res_case(self, case):
    '''
    Run one nodeunit case from the api_oic_res suite on the target and
    assert that it exits with status 0 and that the final line of its
    output is the "OK:" summary.
    @fn _assert_api_oic_res_case
    @param self
    @param case name of the nodeunit test case to select with -t
    @return
    '''
    (api_status, api_output) = self.target.run(
        'cd %s/; /tmp/nodeunit-master/bin/nodeunit %s/%s -t %s' % (
            self.target_rest_api_dir,
            self.target_rest_api_dir,
            self.rest_api_js_files['api_oic_res'],
            case
        )
    )
    self.assertEqual(api_status, 0)
    self.assertTrue('OK:' in api_output.strip().splitlines()[-1])
def test_api_oic_res_status_code(self):
    '''
    Test status code of /api/oic/res.
    @fn test_api_oic_res_status_code
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_res_status_code
    # TESTPOINT: #2, test_api_oic_res_status_code
    #
    self._assert_api_oic_res_case('testApiOicResStatusCode')
def test_api_oic_res_n_type(self):
    '''
    Test if the type of n property (if it exists) in response is string.
    @fn test_api_oic_res_n_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_res_n_type
    # TESTPOINT: #2, test_api_oic_res_n_type
    #
    self._assert_api_oic_res_case('testApiOicResNType')
def test_api_oic_res_di_type(self):
    '''
    Test if the type of di property (if it exists) in response is string.
    @fn test_api_oic_res_di_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_res_di_type
    # TESTPOINT: #2, test_api_oic_res_di_type
    #
    self._assert_api_oic_res_case('testApiOicResDiType')
def test_api_oic_res_di_value_uuid(self):
    '''
    Test if the value of di property (if it exists) in response is UUID format.
    @fn test_api_oic_res_di_value_uuid
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_res_di_value_uuid
    # TESTPOINT: #2, test_api_oic_res_di_value_uuid
    #
    self._assert_api_oic_res_case('testApiOicResDiUuid')
def test_api_oic_res_links_type(self):
    '''
    Test if the type of links property (if it exists) in response is an array.
    @fn test_api_oic_res_links_type
    @param self
    @return
    '''
    ##
    # TESTPOINT: #1, test_api_oic_res_links_type
    # TESTPOINT: #2, test_api_oic_res_links_type
    #
    self._assert_api_oic_res_case('testApiOicResLinksType')
@classmethod
def tearDownClass(cls):
    '''
    Clean work.
    Clean all the files and directories that the tests may be used on target.
    @fn tearDownClass
    @param cls
    @return
    '''
    # Find any running ocserver pids. Only issue the kill when something
    # was found -- "kill -9" with no argument is a usage error -- and join
    # multiple pids with spaces (ps output is newline-separated, and a
    # newline inside the remote command would split it into two commands).
    (_, pid) = cls.tc.target.run("ps | grep -v grep | grep 'ocserver' | awk '{print $1}'")
    pids = pid.split()
    if pids:
        cls.tc.target.run('kill -9 %s' % ' '.join(pids))
    stop_server_cmd = 'systemctl stop iot-rest-api-server.socket; systemctl stop iot-rest-api-server.service'
    cls.tc.target.run(stop_server_cmd)
    # Remove the artifacts staged on the host side.
    if os.path.exists('%s.tar' % cls.rest_api_dir):
        os.remove('%s.tar' % cls.rest_api_dir)
    if os.path.exists(cls.nodeunit_zip):
        os.remove(cls.nodeunit_zip)
    os.system('rm -rf %s/master.*' % cls.files_dir)
    # Remove everything the tests copied onto the target.
    cls.tc.target.run('rm -f %s.tar' % cls.target_rest_api_dir)
    cls.tc.target.run('rm -fr %s/' % cls.target_rest_api_dir)
    cls.tc.target.run('rm -fr /tmp/nodeunit-master')
    cls.tc.target.run('rm -f /tmp/master.tar')
    cls.tc.target.run('rm -rf /tmp/modules')
    # Tear down the nftables chain the tests created for the REST API.
    cls.tc.target.run("/usr/sbin/nft flush chain inet filter rest_api")
    cls.tc.target.run("/usr/sbin/nft delete chain inet filter rest_api")
| 34.316976 | 136 | 0.551053 | 6,162 | 51,750 | 4.292275 | 0.048199 | 0.059284 | 0.047639 | 0.071988 | 0.862717 | 0.840372 | 0.791561 | 0.76967 | 0.746758 | 0.716133 | 0 | 0.007316 | 0.344986 | 51,750 | 1,507 | 137 | 34.339748 | 0.772959 | 0.225353 | 0 | 0.531852 | 0 | 0.087407 | 0.177241 | 0.097838 | 0 | 0 | 0 | 0 | 0.162963 | 1 | 0.087407 | false | 0 | 0.007407 | 0 | 0.108148 | 0.005926 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
663cd4f70386d36e29140296a6f49c250263225c | 6,572 | py | Python | tests/test_validator.py | sulembutproton/pythonbible | 7b8c90e1b25bfdc028da3e5a43aaa6287005a0b1 | [
"MIT"
] | 11 | 2021-03-29T17:29:57.000Z | 2022-02-19T20:55:43.000Z | tests/test_validator.py | sulembutproton/pythonbible | 7b8c90e1b25bfdc028da3e5a43aaa6287005a0b1 | [
"MIT"
] | 18 | 2021-03-24T21:50:54.000Z | 2022-03-15T01:10:14.000Z | tests/test_validator.py | sulembutproton/pythonbible | 7b8c90e1b25bfdc028da3e5a43aaa6287005a0b1 | [
"MIT"
] | 4 | 2021-05-19T01:19:24.000Z | 2022-03-26T00:48:56.000Z | import pythonbible as bible
def test_is_valid_verse_id(verse_id: int) -> None:
    # A known-good verse id should be reported as valid.
    verdict = bible.is_valid_verse_id(verse_id)
    assert verdict
def test_is_valid_verse_id_null() -> None:
    # None is not a verse id, so validation must fail.
    verdict = bible.is_valid_verse_id(None)
    assert not verdict
def test_is_valid_verse_id_string(verse_id: int) -> None:
    # The validator only accepts integers; the string form of an
    # otherwise valid id must be rejected.
    verdict = bible.is_valid_verse_id(str(verse_id))
    assert not verdict
def test_is_valid_verse_id_invalid(invalid_verse_id: int) -> None:
    # An out-of-range verse id must be rejected.
    verdict = bible.is_valid_verse_id(invalid_verse_id)
    assert not verdict
def test_is_valid_reference(reference: bible.NormalizedReference) -> None:
    # A well-formed normalized reference should validate.
    verdict = bible.is_valid_reference(reference)
    assert verdict
def test_is_valid_reference_null() -> None:
    # None is not a reference, so validation must fail.
    verdict = bible.is_valid_reference(None)
    assert not verdict
def test_is_valid_reference_string(reference_string: str) -> None:
    # A raw reference string is not a NormalizedReference and must be
    # rejected by the validator.
    verdict = bible.is_valid_reference(reference_string)
    assert not verdict
def test_is_valid_reference_wrong_size(
    book: bible.Book, chapter: int, verse: int
) -> None:
    # A plain 3-tuple is not a NormalizedReference and must be rejected.
    verdict = bible.is_valid_reference((book, chapter, verse))
    assert not verdict
def test_is_valid_reference_invalid_book(chapter: int, verse: int) -> None:
    # A reference whose book field is a bare string rather than a Book
    # must be rejected.
    reference = bible.NormalizedReference(
        "invalid book", chapter, verse, chapter, verse
    )
    verdict = bible.is_valid_reference(reference)
    assert not verdict
def test_is_valid_reference_invalid_chapter(
    book: bible.Book, invalid_chapter: int, verse: int
) -> None:
    """A normalized reference with an out-of-range chapter should be rejected."""
    reference = bible.NormalizedReference(
        book, invalid_chapter, verse, invalid_chapter, verse
    )
    assert not bible.is_valid_reference(reference)
def test_is_valid_reference_invalid_start_verse(
    book: bible.Book, chapter: int, verse: int, invalid_verse: int
) -> None:
    """A normalized reference with an out-of-range start verse should be rejected."""
    reference = bible.NormalizedReference(
        book, chapter, invalid_verse, chapter, verse
    )
    assert not bible.is_valid_reference(reference)
def test_is_valid_reference_invalid_end_verse(
    book: bible.Book, chapter: int, verse: int, invalid_verse: int
) -> None:
    """A normalized reference with an out-of-range end verse should be rejected."""
    reference = bible.NormalizedReference(
        book, chapter, verse, chapter, invalid_verse
    )
    assert not bible.is_valid_reference(reference)
def test_is_valid_reference_smaller_end_verse(
    book: bible.Book, chapter: int, verse: int
) -> None:
    """A reference whose end verse precedes its start verse must be rejected.

    Fix: the original built the reference with ``book.title`` (a plain str),
    which made it invalid for the wrong reason — an invalid book type (see
    ``test_is_valid_book_string``) — so the start/end verse ordering check
    was never actually exercised. Passing the ``Book`` object itself makes
    verse ordering the only invalid part of the reference.
    """
    # Given a reference where the end verse comes before the start verse
    reference: bible.NormalizedReference = bible.NormalizedReference(
        book, chapter, verse + 1, chapter, verse
    )
    # When we test to see if it is valid
    # Then the result is False
    assert not bible.is_valid_reference(reference)
def test_is_valid_book(book: bible.Book) -> None:
    """A valid Book object should be recognized as valid."""
    is_valid = bible.is_valid_book(book)
    assert is_valid
def test_is_valid_book_null() -> None:
    """A null (None) book should be rejected."""
    is_valid = bible.is_valid_book(None)
    assert not is_valid
def test_is_valid_book_string(book: bible.Book) -> None:
    """A book given as its title string (not a Book object) should be rejected."""
    is_valid = bible.is_valid_book(book.title)
    assert not is_valid
def test_is_valid_chapter(book: bible.Book, chapter: int) -> None:
    """A chapter in range for the given book should be recognized as valid."""
    is_valid = bible.is_valid_chapter(book, chapter)
    assert is_valid
def test_is_valid_chapter_null(book: bible.Book) -> None:
    """A null (None) chapter should be rejected even with a valid book."""
    is_valid = bible.is_valid_chapter(book, None)
    assert not is_valid
def test_is_valid_chapter_string(book: bible.Book, chapter: int) -> None:
    """A chapter given as a string (not int) should be rejected."""
    is_valid = bible.is_valid_chapter(book, str(chapter))
    assert not is_valid
def test_is_valid_chapter_invalid(book: bible.Book, invalid_chapter: int) -> None:
    """An out-of-range chapter number should be rejected."""
    is_valid = bible.is_valid_chapter(book, invalid_chapter)
    assert not is_valid
def test_is_valid_verse(book: bible.Book, chapter: int, verse: int) -> None:
    """A verse in range for the given book and chapter should be valid."""
    is_valid = bible.is_valid_verse(book, chapter, verse)
    assert is_valid
def test_is_valid_verse_null(book: bible.Book, chapter: int) -> None:
    """A null (None) verse should be rejected even with a valid book/chapter."""
    is_valid = bible.is_valid_verse(book, chapter, None)
    assert not is_valid
def test_is_valid_verse_string(book: bible.Book, chapter: int, verse: int) -> None:
    """A verse given as a string (not int) should be rejected."""
    is_valid = bible.is_valid_verse(book, chapter, str(verse))
    assert not is_valid
def test_is_valid_verse_invalid(
    book: bible.Book, chapter: int, invalid_verse: int
) -> None:
    """An out-of-range verse number should be rejected."""
    is_valid = bible.is_valid_verse(book, chapter, invalid_verse)
    assert not is_valid
| 33.530612 | 87 | 0.718503 | 1,053 | 6,572 | 4.316239 | 0.050332 | 0.110891 | 0.047525 | 0.073927 | 0.90209 | 0.842904 | 0.769197 | 0.758416 | 0.645325 | 0.629703 | 0 | 0.000194 | 0.216677 | 6,572 | 195 | 88 | 33.702564 | 0.882673 | 0.373098 | 0 | 0.243243 | 0 | 0 | 0.002965 | 0 | 0 | 0 | 0 | 0 | 0.324324 | 1 | 0.324324 | false | 0 | 0.013514 | 0 | 0.337838 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b0744ddc008bd70b5987eb4c2a8e1e51aa32f9f4 | 21 | py | Python | src/__init__.py | mmaysami/json-schema-validator | 647e31b492aa057042186093139cc98eb46b7407 | [
"MIT"
] | null | null | null | src/__init__.py | mmaysami/json-schema-validator | 647e31b492aa057042186093139cc98eb46b7407 | [
"MIT"
] | null | null | null | src/__init__.py | mmaysami/json-schema-validator | 647e31b492aa057042186093139cc98eb46b7407 | [
"MIT"
] | null | null | null | from .schema import * | 21 | 21 | 0.761905 | 3 | 21 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 21 | 1 | 21 | 21 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9fdec235b04f6e6b8b7f0df1e681482887b7a3d5 | 185 | py | Python | tests/test_app/admin.py | petar-bibulic/django-base | bf4fd8464ef7699ebfebd8ffe2df1b9eaed18f24 | [
"MIT"
] | null | null | null | tests/test_app/admin.py | petar-bibulic/django-base | bf4fd8464ef7699ebfebd8ffe2df1b9eaed18f24 | [
"MIT"
] | 4 | 2021-10-06T09:58:26.000Z | 2021-12-07T13:41:29.000Z | tests/test_app/admin.py | petar-bibulic/django-base | bf4fd8464ef7699ebfebd8ffe2df1b9eaed18f24 | [
"MIT"
] | 2 | 2021-11-24T17:02:55.000Z | 2021-12-01T10:21:18.000Z | from django.contrib import admin
from django_base.admin import BaseModelAdmin
from .models import TestModel
@admin.register(TestModel)
class TestModelAdmin(BaseModelAdmin):
    """Register ``TestModel`` with the Django admin using the project's
    ``BaseModelAdmin`` defaults; no per-model customization is needed."""

    pass
| 18.5 | 44 | 0.821622 | 22 | 185 | 6.863636 | 0.590909 | 0.13245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.124324 | 185 | 9 | 45 | 20.555556 | 0.932099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.166667 | 0.5 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
b057d6d319ba5752456a6c81dbf6e9a85c7df24f | 32 | py | Python | th_rss/lib/feedsservice/__init__.py | Leopere/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 1,069 | 2015-01-07T01:55:57.000Z | 2022-02-17T10:50:57.000Z | th_rss/lib/feedsservice/__init__.py | barrygolden/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 207 | 2015-01-06T21:41:17.000Z | 2018-02-20T14:10:15.000Z | th_rss/lib/feedsservice/__init__.py | barrygolden/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 117 | 2015-01-04T16:21:13.000Z | 2022-02-22T06:18:49.000Z | from .feedsservice import Feeds
| 16 | 31 | 0.84375 | 4 | 32 | 6.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 0.964286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c69451b3d9a9342f3e8e4f6412d5b7db038c1d2c | 165 | py | Python | Courts-Of-Chaos/courts_of_chaos/views.py | milos85vasic/Courts-of-Chaos | e164ce4e0de8bbba280d089ad3945fc552cf1b1c | [
"Apache-2.0"
] | 3 | 2018-01-05T15:43:33.000Z | 2019-12-13T08:52:34.000Z | Courts-Of-Chaos/courts_of_chaos/views.py | milos85vasic/Courts-of-Chaos | e164ce4e0de8bbba280d089ad3945fc552cf1b1c | [
"Apache-2.0"
] | null | null | null | Courts-Of-Chaos/courts_of_chaos/views.py | milos85vasic/Courts-of-Chaos | e164ce4e0de8bbba280d089ad3945fc552cf1b1c | [
"Apache-2.0"
] | null | null | null | from pyramid.i18n import TranslationStringFactory
_ = TranslationStringFactory('Courts-Of-Chaos')
def my_view(request):
    """Pyramid view callable: return the template context for this app."""
    context = {'project': 'Courts-Of-Chaos'}
    return context
| 20.625 | 49 | 0.763636 | 18 | 165 | 6.888889 | 0.777778 | 0.129032 | 0.209677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0.115152 | 165 | 7 | 50 | 23.571429 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0.224242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
c694e99d2717d5410ecbb70db5f6d69294d22aa0 | 11,606 | py | Python | tripleo_ansible/tests/modules/test_tripleo_get_dpdk_nics_numa_info.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 22 | 2018-08-29T12:33:15.000Z | 2022-03-30T00:17:25.000Z | tripleo_ansible/tests/modules/test_tripleo_get_dpdk_nics_numa_info.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 1 | 2020-02-07T20:54:34.000Z | 2020-02-07T20:54:34.000Z | tripleo_ansible/tests/modules/test_tripleo_get_dpdk_nics_numa_info.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 19 | 2019-07-16T04:42:00.000Z | 2022-03-30T00:17:29.000Z | # Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
try:
from ansible.module_utils import tripleo_common_utils as tc
except ImportError:
from tripleo_ansible.ansible_plugins.module_utils import tripleo_common_utils as tc
from tripleo_ansible.ansible_plugins.modules import tripleo_get_dpdk_nics_numa_info as derive_params
from tripleo_ansible.tests import base as tests_base
class TestTripleoGetDpdkNicsNumaInfo(tests_base.TestCase):
    """Test the _get_dpdk_nics_numa_info method of the OvS DPDK module.

    The introspection data (NUMA topology + interface inventory) and the
    single-port network config were copy-pasted across every test; they are
    extracted into helper methods below so each test spells out only what it
    varies. The helpers return fresh objects so tests cannot leak mutations
    into one another.
    """

    @staticmethod
    def _numa_nics():
        """NUMA topology: every NIC mapped to its NUMA node (fresh copy)."""
        return [{"name": "ens802f1", "numa_node": 1},
                {"name": "ens802f0", "numa_node": 1},
                {"name": "eno1", "numa_node": 0},
                {"name": "eno2", "numa_node": 0},
                {"name": "enp12s0f1", "numa_node": 0},
                {"name": "enp12s0f0", "numa_node": 0},
                {"name": "enp13s0f0", "numa_node": 0},
                {"name": "enp13s0f1", "numa_node": 0}]

    @staticmethod
    def _interfaces():
        """Inventory interfaces with their carrier (link) status (fresh copy)."""
        return [{"has_carrier": True, "name": "ens802f1"},
                {"has_carrier": True, "name": "ens802f0"},
                {"has_carrier": True, "name": "eno1"},
                {"has_carrier": True, "name": "eno2"},
                {"has_carrier": True, "name": "enp12s0f0"},
                {"has_carrier": False, "name": "enp13s0f0"},
                {"has_carrier": False, "name": "enp13s0f1"}]

    @classmethod
    def _inspect_data(cls, nics=None, interfaces=None):
        """Assemble introspection data; ``None`` selects the full default list."""
        return {
            "numa_topology": {
                "nics": cls._numa_nics() if nics is None else nics
            },
            "inventory": {
                "interfaces": (cls._interfaces()
                               if interfaces is None else interfaces)
            }
        }

    @staticmethod
    def _port_network_configs(addresses=None):
        """One ovs_user_bridge with a single DPDK port on nic5 (MTU 8192).

        The ``addresses`` key is omitted entirely when not given, matching
        the configs the error-path tests used.
        """
        config = {
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}
        if addresses is not None:
            config["addresses"] = addresses
        return [config]

    def test_run_dpdk_port(self):
        # nic5 resolves to ens802f1 (NUMA node 1, carrier up).
        network_configs = self._port_network_configs(
            addresses=[{"ip_netmask": ""}])
        inspect_data = self._inspect_data()

        expected_result = [{'bridge_name': 'br-link', 'name': 'ens802f1',
                            'mtu': 8192, 'numa_node': 1,
                            'addresses': [{'ip_netmask': ''}]}]

        result = derive_params._get_dpdk_nics_numa_info(network_configs,
                                                        inspect_data)
        self.assertEqual(result, expected_result)

    def test_run_dpdk_bond(self):
        # A DPDK bond over nic4/nic5 resolves to ens802f0 and ens802f1.
        network_configs = [{
            "members": [{"type": "ovs_dpdk_bond", "name": "dpdkbond0",
                         "mtu": 9000, "rx_queue": 4,
                         "members": [{"type": "ovs_dpdk_port",
                                      "name": "dpdk0",
                                      "members": [{"type": "interface",
                                                   "name": "nic4"}]},
                                     {"type": "ovs_dpdk_port",
                                      "name": "dpdk1",
                                      "members": [{"type": "interface",
                                                   "name": "nic5"}]}]}],
            "name": "br-link",
            "type": "ovs_user_bridge",
            "addresses": [{"ip_netmask": "172.16.10.0/24"}]}]
        inspect_data = self._inspect_data()

        expected_result = [{'bridge_name': 'br-link', 'mtu': 9000,
                            'numa_node': 1, 'name': 'ens802f0',
                            'addresses': [{'ip_netmask': '172.16.10.0/24'}]},
                           {'bridge_name': 'br-link', 'mtu': 9000,
                            'numa_node': 1, 'name': 'ens802f1',
                            'addresses': [{'ip_netmask': '172.16.10.0/24'}]}]

        result = derive_params._get_dpdk_nics_numa_info(network_configs,
                                                        inspect_data)
        self.assertEqual(result, expected_result)

    def test_run_no_inspect_nics(self):
        # No NUMA topology data for any NIC -> DeriveParamsError.
        self.assertRaises(tc.DeriveParamsError,
                          derive_params._get_dpdk_nics_numa_info,
                          self._port_network_configs(),
                          self._inspect_data(nics=[]))

    def test_run_no_inspect_interfaces(self):
        # Empty NUMA topology and empty inventory -> DeriveParamsError.
        self.assertRaises(tc.DeriveParamsError,
                          derive_params._get_dpdk_nics_numa_info,
                          self._port_network_configs(),
                          self._inspect_data(nics=[], interfaces=[]))

    def test_run_no_inspect_active_interfaces(self):
        # Only carrier-down interfaces in the inventory -> DeriveParamsError.
        interfaces = [{"has_carrier": False, "name": "enp13s0f0"},
                      {"has_carrier": False, "name": "enp13s0f1"}]
        self.assertRaises(tc.DeriveParamsError,
                          derive_params._get_dpdk_nics_numa_info,
                          self._port_network_configs(),
                          self._inspect_data(interfaces=interfaces))

    def test_run_no_numa_node(self):
        # The matched NIC (ens802f1) lacks a NUMA node -> DeriveParamsError.
        nics = self._numa_nics()
        del nics[0]["numa_node"]
        self.assertRaises(tc.DeriveParamsError,
                          derive_params._get_dpdk_nics_numa_info,
                          self._port_network_configs(),
                          self._inspect_data(nics=nics))
| 42.826568 | 100 | 0.390918 | 890 | 11,606 | 4.847191 | 0.173034 | 0.064905 | 0.05007 | 0.060269 | 0.792304 | 0.755911 | 0.755911 | 0.755911 | 0.722531 | 0.722531 | 0 | 0.055728 | 0.472773 | 11,606 | 270 | 101 | 42.985185 | 0.649289 | 0.057212 | 0 | 0.771552 | 0 | 0 | 0.228966 | 0 | 0 | 0 | 0 | 0 | 0.025862 | 1 | 0.025862 | false | 0 | 0.025862 | 0 | 0.056034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c69bd6190be103c511def589c3c433a93b01f94a | 5,289 | py | Python | mongotriggers/mongotriggers.py | drorasaf/mongodb-triggers | 937d590d91fb83d414ece7e20594dd610783ed4c | [
"BSD-3-Clause"
] | 6 | 2018-03-24T09:53:49.000Z | 2021-01-28T14:16:23.000Z | mongotriggers/mongotriggers.py | drorasaf/mongodb-triggers | 937d590d91fb83d414ece7e20594dd610783ed4c | [
"BSD-3-Clause"
] | null | null | null | mongotriggers/mongotriggers.py | drorasaf/mongodb-triggers | 937d590d91fb83d414ece7e20594dd610783ed4c | [
"BSD-3-Clause"
] | 1 | 2021-01-28T14:14:18.000Z | 2021-01-28T14:14:18.000Z | from .mongodtriggers import MongodTrigger
import threading
"""Class for manipulating notifications from MongoDB """
class MongoTrigger(object):
    """Public API for registering trigger callbacks on MongoDB operations.

    Thin wrapper around ``MongodTrigger`` that runs the oplog-tailing loop
    on a background thread so callbacks do not block the caller.
    """

    def __init__(self, conn, since=None):
        """Creates MongoTriggers instance

        The object uses a deferred context to provide notification on a
        different context to avoid blocking the caller thread/process

        Args:
            conn (MongoClient) - connection on which triggers will be fired
            since (datetime) - the last timestamp to start listening from
        """
        self.trigger = MongodTrigger(conn, since)
        # Background tailing thread; None whenever tailing is stopped.
        self.thread = None

    def tail_oplog(self):
        """Listens to oplog and fires the registered callbacks

        Raises:
            OSError: if a tailing thread is already running
        """
        if self.thread:
            raise OSError("unable to tail using more than 1 thread")
        self.thread = threading.Thread(target=self.trigger.start_tailing)
        self.thread.start()

    def stop_tail(self):
        """Stops listening to the oplog, no callbacks after calling this

        Blocks until the tailing thread has terminated.
        """
        self.trigger.stop_tailing()
        self.thread.join()
        self.thread = None

    def register_op_trigger(self, func, db_name=None, collection_name=None):
        """Watches the specified database and collections for any changes

        Registers *func* for insert, update, and delete operations alike.

        Args:
            func (callback): function to be invoked when any operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.register_insert_trigger(func, db_name, collection_name)
        self.trigger.register_update_trigger(func, db_name, collection_name)
        self.trigger.register_delete_trigger(func, db_name, collection_name)

    def register_insert_trigger(self, func, db_name=None, collection_name=None):
        """Adds an insert callback to the specified namespace

        Args:
            func (callback): callback to execute when an insert operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.register_insert_trigger(func, db_name, collection_name)

    def register_update_trigger(self, func, db_name=None, collection_name=None):
        """Adds an update callback to the specified namespace

        Args:
            func (callback): callback to execute when an update operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.register_update_trigger(func, db_name, collection_name)

    def register_delete_trigger(self, func, db_name=None, collection_name=None):
        """Adds a delete callback to the specified namespace

        Args:
            func (callback): callback to execute when a delete operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.register_delete_trigger(func, db_name, collection_name)

    def unregister_op_trigger(self, func, db_name=None, collection_name=None):
        """Removes all callbacks from the specified namespace

        Args:
            func (callback): callback to disable when any operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.unregister_insert_trigger(func, db_name, collection_name)
        self.trigger.unregister_update_trigger(func, db_name, collection_name)
        self.trigger.unregister_delete_trigger(func, db_name, collection_name)

    def unregister_insert_trigger(self, func, db_name=None, collection_name=None):
        """Removes an insert callback from the specified namespace

        Args:
            func (callback): callback to disable when an insert operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.unregister_insert_trigger(func, db_name, collection_name)

    def unregister_update_trigger(self, func, db_name=None, collection_name=None):
        """Removes an update callback from the specified namespace

        Args:
            func (callback): callback to disable when an update operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.unregister_update_trigger(func, db_name, collection_name)

    def unregister_delete_trigger(self, func, db_name=None, collection_name=None):
        """Removes a delete callback from the specified namespace

        Args:
            func (callback): callback to disable when a delete operation occurs
            db_name (str): name of Mongo database to watch for changes
            collection_name (str): name of Mongo collection to watch for changes
        """
        self.trigger.unregister_delete_trigger(func, db_name, collection_name)
| 44.445378 | 82 | 0.686519 | 675 | 5,289 | 5.225185 | 0.161481 | 0.047633 | 0.056705 | 0.058974 | 0.738588 | 0.738588 | 0.738588 | 0.738588 | 0.722711 | 0.716189 | 0 | 0.000251 | 0.247684 | 5,289 | 118 | 83 | 44.822034 | 0.886152 | 0.483834 | 0 | 0.4 | 0 | 0 | 0.017403 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.314286 | false | 0 | 0.057143 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c6e3ec7a6c4eb3542622e80d4bfe9f0139a32b5c | 95 | py | Python | myenv/lib/python3.9/site-packages/japanize_matplotlib/__init__.py | Yuki-max/earthquake | 3992d9967bd2ba3c803236f30a884796c71e3c0f | [
"MIT"
] | 145 | 2018-10-10T06:34:33.000Z | 2022-03-29T04:01:04.000Z | japanize_matplotlib/__init__.py | vaaaaanquish/japanize-matplotlib | 6d8f8ab4c927633be6b2257d09288afaa1cc7132 | [
"MIT"
] | 16 | 2018-11-02T03:59:02.000Z | 2021-12-04T04:42:40.000Z | japanize_matplotlib/__init__.py | vaaaaanquish/japanize-matplotlib | 6d8f8ab4c927633be6b2257d09288afaa1cc7132 | [
"MIT"
] | 14 | 2018-11-13T13:20:34.000Z | 2022-03-29T02:57:17.000Z | from japanize_matplotlib.japanize_matplotlib import japanize, get_font_path, get_font_ttf_path
| 47.5 | 94 | 0.905263 | 14 | 95 | 5.642857 | 0.571429 | 0.455696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063158 | 95 | 1 | 95 | 95 | 0.88764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
05c2e2f2b08d5f7a315043c532b482b54f3caab2 | 60 | py | Python | jsonpath_pyrs/__init__.py | niap0r/jsonpath-pyrs | aa0e57cbf1bc1e6b0185ddc2dfddaef5124b8083 | [
"MIT"
] | null | null | null | jsonpath_pyrs/__init__.py | niap0r/jsonpath-pyrs | aa0e57cbf1bc1e6b0185ddc2dfddaef5124b8083 | [
"MIT"
] | null | null | null | jsonpath_pyrs/__init__.py | niap0r/jsonpath-pyrs | aa0e57cbf1bc1e6b0185ddc2dfddaef5124b8083 | [
"MIT"
] | null | null | null | from ._jsonpath_pyrs import read_json_file, read_json_string | 60 | 60 | 0.9 | 10 | 60 | 4.8 | 0.8 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 60 | 1 | 60 | 60 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
af0ab902bee81de85f60bdc25ac37ad2a262eda1 | 37 | py | Python | web/transiq/local/__init__.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | web/transiq/local/__init__.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | 14 | 2020-06-05T23:06:45.000Z | 2022-03-12T00:00:18.000Z | web/transiq/local/__init__.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | from transiq.settings.local import *
| 18.5 | 36 | 0.810811 | 5 | 37 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 37 | 1 | 37 | 37 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
af291fe2ec6f01ba61e4c44429431cc88ddf1e0d | 167 | py | Python | problem_1.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | 2 | 2020-12-07T09:51:40.000Z | 2021-05-03T18:29:23.000Z | problem_1.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | null | null | null | problem_1.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | 4 | 2021-02-22T16:36:50.000Z | 2021-09-14T12:50:36.000Z | from problem_1.problem_1_SVM import problem_1_SVM
from problem_1.problem_1_DNN import problem_1_DNN
# Problem 1 SVM: run the support-vector-machine solution
problem_1_SVM()
# Problem 1 DNN: run the deep-neural-network solution
problem_1_DNN()
| 16.7 | 49 | 0.826347 | 32 | 167 | 3.875 | 0.1875 | 0.645161 | 0.354839 | 0.306452 | 0.741935 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068493 | 0.125749 | 167 | 9 | 50 | 18.555556 | 0.780822 | 0.161677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
af3455914781e1a618025ce4d4b6fcc9af5fa406 | 10,748 | py | Python | docs/website_examples/t-fixed_point.py | joannadiong/zEpid | 7377ed06156d074aa2b571be520e8e004a564353 | [
"MIT"
] | 101 | 2018-12-17T20:32:20.000Z | 2022-03-29T08:51:46.000Z | docs/website_examples/t-fixed_point.py | joannadiong/zEpid | 7377ed06156d074aa2b571be520e8e004a564353 | [
"MIT"
] | 124 | 2018-12-13T22:30:41.000Z | 2022-02-10T00:24:25.000Z | docs/website_examples/t-fixed_point.py | joannadiong/zEpid | 7377ed06156d074aa2b571be520e8e004a564353 | [
"MIT"
] | 26 | 2019-02-07T17:45:15.000Z | 2022-01-03T00:39:34.000Z | import warnings
import numpy as np
import pandas as pd
import statsmodels.api as sm
from zepid import load_sample_data, spline
#######################################################################################################################
# Binary Outcome
#######################################################################################################################
df = load_sample_data(timevary=False)
df = df.drop(columns=['cd4_wk45'])
df[['cd4_rs1', 'cd4_rs2']] = spline(df, 'cd40', n_knots=3, term=2, restricted=True)
df[['age_rs1', 'age_rs2']] = spline(df, 'age0', n_knots=3, term=2, restricted=True)
#############################
# Naive Risk Difference
from zepid import RiskDifference
rd = RiskDifference()
rd.fit(df, exposure='art', outcome='dead')
rd.summary()
#############################
# G-formula
from zepid.causal.gformula import TimeFixedGFormula
g = TimeFixedGFormula(df, exposure='art', outcome='dead')
g.outcome_model(model='art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
# Estimating marginal effect under treat-all plan
g.fit(treatment='all')
r_all = g.marginal_outcome
# Estimating marginal effect under treat-none plan
g.fit(treatment='none')
r_none = g.marginal_outcome
riskd = r_all - r_none
print('RD:', riskd)
rd_results = []
for i in range(1000):
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=UserWarning)
s = df.sample(n=df.shape[0],replace=True)
g = TimeFixedGFormula(s,exposure='art',outcome='dead')
g.outcome_model(model='art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
g.fit(treatment='all')
r_all = g.marginal_outcome
g.fit(treatment='none')
r_none = g.marginal_outcome
rd_results.append(r_all - r_none)
se = np.std(rd_results)
print('95% LCL', riskd - 1.96*se)
print('95% UCL', riskd + 1.96*se)
#############################
# IPTW
from zepid.causal.ipw import IPTW
iptw = IPTW(df, treatment='art', outcome='dead')
iptw.treatment_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
bound=0.01, print_results=False)
iptw.marginal_structural_model('art')
iptw.fit()
iptw.summary()
#############################
# AIPTW
from zepid.causal.doublyrobust import AIPTW
aipw = AIPTW(df, exposure='art', outcome='dead')
# Treatment model
aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False, bound=0.01)
# Outcome model
aipw.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
# Calculating estimate
aipw.fit()
# Printing summary results
aipw.summary()
#############################
# TMLE
from zepid.causal.doublyrobust import TMLE
tmle = TMLE(df, exposure='art', outcome='dead')
tmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False, bound=0.01)
tmle.missing_model('art + male + age0 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
tmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
tmle.fit()
tmle.summary()
#############################
# Cross-fitting
from sklearn.ensemble import RandomForestClassifier
from zepid.superlearner import GLMSL, StepwiseSL, SuperLearner
from zepid.causal.doublyrobust import SingleCrossfitTMLE
# SuperLearner set-up
labels = ["LogR", "Step.int", "RandFor"]
candidates = [GLMSL(sm.families.family.Binomial()),
StepwiseSL(sm.families.family.Binomial(), selection="forward", order_interaction=0),
RandomForestClassifier(random_state=809512)]
# Single cross-fit TMLE
sctmle = SingleCrossfitTMLE(df, exposure='art', outcome='dead')
sctmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
SuperLearner(candidates, labels, folds=10, loss_function="nloglik"),
bound=0.01)
sctmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
SuperLearner(candidates, labels, folds=10, loss_function="nloglik"))
sctmle.fit(n_partitions=3, random_state=201820)
sctmle.summary()
#############################
# G-estimation
# G-estimation of a structural nested model (SNM) for the binary outcome,
# with a nonparametric bootstrap for the standard error of psi.
from zepid.causal.snm import GEstimationSNM

snm = GEstimationSNM(df, exposure='art', outcome='dead')
# Specify treatment model
snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                   print_results=False)
# Specify structural nested model (treatment main effect only)
snm.structural_nested_model('art')
# G-estimation
snm.fit()
snm.summary()

psi = snm.psi
print('Psi:', psi)

# Nonparametric bootstrap: resample with replacement, re-run G-estimation,
# and collect the psi estimates. UserWarnings from refits are suppressed.
psi_results = []
for i in range(500):
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=UserWarning)
        dfs = df.sample(n=df.shape[0], replace=True)
        snm = GEstimationSNM(dfs, exposure='art', outcome='dead')
        snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                           print_results=False)
        snm.structural_nested_model('art')
        snm.fit()
        psi_results.append(snm.psi)

# ddof=1 gives the sample standard deviation, consistent with the
# continuous-outcome G-estimation bootstrap later in this script.
se = np.std(psi_results, ddof=1)
print('95% LCL', psi - 1.96*se)
print('95% UCL', psi + 1.96*se)

# Effect-measure modification: SNM with a treatment-by-sex interaction term.
snm = GEstimationSNM(df, exposure='art', outcome='dead')
snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                   print_results=False)
snm.structural_nested_model('art + art:male')
snm.fit()
snm.summary()
#######################################################################################################################
# Continuous Outcome
#######################################################################################################################
df = load_sample_data(timevary=False)
# NOTE(review): dfs is built here but the estimators below are fit on df;
# confirm whether the complete-case subset was meant to be used instead.
dfs = df.drop(columns=['dead']).dropna()
df[['cd4_rs1', 'cd4_rs2']] = spline(df, 'cd40', n_knots=3, term=2, restricted=True)
df[['age_rs1', 'age_rs2']] = spline(df, 'age0', n_knots=3, term=2, restricted=True)
#############################
# G-formula
# Parametric g-formula for the continuous outcome cd4_wk45 (normal model);
# ATE is the difference in marginal outcomes under treat-all vs treat-none.
g = TimeFixedGFormula(df, exposure='art', outcome='cd4_wk45', outcome_type='normal')
g.outcome_model(model='art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
g.fit(treatment='all')
r_all = g.marginal_outcome
g.fit(treatment='none')
r_none = g.marginal_outcome
ate = r_all - r_none
print('ATE:', ate)

# Nonparametric bootstrap for the ATE confidence interval.
ate_results = []
for i in range(1000):
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=UserWarning)
        s = df.sample(n=df.shape[0], replace=True)
        g = TimeFixedGFormula(s, exposure='art', outcome='cd4_wk45', outcome_type='normal')
        g.outcome_model(model='art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                        print_results=False)
        g.fit(treatment='all')
        r_all = g.marginal_outcome
        g.fit(treatment='none')
        r_none = g.marginal_outcome
        ate_results.append(r_all - r_none)

# Sample standard deviation (ddof=1), consistent with the G-estimation bootstrap.
se = np.std(ate_results, ddof=1)
print('95% LCL', ate - 1.96*se)
print('95% UCL', ate + 1.96*se)
#############################
# IPTW
# Inverse-probability-of-treatment weighting for the continuous outcome.
ipw = IPTW(df, treatment='art', outcome='cd4_wk45')
ipw.treatment_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                    print_results=False, bound=0.01)
ipw.marginal_structural_model('art')
ipw.fit()
ipw.summary()
#############################
# AIPTW
# Doubly-robust AIPTW for the continuous outcome cd4_wk45.
aipw = AIPTW(df, exposure='art', outcome='cd4_wk45')
aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False, bound=0.01)
aipw.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
aipw.fit()
aipw.summary()
#############################
# TMLE
# TMLE for the continuous outcome (no missing_model here, unlike the binary section).
tmle = TMLE(df, exposure='art', outcome='cd4_wk45')
tmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False, bound=0.01)
tmle.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
print_results=False)
tmle.fit()
tmle.summary()
#############################
# Cross-fitting
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# SuperLearner set-up
labels = ["LogR", "Step.int", "RandFor"]
# Binary-family candidates for the (binary) exposure model ...
b_candidates = [GLMSL(sm.families.family.Binomial()),
StepwiseSL(sm.families.family.Binomial(), selection="forward", order_interaction=0),
RandomForestClassifier(random_state=809512)]
# ... and Gaussian-family candidates for the continuous outcome model.
c_candidates = [GLMSL(sm.families.family.Gaussian()),
StepwiseSL(sm.families.family.Gaussian(), selection="forward", order_interaction=0),
RandomForestRegressor(random_state=809512)]
# Single cross-fit TMLE
sctmle = SingleCrossfitTMLE(df, exposure='art', outcome='cd4_wk45')
sctmle.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
SuperLearner(b_candidates, labels, folds=10, loss_function="nloglik"),
bound=0.01)
sctmle.outcome_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
SuperLearner(c_candidates, labels, folds=10))
sctmle.fit(n_partitions=3, random_state=201820)
sctmle.summary()
#############################
# G-estimation
# G-estimation of a structural nested model for the continuous outcome,
# with a nonparametric bootstrap for the standard error of psi.
snm = GEstimationSNM(df, exposure='art', outcome='cd4_wk45')
snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                   print_results=False)
snm.structural_nested_model('art')
snm.fit()
snm.summary()

psi = snm.psi
print('Psi:', psi)

# Nonparametric bootstrap: resample with replacement and re-run G-estimation.
psi_results = []
for i in range(500):
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=UserWarning)
        dfs = df.sample(n=df.shape[0], replace=True)
        snm = GEstimationSNM(dfs, exposure='art', outcome='cd4_wk45')
        snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                           print_results=False)
        snm.structural_nested_model('art')
        snm.fit()
        psi_results.append(snm.psi)

# Sample standard deviation of the bootstrap replicates.
se = np.std(psi_results, ddof=1)
print('95% LCL', psi - 1.96*se)
print('95% UCL', psi + 1.96*se)

# Effect-measure modification: SNM with a treatment-by-sex interaction term.
snm = GEstimationSNM(df, exposure='art', outcome='cd4_wk45')
snm.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0',
                   print_results=False)
snm.structural_nested_model('art + art:male')
snm.fit()
snm.summary()
| 36.557823 | 119 | 0.614254 | 1,372 | 10,748 | 4.632653 | 0.115889 | 0.025488 | 0.038232 | 0.050975 | 0.840623 | 0.782096 | 0.767778 | 0.729232 | 0.716331 | 0.695563 | 0 | 0.048792 | 0.180033 | 10,748 | 293 | 120 | 36.682594 | 0.672416 | 0.045311 | 0 | 0.65 | 0 | 0 | 0.231688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.075 | 0 | 0.075 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
af78c8936ad74afa66c8f292ab57b24cbcee6ff1 | 39,056 | py | Python | digical/lib/schedule_test.py | nfearnley/digical | ff1af0f9dcb5dfdd2bdee2e653dc765affcf3b59 | [
"MIT"
] | null | null | null | digical/lib/schedule_test.py | nfearnley/digical | ff1af0f9dcb5dfdd2bdee2e653dc765affcf3b59 | [
"MIT"
] | null | null | null | digical/lib/schedule_test.py | nfearnley/digical | ff1af0f9dcb5dfdd2bdee2e653dc765affcf3b59 | [
"MIT"
] | null | null | null | import pytest
from digical import Time, TimeRange, Schedule
from digical.lib.schedule import timerange_isdisjoint, timerange_isadjacent, timerange_issubset, timerange_ispropersubset, timerange_issuperset, timerange_ispropersuperset, timerange_union, timerange_intersection, timerange_difference, timerange_symmetric_difference
def test_init():
    """Schedule() -> Schedule"""
    schedule_empty = Schedule()
    assert schedule_empty is not None
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    assert schedule is not None


def test_from_json():
    """Schedule.from_json(dict) -> Schedule"""
    schedule = Schedule.from_json({
        "timeranges": [
            {
                "start": {"value": 1000},
                "end": {"value": 2000}
            },
            {
                "start": {"value": 2500},
                "end": {"value": 3000}
            }
        ]
    })
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])


def test_to_json():
    """Schedule.to_json() -> dict"""
    schedule_json = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ]).to_json()
    # Round-trip counterpart of test_from_json: same wire format.
    assert schedule_json == {
        "timeranges": [
            {
                "start": {"value": 1000},
                "end": {"value": 2000}
            },
            {
                "start": {"value": 2500},
                "end": {"value": 3000}
            }
        ]
    }
def test_copy():
    """Schedule.copy() -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = schedule_a.copy()
    # Equal value, but a distinct object.
    assert schedule_a == schedule_b
    assert schedule_a is not schedule_b


def test_repr():
    """repr(Schedule) -> repr"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    assert repr(schedule) == "Schedule([TimeRange(Time(1000), Time(2000)), TimeRange(Time(2500), Time(3000))])"


def test_str():
    """str(Schedule) -> str"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    assert str(schedule) == "Sunday, 16:40 - Monday, 9:20; Monday, 17:40 - Tuesday, 2:00"


def test_len():
    """len(Schedule) -> int"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    # len is the summed duration of all ranges: 1000 + 500.
    assert len(schedule) == 1500
def test_timeranges():
    """Schedule.timeranges -> *TimeRange"""
    timeranges = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ]).timeranges
    # Exposed as an immutable tuple, in order.
    assert isinstance(timeranges, tuple)
    assert timeranges == (
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    )


def test_eq():
    """Schedule == Schedule -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    assert schedule_a == schedule_b
    assert not schedule_a == schedule_c
def test_add_int():
    """Schedule + int -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = schedule_a + 500
    # Every range is shifted forward by the offset.
    assert schedule_b == Schedule([
        TimeRange(Time(1500), Time(2500)),
        TimeRange(Time(3000), Time(3500))
    ])


def test_sub_int():
    """Schedule - int -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = schedule_a - 500
    assert schedule_b == Schedule([
        TimeRange(Time(500), Time(1500)),
        TimeRange(Time(2000), Time(2500))
    ])


def test_radd_int():
    """int + Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = 500 + schedule_a
    assert schedule_b == Schedule([
        TimeRange(Time(1500), Time(2500)),
        TimeRange(Time(3000), Time(3500))
    ])


def test_iadd_int():
    """Schedule += int -> None"""
    schedule_orig = schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule += 500
    assert schedule == Schedule([
        TimeRange(Time(1500), Time(2500)),
        TimeRange(Time(3000), Time(3500))
    ])
    # In-place: the same object is mutated, not replaced.
    assert schedule is schedule_orig


def test_isub_int():
    """Schedule -= int -> None"""
    schedule_orig = schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule -= 500
    assert schedule == Schedule([
        TimeRange(Time(500), Time(1500)),
        TimeRange(Time(2000), Time(2500))
    ])
    assert schedule is schedule_orig
def test_contains_time():
    """Time in Schedule -> bool"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    # Ranges behave as half-open intervals: start is in, end is out.
    assert (Time(500) in schedule) is False
    assert (Time(1000) in schedule) is True
    assert (Time(1500) in schedule) is True
    assert (Time(2000) in schedule) is False
    assert (Time(2250) in schedule) is False
    assert (Time(2500) in schedule) is True
    assert (Time(2750) in schedule) is True
    assert (Time(3000) in schedule) is False
    assert (Time(3500) in schedule) is False


def test_contains_timerange():
    """TimeRange in Schedule -> bool"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    # A range is contained only when fully covered by a single scheduled range.
    assert (TimeRange(Time(500), Time(750)) in schedule) is False
    assert (TimeRange(Time(500), Time(1500)) in schedule) is False
    assert (TimeRange(Time(1000), Time(2000)) in schedule) is True
    assert (TimeRange(Time(1200), Time(1800)) in schedule) is True
    assert (TimeRange(Time(1200), Time(2800)) in schedule) is False
def test_add_timerange():
    """Schedule.add(TimeRange) -> None"""
    # Adding an adjacent range merges the two neighbours into one.
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule.add(TimeRange(Time(2000), Time(2500)))
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(3000))
    ])
    # Adding an overlapping range merges everything it touches.
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule.add(TimeRange(Time(1200), Time(2700)))
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(3000))
    ])
    # A range touching only the second entry merges with that one alone.
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule.add(TimeRange(Time(2200), Time(2700)))
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2200), Time(3000))
    ])


def test_remove_timerange():
    """Schedule.remove(TimeRange) -> None"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule.remove(TimeRange(Time(1000), Time(1200)))
    assert schedule == Schedule([
        TimeRange(Time(1200), Time(2000))
    ])
    # Removing a range that is not fully contained raises KeyError.
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    with pytest.raises(KeyError):
        schedule.remove(TimeRange(Time(800), Time(1200)))


def test_discard_timerange():
    """Schedule.discard(TimeRange) -> None"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule.discard(TimeRange(Time(1000), Time(1200)))
    assert schedule == Schedule([
        TimeRange(Time(1200), Time(2000))
    ])
    # Unlike remove(), discard() of a partially-overlapping range does not
    # raise; it trims the overlap.
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule.discard(TimeRange(Time(800), Time(1200)))
    assert schedule == Schedule([
        TimeRange(Time(1200), Time(2000))
    ])


def test_pop():
    """Schedule.pop() -> elem"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    timerange = schedule.pop()
    # pop() removes and returns the last range.
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    assert timerange == TimeRange(Time(2500), Time(3000))
def test_add_schedule():
    """Schedule + Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    # Schedule + Schedule behaves as a union of the covered time.
    assert schedule_a + schedule_b == Schedule([
        TimeRange(Time(1000), Time(2500))
    ])


def test_sub_schedule():
    """Schedule - Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    # Schedule - Schedule behaves as a set difference of the covered time.
    assert schedule_a - schedule_b == Schedule([
        TimeRange(Time(1000), Time(1500))
    ])


def test_iadd_schedule():
    """Schedule += Schedule -> None"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule += Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(2500))
    ])


def test_isub_schedule():
    """Schedule -= Schedule -> None"""
    schedule = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule -= Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    assert schedule == Schedule([
        TimeRange(Time(1000), Time(1500))
    ])
def test_isdisjoint():
    """Schedule.isdisjoint(Schedule) -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1500), Time(2500))
    ])
    schedule_c = Schedule([
        TimeRange(Time(2000), Time(3000))
    ])
    assert schedule_a.isdisjoint(schedule_b) is False
    # Adjacent (half-open) ranges do not overlap, so they are disjoint.
    assert schedule_a.isdisjoint(schedule_c) is True


def test_issubset():
    """Schedule.issubset(Schedule) -> bool"""
    """Schedule <= Schedule -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1500), Time(2000))
    ])
    schedule_d = Schedule([
        TimeRange(Time(1500), Time(3000))
    ])
    # NOTE(review): a.issubset(x) here tests whether x is covered by a
    # (i.e. the argument is the subset) -- confirm against the implementation.
    assert schedule_a.issubset(schedule_b) is True
    assert schedule_a.issubset(schedule_c) is True
    assert schedule_a.issubset(schedule_d) is False
    assert (schedule_b <= schedule_a) is True
    assert (schedule_c <= schedule_a) is True
    assert (schedule_d <= schedule_a) is False


def test_ispropersubset():
    """Schedule < Schedule -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1500), Time(2000))
    ])
    schedule_d = Schedule([
        TimeRange(Time(1500), Time(3000))
    ])
    # Equal schedules are not proper subsets of each other.
    assert not schedule_b < schedule_a
    assert schedule_c < schedule_a
    assert not schedule_d < schedule_a


def test_issuperset():
    """Schedule.issuperset(Schedule) -> bool"""
    """Schedule >= Schedule -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_c = Schedule([
        TimeRange(Time(500), Time(2000))
    ])
    schedule_d = Schedule([
        TimeRange(Time(500), Time(1500))
    ])
    assert schedule_a.issuperset(schedule_b)
    assert schedule_a.issuperset(schedule_c)
    assert not schedule_a.issuperset(schedule_d)
    assert schedule_b >= schedule_a
    assert schedule_c >= schedule_a
    assert not schedule_d >= schedule_a


def test_ispropersuperset():
    """Schedule > Schedule -> bool"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1000), Time(2000))
    ])
    schedule_c = Schedule([
        TimeRange(Time(500), Time(2000))
    ])
    schedule_d = Schedule([
        TimeRange(Time(500), Time(1500))
    ])
    assert not schedule_b > schedule_a
    assert schedule_c > schedule_a
    assert not schedule_d > schedule_a
def test_union():
    """Schedule.union(*Schedule) -> Schedule"""
    """Schedule | Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(4500), Time(6000))
    ])
    # union() accepts multiple schedules at once.
    assert schedule_a.union(schedule_b, schedule_c) == Schedule([
        TimeRange(Time(1000), Time(2200)),
        TimeRange(Time(2500), Time(3200)),
        TimeRange(Time(4500), Time(6000))
    ])
    assert schedule_a | schedule_b == Schedule([
        TimeRange(Time(1000), Time(2200)),
        TimeRange(Time(2500), Time(3200))
    ])


def test_intersection():
    """Schedule.intersection(*Schedule) -> Schedule"""
    """Schedule & Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    assert schedule_a.intersection(schedule_b, schedule_c) == Schedule([
        TimeRange(Time(1300), Time(2000)),
        TimeRange(Time(2700), Time(3000))
    ])
    assert schedule_a & schedule_b == Schedule([
        TimeRange(Time(1200), Time(2000)),
        TimeRange(Time(2700), Time(3000))
    ])


def test_difference():
    """Schedule.difference(*Schedule) -> Schedule"""
    """Schedule - Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    assert schedule_a.difference(schedule_b, schedule_c) == Schedule([
        TimeRange(Time(1000), Time(1200))
    ])
    assert schedule_a - schedule_b == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2500), Time(2700))
    ])


def test_symmetric_difference():
    """Schedule.symmetric_difference(Schedule) -> Schedule"""
    """Schedule ^ Schedule -> Schedule"""
    schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    assert schedule_a.symmetric_difference(schedule_b) == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2000), Time(2200)),
        TimeRange(Time(2500), Time(2700)),
        TimeRange(Time(3000), Time(3200))
    ])
    assert schedule_a ^ schedule_b == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2000), Time(2200)),
        TimeRange(Time(2500), Time(2700)),
        TimeRange(Time(3000), Time(3200))
    ])
def test_update():
    """Schedule.update(*Schedule) -> None"""
    """Schedule |= Schedule -> None"""
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(4500), Time(6000))
    ])
    schedule_a.update(schedule_b, schedule_c)
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(2200)),
        TimeRange(Time(2500), Time(3200)),
        TimeRange(Time(4500), Time(6000))
    ])
    # In-place: the same object is mutated, not replaced.
    assert schedule_a is schedule_orig
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(4500), Time(6000))
    ])
    schedule_a |= schedule_b
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(2200)),
        TimeRange(Time(2500), Time(3200))
    ])
    assert schedule_a is schedule_orig


def test_intersection_update():
    """Schedule.intersection_update(*Schedule) -> None"""
    """Schedule &= Schedule -> None"""
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    schedule_a.intersection_update(schedule_b, schedule_c)
    assert schedule_a == Schedule([
        TimeRange(Time(1300), Time(2000)),
        TimeRange(Time(2700), Time(3000))
    ])
    assert schedule_a is schedule_orig
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    schedule_a &= schedule_b
    assert schedule_a == Schedule([
        TimeRange(Time(1200), Time(2000)),
        TimeRange(Time(2700), Time(3000))
    ])
    assert schedule_a is schedule_orig
def test_difference_update():
    """Schedule.difference_update(*Schedule) -> None"""
    """Schedule -= Schedule -> None"""
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    schedule_a.difference_update(schedule_b, schedule_c)
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(1200))
    ])
    assert schedule_a is schedule_orig
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_c = Schedule([
        TimeRange(Time(1300), Time(6000))
    ])
    schedule_a -= schedule_b
    # BUG FIX: this previously asserted `schedule_a - schedule_b == ...`,
    # which subtracts schedule_b a second time instead of checking the
    # result of the in-place `-=` directly.
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2500), Time(2700))
    ])
    assert schedule_a is schedule_orig
def test_symmetric_difference_update():
    """Schedule.symmetric_difference_update(Schedule) -> None"""
    """Schedule ^= Schedule -> None"""
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_a.symmetric_difference_update(schedule_b)
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2000), Time(2200)),
        TimeRange(Time(2500), Time(2700)),
        TimeRange(Time(3000), Time(3200))
    ])
    # In-place: the same object is mutated, not replaced.
    assert schedule_a is schedule_orig
    schedule_orig = schedule_a = Schedule([
        TimeRange(Time(1000), Time(2000)),
        TimeRange(Time(2500), Time(3000))
    ])
    schedule_b = Schedule([
        TimeRange(Time(1200), Time(2200)),
        TimeRange(Time(2700), Time(3200))
    ])
    schedule_a ^= schedule_b
    assert schedule_a == Schedule([
        TimeRange(Time(1000), Time(1200)),
        TimeRange(Time(2000), Time(2200)),
        TimeRange(Time(2500), Time(2700)),
        TimeRange(Time(3000), Time(3200))
    ])
    assert schedule_a is schedule_orig
def test_timerange_isdisjoint():
    """timerange_isdisjoint(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # Adjacent (touching) ranges are disjoint -- half-open semantics.
    assert timerange_isdisjoint(timerange_a, timerange_b) is True
    assert timerange_isdisjoint(timerange_a, timerange_c) is True
    assert timerange_isdisjoint(timerange_a, timerange_d) is False
    assert timerange_isdisjoint(timerange_a, timerange_e) is False
    assert timerange_isdisjoint(timerange_a, timerange_f) is False
    assert timerange_isdisjoint(timerange_a, timerange_g) is True
    assert timerange_isdisjoint(timerange_a, timerange_h) is True
    assert timerange_isdisjoint(timerange_a, timerange_i) is False
    assert timerange_isdisjoint(timerange_a, timerange_j) is False
    assert timerange_isdisjoint(timerange_a, timerange_k) is False
    assert timerange_isdisjoint(timerange_a, timerange_l) is False
    assert timerange_isdisjoint(timerange_a, timerange_m) is False
    assert timerange_isdisjoint(timerange_a, timerange_n) is False


def test_timerange_isadjacent():
    """timerange_isadjacent(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # Adjacent means sharing exactly one endpoint with no overlap (c and g).
    assert timerange_isadjacent(timerange_a, timerange_b) is False
    assert timerange_isadjacent(timerange_a, timerange_c) is True
    assert timerange_isadjacent(timerange_a, timerange_d) is False
    assert timerange_isadjacent(timerange_a, timerange_e) is False
    assert timerange_isadjacent(timerange_a, timerange_f) is False
    assert timerange_isadjacent(timerange_a, timerange_g) is True
    assert timerange_isadjacent(timerange_a, timerange_h) is False
    assert timerange_isadjacent(timerange_a, timerange_i) is False
    assert timerange_isadjacent(timerange_a, timerange_j) is False
    assert timerange_isadjacent(timerange_a, timerange_k) is False
    assert timerange_isadjacent(timerange_a, timerange_l) is False
    assert timerange_isadjacent(timerange_a, timerange_m) is False
    assert timerange_isadjacent(timerange_a, timerange_n) is False
def test_timerange_issubset():
    """timerange_issubset(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # NOTE(review): per these expectations the SECOND argument is tested for
    # being a subset of the first -- confirm against digical.lib.schedule.
    assert timerange_issubset(timerange_a, timerange_b) is False
    assert timerange_issubset(timerange_a, timerange_c) is False
    assert timerange_issubset(timerange_a, timerange_d) is False
    assert timerange_issubset(timerange_a, timerange_e) is True
    assert timerange_issubset(timerange_a, timerange_f) is False
    assert timerange_issubset(timerange_a, timerange_g) is False
    assert timerange_issubset(timerange_a, timerange_h) is False
    assert timerange_issubset(timerange_a, timerange_i) is True
    assert timerange_issubset(timerange_a, timerange_j) is True
    assert timerange_issubset(timerange_a, timerange_k) is True
    assert timerange_issubset(timerange_a, timerange_l) is False
    assert timerange_issubset(timerange_a, timerange_m) is False
    assert timerange_issubset(timerange_a, timerange_n) is False


def test_timerange_ispropersubset():
    """timerange_ispropersubset(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # Same as issubset, except an equal range (e) is NOT a proper subset.
    assert timerange_ispropersubset(timerange_a, timerange_b) is False
    assert timerange_ispropersubset(timerange_a, timerange_c) is False
    assert timerange_ispropersubset(timerange_a, timerange_d) is False
    assert timerange_ispropersubset(timerange_a, timerange_e) is False
    assert timerange_ispropersubset(timerange_a, timerange_f) is False
    assert timerange_ispropersubset(timerange_a, timerange_g) is False
    assert timerange_ispropersubset(timerange_a, timerange_h) is False
    assert timerange_ispropersubset(timerange_a, timerange_i) is True
    assert timerange_ispropersubset(timerange_a, timerange_j) is True
    assert timerange_ispropersubset(timerange_a, timerange_k) is True
    assert timerange_ispropersubset(timerange_a, timerange_l) is False
    assert timerange_ispropersubset(timerange_a, timerange_m) is False
    assert timerange_ispropersubset(timerange_a, timerange_n) is False
def test_timerange_issuperset():
    """timerange_issuperset(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # NOTE(review): mirror of issubset -- the SECOND argument is tested for
    # covering the first; confirm against digical.lib.schedule.
    assert timerange_issuperset(timerange_a, timerange_b) is False
    assert timerange_issuperset(timerange_a, timerange_c) is False
    assert timerange_issuperset(timerange_a, timerange_d) is False
    assert timerange_issuperset(timerange_a, timerange_e) is True
    assert timerange_issuperset(timerange_a, timerange_f) is False
    assert timerange_issuperset(timerange_a, timerange_g) is False
    assert timerange_issuperset(timerange_a, timerange_h) is False
    assert timerange_issuperset(timerange_a, timerange_i) is False
    assert timerange_issuperset(timerange_a, timerange_j) is False
    assert timerange_issuperset(timerange_a, timerange_k) is False
    assert timerange_issuperset(timerange_a, timerange_l) is True
    assert timerange_issuperset(timerange_a, timerange_m) is True
    assert timerange_issuperset(timerange_a, timerange_n) is True


def test_timerange_ispropersuperset():
    """timerange_ispropersuperset(TimeRange, TimeRange) -> bool"""
    timerange_a = TimeRange(Time(1000), Time(2000))
    timerange_b = TimeRange(Time(2500), Time(3000))
    timerange_c = TimeRange(Time(2000), Time(2500))
    timerange_d = TimeRange(Time(1500), Time(2500))
    timerange_e = TimeRange(Time(1000), Time(2000))
    timerange_f = TimeRange(Time(500), Time(1500))
    timerange_g = TimeRange(Time(500), Time(1000))
    timerange_h = TimeRange(Time(0), Time(500))
    timerange_i = TimeRange(Time(1500), Time(2000))
    timerange_j = TimeRange(Time(1200), Time(1800))
    timerange_k = TimeRange(Time(1000), Time(1500))
    timerange_l = TimeRange(Time(1000), Time(2500))
    timerange_m = TimeRange(Time(500), Time(2500))
    timerange_n = TimeRange(Time(500), Time(2000))
    # Same as issuperset, except an equal range (e) is NOT a proper superset.
    assert timerange_ispropersuperset(timerange_a, timerange_b) is False
    assert timerange_ispropersuperset(timerange_a, timerange_c) is False
    assert timerange_ispropersuperset(timerange_a, timerange_d) is False
    assert timerange_ispropersuperset(timerange_a, timerange_e) is False
    assert timerange_ispropersuperset(timerange_a, timerange_f) is False
    assert timerange_ispropersuperset(timerange_a, timerange_g) is False
    assert timerange_ispropersuperset(timerange_a, timerange_h) is False
    assert timerange_ispropersuperset(timerange_a, timerange_i) is False
    assert timerange_ispropersuperset(timerange_a, timerange_j) is False
    assert timerange_ispropersuperset(timerange_a, timerange_k) is False
    assert timerange_ispropersuperset(timerange_a, timerange_l) is True
    assert timerange_ispropersuperset(timerange_a, timerange_m) is True
    assert timerange_ispropersuperset(timerange_a, timerange_n) is True
def test_timerange_union():
"""timerange_union(TimeRange, TimeRange) -> TimeRange"""
timerange_a = TimeRange(Time(1000), Time(2000))
timerange_b = TimeRange(Time(2500), Time(3000))
timerange_c = TimeRange(Time(2000), Time(2500))
timerange_d = TimeRange(Time(1500), Time(2500))
timerange_e = TimeRange(Time(1000), Time(2000))
timerange_f = TimeRange(Time(500), Time(1500))
timerange_g = TimeRange(Time(500), Time(1000))
timerange_h = TimeRange(Time(0), Time(500))
timerange_i = TimeRange(Time(1500), Time(2000))
timerange_j = TimeRange(Time(1200), Time(1800))
timerange_k = TimeRange(Time(1000), Time(1500))
timerange_l = TimeRange(Time(1000), Time(2500))
timerange_m = TimeRange(Time(500), Time(2500))
timerange_n = TimeRange(Time(500), Time(2000))
assert timerange_union(timerange_a, timerange_b) == (TimeRange(Time(1000), Time(2000)), TimeRange(Time(2500), Time(3000)))
assert timerange_union(timerange_a, timerange_c) == (TimeRange(Time(1000), Time(2500)), )
assert timerange_union(timerange_a, timerange_d) == (TimeRange(Time(1000), Time(2500)), )
assert timerange_union(timerange_a, timerange_e) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_union(timerange_a, timerange_f) == (TimeRange(Time(500), Time(2000)), )
assert timerange_union(timerange_a, timerange_g) == (TimeRange(Time(500), Time(2000)), )
assert timerange_union(timerange_a, timerange_h) == (TimeRange(Time(0), Time(500)), TimeRange(Time(1000), Time(2000)))
assert timerange_union(timerange_a, timerange_i) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_union(timerange_a, timerange_j) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_union(timerange_a, timerange_k) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_union(timerange_a, timerange_l) == (TimeRange(Time(1000), Time(2500)), )
assert timerange_union(timerange_a, timerange_m) == (TimeRange(Time(500), Time(2500)), )
assert timerange_union(timerange_a, timerange_n) == (TimeRange(Time(500), Time(2000)), )
def test_timerange_intersection():
"""timerange_intersection(TimeRange, TimeRange) -> TimeRange"""
timerange_a = TimeRange(Time(1000), Time(2000))
timerange_b = TimeRange(Time(2500), Time(3000))
timerange_c = TimeRange(Time(2000), Time(2500))
timerange_d = TimeRange(Time(1500), Time(2500))
timerange_e = TimeRange(Time(1000), Time(2000))
timerange_f = TimeRange(Time(500), Time(1500))
timerange_g = TimeRange(Time(500), Time(1000))
timerange_h = TimeRange(Time(0), Time(500))
timerange_i = TimeRange(Time(1500), Time(2000))
timerange_j = TimeRange(Time(1200), Time(1800))
timerange_k = TimeRange(Time(1000), Time(1500))
timerange_l = TimeRange(Time(1000), Time(2500))
timerange_m = TimeRange(Time(500), Time(2500))
timerange_n = TimeRange(Time(500), Time(2000))
assert timerange_intersection(timerange_a, timerange_b) == ()
assert timerange_intersection(timerange_a, timerange_c) == ()
assert timerange_intersection(timerange_a, timerange_d) == (TimeRange(Time(1500), Time(2000)), )
assert timerange_intersection(timerange_a, timerange_e) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_intersection(timerange_a, timerange_f) == (TimeRange(Time(1000), Time(1500)), )
assert timerange_intersection(timerange_a, timerange_g) == ()
assert timerange_intersection(timerange_a, timerange_h) == ()
assert timerange_intersection(timerange_a, timerange_i) == (TimeRange(Time(1500), Time(2000)), )
assert timerange_intersection(timerange_a, timerange_j) == (TimeRange(Time(1200), Time(1800)), )
assert timerange_intersection(timerange_a, timerange_k) == (TimeRange(Time(1000), Time(1500)), )
assert timerange_intersection(timerange_a, timerange_l) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_intersection(timerange_a, timerange_m) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_intersection(timerange_a, timerange_n) == (TimeRange(Time(1000), Time(2000)), )
def test_timerange_difference():
"""timerange_difference(TimeRange, TimeRange) -> TimeRange"""
timerange_a = TimeRange(Time(1000), Time(2000))
timerange_b = TimeRange(Time(2500), Time(3000))
timerange_c = TimeRange(Time(2000), Time(2500))
timerange_d = TimeRange(Time(1500), Time(2500))
timerange_e = TimeRange(Time(1000), Time(2000))
timerange_f = TimeRange(Time(500), Time(1500))
timerange_g = TimeRange(Time(500), Time(1000))
timerange_h = TimeRange(Time(0), Time(500))
timerange_i = TimeRange(Time(1500), Time(2000))
timerange_j = TimeRange(Time(1200), Time(1800))
timerange_k = TimeRange(Time(1000), Time(1500))
timerange_l = TimeRange(Time(1000), Time(2500))
timerange_m = TimeRange(Time(500), Time(2500))
timerange_n = TimeRange(Time(500), Time(2000))
assert timerange_difference(timerange_a, timerange_b) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_difference(timerange_a, timerange_c) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_difference(timerange_a, timerange_d) == (TimeRange(Time(1000), Time(1500)), )
assert timerange_difference(timerange_a, timerange_e) == ()
assert timerange_difference(timerange_a, timerange_f) == (TimeRange(Time(1500), Time(2000)), )
assert timerange_difference(timerange_a, timerange_g) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_difference(timerange_a, timerange_h) == (TimeRange(Time(1000), Time(2000)), )
assert timerange_difference(timerange_a, timerange_i) == (TimeRange(Time(1000), Time(1500)), )
assert timerange_difference(timerange_a, timerange_j) == (TimeRange(Time(1000), Time(1200)), TimeRange(Time(1800), Time(2000)))
assert timerange_difference(timerange_a, timerange_k) == (TimeRange(Time(1500), Time(2000)), )
assert timerange_difference(timerange_a, timerange_l) == ()
assert timerange_difference(timerange_a, timerange_m) == ()
assert timerange_difference(timerange_a, timerange_n) == ()
def test_timerange_symmetric_difference():
"""timerange_symmetric_difference(TimeRange, TimeRange) -> TimeRange"""
timerange_a = TimeRange(Time(1000), Time(2000))
timerange_b = TimeRange(Time(2500), Time(3000))
timerange_c = TimeRange(Time(2000), Time(2500))
timerange_d = TimeRange(Time(1500), Time(2500))
timerange_e = TimeRange(Time(1000), Time(2000))
timerange_f = TimeRange(Time(500), Time(1500))
timerange_g = TimeRange(Time(500), Time(1000))
timerange_h = TimeRange(Time(0), Time(500))
timerange_i = TimeRange(Time(1500), Time(2000))
timerange_j = TimeRange(Time(1200), Time(1800))
timerange_k = TimeRange(Time(1000), Time(1500))
timerange_l = TimeRange(Time(1000), Time(2500))
timerange_m = TimeRange(Time(500), Time(2500))
timerange_n = TimeRange(Time(500), Time(2000))
assert timerange_symmetric_difference(timerange_a, timerange_b) == (TimeRange(Time(1000), Time(2000)), TimeRange(Time(2500), Time(3000)))
assert timerange_symmetric_difference(timerange_a, timerange_c) == (TimeRange(Time(1000), Time(2500)), )
assert timerange_symmetric_difference(timerange_a, timerange_d) == (TimeRange(Time(1000), Time(1500)), TimeRange(Time(2000), Time(2500)))
assert timerange_symmetric_difference(timerange_a, timerange_e) == ()
assert timerange_symmetric_difference(timerange_a, timerange_f) == (TimeRange(Time(500), Time(1000)), TimeRange(Time(1500), Time(2000)))
assert timerange_symmetric_difference(timerange_a, timerange_g) == (TimeRange(Time(500), Time(2000)), )
assert timerange_symmetric_difference(timerange_a, timerange_h) == (TimeRange(Time(0), Time(500)), TimeRange(Time(1000), Time(2000)))
assert timerange_symmetric_difference(timerange_a, timerange_i) == (TimeRange(Time(1000), Time(1500)), )
assert timerange_symmetric_difference(timerange_a, timerange_j) == (TimeRange(Time(1000), Time(1200)), TimeRange(Time(1800), Time(2000)))
assert timerange_symmetric_difference(timerange_a, timerange_k) == (TimeRange(Time(1500), Time(2000)), )
assert timerange_symmetric_difference(timerange_a, timerange_l) == (TimeRange(Time(2000), Time(2500)), )
assert timerange_symmetric_difference(timerange_a, timerange_m) == (TimeRange(Time(500), Time(1000)), TimeRange(Time(2000), Time(2500)))
assert timerange_symmetric_difference(timerange_a, timerange_n) == (TimeRange(Time(500), Time(1000)), )
| 38.478818 | 266 | 0.671369 | 4,659 | 39,056 | 5.442799 | 0.022537 | 0.205063 | 0.095867 | 0.118424 | 0.920617 | 0.906696 | 0.877553 | 0.859374 | 0.69414 | 0.674974 | 0 | 0.101083 | 0.191213 | 39,056 | 1,014 | 267 | 38.516765 | 0.701691 | 0.044782 | 0 | 0.673302 | 0 | 0.002342 | 0.006291 | 0.001416 | 0 | 0 | 0 | 0 | 0.257611 | 1 | 0.055035 | false | 0 | 0.003513 | 0 | 0.058548 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
af8bb2c0ea455cfa20d40823c574b7efa39e63c2 | 112 | py | Python | socialnetworks/github/__init__.py | gGonz/django-socialnetworks | 3f6c577efafd6ed5eb8b5cb60d9ee6a36920581d | [
"Apache-2.0"
] | 5 | 2015-06-18T03:30:28.000Z | 2017-11-04T21:34:20.000Z | socialnetworks/github/__init__.py | gGonz/django-socialnetworks | 3f6c577efafd6ed5eb8b5cb60d9ee6a36920581d | [
"Apache-2.0"
] | 2 | 2015-04-25T00:06:19.000Z | 2015-04-30T22:42:40.000Z | socialnetworks/github/__init__.py | gGonz/django-socialnetworks | 3f6c577efafd6ed5eb8b5cb60d9ee6a36920581d | [
"Apache-2.0"
] | 4 | 2015-06-11T18:28:04.000Z | 2016-09-07T15:08:09.000Z | from .clients import GitHubClient
from .decorators import fetch_github_data
from .utils import read_github_data
| 28 | 41 | 0.866071 | 16 | 112 | 5.8125 | 0.625 | 0.215054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 112 | 3 | 42 | 37.333333 | 0.93 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
af9909a68c095ec0b20a759915d73475a05ed1b4 | 3,568 | py | Python | tests/test_time_series_spliter.py | Plozano94/skforecast | 71b83a45ecde757fb24be58adf9c88d8066a4582 | [
"MIT"
] | null | null | null | tests/test_time_series_spliter.py | Plozano94/skforecast | 71b83a45ecde757fb24be58adf9c88d8066a4582 | [
"MIT"
] | null | null | null | tests/test_time_series_spliter.py | Plozano94/skforecast | 71b83a45ecde757fb24be58adf9c88d8066a4582 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast')
import pytest
from pytest import approx
import numpy as np
from skforecast.model_selection import time_series_spliter
# Test test_time_series_spliter
#-------------------------------------------------------------------------------
def test_time_series_spliter_exception_when_y_is_numpy_array_with_more_than_1_dimesion():
results = time_series_spliter(np.arange(10).reshape(-1, 2), initial_train_size=3, steps=1)
with pytest.raises(Exception):
list(results)
def test_time_series_spliter_exception_when_y_is_list():
results = time_series_spliter([0,1,2,3,4], initial_train_size=3, steps=1)
with pytest.raises(Exception):
list(results)
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_1_allow_incomplete_fold_True():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=1,
allow_incomplete_fold=True,
verbose=True
)
results = list(results)
expected = [(range(0, 5), range(5, 6)),
(range(0, 6), range(6, 7)),
(range(0, 7), range(7, 8)),
(range(0, 8), range(8, 9)),
(range(0, 9), range(9, 10))]
assert results == expected
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_5_allow_incomplete_fold_True():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=5,
allow_incomplete_fold=True,
verbose=True
)
results = list(results)
expected = [(range(0, 5), range(5, 10))]
assert results == expected
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_3_allow_incomplete_fold_False():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=3,
allow_incomplete_fold=False,
verbose=True
)
results = list(results)
expected = [(range(0, 5), range(5, 8))]
assert results == expected
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_3_allow_incomplete_fold_True():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=3,
allow_incomplete_fold=True,
verbose=True
)
results = list(results)
expected = [(range(0, 5), range(5, 8)), (range(0, 8), range(8, 10))]
assert results == expected
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_20_allow_incomplete_fold_False():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=20,
allow_incomplete_fold=False,
verbose=True
)
results = list(results)
expected = []
assert results == expected
def test_time_series_spliter_when_y_is_numpy_arange_10_initial_train_size_5_steps_20_allow_incomplete_fold_True():
results = time_series_spliter(
y=np.arange(10),
initial_train_size=5,
steps=20,
allow_incomplete_fold=True,
verbose=True
)
results = list(results)
expected = []
results == expected
assert results == expected | 33.345794 | 115 | 0.616031 | 447 | 3,568 | 4.501119 | 0.136465 | 0.089463 | 0.152087 | 0.119284 | 0.835984 | 0.818091 | 0.804672 | 0.804672 | 0.804672 | 0.776839 | 0 | 0.041053 | 0.276345 | 3,568 | 107 | 116 | 33.345794 | 0.738187 | 0.030269 | 0 | 0.611765 | 0 | 0 | 0.010989 | 0.010989 | 0 | 0 | 0 | 0 | 0.070588 | 1 | 0.094118 | false | 0 | 0.058824 | 0 | 0.152941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bb6557235669c3523337525e34cae046812f8d52 | 135 | py | Python | odin/handlers/data_handler/price_handler/__init__.py | gsamarakoon/Odin | e2e9d638c68947d24f1260d35a3527dd84c2523f | [
"MIT"
] | 103 | 2017-01-14T19:38:14.000Z | 2022-03-10T12:52:09.000Z | odin/handlers/data_handler/price_handler/__init__.py | gsamarakoon/Odin | e2e9d638c68947d24f1260d35a3527dd84c2523f | [
"MIT"
] | 6 | 2017-01-19T01:38:53.000Z | 2020-03-09T19:03:18.000Z | odin/handlers/data_handler/price_handler/__init__.py | JamesBrofos/Odin | e2e9d638c68947d24f1260d35a3527dd84c2523f | [
"MIT"
] | 33 | 2017-02-05T21:51:17.000Z | 2021-12-22T20:38:30.000Z | from .database_price_handler import DatabasePriceHandler
from .interactive_brokers_price_handler import InteractiveBrokersPriceHandler
| 45 | 77 | 0.925926 | 13 | 135 | 9.230769 | 0.692308 | 0.2 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059259 | 135 | 2 | 78 | 67.5 | 0.944882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bbf00786dce003d4f415fd635aa8a87b7c5bfa26 | 228 | py | Python | rand_param_envs/gym/wrappers/__init__.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 24 | 2021-03-24T07:14:52.000Z | 2022-03-17T08:15:44.000Z | rand_param_envs/gym/wrappers/__init__.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 12 | 2021-02-02T22:53:59.000Z | 2022-03-12T00:41:30.000Z | rand_param_envs/gym/wrappers/__init__.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 6 | 2021-04-12T18:49:47.000Z | 2021-09-07T05:33:22.000Z | from rand_param_envs.gym import error
from rand_param_envs.gym.wrappers.frame_skipping import SkipWrapper
from rand_param_envs.gym.wrappers.monitoring import Monitor
from rand_param_envs.gym.wrappers.time_limit import TimeLimit
| 45.6 | 67 | 0.885965 | 36 | 228 | 5.333333 | 0.444444 | 0.166667 | 0.270833 | 0.354167 | 0.541667 | 0.4375 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070175 | 228 | 4 | 68 | 57 | 0.90566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a537cf33a054ba5c421897c65798e0a6d5297fae | 23 | py | Python | PoisDenoiser/netwroks/__init__.py | AndreiDavydov/Poisson_Denoiser | a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22 | [
"MIT"
] | 4 | 2019-12-24T10:54:40.000Z | 2021-12-27T14:07:06.000Z | PoisDenoiser/networks/__init__.py | AndreiDavydov/Poisson_Denoiser | a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22 | [
"MIT"
] | null | null | null | PoisDenoiser/networks/__init__.py | AndreiDavydov/Poisson_Denoiser | a0b8f3dce8282b8e50d44cacb7bdc4fc6d4abc22 | [
"MIT"
] | 1 | 2020-09-28T06:04:12.000Z | 2020-09-28T06:04:12.000Z | from . import PoisNet
| 7.666667 | 21 | 0.73913 | 3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 23 | 2 | 22 | 11.5 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a56d1442fa6c49c95a0e83b08dac106d466cf535 | 48 | py | Python | virl/cli/views/console/__init__.py | gve-vse-tim/virlutils | 64687229ea8763509aca54b63144b61037e5228f | [
"MIT"
] | 12 | 2018-03-27T14:02:22.000Z | 2018-06-07T16:19:38.000Z | virl/cli/views/console/__init__.py | gve-vse-tim/virlutils | 64687229ea8763509aca54b63144b61037e5228f | [
"MIT"
] | 29 | 2017-12-14T16:38:12.000Z | 2018-08-19T18:41:06.000Z | virl/cli/views/console/__init__.py | gve-vse-tim/virlutils | 64687229ea8763509aca54b63144b61037e5228f | [
"MIT"
] | 7 | 2018-03-02T15:42:22.000Z | 2020-04-20T11:25:32.000Z | from .console_views import console_table # noqa
| 24 | 47 | 0.833333 | 7 | 48 | 5.428571 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 48 | 1 | 48 | 48 | 0.904762 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a5add39acdbf8184e956b77c8ede59f84f4f49f0 | 98 | py | Python | openprocurement/auctions/core/plugins/contracting/v2/interfaces.py | EBRD-ProzorroSale/openprocurement.auctions.core | 52bd59f193f25e4997612fca0f87291decf06966 | [
"Apache-2.0"
] | 2 | 2016-09-15T20:17:43.000Z | 2017-01-08T03:32:43.000Z | openprocurement/auctions/core/plugins/contracting/v2/interfaces.py | EBRD-ProzorroSale/openprocurement.auctions.core | 52bd59f193f25e4997612fca0f87291decf06966 | [
"Apache-2.0"
] | 183 | 2017-12-21T11:04:37.000Z | 2019-03-27T08:14:34.000Z | openprocurement/auctions/core/plugins/contracting/v2/interfaces.py | EBRD-ProzorroSale/openprocurement.auctions.core | 52bd59f193f25e4997612fca0f87291decf06966 | [
"Apache-2.0"
] | 12 | 2016-09-05T12:07:48.000Z | 2019-02-26T09:24:17.000Z | # -*- coding: utf-8
from zope.interface import Interface
class IContractV2(Interface):
pass
| 14 | 36 | 0.72449 | 12 | 98 | 5.916667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024691 | 0.173469 | 98 | 6 | 37 | 16.333333 | 0.851852 | 0.173469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
a5b1e5bee020cfdb5240c47d0edcd47bc482b286 | 30,920 | py | Python | zun/tests/unit/compute/test_compute_manager.py | cooldharma06/zun_glance_tag | 555399275afdff748888036a2fca47bbf347956b | [
"Apache-2.0"
] | null | null | null | zun/tests/unit/compute/test_compute_manager.py | cooldharma06/zun_glance_tag | 555399275afdff748888036a2fca47bbf347956b | [
"Apache-2.0"
] | null | null | null | zun/tests/unit/compute/test_compute_manager.py | cooldharma06/zun_glance_tag | 555399275afdff748888036a2fca47bbf347956b | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six import StringIO
from zun.common import consts
from zun.common import exception
from zun.compute import claims
from zun.compute import compute_node_tracker
from zun.compute import manager
import zun.conf
from zun.objects.container import Container
from zun.objects.image import Image
from zun.tests import base
from zun.tests.unit.container.fake_driver import FakeDriver as fake_driver
from zun.tests.unit.db import utils
class FakeResourceTracker(object):
    """Stub resource tracker whose claims always succeed.

    Tests install this on the compute manager so container creation does
    not depend on real resource accounting.
    """
    def container_claim(self, context, container, host, limits):
        # NopClaim is a claim object with no resource bookkeeping attached.
        return claims.NopClaim()
class TestManager(base.TestCase):
    def setUp(self):
        """Create a compute manager backed by the in-memory fake driver."""
        super(TestManager, self).setUp()
        # Swap in the fake driver so no real container runtime is touched.
        zun.conf.CONF.set_override(
            'container_driver',
            'zun.tests.unit.container.fake_driver.FakeDriver')
        self.compute_manager = manager.Manager()
@mock.patch.object(Container, 'save')
def test_fail_container(self, mock_save):
container = Container(self.context, **utils.get_test_container())
self.compute_manager._fail_container(self.context, container,
"Creation Failed")
self.assertEqual(consts.ERROR, container.status)
self.assertEqual("Creation Failed", container.status_reason)
self.assertIsNone(container.task_state)
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(fake_driver, 'create')
    def test_container_create(self, mock_create, mock_pull, mock_save):
        """Happy path: image is pulled, then the driver creates the container."""
        container = Container(self.context, **utils.get_test_container())
        image = {'image': 'repo', 'path': 'out_path', 'driver': 'glance'}
        # pull_image is stubbed to return a (image, flag) tuple; the second
        # element presumably indicates whether the image was already present
        # -- confirm against the manager implementation.
        mock_pull.return_value = image, False
        # Bypass real resource claiming.
        self.compute_manager._resource_tracker = FakeResourceTracker()
        networks = []
        self.compute_manager._do_container_create(self.context, container,
                                                  networks)
        mock_save.assert_called_with(self.context)
        mock_pull.assert_any_call(self.context, container.image, 'latest',
                                  'always', 'glance')
        mock_create.assert_called_once_with(self.context, container, image,
                                            networks)
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_create_pull_image_failed_docker_error(
            self, mock_fail, mock_pull, mock_save):
        """A DockerError during pull fails the container with that message."""
        container = Container(self.context, **utils.get_test_container())
        mock_pull.side_effect = exception.DockerError("Pull Failed")
        networks = []
        self.compute_manager._do_container_create(self.context, container,
                                                  networks)
        # The exception message is forwarded as the failure reason.
        mock_fail.assert_called_once_with(self.context,
                                          container, "Pull Failed")
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_create_pull_image_failed_image_not_found(
            self, mock_fail, mock_pull, mock_save):
        """An ImageNotFound during pull fails the container with that message."""
        container = Container(self.context, **utils.get_test_container())
        mock_pull.side_effect = exception.ImageNotFound("Image Not Found")
        networks = []
        self.compute_manager._do_container_create(self.context, container,
                                                  networks)
        # The exception message is forwarded as the failure reason.
        mock_fail.assert_called_once_with(self.context,
                                          container, "Image Not Found")
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_create_pull_image_failed_zun_exception(
            self, mock_fail, mock_pull, mock_save):
        """A generic ZunException during pull fails the container likewise."""
        container = Container(self.context, **utils.get_test_container())
        mock_pull.side_effect = exception.ZunException(
            message="Image Not Found")
        networks = []
        self.compute_manager._do_container_create(self.context, container,
                                                  networks)
        mock_fail.assert_called_once_with(self.context,
                                          container, "Image Not Found")
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(fake_driver, 'create')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_create_docker_create_failed(self, mock_fail,
                                                   mock_create, mock_pull,
                                                   mock_save):
        """A driver create() failure fails the container and unsets its host."""
        container = Container(self.context, **utils.get_test_container())
        image = {'image': 'repo', 'path': 'out_path', 'driver': 'glance',
                 'repo': 'test', 'tag': 'testtag'}
        mock_pull.return_value = image, False
        mock_create.side_effect = exception.DockerError("Creation Failed")
        # Bypass real resource claiming.
        self.compute_manager._resource_tracker = FakeResourceTracker()
        networks = []
        self.compute_manager._do_container_create(self.context, container,
                                                  networks)
        # unset_host=True: a failed create must release the host binding
        # so the container can be rescheduled elsewhere.
        mock_fail.assert_called_once_with(
            self.context, container, "Creation Failed", unset_host=True)
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(fake_driver, 'create')
    @mock.patch.object(fake_driver, 'start')
    def test_container_run(self, mock_start,
                           mock_create, mock_pull, mock_save):
        """Happy path run: pull the image, create, then start the container."""
        container = Container(self.context, **utils.get_test_container())
        image = {'image': 'repo', 'path': 'out_path', 'driver': 'glance'}
        mock_create.return_value = container
        mock_pull.return_value = image, False
        container.status = 'Stopped'
        # Bypass real resource claiming.
        self.compute_manager._resource_tracker = FakeResourceTracker()
        networks = []
        self.compute_manager._do_container_run(self.context, container,
                                               networks)
        mock_save.assert_called_with(self.context)
        mock_pull.assert_any_call(self.context, container.image, 'latest',
                                  'always', 'glance')
        mock_create.assert_called_once_with(self.context, container, image,
                                            networks)
        # Unlike _do_container_create, run also starts the container.
        mock_start.assert_called_once_with(self.context, container)
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_run_image_not_found(self, mock_fail,
                                           mock_pull, mock_save):
        """ImageNotFound during run-pull fails the container with its message."""
        container_dict = utils.get_test_container(
            image='test:latest', image_driver='docker',
            image_pull_policy='ifnotpresent')
        container = Container(self.context, **container_dict)
        mock_pull.side_effect = exception.ImageNotFound(
            message="Image Not Found")
        networks = []
        self.compute_manager._do_container_run(self.context,
                                               container,
                                               networks)
        mock_save.assert_called_with(self.context)
        mock_fail.assert_called_with(self.context,
                                     container, 'Image Not Found')
        # 'test:latest' is split into repo and tag, and the container's
        # pull policy and image driver are passed through to pull_image.
        mock_pull.assert_called_once_with(self.context, 'test', 'latest',
                                          'ifnotpresent', 'docker')
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_run_image_pull_exception_raised(self, mock_fail,
                                                       mock_pull, mock_save):
        """A generic ZunException during run-pull fails the container."""
        container_dict = utils.get_test_container(
            image='test:latest', image_driver='docker',
            image_pull_policy='ifnotpresent')
        container = Container(self.context, **container_dict)
        mock_pull.side_effect = exception.ZunException(
            message="Image Not Found")
        networks = []
        self.compute_manager._do_container_run(self.context,
                                               container,
                                               networks)
        mock_save.assert_called_with(self.context)
        mock_fail.assert_called_with(self.context,
                                     container, 'Image Not Found')
        # Repo/tag are split from 'test:latest'; policy and driver come
        # from the container record.
        mock_pull.assert_called_once_with(self.context, 'test', 'latest',
                                          'ifnotpresent', 'docker')
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    def test_container_run_image_pull_docker_error(self, mock_fail,
                                                   mock_pull, mock_save):
        """A DockerError during run-pull fails the container with its message."""
        container_dict = utils.get_test_container(
            image='test:latest', image_driver='docker',
            image_pull_policy='ifnotpresent')
        container = Container(self.context, **container_dict)
        mock_pull.side_effect = exception.DockerError(
            message="Docker Error occurred")
        networks = []
        self.compute_manager._do_container_run(self.context,
                                               container,
                                               networks)
        mock_save.assert_called_with(self.context)
        mock_fail.assert_called_with(self.context,
                                     container, 'Docker Error occurred')
        mock_pull.assert_called_once_with(self.context, 'test', 'latest',
                                          'ifnotpresent', 'docker')
    @mock.patch.object(Container, 'save')
    @mock.patch('zun.image.driver.pull_image')
    @mock.patch.object(manager.Manager, '_fail_container')
    @mock.patch.object(fake_driver, 'create')
    def test_container_run_create_raises_docker_error(self, mock_create,
                                                      mock_fail,
                                                      mock_pull, mock_save):
        """Driver create() DockerError during run fails and unsets the host."""
        container = Container(self.context, **utils.get_test_container())
        image = {'image': 'repo', 'path': 'out_path', 'driver': 'glance',
                 'repo': 'test', 'tag': 'testtag'}
        # Note the second tuple element is True here (unlike the create
        # tests); its exact meaning is not visible in this file -- confirm
        # against the manager implementation.
        mock_pull.return_value = image, True
        mock_create.side_effect = exception.DockerError(
            message="Docker Error occurred")
        # Bypass real resource claiming.
        self.compute_manager._resource_tracker = FakeResourceTracker()
        networks = []
        self.compute_manager._do_container_run(self.context,
                                               container,
                                               networks)
        mock_save.assert_called_with(self.context)
        # unset_host=True releases the host binding after a failed create.
        mock_fail.assert_called_with(
            self.context, container, 'Docker Error occurred', unset_host=True)
        mock_pull.assert_any_call(self.context, container.image, 'latest',
                                  'always', 'glance')
        mock_create.assert_called_once_with(
            self.context, container, image, networks)
@mock.patch.object(compute_node_tracker.ComputeNodeTracker,
'remove_usage_from_container')
@mock.patch.object(Container, 'destroy')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'delete')
def test_container_delete(self, mock_delete, mock_save, mock_cnt_destroy,
mock_remove_usage):
container = Container(self.context, **utils.get_test_container())
self.compute_manager.container_delete(self. context, container, False)
mock_save.assert_called_with(self.context)
mock_delete.assert_called_once_with(self.context, container, False)
mock_cnt_destroy.assert_called_once_with(self.context)
mock_remove_usage.assert_called_once_with(self.context, container,
True)
    @mock.patch.object(manager.Manager, '_fail_container')
    @mock.patch.object(Container, 'save')
    @mock.patch.object(fake_driver, 'delete')
    def test_container_delete_failed(self, mock_delete, mock_save,
                                     mock_fail):
        """Driver delete() DockerError is recorded on the container and re-raised."""
        container = Container(self.context, **utils.get_test_container())
        mock_delete.side_effect = exception.DockerError(
            message="Docker Error occurred")
        # The error propagates to the caller...
        self.assertRaises(exception.DockerError,
                          self.compute_manager.container_delete,
                          self.context, container, False)
        mock_save.assert_called_with(self.context)
        # ...and the container is marked failed with the error message.
        mock_fail.assert_called_with(self.context,
                                     container, 'Docker Error occurred')
    @mock.patch.object(manager.Manager, '_fail_container')
    @mock.patch.object(fake_driver, 'delete_sandbox')
    @mock.patch.object(Container, 'save')
    @mock.patch.object(fake_driver, 'delete')
    def test_container_delete_sandbox_failed(self, mock_delete, mock_save,
                                             mock_delete_sandbox,
                                             mock_fail):
        """With sandboxes enabled, a sandbox-delete failure fails the container."""
        # Sandbox handling only runs when use_sandbox is set and the
        # container has a sandbox id.
        self.compute_manager.use_sandbox = True
        container = Container(self.context, **utils.get_test_container())
        container.set_sandbox_id("sandbox_id")
        mock_delete_sandbox.side_effect = exception.ZunException(
            message="Unexpected exception")
        self.assertRaises(exception.ZunException,
                          self.compute_manager.container_delete,
                          self.context, container, False)
        mock_save.assert_called_with(self.context)
        mock_fail.assert_called_with(self.context,
                                     container, 'Unexpected exception')
@mock.patch.object(fake_driver, 'list')
def test_container_list(self, mock_list):
    """container_list delegates straight to the driver's list()."""
    self.compute_manager.container_list(self.context)
    mock_list.assert_called_once_with(self.context)
@mock.patch.object(fake_driver, 'list')
def test_container_list_failed(self, mock_list):
    """A DockerError from driver.list propagates to the caller."""
    mock_list.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_list,
                      self.context)
@mock.patch.object(fake_driver, 'show')
def test_container_show(self, mock_show):
    """container_show delegates straight to the driver's show()."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.container_show(self.context, container)
    mock_show.assert_called_once_with(self.context, container)
@mock.patch.object(fake_driver, 'show')
def test_container_show_failed(self, mock_show):
    """A DockerError from driver.show propagates to the caller."""
    container = Container(self.context, **utils.get_test_container())
    mock_show.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_show,
                      self.context, container)
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'reboot')
def test_container_reboot(self, mock_reboot, mock_save):
    """_do_container_reboot passes the timeout (10) through to the driver
    and persists the container."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_reboot(self.context, container, 10)
    mock_save.assert_called_with(self.context)
    mock_reboot.assert_called_once_with(self.context, container, 10)
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'reboot')
def test_container_reboot_failed(self, mock_reboot, mock_save,
                                 mock_fail):
    """With reraise=True, a DockerError from driver.reboot marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_reboot.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_reboot,
                      self.context, container, 10, reraise=True)
    mock_save.assert_called_with(self.context)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'stop')
def test_container_stop(self, mock_stop, mock_save):
    """_do_container_stop passes the timeout (10) through to the driver
    and persists the container."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_stop(self.context, container, 10)
    mock_save.assert_called_with(self.context)
    mock_stop.assert_called_once_with(self.context, container, 10)
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'stop')
def test_container_stop_failed(self, mock_stop, mock_save, mock_fail):
    """With reraise=True, a DockerError from driver.stop marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_stop.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_stop,
                      self.context, container, 10, reraise=True)
    mock_save.assert_called_with(self.context)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'start')
def test_container_start(self, mock_start, mock_save):
    """_do_container_start calls the driver and persists the container."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_start(self.context, container)
    mock_save.assert_called_with(self.context)
    mock_start.assert_called_once_with(self.context, container)
@mock.patch.object(Container, 'save')
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(fake_driver, 'start')
def test_container_start_failed(self, mock_start,
                                mock_fail, mock_save):
    """With reraise=True, a DockerError from driver.start marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_start.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_start,
                      self.context, container, reraise=True)
    mock_save.assert_called_with(self.context)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(fake_driver, 'pause')
def test_container_pause(self, mock_pause):
    """_do_container_pause delegates straight to the driver's pause()."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_pause(self.context, container)
    mock_pause.assert_called_once_with(self.context, container)
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(fake_driver, 'pause')
def test_container_pause_failed(self, mock_pause, mock_fail):
    """With reraise=True, a DockerError from driver.pause marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_pause.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_pause,
                      self.context, container, reraise=True)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(fake_driver, 'unpause')
def test_container_unpause(self, mock_unpause):
    """_do_container_unpause delegates straight to the driver's unpause()."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_unpause(self.context, container)
    mock_unpause.assert_called_once_with(self.context, container)
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(fake_driver, 'unpause')
def test_container_unpause_failed(self, mock_unpause, mock_fail):
    """With reraise=True, a DockerError from driver.unpause marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_unpause.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_unpause,
                      self.context, container, reraise=True)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(fake_driver, 'show_logs')
def test_container_logs(self, mock_logs):
    """container_logs maps its positional arguments onto the driver's
    show_logs keyword arguments in order (stderr, stdout, timestamps,
    tail, since)."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.container_logs(self.context,
                                        container, True, True,
                                        False, 'all', None)
    mock_logs.assert_called_once_with(
        self.context, container, stderr=True, stdout=True,
        timestamps=False, tail='all', since=None)
@mock.patch.object(fake_driver, 'show_logs')
def test_container_logs_failed(self, mock_logs):
    """A DockerError from driver.show_logs propagates to the caller."""
    container = Container(self.context, **utils.get_test_container())
    mock_logs.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_logs,
                      self.context, container, True, True,
                      False, 'all', None)
@mock.patch.object(fake_driver, 'execute_run')
@mock.patch.object(fake_driver, 'execute_create')
def test_container_execute(self, mock_execute_create, mock_execute_run):
    """container_exec creates an exec instance, then runs it by the id
    returned from execute_create."""
    mock_execute_create.return_value = 'fake_exec_id'
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.container_exec(
        self.context, container, 'fake_cmd', True, False)
    mock_execute_create.assert_called_once_with(
        self.context, container, 'fake_cmd', False)
    mock_execute_run.assert_called_once_with('fake_exec_id', 'fake_cmd')
@mock.patch.object(fake_driver, 'execute_create')
def test_container_execute_failed(self, mock_execute_create):
    """A DockerError from driver.execute_create propagates to the caller."""
    container = Container(self.context, **utils.get_test_container())
    mock_execute_create.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_exec,
                      self.context, container, 'fake_cmd', True, False)
@mock.patch.object(fake_driver, 'kill')
def test_container_kill(self, mock_kill):
    """_do_container_kill forwards the (None) signal to the driver."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager._do_container_kill(self.context, container, None)
    mock_kill.assert_called_once_with(self.context, container, None)
@mock.patch.object(manager.Manager, '_fail_container')
@mock.patch.object(fake_driver, 'kill')
def test_container_kill_failed(self, mock_kill, mock_fail):
    """With reraise=True, a DockerError from driver.kill marks the
    container failed and re-raises."""
    container = Container(self.context, **utils.get_test_container())
    mock_kill.side_effect = exception.DockerError(
        message="Docker Error occurred")
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_kill,
                      self.context, container, None, reraise=True)
    mock_fail.assert_called_with(self.context,
                                 container, 'Docker Error occurred')
@mock.patch.object(Container, 'save')
@mock.patch.object(fake_driver, 'update')
def test_container_update(self, mock_update, mock_save):
    """container_update persists the patched container and calls the
    driver's update() (the patch dict itself is not forwarded)."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.container_update(self.context, container,
                                          {'memory': 512})
    mock_save.assert_called_with(self.context)
    mock_update.assert_called_once_with(self.context, container)
@mock.patch.object(fake_driver, 'update')
def test_container_update_failed(self, mock_update):
    """A DockerError from driver.update propagates to the caller."""
    container = Container(self.context, **utils.get_test_container())
    mock_update.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_update,
                      self.context, container, {})
@mock.patch.object(fake_driver, 'get_websocket_url')
@mock.patch.object(Container, 'save')
def test_container_attach_successful(self, mock_save,
                                     mock_get_websocket_url):
    """container_attach fetches the websocket URL from the driver and
    persists the container."""
    container = Container(self.context, **utils.get_test_container())
    mock_get_websocket_url.return_value = "ws://test"
    self.compute_manager.container_attach(self.context, container)
    mock_get_websocket_url.assert_called_once_with(self.context, container)
    mock_save.assert_called_once_with(self.context)
@mock.patch.object(fake_driver, 'get_websocket_url')
def test_container_attach_failed(self, mock_get_websocket_url):
    """Any exception from get_websocket_url is surfaced as ZunException."""
    container = Container(self.context, **utils.get_test_container())
    mock_get_websocket_url.side_effect = Exception
    self.assertRaises(exception.ZunException,
                      self.compute_manager.container_attach,
                      self.context, container)
@mock.patch.object(fake_driver, 'resize')
def test_container_resize(self, mock_resize):
    """container_resize forwards the tty height/width strings verbatim."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.container_resize(
        self.context, container, "100", "100")
    mock_resize.assert_called_once_with(
        self.context, container, "100", "100")
@mock.patch.object(fake_driver, 'resize')
def test_container_resize_failed(self, mock_resize):
    """A DockerError from driver.resize propagates to the caller."""
    container = Container(self.context, **utils.get_test_container())
    mock_resize.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_resize,
                      self.context, container, "100", "100")
@mock.patch.object(fake_driver, 'inspect_image')
@mock.patch.object(Image, 'save')
@mock.patch('zun.image.driver.pull_image')
def test_image_pull(self, mock_pull, mock_save, mock_inspect):
    """When pull_image reports the image already loaded (second tuple
    element True), _do_image_pull inspects "repo:tag" and saves the
    Image record without loading from disk."""
    image = Image(self.context, **utils.get_test_image())
    ret = {'image': 'repo', 'path': 'out_path', 'driver': 'glance'}
    mock_pull.return_value = ret, True
    mock_inspect.return_value = {'Id': 'fake-id', 'Size': 512}
    self.compute_manager._do_image_pull(self.context, image)
    mock_pull.assert_any_call(self.context, image.repo, image.tag)
    mock_save.assert_called_once()
    mock_inspect.assert_called_once_with(image.repo + ":" + image.tag)
@mock.patch.object(fake_driver, 'load_image')
@mock.patch.object(fake_driver, 'inspect_image')
@mock.patch.object(Image, 'save')
@mock.patch('zun.image.driver.pull_image')
def test_image_pull_not_loaded(self, mock_pull, mock_save,
                               mock_inspect, mock_load):
    """When pull_image reports the image NOT yet loaded (second tuple
    element False), _do_image_pull additionally loads it from the
    downloaded path before inspecting."""
    image = Image(self.context, **utils.get_test_image())
    repo_tag = image.repo + ":" + image.tag
    ret = {'image': 'repo', 'path': 'out_path', 'driver': 'glance'}
    mock_pull.return_value = ret, False
    mock_inspect.return_value = {'Id': 'fake-id', 'Size': 512}
    self.compute_manager._do_image_pull(self.context, image)
    mock_pull.assert_any_call(self.context, image.repo, image.tag)
    mock_save.assert_called_once()
    mock_inspect.assert_called_once_with(repo_tag)
    mock_load.assert_called_once_with(ret['path'])
@mock.patch.object(fake_driver, 'execute_resize')
def test_container_exec_resize(self, mock_resize):
    """container_exec_resize forwards exec id and dimensions verbatim
    (no context argument reaches the driver)."""
    self.compute_manager.container_exec_resize(
        self.context, 'fake_exec_id', "100", "100")
    mock_resize.assert_called_once_with('fake_exec_id', "100", "100")
@mock.patch.object(fake_driver, 'execute_resize')
def test_container_exec_resize_failed(self, mock_resize):
    """A DockerError from driver.execute_resize propagates to the caller."""
    mock_resize.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager.container_exec_resize,
                      self.context, 'fake_exec_id', "100", "100")
@mock.patch('zun.image.driver.upload_image_data')
@mock.patch.object(fake_driver, 'get_image')
@mock.patch.object(fake_driver, 'commit')
def test_container_commit(self, mock_commit,
                          mock_get_image, mock_upload_image_data):
    """_do_container_commit commits the container to repo:tag via the
    driver."""
    container = Container(self.context, **utils.get_test_container())
    mock_get_image_response = mock.MagicMock()
    # An empty string stands in for the raw image byte stream.
    mock_get_image_response.data = StringIO().read()
    mock_get_image.return_value = mock_get_image_response
    mock_upload_image_data.return_value = mock.MagicMock()
    self.compute_manager._do_container_commit(self.context,
                                              mock_get_image_response,
                                              container, 'repo', 'tag')
    mock_commit.assert_called_once_with(
        self.context, container, 'repo', 'tag')
@mock.patch.object(fake_driver, 'commit')
def test_container_commit_failed(self, mock_commit):
    """A DockerError from driver.commit propagates to the caller.

    NOTE(review): _do_container_commit is called here with one fewer
    argument than in test_container_commit (no image-response object) --
    confirm against the manager's signature.
    """
    container = Container(self.context, **utils.get_test_container())
    mock_commit.side_effect = exception.DockerError
    self.assertRaises(exception.DockerError,
                      self.compute_manager._do_container_commit,
                      self.context, container, 'repo', 'tag')
@mock.patch.object(fake_driver, 'network_detach')
def test_container_network_detach(self, mock_detach):
    """network_detach reaches the driver; the third argument is not
    asserted exactly (mock.ANY), only that some value is passed."""
    container = Container(self.context, **utils.get_test_container())
    self.compute_manager.network_detach(self.context, container, 'network')
    mock_detach.assert_called_once_with(self.context, container, mock.ANY)
| 51.362126 | 79 | 0.643467 | 3,401 | 30,920 | 5.547486 | 0.062041 | 0.089786 | 0.069168 | 0.045317 | 0.854084 | 0.8376 | 0.82048 | 0.796417 | 0.762336 | 0.727673 | 0 | 0.002831 | 0.257471 | 30,920 | 601 | 80 | 51.447587 | 0.818938 | 0.018564 | 0 | 0.612053 | 0 | 0 | 0.080847 | 0.014244 | 0 | 0 | 0 | 0 | 0.169492 | 1 | 0.092279 | false | 0 | 0.024482 | 0.001883 | 0.122411 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3c47da12113992dfa492449abb111d351442a1e0 | 26 | py | Python | madoka/__init__.py | moskomule/madoka | ff8ae073dd3b2fa288e16a7ecd7496560246c57a | [
"MIT"
] | 2 | 2020-03-29T18:12:30.000Z | 2021-10-02T08:00:01.000Z | madoka/__init__.py | moskomule/madoka | ff8ae073dd3b2fa288e16a7ecd7496560246c57a | [
"MIT"
] | null | null | null | madoka/__init__.py | moskomule/madoka | ff8ae073dd3b2fa288e16a7ecd7496560246c57a | [
"MIT"
] | null | null | null | from .figure import Figure | 26 | 26 | 0.846154 | 4 | 26 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 26 | 1 | 26 | 26 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3c5e6d2098071e995425b8213a6ecf7057e8a40e | 146 | py | Python | torchcule/atari/__init__.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 208 | 2019-05-25T21:35:35.000Z | 2022-03-28T17:33:13.000Z | torchcule/atari/__init__.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 30 | 2019-07-27T08:23:54.000Z | 2022-03-24T18:17:36.000Z | torchcule/atari/__init__.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 27 | 2019-07-27T05:42:23.000Z | 2022-03-05T03:08:52.000Z | from torchcule.atari.env import Env
from torchcule.atari.rom import Rom
from torchcule.atari.state import State
__all__ = ['Env', 'Rom', 'State']
| 29.2 | 39 | 0.767123 | 22 | 146 | 4.909091 | 0.363636 | 0.361111 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116438 | 146 | 4 | 40 | 36.5 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0.075342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b1b2436e02496377f1bd9f79b0642c8584b6b19a | 2,873 | py | Python | etl/parsers/etw/Microsoft_Windows_Mobile_Broadband_Experience_Api.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_Mobile_Broadband_Experience_Api.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_Mobile_Broadband_Experience_Api.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Mobile-Broadband-Experience-Api
GUID : 2e2bbb16-0c36-4b9b-a567-40924a199fd5
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1000, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1000_0(Etw):
pattern = Struct(
"funcName" / WString
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1001, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1001_0(Etw):
pattern = Struct(
"funcName" / WString,
"errorDetails" / WString
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1002, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1002_0(Etw):
pattern = Struct(
"funcName" / WString,
"errorDetails" / WString
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1003, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1003_0(Etw):
pattern = Struct(
"funcName" / WString,
"error" / Int32ul,
"hresult" / Int32sl
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1004, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1004_0(Etw):
pattern = Struct(
"funcName" / WString,
"error" / Int32ul,
"hresult" / Int32sl
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1005, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1005_0(Etw):
pattern = Struct(
"funcName" / WString,
"error" / Int32ul,
"hresult" / Int32sl
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1006, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1006_0(Etw):
pattern = Struct(
"funcName" / WString,
"error" / Int32ul,
"hresult" / Int32sl
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1007, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1007_0(Etw):
pattern = Struct(
"funcName" / WString
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1008, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1008_0(Etw):
pattern = Struct(
"funcName" / WString
)
@declare(guid=guid("2e2bbb16-0c36-4b9b-a567-40924a199fd5"), event_id=1009, version=0)
class Microsoft_Windows_Mobile_Broadband_Experience_Api_1009_0(Etw):
pattern = Struct(
"funcName" / WString,
"error" / Int32ul,
"hresult" / Int32sl
)
| 30.892473 | 123 | 0.714584 | 346 | 2,873 | 5.702312 | 0.176301 | 0.089204 | 0.122656 | 0.172833 | 0.850482 | 0.850482 | 0.809934 | 0.809934 | 0.809934 | 0.521034 | 0 | 0.153525 | 0.16568 | 2,873 | 92 | 124 | 31.228261 | 0.669587 | 0.040376 | 0 | 0.484848 | 0 | 0 | 0.190684 | 0.131004 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.060606 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b1f1b31667b9d521c8a5d3bed6c9ca8c80fff174 | 41 | py | Python | tests/dkt/__init__.py | bigdata-ustc/XKT | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | [
"MIT"
] | 17 | 2019-09-11T12:00:05.000Z | 2022-03-30T04:41:05.000Z | tests/gkt/__init__.py | bigdata-ustc/XKT | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | [
"MIT"
] | 1 | 2021-10-24T01:13:33.000Z | 2021-10-24T02:03:26.000Z | tests/dkt/__init__.py | bigdata-ustc/XKT | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | [
"MIT"
] | 6 | 2019-09-13T07:50:07.000Z | 2022-03-12T00:22:11.000Z | # coding: utf-8
# 2021/8/24 @ tongshiwei
| 13.666667 | 24 | 0.658537 | 7 | 41 | 3.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 0.170732 | 41 | 2 | 25 | 20.5 | 0.558824 | 0.878049 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5907220e4040dc6b5426bc0278e3c3fa6d692847 | 27 | py | Python | shap/actions/__init__.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | 16,097 | 2016-12-01T20:01:26.000Z | 2022-03-31T20:27:40.000Z | shap/actions/__init__.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | 2,217 | 2017-09-18T20:06:45.000Z | 2022-03-31T21:00:25.000Z | shap/actions/__init__.py | zduey/shap | 1bb8203f2d43f7552396a5f26167a258cbdc505c | [
"MIT"
] | 2,634 | 2017-06-29T21:30:46.000Z | 2022-03-30T07:30:36.000Z | from ._action import Action | 27 | 27 | 0.851852 | 4 | 27 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 27 | 1 | 27 | 27 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3cc9ca70ead93369810abdc52257b9a5bd29fa0e | 256 | py | Python | files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/msg/__init__.py | Filipe-Douglas-Slam/slam_lidar_kinect | 4ac2c9555f939ba3bc3e97314eb611bdd9df5f27 | [
"MIT"
] | null | null | null | files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/msg/__init__.py | Filipe-Douglas-Slam/slam_lidar_kinect | 4ac2c9555f939ba3bc3e97314eb611bdd9df5f27 | [
"MIT"
] | 1 | 2021-07-08T10:26:06.000Z | 2021-07-08T10:31:11.000Z | files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/msg/__init__.py | Filipe-Douglas-Slam/slam_lidar_kinect | 4ac2c9555f939ba3bc3e97314eb611bdd9df5f27 | [
"MIT"
] | null | null | null | from ._ContactState import *
from ._ContactsState import *
from ._LinkState import *
from ._LinkStates import *
from ._ModelState import *
from ._ModelStates import *
from ._ODEJointProperties import *
from ._ODEPhysics import *
from ._WorldState import *
| 25.6 | 34 | 0.789063 | 27 | 256 | 7.148148 | 0.407407 | 0.414508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140625 | 256 | 9 | 35 | 28.444444 | 0.877273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a736b6d19a2771c626c550644f3dc1c413a0b8b9 | 86 | py | Python | tests/artist/conftest.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | null | null | null | tests/artist/conftest.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | 12 | 2018-12-21T22:13:33.000Z | 2019-08-03T20:03:19.000Z | tests/artist/conftest.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | null | null | null | from tests.artist.fixtures import * # noqa
from tests.user.fixtures import * # noqa
| 28.666667 | 43 | 0.744186 | 12 | 86 | 5.333333 | 0.583333 | 0.28125 | 0.5625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162791 | 86 | 2 | 44 | 43 | 0.888889 | 0.104651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
595b66bed474955f4f90cfd6f97ae2ee880bbaba | 156 | py | Python | cloud_scanner_generic/config/__init__.py | Microsoft/cloud-scanner-generic | ef3cc01d64fd1857245049ce4ec23f1856eee46f | [
"MIT"
] | 4 | 2019-06-22T14:33:43.000Z | 2021-04-20T16:18:28.000Z | cloud_scanner_generic/config/__init__.py | microsoft/cloud-scanner-generic | ef3cc01d64fd1857245049ce4ec23f1856eee46f | [
"MIT"
] | null | null | null | cloud_scanner_generic/config/__init__.py | microsoft/cloud-scanner-generic | ef3cc01d64fd1857245049ce4ec23f1856eee46f | [
"MIT"
] | 5 | 2019-11-03T22:54:49.000Z | 2020-08-05T14:39:06.000Z | from cloud_scanner_generic.config.elastic_search_config import (
ElasticSearchConfig)
from cloud_scanner_generic.config.mysql_config import MySqlConfig
| 39 | 65 | 0.878205 | 19 | 156 | 6.842105 | 0.578947 | 0.138462 | 0.246154 | 0.353846 | 0.446154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 156 | 3 | 66 | 52 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
596b317006c77ad94ee852be44845c5e7d939a6b | 45 | py | Python | src/add_on_class/__init__.py | BehzadShayegh/abstract-object-decorator | 7228cba994ed203e647e8d74977e0c8670b9513e | [
"MIT"
] | 1 | 2022-02-10T07:24:22.000Z | 2022-02-10T07:24:22.000Z | src/add_on_class/__init__.py | BehzadShayegh/add-on-class | 7228cba994ed203e647e8d74977e0c8670b9513e | [
"MIT"
] | null | null | null | src/add_on_class/__init__.py | BehzadShayegh/add-on-class | 7228cba994ed203e647e8d74977e0c8670b9513e | [
"MIT"
] | null | null | null | from .add_on_class import AOC,covering_around | 45 | 45 | 0.888889 | 8 | 45 | 4.625 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 45 | 1 | 45 | 45 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
59a3e48a1d8f54bf174bf37e15d215f62651bcee | 235 | py | Python | qupy/framing/__init__.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | qupy/framing/__init__.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | qupy/framing/__init__.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | class AbstractFraming:
def encode_frame(self, bytes_buf):
raise NotImplementedError()
def decode_frame(self, byte):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
| 23.5 | 38 | 0.659574 | 22 | 235 | 6.909091 | 0.590909 | 0.473684 | 0.355263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.26383 | 235 | 9 | 39 | 26.111111 | 0.878613 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
ab83d11c6b110189d666b58f844d955438309bdc | 41 | py | Python | terraform_compliance/__init__.py | karthikeayan/terraform-compliance | 55223a65fd5987062f6a9bfae4c71f6c99c19d7c | [
"MIT"
] | 1 | 2021-05-30T16:56:49.000Z | 2021-05-30T16:56:49.000Z | terraform_compliance/__init__.py | karthikeayan/terraform-compliance | 55223a65fd5987062f6a9bfae4c71f6c99c19d7c | [
"MIT"
] | null | null | null | terraform_compliance/__init__.py | karthikeayan/terraform-compliance | 55223a65fd5987062f6a9bfae4c71f6c99c19d7c | [
"MIT"
] | 2 | 2019-06-05T04:05:31.000Z | 2021-05-30T16:58:16.000Z | from terraform_validate import Validator
| 20.5 | 40 | 0.902439 | 5 | 41 | 7.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.972973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e65e3fff15e9772972881b581c7ec3a5beb25810 | 27 | py | Python | Pacote Dowload/pythonProject/aula019ex091.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | [
"MIT"
] | null | null | null | Pacote Dowload/pythonProject/aula019ex091.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | [
"MIT"
] | null | null | null | Pacote Dowload/pythonProject/aula019ex091.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | [
"MIT"
] | null | null | null | from random import randint
| 13.5 | 26 | 0.851852 | 4 | 27 | 5.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
051712176a5298dd9e26bd8bfec23f267aeaa23c | 8,647 | py | Python | tests/test_api.py | hadtrindade/controle-contas | 237135a004e14c964819993741d4e0792803626a | [
"MIT"
] | null | null | null | tests/test_api.py | hadtrindade/controle-contas | 237135a004e14c964819993741d4e0792803626a | [
"MIT"
] | 27 | 2021-10-04T02:44:47.000Z | 2021-11-17T02:15:30.000Z | tests/test_api.py | hadtrindade/controle-contas | 237135a004e14c964819993741d4e0792803626a | [
"MIT"
] | null | null | null | from flask import url_for
def test_para_inserir_usuários_deve_retornar_status_code_201(client, token):
    """POSTing a single valid user payload returns HTTP 201."""
    data = {
        "username": "teste",
        "first_name": "teste",
        "last_name": "teste",
        "email": "teste@test.com",
        "password": "1234",
        "admin": "true",
    }
    response = client.post(url_for("api.new_users"), json=data, headers=token)
    assert response.status_code == 201
def test_para_inserir_lista_de_usuarios(client, token):
    """The same endpoint also accepts a JSON list of users (bulk insert)."""
    data = [
        {
            "username": "teste_2",
            "first_name": "teste",
            "last_name": "teste",
            "email": "teste_2@test.com",
            "password": "1234",
            "admin": "false",
        },
        {
            "username": "teste_3",
            "first_name": "teste",
            "last_name": "teste",
            "email": "teste_3@test.com",
            "password": "1234",
            "admin": "false",
        },
    ]
    response = client.post(url_for("api.new_users"), json=data, headers=token)
    assert response.status_code == 201
def test_para_get_users_deve_retornar_codigo_200(client, token):
    """Listing users through the API answers HTTP 200."""
    response = client.get(url_for("api.get_users"), headers=token)
    assert response.status_code == 200
def test_para_inserir_com_payload_invalido_deve_retornar_status_code_422(
    client, token
):
    """An invalid payload (wrong field types, malformed email) is rejected
    with HTTP 422."""
    data = {
        "username": 1,
        "first_name": "teste",
        "last_name": "teste",
        "email": "teste2test.com",
        "password": "1234",
        "admin": "a",
    }
    user = client.post(url_for("api.new_users"), json=data, headers=token)
    assert user.status_code == 422
def test_cosultar_uma_unica_users(client, token):
    """Fetching a single user by primary key answers HTTP 200.

    NOTE(review): "cosultar" is a typo for "consultar"; the name is kept
    unchanged to avoid altering the test's public identifier.
    """
    reply = client.get(url_for("api.get_user", pk=2), headers=token)
    assert reply.status_code == 200
def test_update_user(client, token):
    """PUTting a full payload to an existing user (pk=2) answers HTTP 200."""
    data = {
        "username": "teste_update",
        "first_name": "teste",
        "last_name": "teste",
        "email": "teste1@test.com",
        "password": "12343",
        "admin": "false",
    }
    response = client.put(
        url_for("api.update_user", pk=2), json=data, headers=token
    )
    assert response.status_code == 200
def test_update_de_users_deve_falhar_status_code_404(client, token):
    """Updating a non-existent user (pk=10) must answer HTTP 404."""
    changes = {
        "username": "teste",
        "first_name": "teste",
        "last_name": "teste",
        "email": "teste@test.com",
        "password": "1234",
        "admin": "true",
    }
    resp = client.put(url_for("api.update_user", pk=10), json=changes, headers=token)
    assert resp.status_code == 404
def test_delete_users_deve_retornar_200(client, token):
    """Deleting an existing user (pk=2) must answer HTTP 200."""
    resp = client.delete(url_for("api.del_user", pk=2), headers=token)
    assert resp.status_code == 200
def test_delete_users_deve_retornar_404(client, token):
    """Deleting a non-existent user (pk=10) must answer HTTP 404."""
    resp = client.delete(url_for("api.del_user", pk=10), headers=token)
    assert resp.status_code == 404
def test_para_inserir_sources_de_teste(client, token):
    """Creating a source must echo the submitted payload back as JSON."""
    source = {
        "id": 1,
        "description": "teste",
        "id_user": 1,
    }
    resp = client.post(url_for("api.new_sources"), json=source, headers=token)
    assert resp.json == source
def test_para_inserir_lista_de_sources(client, token):
    """Creating several sources at once must echo the list back as JSON."""
    sources = [
        {
            "id": 2,
            "description": "test_de_moi",
            "id_user": 1,
        },
        {
            "id": 3,
            "description": "teste_test_de_moi",
            "id_user": 1,
        },
    ]
    resp = client.post(url_for("api.new_sources"), json=sources, headers=token)
    assert resp.json == sources
def test_para_consultar_sources_deve_retornar_status_code_200(client, token):
    """Listing sources must answer HTTP 200."""
    resp = client.get(url_for("api.get_sources"), headers=token)
    assert resp.status_code == 200
def test_com_payload_invalido_deve_retornar_status_code_422(client, token):
    """A source payload with swapped field types must be rejected with 422."""
    bad_source = {
        "description": 1,   # should be a string
        "id_user": "a",     # should be an int
    }
    resp = client.post(url_for("api.new_sources"), json=bad_source, headers=token)
    assert resp.status_code == 422
def test_cosultar_um_unico_source(client, token):
    """Fetching source pk=1 must return the record created earlier."""
    expected = {
        "id": 1,
        "description": "teste",
        "id_user": 1,
    }
    # NOTE(review): GET goes through the "api.update_source" route — the
    # endpoint apparently serves both verbs; confirm against the API module.
    resp = client.get(url_for("api.update_source", pk=1), headers=token)
    assert resp.json[0] == expected
def test_update_source(client, token):
    """Updating an existing source (pk=1) must answer HTTP 200."""
    changes = {
        "id": 1,
        "description": "teste_update",
        "id_user": 1,
    }
    resp = client.put(url_for("api.update_source", pk=1), json=changes, headers=token)
    assert resp.status_code == 200
def test_update_de_source_deve_falhar_status_code_404(client, token):
    """Updating a non-existent source (pk=10) must answer HTTP 404."""
    changes = {
        "id": 1,
        "description": "teste_update",
        "id_user": 1,
    }
    resp = client.put(url_for("api.update_source", pk=10), json=changes, headers=token)
    assert resp.status_code == 404
def test_para_consultar_entry_deve_retornar_status_code_204_porque_esta_vazio(
    client, token
):
    """With no entries created yet, listing entries must answer HTTP 204."""
    resp = client.get(url_for("api.get_entries"), headers=token)
    assert resp.status_code == 204
def test_para_inserir_entry_de_testes(client, token):
    """Creating one entry must answer HTTP 201."""
    entry = {
        "description": "teste",
        "value": 10,
        "quantum": 1,
        "id_source": 1,
        "id_user": 1,
        "revenue": "true",
    }
    resp = client.post(url_for("api.new_entries"), json=entry, headers=token)
    assert resp.status_code == 201
def test_para_inserir_lista_entries(client, token):
    """Creating several entries in one POST must answer HTTP 201."""
    entries = [
        {
            "description": "teste1",
            "value": 102,
            "quantum": 13,
            "id_source": 1,
            "id_user": 1,
            "revenue": "true",
        },
        {
            "description": "teste1",
            "value": 105,
            "quantum": 12,
            "id_source": 1,
            "id_user": 1,
            "revenue": "false",
        },
    ]
    resp = client.post(url_for("api.new_entries"), json=entries, headers=token)
    assert resp.status_code == 201
def test_para_consultar_entries_deve_retornar_status_code_200(client, token):
    """Listing entries (now non-empty) must answer HTTP 200.

    Renamed: the original def duplicated
    ``test_para_consultar_sources_deve_retornar_status_code_200`` (defined
    earlier for sources), so the later definition silently shadowed the
    earlier one and pytest never ran the sources test.  The new name also
    matches what the body actually queries (entries, not sources).
    """
    response = client.get(url_for("api.get_entries"), headers=token)
    assert response.status_code == 200
def test_com_payload_invalido_não_deve_adicionar_uma_entries(client, token):
    """An entry payload with wrong field types must be rejected with 422."""
    bad_entry = {
        "description": 30,  # should be a string
        "value": "a",       # should be numeric
        "quantum": 1,
        "id_source": 1,
        "id_user": 1,
        "revenue": "true",
    }
    resp = client.post(url_for("api.new_entries"), json=bad_entry, headers=token)
    assert resp.status_code == 422
def test_cosultar_uma_unica_entries(client, token):
    """Fetching a single existing entry (pk=1) must answer HTTP 200."""
    resp = client.get(url_for("api.get_entry", pk=1), headers=token)
    assert resp.status_code == 200
def test_update_entry(client, token):
    """Updating an existing entry (pk=1) must answer HTTP 200."""
    changes = {
        "description": "teste_update",
        "value": 10,
        "quantum": 1,
        "id_source": 1,
        "id_user": 1,
        "revenue": "true",
    }
    resp = client.put(url_for("api.update_entry", pk=1), json=changes, headers=token)
    assert resp.status_code == 200
def test_update_de_entries_deve_falhar_status_code_404(client, token):
    """Updating a non-existent entry (pk=10) must answer HTTP 404."""
    changes = {
        "description": "teste",
        "value": 10,
        "quantum": 1,
        "id_source": 1,
        "id_user": 1,
        "revenue": "true",
    }
    resp = client.put(url_for("api.update_entry", pk=10), json=changes, headers=token)
    assert resp.status_code == 404
def test_delete_entries_deve_retornar_200(client, token):
    """Deleting entry pk=1 must answer HTTP 200."""
    resp = client.delete(url_for("api.del_entry", pk=1), headers=token)
    assert resp.status_code == 200
def test_delete_2_entries_deve_retornar_200(client, token):
    """Deleting entry pk=2 must answer HTTP 200."""
    resp = client.delete(url_for("api.del_entry", pk=2), headers=token)
    assert resp.status_code == 200
def test_delete_3_entries_deve_retornar_200(client, token):
    """Deleting entry pk=3 must answer HTTP 200."""
    resp = client.delete(url_for("api.del_entry", pk=3), headers=token)
    assert resp.status_code == 200
def test_delete_entry_deve_retornar_404(client, token):
    """Deleting a non-existent entry (pk=10) must answer HTTP 404.

    Renamed: the original name ``test_delete_source_deve_retornar_404`` was
    wrong twice over — the endpoint exercised here is ``api.del_entry``, not
    a source route, and the name collided with the later source-deletion
    test, whose redefinition silently shadowed this one under pytest.
    """
    response = client.delete(url_for("api.del_entry", pk=10), headers=token)
    assert response.status_code == 404
def test_delete_source_deve_retornar_200(client, token):
    """Deleting an existing source (pk=1) must answer HTTP 200."""
    resp = client.delete(url_for("api.del_source", pk=1), headers=token)
    assert resp.status_code == 200
def test_delete_source_deve_retornar_404(client, token):
    """Deleting a non-existent source (pk=10) must answer HTTP 404."""
    resp = client.delete(url_for("api.del_source", pk=10), headers=token)
    assert resp.status_code == 404
| 26.606154 | 78 | 0.61212 | 1,068 | 8,647 | 4.667603 | 0.087079 | 0.072217 | 0.054162 | 0.140822 | 0.869408 | 0.833902 | 0.802808 | 0.775326 | 0.720361 | 0.682046 | 0 | 0.037508 | 0.253845 | 8,647 | 324 | 79 | 26.688272 | 0.735121 | 0 | 0 | 0.54902 | 0 | 0 | 0.170001 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.117647 | false | 0.023529 | 0.003922 | 0 | 0.121569 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0579a7833d7d24076eb1b98de96521e2dc59e9ba | 35 | py | Python | np_ml/affinity_propagation/__init__.py | wwwy-binary/NP_ML | a51b2f3cd753e4a8b5a67bec343c3e75b3fe52d8 | [
"MIT"
] | 237 | 2018-03-17T08:50:18.000Z | 2022-02-24T12:57:46.000Z | np_ml/affinity_propagation/__init__.py | leizhang258/NP_ML | 472008b2a0b6949bab82f037bf6010b2241c8398 | [
"MIT"
] | 2 | 2019-01-28T03:30:31.000Z | 2021-03-03T01:47:38.000Z | np_ml/affinity_propagation/__init__.py | leizhang258/NP_ML | 472008b2a0b6949bab82f037bf6010b2241c8398 | [
"MIT"
] | 79 | 2018-03-21T12:22:09.000Z | 2021-12-17T02:39:09.000Z | from .affinity_propagation import * | 35 | 35 | 0.857143 | 4 | 35 | 7.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 35 | 1 | 35 | 35 | 0.90625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
058d0158af80b8e3443072e6d52b95cad1bb299a | 48 | py | Python | h5Nastran/h5Nastran/h5/nastran/input/coordinate_system/__init__.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | h5Nastran/h5Nastran/h5/nastran/input/coordinate_system/__init__.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | h5Nastran/h5Nastran/h5/nastran/input/coordinate_system/__init__.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z | from .coordinate_system import CoordinateSystem
| 24 | 47 | 0.895833 | 5 | 48 | 8.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 48 | 1 | 48 | 48 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
558aad199e9eda65d9da415011e376d80903c5ab | 29 | py | Python | src/affnist/__init__.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | src/affnist/__init__.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | src/affnist/__init__.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | from .dataset import AffNist
| 14.5 | 28 | 0.827586 | 4 | 29 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e949029201defcc33b7ba07b9221597aa874eb2c | 13,747 | py | Python | deps/liblmdb/test/test_intintdb.py | ncloudioj/hustle | 890793b996d5ba0660f4f16dd71c88abc86ae5b5 | [
"MIT"
] | 88 | 2015-01-07T16:57:29.000Z | 2021-05-31T15:11:45.000Z | deps/liblmdb/test/test_intintdb.py | ncloudioj/hustle | 890793b996d5ba0660f4f16dd71c88abc86ae5b5 | [
"MIT"
] | 3 | 2015-08-17T09:42:20.000Z | 2018-01-12T18:31:12.000Z | deps/liblmdb/test/test_intintdb.py | ncloudioj/hustle | 890793b996d5ba0660f4f16dd71c88abc86ae5b5 | [
"MIT"
] | 10 | 2015-04-05T14:41:32.000Z | 2018-12-02T20:46:57.000Z | # -*- coding: utf-8 -*-
import mdb
from unittest import TestCase
class TestDB(TestCase):
    """Integration tests for an integer-key/integer-value LMDB database
    exposed through the ``mdb`` binding.

    Each test opens 'test_db' inside a shared environment created in
    setUp.  Tests that need a clean slate call drop_mdb() first; the
    others deliberately build on data written by earlier ``db.put``
    calls within the same test — the exact sequence of puts, commits
    and flag combinations is load-bearing.
    """

    def setUp(self):
        """Create the on-disk test directory and open the LMDB environment."""
        import os
        import errno
        self.path = './testdbmii'
        try:
            os.makedirs(self.path)
        except OSError as e:
            # Directory left over from a previous run is fine; anything
            # else (permissions, path is a file, ...) should propagate.
            if e.errno == errno.EEXIST and os.path.isdir(self.path):
                pass
            else:
                raise
        # Small map, write-map + no-sync for speed; durability is
        # irrelevant for a throwaway test environment.
        self.env = mdb.Env(self.path, mapsize=1 * mdb.MB, max_dbs=8,
                           flags=mdb.MDB_WRITEMAP|mdb.MDB_NOSYNC)

    def tearDown(self):
        """Close the environment and remove the test directory tree."""
        import shutil
        self.env.close()
        shutil.rmtree(self.path)

    def drop_mdb(self):
        """Helper: empty 'test_db' so a test starts from a clean database."""
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.drop(txn, 0)  # 0: empty the database but keep it open
        txn.commit()
        db.close()

    def test_drop(self):
        """After drop_mdb() the database must iterate as empty."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        items = db.items(txn)
        self.assertRaises(StopIteration, items.next)
        txn.commit()
        db.close()

    def test_put(self):
        """A negative key/value pair round-trips through put/get."""
        # all keys must be sorted
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, -11, -11)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual(db.get(txn, -11), -11)
        txn.commit()
        db.close()

    def test_mget(self):
        """mget returns one value per requested key, in request order."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 1, 1)
        db.put(txn, 1, 11)
        db.put(txn, 2, 2)
        db.put(txn, 2, 21)
        db.put(txn, 2, 22)
        db.put(txn, 3, 3)
        db.put(txn, 4, 4)
        db.put(txn, 5, 5)
        txn.commit()
        txn = self.env.begin_txn()
        # For duplicate keys only the first value is returned.
        self.assertEqual(list(db.mget(txn, [1, 2, 3, 5])),
                         [1, 2, 3, 5])
        self.assertEqual(list(db.mget(txn, [3, 1, 2, 5])),
                         [3, 1, 2, 5])

    def test_get_neighbours(self):
        """get_neighbours returns the (lower, upper) bracketing pairs for a
        key, clamping at both ends of the key range; mgetex fills gaps with
        the nearest value."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 1, 1)
        db.put(txn, 5, 2)
        db.put(txn, 7, 3)
        db.put(txn, 8, 5)
        txn.commit()
        txn = self.env.begin_txn()
        # Below the smallest key: both neighbours clamp to (1, 1).
        self.assertEqual(db.get_neighbours(txn, 0),
                         ((1, 1), (1, 1)))
        # Exact hit: both neighbours are the key itself.
        self.assertEqual(db.get_neighbours(txn, 1),
                         ((1, 1), (1, 1)))
        self.assertEqual(db.get_neighbours(txn, 2),
                         ((1, 1), (5, 2)))
        self.assertEqual(db.get_neighbours(txn, 3),
                         ((1, 1), (5, 2)))
        self.assertEqual(db.get_neighbours(txn, 4),
                         ((1, 1), (5, 2)))
        self.assertEqual(db.get_neighbours(txn, 5),
                         ((5, 2), (5, 2)))
        self.assertEqual(db.get_neighbours(txn, 6),
                         ((5, 2), (7, 3)))
        self.assertEqual(db.get_neighbours(txn, 7),
                         ((7, 3), (7, 3)))
        self.assertEqual(db.get_neighbours(txn, 8),
                         ((8, 5), (8, 5)))
        # Above the largest key: both neighbours clamp to (8, 5).
        self.assertEqual(db.get_neighbours(txn, 9),
                         ((8, 5), (8, 5)))
        self.assertListEqual(list(db.mgetex(txn, range(10))),
                             [1, 1, 1, 1, 1, 2, 2, 3, 5, 5])

    def test_contains(self):
        """contains() reports True for a key that was put."""
        # all keys must be sorted
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 1024, 2)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertTrue(db.contains(txn, 1024))
        db.close()

    def test_get_exception(self):
        """get() with a missing key returns the supplied default instead
        of raising."""
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        self.assertEqual(db.get(txn, 1321312312, 12), 12)
        txn.commit()
        db.close()

    def test_put_duplicate(self):
        """Duplicate values under one key come back sorted; 16-bit keys and
        64-bit values are honoured via key_inttype/value_inttype."""
        # all values must be sorted as well
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP,
            key_inttype=mdb.MDB_INT_16, value_inttype=mdb.MDB_INT_64)
        db.put(txn, 13, 1312321313123)
        db.put(txn, 13, 1431231231231)
        db.put(txn, 13123, 1431231231231)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual([value for value in db.get_dup(txn, 13)],
                         [1312321313123, 1431231231231])
        self.assertEqual(db.get(txn, 13123), 1431231231231)
        txn.commit()
        db.close()

    def test_get_less_than(self):
        """Range queries (eq/lt/gt/le/ge/ne/range) yield (key, value) pairs
        in sorted order, including duplicates, with default int types."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 1, 1)
        db.put(txn, 1, 11)
        db.put(txn, 2, 2)
        db.put(txn, 2, 21)
        db.put(txn, 2, 22)
        db.put(txn, 3, 3)
        db.put(txn, 4, 4)
        db.put(txn, 5, 5)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual([value for value in db.get_eq(txn, 2)],
                         [(2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_eq(txn, 3)],
                         [(3, 3)])
        self.assertEqual([value for value in db.get_lt(txn, 1)],
                         [])
        self.assertEqual([value for value in db.get_lt(txn, 3)],
                         [(1, 1), (1, 11), (2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_lt(txn, 2)],
                         [(1, 1), (1, 11)])
        self.assertEqual([value for value in db.get_gt(txn, 2)],
                         [(3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 3)],
                         [(4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 4)],
                         [(5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 5)],
                         [])
        self.assertEqual([value for value in db.get_le(txn, 2)],
                         [(1, 1), (1, 11), (2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_ge(txn, 2)],
                         [(2, 2), (2, 21), (2, 22),
                          (3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_ne(txn, 2)],
                         [(1, 1), (1, 11),
                          (3, 3), (4, 4), (5, 5)])
        # get_range is inclusive on both bounds here: [2, 4].
        self.assertEqual([value for value in db.get_range(txn, 2, 4)],
                         [(2, 2), (2, 21), (2, 22),
                          (3, 3), (4, 4)])
        txn.commit()
        db.close()

    def test_range_uint8(self):
        """Same range-query matrix as test_get_less_than, but with unsigned
        8-bit keys and values."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP,
            key_inttype=mdb.MDB_UINT_8,
            value_inttype=mdb.MDB_UINT_8)
        db.put(txn, 1, 1)
        db.put(txn, 1, 11)
        db.put(txn, 2, 2)
        db.put(txn, 2, 21)
        db.put(txn, 2, 22)
        db.put(txn, 3, 3)
        db.put(txn, 4, 4)
        db.put(txn, 5, 5)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual([value for value in db.get_eq(txn, 2)],
                         [(2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_eq(txn, 3)],
                         [(3, 3)])
        self.assertEqual([value for value in db.get_lt(txn, 1)],
                         [])
        self.assertEqual([value for value in db.get_lt(txn, 3)],
                         [(1, 1), (1, 11), (2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_lt(txn, 2)],
                         [(1, 1), (1, 11)])
        self.assertEqual([value for value in db.get_gt(txn, 2)],
                         [(3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 3)],
                         [(4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 4)],
                         [(5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 5)],
                         [])
        self.assertEqual([value for value in db.get_le(txn, 2)],
                         [(1, 1), (1, 11), (2, 2), (2, 21), (2, 22)])
        self.assertEqual([value for value in db.get_ge(txn, 2)],
                         [(2, 2), (2, 21), (2, 22),
                          (3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_ne(txn, 2)],
                         [(1, 1), (1, 11),
                          (3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_range(txn, 2, 4)],
                         [(2, 2), (2, 21), (2, 22),
                          (3, 3), (4, 4)])
        txn.commit()
        db.close()

    def test_range_int8(self):
        """Range queries with signed 8-bit types: negative duplicate values
        must sort before positive ones."""
        self.drop_mdb()
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP,
            key_inttype=mdb.MDB_INT_8,
            value_inttype=mdb.MDB_INT_8)
        db.put(txn, 1, 1)
        db.put(txn, 1, -11)
        db.put(txn, 2, 2)
        db.put(txn, 2, -21)
        db.put(txn, 2, 22)
        db.put(txn, 3, 3)
        db.put(txn, 4, 4)
        db.put(txn, 5, 5)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual([value for value in db.get_eq(txn, 2)],
                         [(2, -21), (2, 2), (2, 22)])
        self.assertEqual([value for value in db.get_eq(txn, 3)],
                         [(3, 3)])
        self.assertEqual([value for value in db.get_lt(txn, 1)],
                         [])
        self.assertEqual([value for value in db.get_lt(txn, 3)],
                         [(1, -11), (1, 1), (2, -21), (2, 2), (2, 22)])
        self.assertEqual([value for value in db.get_lt(txn, 2)],
                         [(1, -11), (1, 1)])
        self.assertEqual([value for value in db.get_gt(txn, 2)],
                         [(3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 3)],
                         [(4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 4)],
                         [(5, 5)])
        self.assertEqual([value for value in db.get_gt(txn, 5)],
                         [])
        self.assertEqual([value for value in db.get_le(txn, 2)],
                         [(1, -11), (1, 1), (2, -21), (2, 2), (2, 22)])
        self.assertEqual([value for value in db.get_ge(txn, 2)],
                         [(2, -21), (2, 2), (2, 22),
                          (3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_ne(txn, 2)],
                         [(1, -11), (1, 1),
                          (3, 3), (4, 4), (5, 5)])
        self.assertEqual([value for value in db.get_range(txn, 2, 4)],
                         [(2, -21), (2, 2), (2, 22),
                          (3, 3), (4, 4)])
        txn.commit()
        db.close()

    def test_get_all_items(self):
        """items() yields one pair per distinct key; dup_items() yields
        every duplicate."""
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 14, 14)
        db.put(txn, 15, 15)
        db.put(txn, 14, 141)
        txn.commit()
        txn = self.env.begin_txn()
        values = [value for key, value in db.items(txn)]
        self.assertEqual(values,
                         [14, 15])
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual(list(db.dup_items(txn)),
                         [(14, 14), (14, 141), (15, 15)])
        txn.commit()
        db.close()

    def test_delete_by_key(self):
        """delete(key) removes every duplicate stored under that key."""
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 16, 16)
        db.put(txn, 16, 161)
        txn.commit()
        txn = self.env.begin_txn()
        db.delete(txn, 16)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual(db.get(txn, 16), None)
        txn.abort()  # read-only check; nothing to commit
        db.close()

    def test_delete_by_key_value(self):
        """delete(key, value) removes only the matching duplicate, leaving
        the other value under the same key intact."""
        txn = self.env.begin_txn()
        db = self.env.open_db(txn, 'test_db',
            flags=mdb.MDB_CREATE|mdb.MDB_DUPSORT|mdb.MDB_INTEGERKEY|mdb.MDB_INTEGERDUP)
        db.put(txn, 17, 17)
        db.put(txn, 17, 171)
        txn.commit()
        txn = self.env.begin_txn()
        db.delete(txn, 17, 17)
        txn.commit()
        txn = self.env.begin_txn()
        self.assertEqual(db.get(txn, 17), 171)
        txn.commit()
        db.close()
| 40.432353 | 105 | 0.472321 | 1,883 | 13,747 | 3.332979 | 0.067446 | 0.060229 | 0.061185 | 0.14659 | 0.85325 | 0.818196 | 0.795411 | 0.770236 | 0.748566 | 0.748407 | 0 | 0.078118 | 0.372372 | 13,747 | 339 | 106 | 40.551622 | 0.649281 | 0.007493 | 0 | 0.690852 | 0 | 0 | 0.007992 | 0 | 0 | 0 | 0 | 0 | 0.195584 | 1 | 0.050473 | false | 0.003155 | 0.015773 | 0 | 0.069401 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.