hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44c2750f35d1a12168a52dab7c96b6a11db1de1e
| 80
|
py
|
Python
|
toros/__init__.py
|
simon-schaefer/toros
|
26961867ea054a47f69b24512fe4e21beae718ec
|
[
"Apache-2.0"
] | 1
|
2022-03-16T16:21:38.000Z
|
2022-03-16T16:21:38.000Z
|
toros/__init__.py
|
simon-schaefer/toros
|
26961867ea054a47f69b24512fe4e21beae718ec
|
[
"Apache-2.0"
] | null | null | null |
toros/__init__.py
|
simon-schaefer/toros
|
26961867ea054a47f69b24512fe4e21beae718ec
|
[
"Apache-2.0"
] | null | null | null |
import toros.messages
import toros.logging
import toros.tf2
import toros.writer
| 16
| 21
| 0.85
| 12
| 80
| 5.666667
| 0.5
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.1
| 80
| 4
| 22
| 20
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44d894974d804c6ca709c008807d4eb83bc50023
| 5,125
|
py
|
Python
|
maintain_frontend/dependencies/maintain_api/maintain_api_service.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | 1
|
2019-10-03T13:58:29.000Z
|
2019-10-03T13:58:29.000Z
|
maintain_frontend/dependencies/maintain_api/maintain_api_service.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | null | null | null |
maintain_frontend/dependencies/maintain_api/maintain_api_service.py
|
LandRegistry/maintain-frontend
|
d92446a9972ebbcd9a43a7a7444a528aa2f30bf7
|
[
"MIT"
] | 1
|
2021-04-11T05:24:57.000Z
|
2021-04-11T05:24:57.000Z
|
from flask import current_app, g
from maintain_frontend.config import MAINTAIN_API_URL
from maintain_frontend.exceptions import ApplicationError
from maintain_frontend.dependencies.audit_api.audit_api import AuditAPIService
from maintain_frontend.dependencies.session_api.last_created_charge import LastCreatedCharge
from maintain_frontend.services.charge_id_services import calc_display_id
from datetime import datetime
class MaintainApiService(object):
@staticmethod
def add_charge(add_land_charge):
current_app.logger.info("Attempting to add a charge")
try:
# Add Author Information
add_land_charge.author = g.session.user.get_author_info()
charge_json = add_land_charge.to_json()
headers = {'Content-Type': 'application/json', 'X-Trace-ID': g.trace_id}
current_app.logger.info("Posting to maintain-api/local-land-charge")
response = g.requests.post(
'{}/local-land-charge'.format(MAINTAIN_API_URL),
json=charge_json,
headers=headers
)
except Exception as ex:
error_message = 'Failed to send land charge to maintain-api. ' \
'TraceID : {} - Exception - {}' \
.format(g.trace_id, ex)
current_app.logger.exception(error_message)
AuditAPIService.audit_event("Failed to send land charge to maintain-api")
raise ApplicationError(500)
if response.status_code != 202:
current_app.logger.exception(
'Failed to send land charge to maintain-api. '
'TraceID : {} - Status: {}, Message: {}'
.format(g.trace_id, response.status_code, response.text)
)
AuditAPIService.audit_event("Failed to send land charge to maintain-api")
raise ApplicationError(500)
result = response.json()
current_app.logger.info(
"User ID '{}' created charge {}. Entry number: {}, registration date: {}. TraceID={}".format(
g.session.user.id,
result['land_charge_id'],
result['entry_number'],
result['registration_date'],
g.trace_id)
)
last_charge = LastCreatedCharge()
last_charge.charge_id = result['land_charge_id']
last_charge.entry_number = result['entry_number']
last_charge.registration_date = datetime.strptime(result['registration_date'], "%Y-%m-%d").strftime("%d/%m/%Y")
g.session.last_created_charge = last_charge
g.session.commit()
@staticmethod
def update_charge(land_charge):
current_app.logger.info("Attempting to update a charge")
try:
# Update Author Information
land_charge.author = g.session.user.get_author_info()
charge_json = land_charge.to_json()
headers = {'Content-Type': 'application/json', 'X-Trace-ID': g.trace_id}
current_app.logger.info(
"Putting to maintain-api/local-land-charge/{}".format(charge_json['local-land-charge'])
)
response = g.requests.put(
'{}/local-land-charge/{}'.format(MAINTAIN_API_URL, charge_json['local-land-charge']), json=charge_json,
headers=headers)
except Exception as ex:
error_message = 'Failed to send land charge to maintain-api. ' \
'TraceID : {} - Exception - {}' \
.format(g.trace_id, ex)
current_app.logger.exception(error_message)
AuditAPIService.audit_event("Failed to send land charge to maintain-api",
supporting_info={'id': calc_display_id(land_charge.local_land_charge)})
raise ApplicationError(500)
if response.status_code != 202:
current_app.logger.exception(
'Failed to send land charge to maintain-api. '
'TraceID : {} - Status: {}, Message: {}'
.format(g.trace_id, response.status_code, response.text)
)
AuditAPIService.audit_event("Failed to send land charge to maintain-api",
supporting_info={'id': calc_display_id(land_charge.local_land_charge)})
raise ApplicationError(500)
result = response.json()
current_app.logger.info(
"User ID '{}' updated charge {}. Entry number: {}, registration date: {}. TraceID={}".format(
g.session.user.id,
result['land_charge_id'],
result['entry_number'],
result['registration_date'],
g.trace_id)
)
last_charge = LastCreatedCharge()
last_charge.charge_id = result['land_charge_id']
last_charge.entry_number = result['entry_number']
last_charge.registration_date = datetime.strptime(result['registration_date'], "%Y-%m-%d").strftime("%d/%m/%Y")
g.session.last_created_charge = last_charge
g.session.commit()
| 45.353982
| 119
| 0.60722
| 563
| 5,125
| 5.310835
| 0.158082
| 0.093645
| 0.053512
| 0.042809
| 0.810702
| 0.798997
| 0.76388
| 0.740468
| 0.712375
| 0.712375
| 0
| 0.004923
| 0.286634
| 5,125
| 112
| 120
| 45.758929
| 0.81291
| 0.009366
| 0
| 0.65625
| 0
| 0
| 0.226054
| 0.016949
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.072917
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
44ea244a25b197e65e4740f5e296fa14862fef95
| 142
|
py
|
Python
|
Codewars/6kyu/pascals-triangle-number-2/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/6kyu/pascals-triangle-number-2/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/6kyu/pascals-triangle-number-2/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 2.7.6
test.assert_equals(pascal(1), [[1]])
test.assert_equals(pascal(5), [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]])
| 28.4
| 86
| 0.514085
| 30
| 142
| 2.366667
| 0.4
| 0.169014
| 0.450704
| 0.619718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 0.161972
| 142
| 4
| 87
| 35.5
| 0.420168
| 0.098592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
785ce30fa2492f95d1a65a731ac5531b15b44676
| 30
|
py
|
Python
|
cards/viewsets/__init__.py
|
atalaydev/cardify
|
594a7421580dd5cdc47d5da0d68c7298189a0422
|
[
"MIT"
] | null | null | null |
cards/viewsets/__init__.py
|
atalaydev/cardify
|
594a7421580dd5cdc47d5da0d68c7298189a0422
|
[
"MIT"
] | null | null | null |
cards/viewsets/__init__.py
|
atalaydev/cardify
|
594a7421580dd5cdc47d5da0d68c7298189a0422
|
[
"MIT"
] | null | null | null |
from .card import CardViewSet
| 15
| 29
| 0.833333
| 4
| 30
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
78776f98e6477943410b10a4e046209a4e156f43
| 5,133
|
py
|
Python
|
chmap/coronal_holes/tracking/tools/projection.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | 3
|
2021-06-29T00:23:47.000Z
|
2021-09-17T18:29:05.000Z
|
chmap/coronal_holes/tracking/tools/projection.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | null | null | null |
chmap/coronal_holes/tracking/tools/projection.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | 1
|
2021-12-08T06:26:18.000Z
|
2021-12-08T06:26:18.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
def map_new_polar_projection(gray_image):
""" A function to rotate a grayscaled image and project.
The projection steps:
1. transform to cartesian coordinates.
2. rotate about the x axis by angle=pi/2:
* rotation matrix = [1 0 0 ] [1 0 0]
[0 cos(a) -sin(a)] = [0 0 -1]
[0 sin(a) cos(a)] [0 1 0]
3. map back to spherical coordinates. - return image in new projection.
:parameter gray_image = image matrix (n_t x n_p) dimensions.
Gray scaled, meaning its elements are between 0 and 255. """
# extract the dimensions of the grayscaled image.
n_t, n_p = np.shape(gray_image)
# create 1d arrays for spherical coordinates.
theta = np.linspace(np.pi, 0, n_t)
phi = np.linspace(0, 2 * np.pi, n_p)
# spacing in theta and phi.
delta_t = theta[1] - theta[0]
delta_p = phi[1] - phi[0]
# compute theta and phi grids.
theta_grid = np.arccos(np.outer(np.sin(theta), np.sin(phi)))
phi_grid = np.arctan2(np.outer(-np.cos(theta), np.ones(n_p)), np.outer(np.sin(theta), np.cos(phi)))
# Change phi range from [-pi,pi] to [0,2pi]
neg_phi = phi_grid < 0
phi_grid[neg_phi] = phi_grid[neg_phi] + 2 * np.pi
# initialize new image.
image = np.zeros((n_t, n_p))
# assign the new index.
for ii in range(0, n_t):
for jj in range(0, n_p):
image[ii, jj] = gray_image[int(np.abs(theta_grid[ii, jj]) / delta_t), int(phi_grid[ii, jj] / delta_p)]
return image
def map_back_to_long_lat(gray_image):
""" A function to rotate a grayscaled image and project.
The projection steps:
1. transform to cartesian coordinates.
2. rotate about the x axis by angle=-pi/2:
* rotation matrix = [1 0 0 ] [1 0 0]
[0 cos(a) -sin(a)] = [0 0 1]
[0 sin(a) cos(a)] [0 -1 0]
3. map back to spherical coordinates. - return image in new projection.
:parameter gray_image = image matrix (n_t x n_p) dimensions.
Gray scaled, meaning its elements are between 0 and 255. """
# extract the dimensions of the grayscaled image.
n_t, n_p = np.shape(gray_image)
# create 1d arrays for spherical coordinates.
theta = np.linspace(np.pi, 0, n_t)
phi = np.linspace(0, 2 * np.pi, n_p)
# spacing in theta and phi.
delta_t = theta[1] - theta[0]
delta_p = phi[1] - phi[0]
# compute theta and phi grids.
theta_grid = np.arccos(np.outer(-np.sin(theta), np.sin(phi)))
phi_grid = np.arctan2(np.outer(np.cos(theta), np.ones(n_p)), np.outer(np.sin(theta), np.cos(phi)))
# Change phi range from [-pi,pi] to [0,2pi]
neg_phi = phi_grid < 0
phi_grid[neg_phi] = phi_grid[neg_phi] + 2 * np.pi
# initialize new image.
image = np.zeros((n_t, n_p))
# assign the new index.
for ii in range(0, n_t):
for jj in range(0, n_p):
image[ii, jj] = gray_image[int(np.abs(theta_grid[ii, jj]) / delta_t), int(phi_grid[ii, jj] / delta_p)]
return image
if __name__ == '__main__':
# load image from pickle file.
image = pickle.load(file=open("example_vid/frame1.pkl", "rb"))
n_t, n_p = np.shape(image)
extent = [0, 2 * np.pi, 0, np.pi]
singularity_lat_lon = np.zeros((n_t, n_p))
t = np.linspace(np.pi, 0, n_t)
p = np.linspace(0, 2*np.pi, n_p)
for ii in range(0, n_t):
if t[ii] > np.pi*3/4:
singularity_lat_lon[ii, :] = 1
elif t[ii] < np.pi/4:
singularity_lat_lon[ii, :] = 1
fig = plt.figure()
ax = plt.axes()
plt.imshow(image)
# pixel coordinates + set ticks.
p_pixel = np.linspace(0, n_p, 5)
t_pixel = np.linspace(0, n_t, 5)
plt.xticks(p_pixel, ["0", "$90$", "$180$", "$270$", "$360$"])
plt.yticks(t_pixel, ["1", "$\dfrac{1}{2}$", "$0$", "-$\dfrac{1}{2}$", "-$1$"])
# axis label.
plt.xlabel("Longitude (Deg.)")
plt.ylabel("Sin(Lat.)")
ax.set_title('Original Image')
singularity_polar = np.zeros((n_t, n_p))
ind=255
for ii in range(n_t):
for jj in range(n_p):
if np.sin(t[ii])*np.sin(p[jj]) > 1/2:
singularity_polar[ii, jj] = (ind-ii-jj)
elif np.sin(t[ii])*np.sin(p[jj]) < -1/2:
singularity_polar[ii, jj] = (ind-ii-jj)
singularity_polar =255* singularity_polar/ np.min(singularity_polar)
fig = plt.figure()
ax = plt.axes()
pos = ax.imshow(singularity_polar, extent=extent, cmap='hsv')
fig.colorbar(pos, ax=ax)
ax.set_xlabel("$\phi$")
ax.set_ylabel("$\Theta$")
ax.set_title('Polar projection distortion region in lat-lon projection ')
fig = plt.figure()
ax = plt.axes()
pos = ax.imshow(map_new_polar_projection(gray_image=singularity_polar), extent=extent, cmap='hsv')
fig.colorbar(pos, ax=ax)
ax.set_xlabel("$\phi$")
ax.set_ylabel("$\Theta$")
ax.set_title('Polar projection distortion region in polar projection ')
plt.show()
| 32.694268
| 114
| 0.591662
| 837
| 5,133
| 3.489845
| 0.166069
| 0.012325
| 0.007189
| 0.009586
| 0.821294
| 0.809654
| 0.754536
| 0.743581
| 0.737419
| 0.715508
| 0
| 0.031712
| 0.262809
| 5,133
| 156
| 115
| 32.903846
| 0.740222
| 0.311709
| 0
| 0.518987
| 0
| 0
| 0.07986
| 0.006412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.037975
| 0
| 0.088608
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
153141c427e8a2f4dc02264b66a9d450e4a4921d
| 165
|
py
|
Python
|
tests/util.py
|
wesselb/aws
|
a4fc7443806ab96e78878bd11e4c4c5821d03f0d
|
[
"MIT"
] | 1
|
2021-06-11T15:20:31.000Z
|
2021-06-11T15:20:31.000Z
|
tests/util.py
|
wesselb/aws
|
a4fc7443806ab96e78878bd11e4c4c5821d03f0d
|
[
"MIT"
] | null | null | null |
tests/util.py
|
wesselb/aws
|
a4fc7443806ab96e78878bd11e4c4c5821d03f0d
|
[
"MIT"
] | 1
|
2021-06-11T15:20:35.000Z
|
2021-06-11T15:20:35.000Z
|
from numpy.testing import assert_allclose, assert_array_almost_equal
__all__ = ['allclose', 'approx']
allclose = assert_allclose
approx = assert_array_almost_equal
| 27.5
| 68
| 0.830303
| 21
| 165
| 5.952381
| 0.52381
| 0.224
| 0.272
| 0.352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09697
| 165
| 6
| 69
| 27.5
| 0.838926
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 0
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15374b57e839854b6f09184ab2169f62f3481955
| 47
|
py
|
Python
|
backend/garpix_notify/mixins/__init__.py
|
Beerhead/garpix_notify
|
a56d17ef278a2e96342e144bc918a647f4cc5d22
|
[
"MIT"
] | 9
|
2021-06-27T16:08:33.000Z
|
2021-12-26T17:33:25.000Z
|
backend/garpix_notify/mixins/__init__.py
|
Beerhead/garpix_notify
|
a56d17ef278a2e96342e144bc918a647f4cc5d22
|
[
"MIT"
] | 3
|
2022-01-24T11:36:46.000Z
|
2022-02-14T09:46:34.000Z
|
backend/garpix_notify/mixins/__init__.py
|
Beerhead/garpix_notify
|
a56d17ef278a2e96342e144bc918a647f4cc5d22
|
[
"MIT"
] | 7
|
2021-06-29T15:28:38.000Z
|
2022-01-25T07:40:28.000Z
|
from .user_notify_mixin import UserNotifyMixin
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15ba4bfd3b443066917c263e63c44571dae47f99
| 7,300
|
py
|
Python
|
SC101 - Github/My_Photoshop (Image Processing)/blur.py
|
huangichen97/sc-projects
|
ddbbe32f68d8257027973520efd0282ee4c79513
|
[
"MIT"
] | 1
|
2020-12-22T15:28:28.000Z
|
2020-12-22T15:28:28.000Z
|
SC101 - Github/My_Photoshop (Image Processing)/blur.py
|
huangichen97/sc-projects
|
ddbbe32f68d8257027973520efd0282ee4c79513
|
[
"MIT"
] | null | null | null |
SC101 - Github/My_Photoshop (Image Processing)/blur.py
|
huangichen97/sc-projects
|
ddbbe32f68d8257027973520efd0282ee4c79513
|
[
"MIT"
] | null | null | null |
"""
File: blur.py
-------------------------------
This file shows the original image(smiley-face.png)
first, and then its blurred image. The blur algorithm
uses the average RGB values of a pixel's nearest neighbors.
"""
from simpleimage import SimpleImage
def blur(img):
"""
:param img: The image that will be blurred.
:return: new_img: The blurred image.
"""
new_img = SimpleImage.blank(img.width, img.height)
for x in range(img.width):
for y in range(img.height):
new_pixel = new_img.get_pixel(x, y)
if x == 0 and y == 0:
# Top-left corner.
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x + 1, y)
pixel3 = img.get_pixel(x, y + 1)
pixel4 = img.get_pixel(x + 1, y + 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red)//4
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green)//4
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue)//4
elif x == img.width - 1 and y == 0:
# Top-right corner.
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x - 1, y)
pixel3 = img.get_pixel(x, y + 1)
pixel4 = img.get_pixel(x - 1, y + 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red) // 4
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green) // 4
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue) // 4
elif x == 0 and y == img.height - 1:
# Bottom-left corner
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x + 1, y)
pixel3 = img.get_pixel(x, y - 1)
pixel4 = img.get_pixel(x + 1, y - 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red) // 4
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green) // 4
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue) // 4
elif x == img.width - 1 and y == img.height - 1:
# Bottom-right corner
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x - 1, y)
pixel3 = img.get_pixel(x, y - 1)
pixel4 = img.get_pixel(x - 1, y - 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red) // 4
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green) // 4
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue) // 4
elif y == 0 and 0 < x < img.width - 1:
# First row
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x + 1, y)
pixel3 = img.get_pixel(x, y + 1)
pixel4 = img.get_pixel(x + 1, y + 1)
pixel5 = img.get_pixel(x-1, y)
pixel6 = img.get_pixel(x-1, y+1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red + pixel5.red + pixel6.red) // 6
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green + pixel5.green + pixel6.green) // 6
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue + pixel5.blue + pixel6.blue) // 6
elif y == img.height - 1 and 0 < x < img.width - 1:
# Last row
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x + 1, y)
pixel3 = img.get_pixel(x, y - 1)
pixel4 = img.get_pixel(x + 1, y - 1)
pixel5 = img.get_pixel(x - 1, y)
pixel6 = img.get_pixel(x - 1, y - 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red + pixel5.red + pixel6.red) // 6
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green + pixel5.green + pixel6.green) // 6
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue + pixel5.blue + pixel6.blue) // 6
elif x == 0 and 0 < y < img.height - 1:
# First column
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x, y+1)
pixel3 = img.get_pixel(x, y-1)
pixel4 = img.get_pixel(x + 1, y)
pixel5 = img.get_pixel(x + 1, y+1)
pixel6 = img.get_pixel(x + 1, y-1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red + pixel5.red + pixel6.red) // 6
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green + pixel5.green + pixel6.green) // 6
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue + pixel5.blue + pixel6.blue) // 6
elif x == img.width - 1 and 0 < y < img.height - 1:
# Last column
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x, y + 1)
pixel3 = img.get_pixel(x, y - 1)
pixel4 = img.get_pixel(x - 1, y)
pixel5 = img.get_pixel(x - 1, y + 1)
pixel6 = img.get_pixel(x - 1, y - 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red + pixel5.red + pixel6.red) // 6
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green + pixel5.green + pixel6.green) // 6
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue + pixel5.blue + pixel6.blue) // 6
else:
# Inner pixels.
pixel1 = img.get_pixel(x, y)
pixel2 = img.get_pixel(x, y + 1)
pixel3 = img.get_pixel(x, y - 1)
pixel4 = img.get_pixel(x - 1, y)
pixel5 = img.get_pixel(x - 1, y + 1)
pixel6 = img.get_pixel(x - 1, y - 1)
pixel7 = img.get_pixel(x + 1, y)
pixel8 = img.get_pixel(x + 1, y - 1)
pixel9 = img.get_pixel(x + 1, y + 1)
new_pixel.red = (pixel1.red + pixel2.red + pixel3.red + pixel4.red + pixel5.red + pixel6.red + pixel7.red + pixel8.red + pixel9.red) // 9
new_pixel.green = (pixel1.green + pixel2.green + pixel3.green + pixel4.green + pixel5.green + pixel6.green + pixel7.green + pixel8.green + pixel9.green) // 9
new_pixel.blue = (pixel1.blue + pixel2.blue + pixel3.blue + pixel4.blue + pixel5.blue + pixel6.blue + pixel7.blue + pixel8.blue + pixel9.blue) // 9
return new_img
def main():
"""
This program shows the original image(smiley-face.png) first,
and then blurs the original image into blurred_img.
Users can adjust how blur they want the image to be.
"""
old_img = SimpleImage("images/smiley-face.png")
old_img.show()
times = int(input('How Blur? (From 1-10): '))
blurred_img = blur(old_img)
for i in range(times-1):
blurred_img = blur(blurred_img)
blurred_img.show()
if __name__ == '__main__':
main()
| 50.344828
| 173
| 0.527808
| 1,004
| 7,300
| 3.74004
| 0.096614
| 0.079893
| 0.146471
| 0.159787
| 0.805326
| 0.801864
| 0.770706
| 0.766711
| 0.766711
| 0.766711
| 0
| 0.063099
| 0.342192
| 7,300
| 144
| 174
| 50.694444
| 0.718867
| 0.081096
| 0
| 0.627451
| 0
| 0
| 0.00797
| 0.003308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.009804
| 0
| 0.039216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec5f6745847e0cc0cdac48844f8f4ca73c5e5ad8
| 1,083
|
py
|
Python
|
3.1.8/color_text/color_text/doc/__init__.py
|
ChongChong-qyx/color-text
|
6c65ec680bf49b2aecba0fe03ad6c09de8baa473
|
[
"MIT"
] | 2
|
2020-08-02T14:56:52.000Z
|
2020-08-03T01:21:21.000Z
|
4.2.2/color_text/color_text/doc/__init__.py
|
ChongChong-qyx/color-text
|
6c65ec680bf49b2aecba0fe03ad6c09de8baa473
|
[
"MIT"
] | null | null | null |
4.2.2/color_text/color_text/doc/__init__.py
|
ChongChong-qyx/color-text
|
6c65ec680bf49b2aecba0fe03ad6c09de8baa473
|
[
"MIT"
] | null | null | null |
"""
Help ducuments.
帮助文档。
"""
def doc():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.doc')
del os
del sys
def docx():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.docx')
del os
del sys
def markdown():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.md')
del os
del sys
def html():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.mhtml')
del os
del sys
def odt():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.odt')
del os
del sys
def pdf():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.pdf')
del os
del sys
def rtf():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.rtf')
del os
del sys
def xml():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.xml')
del os
del sys
def xps():
import sys
import os
os.startfile(sys.path[5] + '\\color_text\\doc\\' + 'help.xps')
del os
del sys
| 15.926471
| 65
| 0.630656
| 183
| 1,083
| 3.68306
| 0.131148
| 0.120178
| 0.200297
| 0.227003
| 0.860534
| 0.694362
| 0.694362
| 0.694362
| 0.694362
| 0.694362
| 0
| 0.010124
| 0.179132
| 1,083
| 67
| 66
| 16.164179
| 0.748032
| 0.019391
| 0
| 0.666667
| 0
| 0
| 0.232448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ec665822390410ae65e9981ffab2bba96b0d42b0
| 116
|
py
|
Python
|
examples/server/tests/__init__.py
|
rawjam/django-angular
|
0150dc16f270b8fd30554c4c3dfcff14a6f44c92
|
[
"MIT"
] | 1
|
2018-03-17T10:59:58.000Z
|
2018-03-17T10:59:58.000Z
|
examples/server/tests/__init__.py
|
rawjam/django-angular
|
0150dc16f270b8fd30554c4c3dfcff14a6f44c92
|
[
"MIT"
] | null | null | null |
examples/server/tests/__init__.py
|
rawjam/django-angular
|
0150dc16f270b8fd30554c4c3dfcff14a6f44c92
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from forms import *
from views import *
from validation import *
from templatetags import *
| 19.333333
| 26
| 0.706897
| 15
| 116
| 5.466667
| 0.6
| 0.365854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.181034
| 116
| 5
| 27
| 23.2
| 0.852632
| 0.181034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec9003dd2936ce0b91ce85b406be6211454556ab
| 7,555
|
py
|
Python
|
unscript/mercuri/migrations/0001_initial.py
|
rising-entropy/UnscriptMercuri
|
6b545dfeb4beccaa2a64b8566926ed3028849669
|
[
"MIT"
] | null | null | null |
unscript/mercuri/migrations/0001_initial.py
|
rising-entropy/UnscriptMercuri
|
6b545dfeb4beccaa2a64b8566926ed3028849669
|
[
"MIT"
] | null | null | null |
unscript/mercuri/migrations/0001_initial.py
|
rising-entropy/UnscriptMercuri
|
6b545dfeb4beccaa2a64b8566926ed3028849669
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-11-21 05:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the mercuri app: hospital capacity, patients, staff,
    admins, doctors, reception and chart-status tables.

    Auto-generated by Django 3.1.1 (`makemigrations`); depends only on the
    project's swappable user model.
    """
    initial = True
    dependencies = [
        # Resolved at runtime to whatever AUTH_USER_MODEL the project configures.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Per-hospital capacity snapshot.
        # NOTE(review): numeric quantities (beds, ventilators, counts) are stored
        # as CharFields with zero-padded string defaults, and several defaults
        # ('000000', 6 chars) exceed their max_length=5 — confirm intended.
        migrations.CreateModel(
            name='HospitalData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='Mukesh', max_length=100)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('ventilators', models.CharField(default='000000', max_length=5)),
                ('beds', models.CharField(default='000000', max_length=6)),
                ('occupiedVentilators', models.CharField(default='000000', max_length=5)),
                ('occupiedBeds', models.CharField(default='000000', max_length=6)),
                ('avilableOxygenCylinders', models.CharField(default='000000', max_length=6)),
            ],
        ),
        # Patient record; not linked to a Django auth user.
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fName', models.CharField(default='Mukesh', max_length=30)),
                ('lName', models.CharField(default='Ambani', max_length=30)),
                ('fullName', models.CharField(default='Mukesh Ambani', max_length=60)),
                ('email', models.CharField(default='patient@patient.com', max_length=50)),
                ('age', models.CharField(default='000', max_length=3)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('currentStatus', models.CharField(default='Active', max_length=10)),
                ('remarks', models.CharField(default='Recovering Steadily', max_length=1200)),
                ('medicalHistory', models.CharField(default='Diabetic', max_length=1200)),
                ('ventilator', models.BooleanField()),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('patientID', models.CharField(default='A1A1A1', max_length=12)),
                ('isAlive', models.BooleanField()),
                ('operatedByDoctor', models.CharField(default='Vijay Raaz', max_length=60)),
            ],
        ),
        # Time-stamped active/deceased/recovered counts, presumably for charting.
        migrations.CreateModel(
            name='StatusForChart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('currentTime', models.DateTimeField(default=django.utils.timezone.now)),
                ('currentActive', models.CharField(default='000000', max_length=6)),
                ('currentDeceased', models.CharField(default='000000', max_length=6)),
                ('currentRecovered', models.CharField(default='000000', max_length=6)),
            ],
        ),
        # The four staff-role tables below share the same layout and each hold a
        # one-to-one link to an auth user (deleted in cascade with the user).
        migrations.CreateModel(
            name='Reception',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fName', models.CharField(default='Mukesh', max_length=30)),
                ('lName', models.CharField(default='Ambani', max_length=30)),
                ('fullName', models.CharField(default='Mukesh Ambani', max_length=60)),
                ('staffID', models.CharField(default='A1A1A1', max_length=12)),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('email', models.CharField(default='patient@patient.com', max_length=50)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('shift', models.CharField(default='Morning', max_length=10)),
                ('photo', models.URLField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='HospitalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fName', models.CharField(default='Mukesh', max_length=30)),
                ('lName', models.CharField(default='Ambani', max_length=30)),
                ('fullName', models.CharField(default='Mukesh Ambani', max_length=60)),
                ('staffID', models.CharField(default='A1A1A1', max_length=12)),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('email', models.CharField(default='patient@patient.com', max_length=50)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('shift', models.CharField(default='Morning', max_length=10)),
                ('photo', models.URLField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='HospitalAdmin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fName', models.CharField(default='Mukesh', max_length=30)),
                ('lName', models.CharField(default='Ambani', max_length=30)),
                ('fullName', models.CharField(default='Mukesh Ambani', max_length=60)),
                ('adminID', models.CharField(default='A1A1A1', max_length=12)),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('email', models.CharField(default='patient@patient.com', max_length=50)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('shift', models.CharField(default='Morning', max_length=10)),
                ('photo', models.URLField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fName', models.CharField(default='Mukesh', max_length=30)),
                ('lName', models.CharField(default='Ambani', max_length=30)),
                ('fullName', models.CharField(default='Mukesh Ambani', max_length=60)),
                ('title', models.CharField(default='ENT Specialist', max_length=60)),
                ('contactNo', models.CharField(default='0000000000', max_length=15)),
                ('email', models.CharField(default='patient@patient.com', max_length=50)),
                ('address', models.CharField(default='Antilla, Mumbai', max_length=500)),
                ('doctorID', models.CharField(default='A1A1A1', max_length=12)),
                ('shift', models.CharField(default='Morning', max_length=10)),
                ('photo', models.URLField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 59.023438
| 121
| 0.593647
| 738
| 7,555
| 5.952575
| 0.163957
| 0.191213
| 0.280446
| 0.070112
| 0.781243
| 0.781243
| 0.77282
| 0.703164
| 0.67949
| 0.654906
| 0
| 0.045053
| 0.250827
| 7,555
| 127
| 122
| 59.488189
| 0.731095
| 0.005956
| 0
| 0.675
| 1
| 0
| 0.153436
| 0.003063
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec939c8be54f49063c50b8f4acc0f06cfd7daa97
| 13,175
|
py
|
Python
|
examples/qas_models.py
|
yumoxu/pytorch-transformers
|
4184e261873aa63c92607f073697fb68385f6739
|
[
"Apache-2.0"
] | null | null | null |
examples/qas_models.py
|
yumoxu/pytorch-transformers
|
4184e261873aa63c92607f073697fb68385f6739
|
[
"Apache-2.0"
] | null | null | null |
examples/qas_models.py
|
yumoxu/pytorch-transformers
|
4184e261873aa63c92607f073697fb68385f6739
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers.modeling_bert import (BertPreTrainedModel, BertModel)
class BertForSharedAnswerSelection(BertPreTrainedModel):
    r"""BERT answer-selection model: each of ``num_choices`` candidates is
    encoded independently and scored with a single shared linear head.

    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary; the second
            dimension is the number of candidates to score.
        **token_type_ids** / **attention_mask**: same shape as ``input_ids``;
            segment ids and padding mask for each candidate sequence.
        **position_ids**: (`optional`) position indices, same shape as ``input_ids``.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``
            with the index of the correct candidate in ``[0, num_choices)``.
        **sent_mask**: (`optional`) boolean mask over candidates; ``False``
            entries are excluded from scoring (filled with ``-1e9``).

    Outputs: `Tuple` of ``(loss?, reshaped_logits, hidden_states?, attentions?)``
        where ``reshaped_logits`` has shape ``(batch_size, num_choices)``.
    """

    def __init__(self, config):
        super(BertForSharedAnswerSelection, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Single shared scoring head: one scalar per candidate.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_weights)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_ids, token_type_ids, attention_mask, position_ids=None, labels=None, sent_mask=None):
        """Score every candidate; see the class docstring for shapes."""
        num_choices = input_ids.shape[1]
        # Flatten (batch, choices, seq) -> (batch*choices, seq) so BERT encodes
        # one candidate per row.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        # BUG FIX: the original used `if position_ids`, which truth-tests the
        # tensor and raises "Boolean value of Tensor ... is ambiguous" for any
        # multi-element tensor. Compare against None explicitly.
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        outputs = self.bert(flat_input_ids,
                            position_ids=flat_position_ids,
                            token_type_ids=flat_token_type_ids,
                            attention_mask=flat_attention_mask)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        if sent_mask is not None:
            # BUG FIX: sent_mask defaults to None but the original applied the
            # mask unconditionally, crashing whenever it was omitted.
            reshaped_logits = reshaped_logits.masked_fill(sent_mask == False, -1e9)  # noqa: E712
        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            # NOTE(review): CrossEntropyLoss applies log-softmax internally, so
            # feeding it softmax probabilities double-normalizes the scores.
            # Preserved as-is because trained checkpoints depend on this loss —
            # confirm before changing.
            loss_fct = CrossEntropyLoss()
            normed_scores = self.softmax(reshaped_logits)  # d_batch * n_choices
            loss = loss_fct(normed_scores, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
class BertConcatForSharedAnswerSelection(BertPreTrainedModel):
    r"""BERT answer-selection model over a single concatenated input.

    All candidate sentences are packed into one sequence; the hidden vectors at
    the ``[CLS]`` positions (``clss``) are scored with a shared linear head.

    Inputs:
        **input_ids** / **token_type_ids** / **attention_mask**:
            ``(d_batch, sequence_length)`` tensors for the concatenated input.
        **position_ids**: (`optional`) position indices.
        **labels**: (`optional`) ``(d_batch,)`` index of the correct sentence.
        **cls_mask**: ``d_batch * max_ns`` mask over sentence slots.
        **clss**: ``d_batch * max_ns`` positions of the ``[CLS]`` tokens (required).

    Outputs: `Tuple` of ``(loss?, logits, hidden_states?, attentions?)``.
    """

    def __init__(self, config):
        super(BertConcatForSharedAnswerSelection, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Single shared scoring head: one scalar per sentence slot.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_weights)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_ids, token_type_ids, attention_mask, position_ids=None, labels=None, cls_mask=None, clss=None):
        """Score each [CLS]-marked sentence in the concatenated input.

        Args:
            input_ids: concatenated token ids, d_batch * seq_len
            token_type_ids: segment ids
            attention_mask: padding mask
            position_ids: optional position indices
            labels: d_batch,
            cls_mask: d_batch * max_ns
            clss: d_batch * max_ns — [CLS] token positions (required)
        Returns:
            (loss), logits, (hidden_states), (attentions)
        """
        # BUG FIX: `clss` was referenced below but never defined anywhere
        # (guaranteed NameError in the original). It is now an explicit
        # keyword argument, kept last so existing call sites are unaffected.
        if clss is None:
            raise ValueError("clss ([CLS] token positions) must be provided")
        outputs = self.bert(input_ids,
                            position_ids=position_ids,
                            token_type_ids=token_type_ids,
                            attention_mask=attention_mask)
        top_vec = outputs[0]
        # Gather the hidden vector at each [CLS] position, then zero out slots
        # masked off by cls_mask (padding sentences).
        sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]
        sents_vec = sents_vec * cls_mask[:, :, None].float()
        logits = self.classifier(sents_vec)  # d_batch * max_nt
        # NOTE(review): the linear head leaves a trailing singleton dim on
        # `logits`; confirm whether a squeeze(-1) is intended before the fill.
        logits = logits.masked_fill(cls_mask == False, -1e9)  # noqa: E712
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            # NOTE(review): CrossEntropyLoss expects raw logits; applying
            # softmax first double-normalizes. Preserved as in the original.
            loss_fct = CrossEntropyLoss()
            normed_scores = self.softmax(logits)  # d_batch * ns
            loss = loss_fct(normed_scores, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
| 54.895833
| 151
| 0.63704
| 1,628
| 13,175
| 4.996929
| 0.136364
| 0.031961
| 0.008113
| 0.008851
| 0.86343
| 0.856177
| 0.841057
| 0.841057
| 0.841057
| 0.829133
| 0
| 0.01131
| 0.255104
| 13,175
| 239
| 152
| 55.125523
| 0.817608
| 0.706869
| 0
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.080645
| 0
| 0.209677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
01a93012624e342ab80cadf9af2bf6820da4e371
| 119
|
py
|
Python
|
worker/__init__.py
|
gcvalderrama/Palantir
|
403be804e9ac0f16dd24598cf6af585319882367
|
[
"BSD-2-Clause"
] | null | null | null |
worker/__init__.py
|
gcvalderrama/Palantir
|
403be804e9ac0f16dd24598cf6af585319882367
|
[
"BSD-2-Clause"
] | null | null | null |
worker/__init__.py
|
gcvalderrama/Palantir
|
403be804e9ac0f16dd24598cf6af585319882367
|
[
"BSD-2-Clause"
] | null | null | null |
# __init__.py
from worker.crawler import Crawler
from worker.helper import Helper
from worker.trainer import Trainer
| 17
| 34
| 0.823529
| 17
| 119
| 5.529412
| 0.470588
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 6
| 35
| 19.833333
| 0.912621
| 0.092437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
177f5e21336930b94f5473d637a2c3cc616c0f72
| 9,406
|
py
|
Python
|
disambiguation/groundtruth/adjudication.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
disambiguation/groundtruth/adjudication.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | 6
|
2020-03-20T18:10:01.000Z
|
2021-09-29T17:31:17.000Z
|
disambiguation/groundtruth/adjudication.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Takes two different ground truths and outputs the disagreements for manual check, with basic statistics.
"""
__author__ = """Giovanni Colavizza"""
import codecs, logging, csv, copy
# Log data-quality issues (missing fields in the ground-truth CSVs) to file;
# only WARNING and above is recorded.
logging.basicConfig(filename="logs/PF.log", level=logging.WARNING)
logger = logging.getLogger(__name__)
def clean_bid(bid):
    """Normalise a catalogue BID.

    Backslash-separated identifiers of the form ``IT\\...\\XXX\\NNN`` are
    collapsed to their last two segments joined together; anything else is
    returned unchanged.
    """
    if not bid.startswith("IT"):
        return bid
    segments = bid.split("\\")
    return "".join(segments[-2:])
def process_bid(bids):
    """Split a raw BID cell into a list of cleaned BIDs.

    Cells shorter than 10 characters are treated as empty/invalid. Multiple
    identifiers may be separated by commas or by whitespace; each piece is
    normalised with :func:`clean_bid`.
    """
    if len(bids) < 10:
        # Too short to contain a valid identifier.
        return []
    comma_parts = bids.split(",")
    if len(comma_parts) > 1:
        return [clean_bid(part.strip()) for part in comma_parts]
    space_parts = bids.split()
    if len(space_parts) > 1:
        return [clean_bid(part.strip()) for part in space_parts]
    return [clean_bid(bids.strip())]
def adjudication_secondary(file_1, file_2):
    """Compare two secondary-source ground-truth CSVs and report disagreements.

    Both inputs are exports with columns (article_id, article_url,
    article_title, image_number, reference, BID_SBN, BID_LBC, type_pub,
    linkedbooks_article_id, note). Prints basic agreement statistics to stdout
    and writes per-page mismatches to ``adj_secondary_full.csv``.
    """

    def load(filename, warn_on_missing):
        # Parse one export into {article_id: {image_number: [BID, ...]}}.
        refs_byart = dict()
        with codecs.open(filename) as fh:
            reader = csv.reader(fh, delimiter=',', quotechar='"')
            next(reader, None)  # skip the headers
            for n, row in enumerate(reader):
                article_id, article_url, article_title, image_number, reference, BID_SBN, BID_LBC, type_pub, linkedbooks_article_id, note = row
                # Skip rows with any required field missing.
                if len(article_id) == 0 or len(article_title) == 0 or len(image_number) == 0 or len(reference) == 0 or len(BID_SBN) == 0 or len(type_pub) == 0:
                    if warn_on_missing:
                        # n+2: account for the header row and 1-based numbering.
                        logging.warning("Missing data in row: %d" % (n + 2))
                    continue
                refs_byart.setdefault(article_id, dict()).setdefault(image_number, []).extend(process_bid(BID_SBN))
        return refs_byart

    # The original only logged missing data for the first file; preserved.
    references_1_byart = load(file_1, warn_on_missing=True)
    references_2_byart = load(file_2, warn_on_missing=False)
    # Agreement statistics: articles present in only one of the two files.
    keys_1 = set(references_1_byart.keys())
    keys_2 = set(references_2_byart.keys())
    print(keys_1.difference(keys_2))
    print(keys_2.difference(keys_1))
    print(len(keys_1.difference(keys_2)))
    print(len(keys_2.difference(keys_1)))
    print(len(keys_1))
    print(len(keys_2))
    with codecs.open("adj_secondary_full.csv", "w", encoding="utf-8") as f:
        writer = csv.writer(f, delimiter=";", quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow(["article", "img_number", "problem", "bid"])
        for article in list(keys_1.intersection(keys_2)):
            for img_number in sorted(set(references_1_byart[article].keys()).union(references_2_byart[article].keys())):
                if img_number not in references_1_byart[article].keys():
                    writer.writerow([article, img_number, "Missing page in file 1", ""])
                elif img_number not in references_2_byart[article].keys():
                    writer.writerow([article, img_number, "Missing page in file 2", ""])
                else:  # image in both, find mismatched citations
                    # Cancel matched references pairwise; whatever remains on
                    # either side is a disagreement for manual adjudication.
                    r1 = copy.deepcopy(references_1_byart[article][img_number])
                    r2 = copy.deepcopy(references_2_byart[article][img_number])
                    for ref in references_1_byart[article][img_number]:
                        if ref in r2:
                            r1.remove(ref)
                    for ref in references_2_byart[article][img_number]:
                        if ref in r1:
                            r2.remove(ref)
                    for ref in r1:
                        writer.writerow([article, img_number, "Missing reference in file 2", ref])
                    for ref in r2:
                        writer.writerow([article, img_number, "Missing reference in file 1", ref])
def adjudication_primary(file_1, file_2):
    """Compare two primary-source ground-truth CSVs and report disagreements.

    Inputs are exports with columns (article_id, article_url, article_title,
    image_number, reference, asve_id, note). Prints basic agreement statistics
    to stdout and writes per-page mismatches to ``adj_primary_full.csv``.
    """

    def load(filename, warn_on_missing):
        # Parse one export into {article_id: {image_number: [asve_id, ...]}}.
        refs_byart = dict()
        with codecs.open(filename) as fh:
            reader = csv.reader(fh, delimiter=',', quotechar='"')
            next(reader, None)  # skip the headers
            for n, row in enumerate(reader):
                try:
                    article_id, article_url, article_title, image_number, reference, asve_id, note = row
                except ValueError:
                    # BUG FIX: the original caught this with a bare `except:`,
                    # printed the row and then FELL THROUGH, reusing values
                    # unpacked on a previous iteration (or raising NameError on
                    # the first). Malformed rows are now skipped.
                    print(row)
                    continue
                # Skip rows with any required field missing.
                if len(article_id) == 0 or len(article_title) == 0 or len(image_number) == 0 or len(reference) == 0 or len(asve_id) == 0:
                    if warn_on_missing:
                        # n+2: account for the header row and 1-based numbering.
                        logging.warning("Missing data in row: %d" % (n + 2))
                    continue
                refs_byart.setdefault(article_id, dict()).setdefault(image_number, []).extend(process_bid(asve_id))
        return refs_byart

    # The original only logged missing data for the first file; preserved.
    references_1_byart = load(file_1, warn_on_missing=True)
    references_2_byart = load(file_2, warn_on_missing=False)
    # Agreement statistics: articles present in only one of the two files.
    keys_1 = set(references_1_byart.keys())
    keys_2 = set(references_2_byart.keys())
    print(keys_1.difference(keys_2))
    print(keys_2.difference(keys_1))
    print(len(keys_1.difference(keys_2)))
    print(len(keys_2.difference(keys_1)))
    print(len(keys_1))
    print(len(keys_2))
    with codecs.open("adj_primary_full.csv", "w", encoding="utf-8") as f:
        writer = csv.writer(f, delimiter=";", quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow(["article", "img_number", "problem", "asve_id"])
        for article in list(keys_1.intersection(keys_2)):
            for img_number in sorted(set(references_1_byart[article].keys()).union(references_2_byart[article].keys())):
                if img_number not in references_1_byart[article].keys():
                    writer.writerow([article, img_number, "Missing page in file 1", ""])
                elif img_number not in references_2_byart[article].keys():
                    writer.writerow([article, img_number, "Missing page in file 2", ""])
                else:  # image in both, find mismatched citations
                    # Cancel matched references pairwise; whatever remains on
                    # either side is a disagreement for manual adjudication.
                    r1 = copy.deepcopy(references_1_byart[article][img_number])
                    r2 = copy.deepcopy(references_2_byart[article][img_number])
                    for ref in references_1_byart[article][img_number]:
                        if ref in r2:
                            r1.remove(ref)
                    for ref in references_2_byart[article][img_number]:
                        if ref in r1:
                            r2.remove(ref)
                    for ref in r1:
                        writer.writerow([article, img_number, "Missing reference in file 2", ref])
                    for ref in r2:
                        writer.writerow([article, img_number, "Missing reference in file 1", ref])
if __name__ == "__main__":
    # Ground-truth exports to adjudicate; suffix _1 / _2 marks the annotator.
    secondary_gt_a = "secondary_full_23052017_1.csv"
    primary_gt_a = "primary_full_23052017_1.csv"
    secondary_gt_b = "secondary_full_10052017_2.csv"
    primary_gt_b = "primary_full_10052017_2.csv"
    # adjudication_secondary(secondary_gt_a, secondary_gt_b)
    adjudication_primary(primary_gt_a, primary_gt_b)
| 50.569892
| 155
| 0.613013
| 1,225
| 9,406
| 4.442449
| 0.112653
| 0.066703
| 0.094083
| 0.067622
| 0.898015
| 0.890665
| 0.866777
| 0.866777
| 0.866777
| 0.866777
| 0
| 0.026515
| 0.270253
| 9,406
| 186
| 156
| 50.569892
| 0.766317
| 0.040719
| 0
| 0.69281
| 0
| 0
| 0.057641
| 0.014882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026144
| false
| 0
| 0.006536
| 0
| 0.065359
| 0.084967
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
17869b892e79f6631c46d3372b33c0958256cbdf
| 11,214
|
py
|
Python
|
src/algo/handle_algodex.py
|
0xChief/staketaxcsv
|
3122736c4044e9a22237fffacee80ca1d7604be1
|
[
"MIT"
] | null | null | null |
src/algo/handle_algodex.py
|
0xChief/staketaxcsv
|
3122736c4044e9a22237fffacee80ca1d7604be1
|
[
"MIT"
] | null | null | null |
src/algo/handle_algodex.py
|
0xChief/staketaxcsv
|
3122736c4044e9a22237fffacee80ca1d7604be1
|
[
"MIT"
] | null | null | null |
import base64
import json
import re
from algo import constants as co
from algo.asset import Algo, Asset
from algo.util_algo import get_transaction_note, get_transfer_asset
from common.make_tx import make_swap_tx
# For reference check the whitepaper appendix:
# https://github.com/algodex/algodex-public-documents/blob/master/Algodex%20Whitepaper%201.0.pdf
# On-chain application ids of the AlgoDex buy- and sell-side order-book contracts.
APPLICATION_ID_ALGODEX_BUY = 354073718
APPLICATION_ID_ALGODEX_SELL = 354073834
# Limit-order lifecycle actions embedded in the transaction note's order key.
ALGODEX_LIMIT_ORDER_OPEN = "open"
ALGODEX_LIMIT_ORDER_CLOSE = "close"
ALGODEX_LIMIT_ORDER_PARTIAL = "execute_partial"
ALGODEX_LIMIT_ORDER_FULL = "execute_full"
ALGODEX_LIMIT_ORDER_ACTIONS = [
    ALGODEX_LIMIT_ORDER_OPEN,
    ALGODEX_LIMIT_ORDER_CLOSE,
    ALGODEX_LIMIT_ORDER_PARTIAL,
    ALGODEX_LIMIT_ORDER_FULL
]
# Base64-encoded application argument that marks a market ("execute") order.
ALGODEX_TRANSACTION_ORDER_EXECUTE = "ZXhlY3V0ZQ=="  # "execute"
ORDER_TYPE_BUY = "buy"
ORDER_TYPE_SELL = "sell"
# Shape of a limit-order note key, capturing the lifecycle action:
# <initiator_address>-<asset_id>-[<action>]_[algo|asa]
order_pattern = re.compile(r"^\w+-\d+-\[(?P<action>\w+)\]_\[(?:algo|asa)\]")
def is_algodex_transaction(wallet_address, group):
    """Return True when the transaction group looks like an AlgoDex order.

    A candidate group has 1-5 transactions, its first app call (if any) targets
    one of the AlgoDex contracts, and it carries either the market-order
    "execute" app arg or a note whose key matches a known limit-order action.
    """
    if not 1 <= len(group) <= 5:
        return False
    first_tx = group[0]
    tx_type = first_tx["tx-type"]
    if tx_type == co.TRANSACTION_TYPE_APP_CALL:
        app_id = first_tx[co.TRANSACTION_KEY_APP_CALL]["application-id"]
        if app_id not in (APPLICATION_ID_ALGODEX_BUY, APPLICATION_ID_ALGODEX_SELL):
            return False
    note = get_transaction_note(first_tx)
    if note is None:
        # Market orders carry no note; recognise them by the app-call args.
        if tx_type == co.TRANSACTION_TYPE_APP_CALL:
            return ALGODEX_TRANSACTION_ORDER_EXECUTE in first_tx[co.TRANSACTION_KEY_APP_CALL]["application-args"]
        return False
    if len(note) < len(wallet_address):
        return False
    try:
        order = json.loads(note)
    except Exception:
        return False
    first_key = next(iter(order))
    match = order_pattern.match(first_key)
    if match is None:
        return False
    return match.group("action") in ALGODEX_LIMIT_ORDER_ACTIONS
def handle_algodex_transaction(wallet_address, group, exporter, txinfo):
    """Route an AlgoDex transaction group to the handler matching its order
    kind (partial/full execution), order type (buy/sell) and the wallet's side
    of the trade. Open and close orders are ignored (not taxable events)."""
    first_tx = group[0]
    txtype = first_tx["tx-type"]
    note = get_transaction_note(first_tx)
    if note is None:
        # No note: this can only be a market ("execute") order.
        if txtype == co.TRANSACTION_TYPE_APP_CALL:
            appl_args = first_tx[co.TRANSACTION_KEY_APP_CALL]["application-args"]
            if ALGODEX_TRANSACTION_ORDER_EXECUTE in appl_args:
                if group[-1]["sender"] == wallet_address:
                    _handle_algodex_market_order_buy_side(group, exporter, txinfo)
                else:
                    _handle_algodex_market_order_sell_side(group, exporter, txinfo)
        return
    order = json.loads(note)
    key = next(iter(order))
    order_details = order.get(key)
    initiator_address = key.split("-", 1)[0]
    if ALGODEX_LIMIT_ORDER_PARTIAL in key:
        is_partial = True
    elif ALGODEX_LIMIT_ORDER_FULL in key:
        is_partial = False
    else:
        return  # open / close orders: nothing to export
    is_buy = order_details["escrowOrderType"] == ORDER_TYPE_BUY
    is_initiator = initiator_address == wallet_address
    # (partial?, buy?, initiator?) -> handler. The initiator of a buy order sits
    # on the sell side of the resulting swap, and vice versa.
    dispatch = {
        (True, True, True): _handle_algodex_partial_buy_sell_side,
        (True, True, False): _handle_algodex_partial_buy_buy_side,
        (True, False, True): _handle_algodex_partial_sell_buy_side,
        (True, False, False): _handle_algodex_partial_sell_sell_side,
        (False, True, True): _handle_algodex_full_buy_sell_side,
        (False, True, False): _handle_algodex_full_buy_buy_side,
        (False, False, True): _handle_algodex_full_sell_buy_side,
        (False, False, False): _handle_algodex_full_sell_sell_side,
    }
    dispatch[(is_partial, is_buy, is_initiator)](group, exporter, txinfo)
# Ignore open and close orders
# AlgoDex whitepaper: Diagram 7
def _handle_algodex_partial_buy_sell_side(group, exporter, txinfo):
    """Export a partially-filled limit buy order, seen from the selling wallet.

    Group layout (AlgoDex whitepaper, diagram 7 -- confirm against indexer):
    group[1] delivers ALGO to this wallet, group[2] sends the ASA out,
    group[3] pays the escrow fee.

    Fix: removed the dead store ``fee_amount = 0`` that was immediately
    overwritten by the send transaction's fee.
    """
    receive_transaction = group[1]
    receive_asset = get_transfer_asset(receive_transaction)

    send_transaction = group[2]
    send_asset = get_transfer_asset(send_transaction)

    # Total fee: the send transaction's own network fee plus the escrow fee
    # payment (its amount and its network fee).
    fee_transaction = group[3]
    fee_amount = send_transaction["fee"]
    fee_amount += fee_transaction[co.TRANSACTION_KEY_PAYMENT]["amount"] + fee_transaction["fee"]

    txinfo.comment = "AlgoDex Partial Limit Sell Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    row.fee = Algo(fee_amount).amount
    exporter.ingest_row(row)
def _handle_algodex_partial_buy_buy_side(group, exporter, txinfo):
    """Export a partially-filled limit buy order, seen from the buying wallet.

    The ALGO amount paid is reconstructed from the n/d ratio encoded in the
    app-call transaction's arguments.
    """
    numerator, denominator, _ = _get_order_details(group[0])
    receive_asset = get_transfer_asset(group[1])
    # ALGO paid = received amount * d / n, per the order's encoded ratio.
    send_asset = Algo((receive_asset.uint_amount * denominator) / numerator)

    txinfo.comment = "AlgoDex Partial Limit Buy Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    exporter.ingest_row(row)
# AlgoDex whitepaper: Diagram 11
def _handle_algodex_partial_sell_buy_side(group, exporter, txinfo):
    """Export a partially-filled limit sell order, seen from the buying wallet.

    Group layout (AlgoDex whitepaper, diagram 11 -- confirm against indexer):
    group[1] sends ALGO out; group[2] is either the ASA received or, when it
    is a zero transfer, an ASA opt-in with the real transfer and fee payment
    shifted one slot down.

    Fix: removed the dead store ``fee_amount = 0`` that was immediately
    overwritten by the send transaction's fee.
    """
    send_transaction = group[1]
    fee_amount = send_transaction["fee"]
    send_asset = get_transfer_asset(send_transaction)

    receive_transaction = group[2]
    receive_asset = get_transfer_asset(receive_transaction)
    if receive_asset.zero() and len(group) > 4:
        # group[2] was only an ASA opt-in; the real transfer is next.
        receive_asset = get_transfer_asset(group[3])
        fee_transaction = group[4]
    else:
        fee_transaction = group[3]
    fee_amount += fee_transaction[co.TRANSACTION_KEY_PAYMENT]["amount"] + fee_transaction["fee"]

    txinfo.comment = "AlgoDex Partial Limit Buy Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    row.fee = Algo(fee_amount).amount
    exporter.ingest_row(row)
def _handle_algodex_partial_sell_sell_side(group, exporter, txinfo):
    """Export a partially-filled limit sell order, seen from the selling wallet."""
    numerator, denominator, asset_id = _get_order_details(group[0])
    receive_asset = get_transfer_asset(group[1])
    # ASA sold = ALGO received * n / d, per the order's encoded ratio.
    send_asset = Asset(asset_id, (receive_asset.uint_amount * numerator) / denominator)

    txinfo.comment = "AlgoDex Partial Limit Sell Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    exporter.ingest_row(row)
# AlgoDex whitepaper: Diagram 6
def _handle_algodex_full_buy_sell_side(group, exporter, txinfo):
    """Export a fully-filled limit buy order, seen from the selling wallet.

    Group layout (AlgoDex whitepaper, diagram 6 -- confirm against indexer):
    group[1] delivers ALGO to this wallet, group[2] sends the ASA out.

    Fix: removed the dead store ``fee_amount = 0`` that was immediately
    overwritten by the send transaction's fee.
    """
    receive_transaction = group[1]
    receive_asset = get_transfer_asset(receive_transaction)

    send_transaction = group[2]
    send_asset = get_transfer_asset(send_transaction)
    fee_amount = send_transaction["fee"]

    txinfo.comment = "AlgoDex Full Limit Sell Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    row.fee = Algo(fee_amount).amount
    exporter.ingest_row(row)
def _handle_algodex_full_buy_buy_side(group, exporter, txinfo):
    """Export a fully-filled limit buy order, seen from the buying wallet."""
    numerator, denominator, _ = _get_order_details(group[0])
    receive_asset = get_transfer_asset(group[2])
    # ALGO paid = received amount * d / n, per the order's encoded ratio.
    send_asset = Algo((receive_asset.uint_amount * denominator) / numerator)

    txinfo.comment = "AlgoDex Full Limit Buy Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    exporter.ingest_row(row)
# AlgoDex whitepaper: Diagram 10
def _handle_algodex_full_sell_buy_side(group, exporter, txinfo):
    """Export a fully-filled limit sell order, seen from the buying wallet.

    Group layout (AlgoDex whitepaper, diagram 10 -- confirm against indexer):
    group[1] sends ALGO out; group[2] is the ASA received, unless it is a
    zero transfer (ASA opt-in), in which case the real transfer is group[3].

    Fix: removed the dead store ``fee_amount = 0`` that was immediately
    overwritten by the send transaction's fee.
    """
    send_transaction = group[1]
    fee_amount = send_transaction["fee"]
    send_asset = get_transfer_asset(send_transaction)

    receive_transaction = group[2]
    receive_asset = get_transfer_asset(receive_transaction)
    if receive_asset.zero() and len(group) > 3:
        # group[2] was only an ASA opt-in; the real transfer is next.
        receive_asset = get_transfer_asset(group[3])

    txinfo.comment = "AlgoDex Full Limit Buy Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    row.fee = Algo(fee_amount).amount
    exporter.ingest_row(row)
def _handle_algodex_full_sell_sell_side(group, exporter, txinfo):
    """Export a fully-filled limit sell order, seen from the selling wallet."""
    numerator, denominator, asset_id = _get_order_details(group[0])
    receive_asset = get_transfer_asset(group[1])
    # ASA sold = ALGO received * n / d, per the order's encoded ratio.
    send_asset = Asset(asset_id, (receive_asset.uint_amount * numerator) / denominator)

    txinfo.comment = "AlgoDex Full Limit Sell Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    exporter.ingest_row(row)
# Undocumented
def _handle_algodex_market_order_buy_side(group, exporter, txinfo):
    """Export an AlgoDex market buy order (group layout is undocumented).

    group[1] sends ALGO out; group[2] is the ASA received or, when it is a
    zero transfer, an ASA opt-in with the real transfer and fee payment
    shifted one slot down.
    """
    send_transaction = group[1]
    fee_amount = send_transaction["fee"]
    send_asset = get_transfer_asset(send_transaction)

    receive_asset = get_transfer_asset(group[2])
    if receive_asset.zero() and len(group) > 4:
        # group[2] was only an ASA opt-in; the real transfer is next.
        receive_asset = get_transfer_asset(group[3])
        fee_transaction = group[4]
    else:
        fee_transaction = group[3]
    fee_amount += fee_transaction[co.TRANSACTION_KEY_PAYMENT]["amount"] + fee_transaction["fee"]

    txinfo.comment = "AlgoDex Market Buy Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    row.fee = Algo(fee_amount).amount
    exporter.ingest_row(row)
def _handle_algodex_market_order_sell_side(group, exporter, txinfo):
    """Export an AlgoDex market sell order (group layout is undocumented)."""
    numerator, denominator, asset_id = _get_order_details(group[0])
    receive_asset = get_transfer_asset(group[1])
    # ASA sold = ALGO received * n / d, per the order's encoded ratio.
    send_asset = Asset(asset_id, (receive_asset.uint_amount * numerator) / denominator)

    txinfo.comment = "AlgoDex Market Sell Order"
    row = make_swap_tx(txinfo, send_asset.amount, send_asset.ticker, receive_asset.amount, receive_asset.ticker)
    exporter.ingest_row(row)
def _get_order_details(transaction):
    """Decode the AlgoDex order parameters from an app-call transaction.

    The second application argument is a base64 string shaped like
    "<n>-<d>-<min>-<asset_id>"; return (n, d, asset_id) as ints (the
    <min> field is not used).
    """
    args = transaction[co.TRANSACTION_KEY_APP_CALL]["application-args"]
    fields = base64.b64decode(args[1]).decode("utf-8").split("-")
    return int(fields[0]), int(fields[1]), int(fields[3])
| 35.713376
| 113
| 0.703585
| 1,434
| 11,214
| 5.140167
| 0.092748
| 0.066748
| 0.054131
| 0.062407
| 0.814815
| 0.774929
| 0.774929
| 0.742776
| 0.742233
| 0.661918
| 0
| 0.009606
| 0.210897
| 11,214
| 313
| 114
| 35.827476
| 0.82337
| 0.03781
| 0
| 0.665158
| 0
| 0
| 0.051711
| 0.004301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.031674
| 0
| 0.135747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bdd86b88596f1cad6d8c49ee2a44d385773feb71
| 90
|
py
|
Python
|
jade2/deep_learning/graphs/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T21:52:23.000Z
|
2019-12-23T21:52:23.000Z
|
jade2/deep_learning/graphs/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | null | null | null |
jade2/deep_learning/graphs/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 2
|
2021-11-13T01:34:15.000Z
|
2021-11-13T01:34:34.000Z
|
from .creation import *
from .features import *
from .util import *
from .modules import *
| 22.5
| 23
| 0.744444
| 12
| 90
| 5.583333
| 0.5
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 90
| 4
| 24
| 22.5
| 0.893333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da1f5aacb6b4e07b89909c0d951c70c31e6a7bb3
| 284
|
py
|
Python
|
gapandas/__init__.py
|
flyandlure/gapandas
|
31a884da101cf1e1bc0a33a171fa820c5eebf560
|
[
"MIT"
] | 9
|
2020-06-08T14:43:00.000Z
|
2022-03-03T18:14:02.000Z
|
gapandas/__init__.py
|
flyandlure/gapandas
|
31a884da101cf1e1bc0a33a171fa820c5eebf560
|
[
"MIT"
] | 2
|
2021-06-17T12:04:32.000Z
|
2021-07-02T15:56:15.000Z
|
gapandas/__init__.py
|
flyandlure/gapandas
|
31a884da101cf1e1bc0a33a171fa820c5eebf560
|
[
"MIT"
] | 2
|
2020-06-18T10:45:38.000Z
|
2021-12-19T20:00:05.000Z
|
from .connect import get_service
from .query import get_column_headers, get_profile_info, get_totals, get_rows, results_to_pandas, run_query
from .reports import monthly_ecommerce_overview
from .reports import monthly_coupons_overview
from .reports import monthly_google_ads_overview
| 47.333333
| 107
| 0.876761
| 42
| 284
| 5.52381
| 0.547619
| 0.142241
| 0.219828
| 0.310345
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088028
| 284
| 5
| 108
| 56.8
| 0.895753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da62d74a9f15b04f6d1cf8794f07ec91f0ed151c
| 145
|
py
|
Python
|
lambdata/helper_functions.py
|
tiannabrianne/lambdata-25
|
1fe0372d9516357a4fe727e5a302fe8cc330b902
|
[
"MIT"
] | null | null | null |
lambdata/helper_functions.py
|
tiannabrianne/lambdata-25
|
1fe0372d9516357a4fe727e5a302fe8cc330b902
|
[
"MIT"
] | null | null | null |
lambdata/helper_functions.py
|
tiannabrianne/lambdata-25
|
1fe0372d9516357a4fe727e5a302fe8cc330b902
|
[
"MIT"
] | null | null | null |
def null_count(df):
    """Return the total number of missing (null/NaN) cells in *df*."""
    nulls_per_column = df.isnull().sum()
    return nulls_per_column.sum()
def list_to_series(list_to_series, df):
    """Convert *list_to_series* (a plain list) into a pandas Series.

    The *df* parameter is unused; it is kept for backward compatibility
    with existing callers.

    Fix: the original referenced the misspelled name ``list_tp_series``,
    raising NameError on every call, and the module never imported pandas.
    """
    # Local import: this module has no top-level pandas import.
    import pandas as pd
    df = pd.Series(list_to_series)
    return df
| 20.714286
| 39
| 0.689655
| 25
| 145
| 3.72
| 0.48
| 0.172043
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 145
| 6
| 40
| 24.166667
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e517062c6a24636e510d1785bb5a6c9a2a6bb945
| 15,331
|
py
|
Python
|
encommon/networks.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
encommon/networks.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
encommon/networks.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
#==============================================================================#
# Enasis Network Common Libraries #
# Python Functions Network Addressing #
#==============================================================================#
# Required Libraries and Configuration #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Library Import and Global Variables #
#------------------------------------------------------------------------------#
# Primary Functions for Network Addressing #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Manipulate an IPv4 Network Address ipv4format #
#------------------------------------------------------------------------------#
# Simplistic Utilities for Network Addressing #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address is Valid str_ipv4_isvalid #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address is Public str_ipv4_ispublic #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address is RFC1918 str_ipv4_isrfc1918 #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address is Link-Local str_ipv4_islinklocal #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address is Localhost str_ipv4_islocalhost #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Check IP Address in Network str_ipv4_insubnet #
#==============================================================================#
#------------------------------------------------------------------------------#
# Required Libraries and Configuration #
#------------------------------------------------------------------------------#
#
#~~ Library Import and Global Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import libraries that should be present in the virtual or system environment
#-----------------------------------------------------------------------------
from netaddr import IPNetwork as netaddr_ipnetwork
from netaddr import IPAddress as netaddr_ipaddress
from re import match as re_match
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Primary Functions for Network Addressing #
#------------------------------------------------------------------------------#
#
#~~ Manipulate an IPv4 Network Address ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Convert the specified IPv4 network address using the intended network format
#-----------------------------------------------------------------------------
# source [REQUIRED] [STRING]
# Network address in the IPv4 format that will be converted to desired format
#-----------------------------------------------------------------------------
# format [REQUIRED] [STRING]
# Desired target converted format that includes several options which include
# * address 12.34.56.7 * network 12.34.56.0
# * address_cidr 12.34.56.7/24 * network_cidr 12.34.56.0/24
# * address_host 12.34.56.7/32 * network_zero 12.34.56.0
# * broadcast 12.34.56.255 * netmask 255.255.255.0
# * address_mac 01:20:34:05:60:07 * reversed 7.56.34.12
#-----------------------------------------------------------------------------
# Returns the newly converted addressing for source and desired address format
#-----------------------------------------------------------------------------
def ipv4format(source, format):
    """Convert an IPv4 network address *source* into the representation
    named by *format*.

    Supported *format* values (anything else returns ""):
      "address", "address_cidr", "address_host", "network", "network_cidr",
      "network_zero", "broadcast", "netmask", "address_mac", "reversed".

    Raises Exception (chained from the underlying netaddr error) when the
    source cannot be parsed/converted.

    NOTE(review): the branches are independent ``if``s, not ``elif``s, but
    since *format* only matches one of them at most one branch runs.
    NOTE(review): "network_zero" is implemented identically to "network" --
    confirm whether it was meant to zero the host bits differently.
    NOTE(review): the parameter name ``format`` shadows the builtin; kept
    because it is part of the public signature.
    """
    #
    # Initial section for instantizing variables expected by remaining routine
    returned = str()
    #
    # Convert specified IPv4 network address using the intended network format
    excepted = "failed to convert and format the source address into intended"
    if format == "address":
        try: x = str(netaddr_ipnetwork(source).ip)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    if format == "address_cidr":
        try: x = str(netaddr_ipnetwork(source).prefixlen)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = "{0}/{1}".format(ipv4format(source, "address"), x)
    if format == "address_host":
        try: x = "{0}/32".format(str(netaddr_ipnetwork(source).ip))
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    #
    # Convert specified IPv4 network address using the intended network format
    excepted = "failed to convert and format the source address into intended"
    if format == "network":
        try: x = str(netaddr_ipnetwork(source).network)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    if format == "network_cidr":
        try: x = str(netaddr_ipnetwork(source).prefixlen)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = "{0}/{1}".format(ipv4format(source, "network"), x)
    if format == "network_zero":
        try: x = str(netaddr_ipnetwork(source).network)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    #
    # Convert specified IPv4 network address using the intended network format
    excepted = "failed to convert and format the source address into intended"
    if format == "broadcast":
        try: x = str(netaddr_ipnetwork(source).broadcast)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    if format == "netmask":
        try: x = str(netaddr_ipnetwork(source).netmask)
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    #
    # Convert specified IPv4 network address using the intended network format
    excepted = "failed to convert and format the source address into intended"
    if format == "address_mac":
        address = ipv4format(source, "address")
        try:
            # Zero-pad every octet to 3 digits, then slice the 12-character
            # string into 6 colon-separated pairs (MAC-like rendering).
            x = str().join([str(x.zfill(3)) for x in address.split(".")])
            y = '{0}:{1}:{2}:{3}:{4}:{5}'
            x = y.format(x[0:2], x[2:4], x[4:6], x[6:8], x[8:10], x[10:12])
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    #
    # Convert specified IPv4 network address using the intended network format
    if format == "reversed":
        address = ipv4format(source, "address")
        try:
            # Reverse the octet order, e.g. for PTR-style lookups.
            x = address.split(".")
            x = ".".join([x[3], x[2], x[1], x[0]])
        except Exception as reason: raise Exception(excepted) from reason
        else: returned = x
    #
    # Returns newly converted addressing for source and desired address format
    return returned
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Simplistic Utilities for Network Addressing #
#------------------------------------------------------------------------------#
#
#~~ Check IP Address is Valid ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within the boundary the octect values permit
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_isvalid(value):
    """Return True when *value* is a dotted-quad IPv4 address whose four
    octets all fall in 0-255, False otherwise."""
    matching = r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
    matching += r'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
    # Anything (including None) is stringified before matching.
    return bool(re_match(matching, str(value)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~ Check IP Address is Public ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within the boundary of public IP assignments
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_ispublic(value):
    """Return True when *value* is a valid IPv4 address that is not in any
    special-purpose range (RFC1918 private, link-local, loopback)."""
    # Each helper returns a strict bool, so the chain yields True/False.
    return (str_ipv4_isvalid(value)
            and not str_ipv4_isrfc1918(value)
            and not str_ipv4_islinklocal(value)
            and not str_ipv4_islocalhost(value))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~ Check IP Address is RFC1918 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within the boundaries of what RFC1918 define
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_isrfc1918(value):
    """Return True when *value* is an RFC1918 private IPv4 address
    (10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16), False otherwise."""
    if not str_ipv4_isvalid(value):
        return False
    matching = r'^(10\.\d+\.\d+\.\d+)|(192\.168\.\d+\.\d+)'
    matching += r'|(172\.((1[6-9])|(2[0-9])|(3[0-1]))\.\d+\.\d+)$'
    return bool(re_match(matching, str(value)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~ Check IP Address is Link-Local ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within specifications for link-local address
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_islinklocal(value):
    """Return True when *value* is a 169.254.0.0/16 link-local IPv4
    address, False otherwise."""
    if not str_ipv4_isvalid(value):
        return False
    return bool(re_match(r'^(169\.254\.\d+\.\d+)$', str(value)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~ Check IP Address is Localhost ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within specifications for loopback interface
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_islocalhost(value):
    """Return True when *value* is a 127.0.0.0/8 loopback IPv4 address,
    False otherwise.

    (The original inline comment said "link-local"; the regex actually
    matches the loopback range.)
    """
    if not str_ipv4_isvalid(value):
        return False
    return bool(re_match(r'^(127\.\d+\.\d+\.\d+)$', str(value)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~ Check IP Address in Network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate that the IP address is within the one of the networks in given list
#-----------------------------------------------------------------------------
# value [REQUIRED] [STRING]
# String based value that is parsed and processed determining when validated
#-----------------------------------------------------------------------------
# networks [REQUIRED] [LIST]
# List of networks in the CIDR format iterated for validating provided value
#-----------------------------------------------------------------------------
# Returns the correct boolean indicating whether or not the value is validated
#-----------------------------------------------------------------------------
def str_ipv4_insubnet(value, networks):
    """Return True when IPv4 address *value* belongs to at least one of
    the CIDR-formatted *networks*, False otherwise.

    Fix: parse *value* once instead of on every iteration, and return as
    soon as a containing network is found instead of scanning the rest of
    the list.
    """
    address = netaddr_ipaddress(value)
    for network in networks:
        if address in netaddr_ipnetwork(network):
            return True
    return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#------------------------------------------------------------------------------#
| 53.982394
| 80
| 0.464158
| 1,369
| 15,331
| 5.150475
| 0.130022
| 0.030634
| 0.034321
| 0.028932
| 0.808963
| 0.791377
| 0.758899
| 0.757198
| 0.747554
| 0.679336
| 0
| 0.019682
| 0.201292
| 15,331
| 283
| 81
| 54.173145
| 0.556145
| 0.671515
| 0
| 0.53
| 0
| 0.03
| 0.131093
| 0.049782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07
| false
| 0
| 0.03
| 0
| 0.17
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e5243c444333c81c0aed99122c46c68405be1d6e
| 46
|
py
|
Python
|
library/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 3
|
2019-11-01T04:51:51.000Z
|
2019-12-17T04:25:18.000Z
|
library/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 1
|
2019-11-11T18:29:36.000Z
|
2019-11-11T18:29:36.000Z
|
library/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 2
|
2019-12-18T11:49:00.000Z
|
2020-03-27T20:06:15.000Z
|
# Created by Xinyu Zhu on 10/28/2019, 5:00 PM
| 23
| 45
| 0.695652
| 11
| 46
| 2.909091
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297297
| 0.195652
| 46
| 1
| 46
| 46
| 0.567568
| 0.934783
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e544edd1a6256cf803831ef1b227c951ae82d7cf
| 52
|
py
|
Python
|
pyqt_left_right_list_widget/__init__.py
|
yjg30737/pyqt-left-right-list-widget
|
dd1f2dfc3b2c73ed9692570e362a2bae20465e47
|
[
"MIT"
] | null | null | null |
pyqt_left_right_list_widget/__init__.py
|
yjg30737/pyqt-left-right-list-widget
|
dd1f2dfc3b2c73ed9692570e362a2bae20465e47
|
[
"MIT"
] | null | null | null |
pyqt_left_right_list_widget/__init__.py
|
yjg30737/pyqt-left-right-list-widget
|
dd1f2dfc3b2c73ed9692570e362a2bae20465e47
|
[
"MIT"
] | null | null | null |
from .leftRightListWidget import LeftRightListWidget
| 52
| 52
| 0.923077
| 4
| 52
| 12
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 52
| 1
| 52
| 52
| 0.979592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e552b22e1b53d5c9d30ce7fb6009a6474dc70823
| 85
|
py
|
Python
|
minicurve/__init__.py
|
marekyggdrasil/minicurve
|
aedaed2b37861c05e29b2c512b8ca99cce711631
|
[
"MIT"
] | 2
|
2021-09-11T13:40:55.000Z
|
2021-11-20T14:22:16.000Z
|
minicurve/__init__.py
|
marekyggdrasil/minicurve
|
aedaed2b37861c05e29b2c512b8ca99cce711631
|
[
"MIT"
] | null | null | null |
minicurve/__init__.py
|
marekyggdrasil/minicurve
|
aedaed2b37861c05e29b2c512b8ca99cce711631
|
[
"MIT"
] | null | null | null |
from minicurve.curve import MiniCurve
from minicurve.visualization import Visualizer
| 28.333333
| 46
| 0.882353
| 10
| 85
| 7.5
| 0.6
| 0.346667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 2
| 47
| 42.5
| 0.974026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e576d183dec2ce1e22c68c04b1b49b3db9b1a3e1
| 29
|
py
|
Python
|
feature_process/__init__.py
|
vc-nju/drfi_python
|
8e72867f478f51f0840b2fe2887074cae9a8f3ee
|
[
"MIT"
] | 13
|
2019-01-07T06:57:58.000Z
|
2020-10-31T16:02:02.000Z
|
feature_process/__init__.py
|
vc-nju/drfi_python
|
8e72867f478f51f0840b2fe2887074cae9a8f3ee
|
[
"MIT"
] | 2
|
2019-01-11T22:13:34.000Z
|
2020-07-03T08:18:17.000Z
|
feature_process/__init__.py
|
vc-nju/drfi_python
|
8e72867f478f51f0840b2fe2887074cae9a8f3ee
|
[
"MIT"
] | 2
|
2019-01-11T06:43:22.000Z
|
2019-11-16T06:02:32.000Z
|
from .feature import Features
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5b9a11054da1d47192198a2cc5d6a9affb984b4
| 43
|
py
|
Python
|
try.py
|
Wairimu-3/pitch2
|
0b6276f3a8ab1ffd191676705c93e0ea2e7f938a
|
[
"MIT"
] | null | null | null |
try.py
|
Wairimu-3/pitch2
|
0b6276f3a8ab1ffd191676705c93e0ea2e7f938a
|
[
"MIT"
] | null | null | null |
try.py
|
Wairimu-3/pitch2
|
0b6276f3a8ab1ffd191676705c93e0ea2e7f938a
|
[
"MIT"
] | null | null | null |
from werkzeug import url_encode
print(1+1)
| 14.333333
| 31
| 0.813953
| 8
| 43
| 4.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.116279
| 43
| 2
| 32
| 21.5
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
e5d41a0cb4890b2433301af291d7497307106915
| 45
|
py
|
Python
|
pygpconnect/__init__.py
|
elementechemlyn/pygpconnect
|
0f2c95ce212e86c2a0edc6d1e2e3da69a50b176b
|
[
"MIT"
] | null | null | null |
pygpconnect/__init__.py
|
elementechemlyn/pygpconnect
|
0f2c95ce212e86c2a0edc6d1e2e3da69a50b176b
|
[
"MIT"
] | null | null | null |
pygpconnect/__init__.py
|
elementechemlyn/pygpconnect
|
0f2c95ce212e86c2a0edc6d1e2e3da69a50b176b
|
[
"MIT"
] | null | null | null |
from pygpconnect.gpconnect import GPConnect
| 15
| 43
| 0.866667
| 5
| 45
| 7.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 2
| 44
| 22.5
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5f651a5619d60f1360e6dcd71938dbbbfd01add
| 27
|
py
|
Python
|
Chapter 01/Chap01_Example1.140.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.140.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.140.py
|
bpbpublications/Programming-Techniques-using-Python
|
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
|
[
"MIT"
] | null | null | null |
# Teaching example: tuples are immutable, so item assignment is an error.
t1 = (5,6,2,1)
t1[0] = 4  # raises TypeError: 'tuple' object does not support item assignment
| 9
| 15
| 0.37037
| 8
| 27
| 1.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 0.296296
| 27
| 2
| 16
| 13.5
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f923aad4f1f5ac9e253d6ff5b6601d5226e92774
| 46
|
py
|
Python
|
environment/lib/python3.7/site-packages/visions/utils/coercion/__init__.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
environment/lib/python3.7/site-packages/visions/utils/coercion/__init__.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
environment/lib/python3.7/site-packages/visions/utils/coercion/__init__.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
from visions.utils.coercion import test_utils
| 23
| 45
| 0.869565
| 7
| 46
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
006c3de6cff50f72db0182f2bc2c61771aab54e0
| 47
|
py
|
Python
|
fourierflow/viz/__init__.py
|
alasdairtran/fourierflow
|
7fb610bd15b41288a1639ec7b4ec615c4d4e5c2b
|
[
"MIT"
] | 42
|
2021-11-24T14:29:24.000Z
|
2022-03-14T07:20:30.000Z
|
fourierflow/viz/__init__.py
|
alasdairtran/fourierflow
|
7fb610bd15b41288a1639ec7b4ec615c4d4e5c2b
|
[
"MIT"
] | 1
|
2021-12-17T03:27:40.000Z
|
2021-12-24T12:31:47.000Z
|
fourierflow/viz/__init__.py
|
alasdairtran/fourierflow
|
7fb610bd15b41288a1639ec7b4ec615c4d4e5c2b
|
[
"MIT"
] | 4
|
2021-12-03T07:51:34.000Z
|
2022-01-24T08:26:35.000Z
|
from .heatmap import log_navier_stokes_heatmap
| 23.5
| 46
| 0.893617
| 7
| 47
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00a87a13be2486ba915e08671d4478019deccb8c
| 76
|
py
|
Python
|
src/restaff/types/notation_markup/__init__.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | null | null | null |
src/restaff/types/notation_markup/__init__.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | 10
|
2020-06-20T07:37:27.000Z
|
2020-07-05T06:22:07.000Z
|
src/restaff/types/notation_markup/__init__.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | null | null | null |
from .markup_properties import *
from .measure import *
from .note import *
| 19
| 32
| 0.763158
| 10
| 76
| 5.7
| 0.6
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 76
| 3
| 33
| 25.333333
| 0.890625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00b0e85195289557ba6d82bd76c0aabaebcd5e35
| 32
|
py
|
Python
|
bulletin/tools/plugins/api/story/__init__.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 5
|
2015-03-13T19:17:23.000Z
|
2016-08-07T00:12:23.000Z
|
bulletin/tools/plugins/api/story/__init__.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 54
|
2015-03-13T20:04:03.000Z
|
2021-07-21T05:25:20.000Z
|
bulletin/tools/plugins/api/story/__init__.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 5
|
2015-02-12T20:19:19.000Z
|
2020-02-26T22:11:47.000Z
|
import serializers
import views
| 10.666667
| 18
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 2
| 19
| 16
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00b9d1fa0f5bfed2cfcdcaeb5342c122503a78db
| 41
|
py
|
Python
|
statsdhandler/__init__.py
|
EBRD-ProzorroSale/statsdhandler
|
6c4d83a5acebfae1f1443373e5f2721914ff3076
|
[
"Apache-2.0"
] | null | null | null |
statsdhandler/__init__.py
|
EBRD-ProzorroSale/statsdhandler
|
6c4d83a5acebfae1f1443373e5f2721914ff3076
|
[
"Apache-2.0"
] | null | null | null |
statsdhandler/__init__.py
|
EBRD-ProzorroSale/statsdhandler
|
6c4d83a5acebfae1f1443373e5f2721914ff3076
|
[
"Apache-2.0"
] | 1
|
2019-12-10T10:11:36.000Z
|
2019-12-10T10:11:36.000Z
|
from .statsdhandler import StatsdHandler
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00ce30fefbdded96b7020975402aa4ad999b11cc
| 66
|
py
|
Python
|
sim/mechanism/__init__.py
|
vegaprotocol/mlp-tools
|
f00ddf92766f78253fe61e5abe3f67ad9542a809
|
[
"MIT"
] | 1
|
2021-01-26T01:56:36.000Z
|
2021-01-26T01:56:36.000Z
|
sim/mechanism/__init__.py
|
vegaprotocol/mlp-tools
|
f00ddf92766f78253fe61e5abe3f67ad9542a809
|
[
"MIT"
] | null | null | null |
sim/mechanism/__init__.py
|
vegaprotocol/mlp-tools
|
f00ddf92766f78253fe61e5abe3f67ad9542a809
|
[
"MIT"
] | 1
|
2020-10-22T07:32:27.000Z
|
2020-10-22T07:32:27.000Z
|
from .liquidity import *
from .market import *
from .risk import *
| 22
| 24
| 0.742424
| 9
| 66
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 66
| 3
| 25
| 22
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00f1c5eb07a8f03bd754e9f012c1b059fe75afe1
| 172
|
py
|
Python
|
scripts/python_helpers.py
|
ankane/duckdb
|
8a60a6144059067939092bdc30ca229093c984e5
|
[
"MIT"
] | 3
|
2021-05-13T04:15:45.000Z
|
2022-03-03T16:57:16.000Z
|
scripts/python_helpers.py
|
ankane/duckdb
|
8a60a6144059067939092bdc30ca229093c984e5
|
[
"MIT"
] | 2
|
2021-10-02T02:52:39.000Z
|
2022-01-04T20:08:06.000Z
|
scripts/python_helpers.py
|
ankane/duckdb
|
8a60a6144059067939092bdc30ca229093c984e5
|
[
"MIT"
] | 1
|
2022-03-09T10:50:29.000Z
|
2022-03-09T10:50:29.000Z
|
def open_utf8(fpath, flags):
import sys
if sys.version_info[0] < 3:
return open(fpath, flags)
else:
return open(fpath, flags, encoding="utf8")
| 21.5
| 50
| 0.616279
| 24
| 172
| 4.333333
| 0.625
| 0.288462
| 0.288462
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.267442
| 172
| 7
| 51
| 24.571429
| 0.793651
| 0
| 0
| 0
| 0
| 0
| 0.023392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
da96922b58e031bfa056e262586c9a50d9d44477
| 52
|
py
|
Python
|
train/PPO/__init__.py
|
LiuTed/gym-TD
|
66fda4fbd877ceb0ce815e7a6a746ac0afde21a0
|
[
"MIT"
] | null | null | null |
train/PPO/__init__.py
|
LiuTed/gym-TD
|
66fda4fbd877ceb0ce815e7a6a746ac0afde21a0
|
[
"MIT"
] | null | null | null |
train/PPO/__init__.py
|
LiuTed/gym-TD
|
66fda4fbd877ceb0ce815e7a6a746ac0afde21a0
|
[
"MIT"
] | null | null | null |
from PPO.Model import PPO
from PPO import Callbacks
| 17.333333
| 25
| 0.826923
| 9
| 52
| 4.777778
| 0.555556
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 52
| 2
| 26
| 26
| 0.977273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dab197def2db5f70dd75f1ba19e3715479779d4d
| 663
|
py
|
Python
|
plenum/test/input_validation/test_handle_one_node_message.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/input_validation/test_handle_one_node_message.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/input_validation/test_handle_one_node_message.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | 2
|
2017-12-13T21:14:54.000Z
|
2021-06-06T15:48:03.000Z
|
import pytest
@pytest.mark.skip('INDY-79. Implement')
def test_empty_args_fail(testNode):
before_msg = len(testNode.nodeInBox)
while pytest.raises(AssertionError):
testNode.handleOneNodeMsg(())
assert before_msg == len(testNode.nodeInBox), \
'nodeInBox has not got a message'
@pytest.mark.skip('INDY-79. Implement')
def test_too_many_args_fail(testNode):
before_msg = len(testNode.nodeInBox)
testNode.handleOneNodeMsg(({}, 'otherNone', 'extra_arg'))
while pytest.raises(AssertionError):
testNode.handleOneNodeMsg(())
assert before_msg == len(testNode.nodeInBox), \
'nodeInBox has not got a message'
| 31.571429
| 61
| 0.710407
| 78
| 663
| 5.884615
| 0.410256
| 0.078431
| 0.104575
| 0.174292
| 0.858388
| 0.858388
| 0.858388
| 0.858388
| 0.505447
| 0.505447
| 0
| 0.007246
| 0.167421
| 663
| 20
| 62
| 33.15
| 0.824275
| 0
| 0
| 0.75
| 0
| 0
| 0.174962
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
971b0a8f3d62063a3256917f2e3e836e1272f60f
| 24
|
py
|
Python
|
Lib/test/bad_coding.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 52,316
|
2015-01-01T15:56:25.000Z
|
2022-03-31T23:19:01.000Z
|
Lib/test/bad_coding.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 25,286
|
2015-03-03T23:18:02.000Z
|
2022-03-31T23:17:27.000Z
|
Lib/test/bad_coding.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 31,623
|
2015-01-01T13:29:37.000Z
|
2022-03-31T19:55:06.000Z
|
# -*- coding: uft-8 -*-
| 12
| 23
| 0.416667
| 3
| 24
| 3.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.208333
| 24
| 1
| 24
| 24
| 0.473684
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8adf320972d13d62d037841e6a1798412e94142f
| 87
|
py
|
Python
|
ecommerce/shipping.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
ecommerce/shipping.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
ecommerce/shipping.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
def calculate_shipping():
print("calculate shipping")
# import ecommerce.shipping
| 17.4
| 31
| 0.758621
| 9
| 87
| 7.222222
| 0.666667
| 0.523077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 87
| 5
| 32
| 17.4
| 0.866667
| 0.287356
| 0
| 0
| 0
| 0
| 0.295082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8af11a97f1e5e9e04c59cbfb08b2a7f60f7ccecf
| 11,240
|
py
|
Python
|
zhaquirks/tuya/ts0601_dimmer.py
|
Siglis-AG/zha-device-handlers
|
2e8d6a117fbc1bec50cad463a8a2dcf948588838
|
[
"Apache-2.0"
] | 56
|
2018-12-07T19:45:36.000Z
|
2020-03-30T15:01:58.000Z
|
zhaquirks/tuya/ts0601_dimmer.py
|
Siglis-AG/zha-device-handlers
|
2e8d6a117fbc1bec50cad463a8a2dcf948588838
|
[
"Apache-2.0"
] | 207
|
2018-12-07T20:34:30.000Z
|
2020-04-03T11:50:39.000Z
|
zhaquirks/tuya/ts0601_dimmer.py
|
Siglis-AG/zha-device-handlers
|
2e8d6a117fbc1bec50cad463a8a2dcf948588838
|
[
"Apache-2.0"
] | 65
|
2018-12-08T01:11:41.000Z
|
2020-03-24T18:23:17.000Z
|
"""Tuya based touch switch."""
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import Basic, GreenPowerProxy, Groups, Ota, Scenes, Time
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.tuya import NoManufacturerCluster, TuyaDimmerSwitch
from zhaquirks.tuya.mcu import (
TuyaInWallLevelControl,
TuyaLevelControlManufCluster,
TuyaOnOff,
TuyaOnOffNM,
)
class TuyaInWallLevelControlNM(NoManufacturerCluster, TuyaInWallLevelControl):
"""Tuya Level cluster for inwall dimmable device with NoManufacturerID."""
pass
# --- DEVICE SUMMARY ---
# TuyaSingleSwitchDimmer: 0x00, 0x04, 0x05, 0xEF00; 0x000A, 0x0019
# TuyaDoubleSwitchDimmer: 0x00, 0x04, 0x05, 0xEF00; 0x000A, 0x0019
# - Dimmer with Green Power Proxy: Endpoint=242 profile=41440 device_type=0x0061, output_clusters: 0x0021 -
# TuyaSingleSwitchDimmerGP: 0x00, 0x04, 0x05, 0xEF00; 0x000A, 0x0019
# TuyaDoubleSwitchDimmerGP: 0x00, 0x04, 0x05, 0xEF00; 0x000A, 0x0019
# TuyaTripleSwitchDimmerGP: 0x00, 0x04, 0x05, 0xEF00; 0x000A, 0x0019
class TuyaSingleSwitchDimmer(TuyaDimmerSwitch):
"""Tuya touch switch device."""
signature = {
MODELS_INFO: [
("_TZE200_dfxkcots", "TS0601"),
("_TZE200_whpb9yts", "TS0601"),
("_TZE200_ebwgzdqq", "TS0601"),
("_TZE200_9i9dt8is", "TS0601"),
("_TZE200_swaamsoy", "TS0601"),
("_TZE200_0nauxa0p", "TS0601"),
("_TZE200_la2c2uo9", "TS0601"),
("_TZE200_1agwnems", "TS0601"), # TODO: validation pending?
("_TZE200_9cxuhakf", "TS0601"), # Added for Mercator IKUU SSWM-DIMZ Device
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=0x0051
# device_version=1
# input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster,
TuyaOnOff,
TuyaInWallLevelControl,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
}
}
class TuyaDoubleSwitchDimmer(TuyaDimmerSwitch):
"""Tuya double channel dimmer device."""
signature = {
MODELS_INFO: [
("_TZE200_e3oitdyu", "TS0601"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=0x0051
# device_version=1
# input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster,
TuyaOnOff,
TuyaInWallLevelControl,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
TuyaOnOff,
TuyaInWallLevelControl,
],
OUTPUT_CLUSTERS: [],
},
}
}
class TuyaSingleSwitchDimmerGP(TuyaDimmerSwitch):
"""Tuya touch switch device."""
signature = {
MODELS_INFO: [
("_TZE200_3p5ydos3", "TS0601"),
("_TZE200_ip2akl4w", "TS0601"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=0x0100
# device_version=1
# input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# input_clusters=[]
# output_clusters=[33]
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster,
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
}
class TuyaDoubleSwitchDimmerGP(TuyaDimmerSwitch):
"""Tuya double channel dimmer device."""
signature = {
MODELS_INFO: [
("_TZE200_fjjbhx9d", "TS0601"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=0x0100
# device_version=1
# input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# input_clusters=[]
# output_clusters=[33]
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster,
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [],
},
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
}
class TuyaTripleSwitchDimmerGP(TuyaDimmerSwitch):
"""Tuya triple channel dimmer device."""
signature = {
MODELS_INFO: [
("_TZE200_vm1gyrso", "TS0601"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=0x0100
# device_version=1
# input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# input_clusters=[]
# output_clusters=[33]
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaLevelControlManufCluster,
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
TuyaOnOffNM,
TuyaInWallLevelControlNM,
],
OUTPUT_CLUSTERS: [],
},
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 97,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
}
| 32.57971
| 107
| 0.499911
| 861
| 11,240
| 6.250871
| 0.13705
| 0.102007
| 0.033816
| 0.059829
| 0.784095
| 0.784095
| 0.750093
| 0.741918
| 0.741918
| 0.717763
| 0
| 0.069909
| 0.414591
| 11,240
| 344
| 108
| 32.674419
| 0.748024
| 0.155694
| 0
| 0.727273
| 0
| 0
| 0.032707
| 0
| 0
| 0
| 0
| 0.002907
| 0
| 1
| 0
| false
| 0.003636
| 0.018182
| 0
| 0.076364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c11c9ec845d1493d11855f3e668b8aaf20e3dc18
| 28
|
py
|
Python
|
app/snippets/models/__init__.py
|
cosmos-sajal/django-rest-todo
|
c8ebf5acd305b9d8df9b7be1069613bd309e1857
|
[
"MIT"
] | null | null | null |
app/snippets/models/__init__.py
|
cosmos-sajal/django-rest-todo
|
c8ebf5acd305b9d8df9b7be1069613bd309e1857
|
[
"MIT"
] | null | null | null |
app/snippets/models/__init__.py
|
cosmos-sajal/django-rest-todo
|
c8ebf5acd305b9d8df9b7be1069613bd309e1857
|
[
"MIT"
] | null | null | null |
from .snippet import Snippet
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c1a4eb7f168b8dbc50cc2c3a8fa0df967fb48267
| 146
|
py
|
Python
|
app/crud/__init__.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
app/crud/__init__.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
app/crud/__init__.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
"""
Module imports
"""
# Ignore warnings for the entire file
# flake8: noqa
from app.crud.product import product
from app.crud.user import user
| 14.6
| 37
| 0.746575
| 22
| 146
| 4.954545
| 0.727273
| 0.12844
| 0.201835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.164384
| 146
| 9
| 38
| 16.222222
| 0.885246
| 0.438356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c1f7ee28c52807200734ffa0987c5fe58fb0fe28
| 2,134
|
py
|
Python
|
tests/datasets/context.py
|
GlobalMaksimum/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 100
|
2020-07-06T05:50:49.000Z
|
2022-03-21T21:56:55.000Z
|
tests/datasets/context.py
|
LyotardPostmodernizm/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 244
|
2020-07-06T06:31:01.000Z
|
2022-02-26T10:40:17.000Z
|
tests/datasets/context.py
|
LyotardPostmodernizm/sadedegel
|
8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b
|
[
"MIT"
] | 23
|
2020-07-27T16:32:48.000Z
|
2022-03-18T11:13:07.000Z
|
import sys
from pathlib import Path
sys.path.insert(0, (Path(__file__) / '..' / '..').absolute())
from sadedegel.dataset import load_raw_corpus, load_sentence_corpus,load_annotated_corpus # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.extended import load_extended_metadata, load_extended_sents_corpus, load_extended_raw_corpus # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.tscorpus import load_tokenization_raw,load_tokenization_tokenized, check_and_display, CORPUS_SIZE # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.tweet_sentiment import load_tweet_sentiment_train, CLASS_VALUES # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.product_sentiment import load_product_sentiment_train # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.product_sentiment import CLASS_VALUES as PS_CLASS_VALUES # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.telco_sentiment import load_telco_sentiment_train, load_telco_sentiment_test, load_telco_sentiment_test_label # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.telco_sentiment import CLASS_VALUES as TELCO_CLASS_VALUES # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.categorized_product_sentiment import load_categorized_product_sentiment_train, SENTIMENT_CLASS_VALUES, PRODUCT_CATEGORIES # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import movie_sentiment # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import hotel_sentiment # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.bblock.cli.__main__ import tok_eval # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import util # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import file_paths, CorpusTypeEnum # noqa # pylint: disable=unused-import, wrong-import-position
| 106.7
| 206
| 0.840206
| 284
| 2,134
| 6.066901
| 0.193662
| 0.10563
| 0.138131
| 0.186883
| 0.65119
| 0.6361
| 0.6361
| 0.6361
| 0.608241
| 0.608241
| 0
| 0.000509
| 0.079663
| 2,134
| 19
| 207
| 112.315789
| 0.876782
| 0.386598
| 0
| 0
| 0
| 0
| 0.003125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.941176
| 0
| 0.941176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a9d3bdf49e84bf37e8a1e873cb4786dd0aa3d685
| 3,858
|
py
|
Python
|
test/test_data_type_json.py
|
nianxy/jsonalize
|
d34ac24cf438590fe731d70d9a98694efe4d22fe
|
[
"MIT"
] | 2
|
2020-01-09T10:24:35.000Z
|
2020-01-21T02:57:30.000Z
|
test/test_data_type_json.py
|
nianxy/jsonalize
|
d34ac24cf438590fe731d70d9a98694efe4d22fe
|
[
"MIT"
] | null | null | null |
test/test_data_type_json.py
|
nianxy/jsonalize
|
d34ac24cf438590fe731d70d9a98694efe4d22fe
|
[
"MIT"
] | null | null | null |
import test
import pytest
from jsonalize.jsonalize import *
def _remove_space_from_str(s):
return s.replace(" ", "")
class TestDataTypeJSON:
def test_int(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONInt()
obj = A()
obj.v = 10
json_str = obj.to_json()
test_json_str = '{"v":10}'
assert _remove_space_from_str(json_str) == test_json_str
if test.IS_PYTHON_2:
def test_long(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONLong()
obj = A()
obj.v = 10
json_str = obj.to_json()
test_json_str = '{"v":10}'
assert _remove_space_from_str(json_str) == test_json_str
def test_float(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONFloat()
obj = A()
obj.v = 10.0
json_str = obj.to_json()
test_json_str = '{"v":10.0}'
assert _remove_space_from_str(json_str) == test_json_str
def test_complex(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONComplex()
obj = A()
obj.v = 1+2j
json_str = obj.to_json()
test_json_str = ['{"v":{"r":1.0,"i":2.0}}','{"v":{"i":2.0,"r":1.0}}']
assert _remove_space_from_str(json_str) in test_json_str
def test_bool(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONBool()
obj = A()
obj.v = True
json_str = obj.to_json()
test_json_str = '{"v":true}'
assert _remove_space_from_str(json_str) == test_json_str
def test_string(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONString()
obj = A()
obj.v = "jsonalize"
json_str = obj.to_json()
test_json_str = '{"v":"jsonalize"}'
assert _remove_space_from_str(json_str) == test_json_str
def test_list(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONList()
obj = A()
obj.v = [1,2]
json_str = obj.to_json()
test_json_str = '{"v":[1,2]}'
assert _remove_space_from_str(json_str) == test_json_str
def test_set(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONSet()
obj = A()
obj.v = set([1,2])
json_str = obj.to_json()
test_json_str = ['{"v":[1,2]}','{"v":[2,1]}']
assert _remove_space_from_str(json_str) in test_json_str
def test_dict(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONDict()
obj = A()
obj.v = {"v1":1}
json_str = obj.to_json()
test_json_str = '{"v":{"v1":1}}'
assert _remove_space_from_str(json_str) in test_json_str
def test_object(self):
class A(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = B()
class B(JSONObject):
def __init__(self):
JSONObject.__init__(self)
self.v = JSONInt()
obj = A()
obj.v.v = 1
json_str = obj.to_json()
test_json_str = '{"v":{"v":1}}'
assert _remove_space_from_str(json_str) in test_json_str
| 24.573248
| 77
| 0.516848
| 477
| 3,858
| 3.731656
| 0.1174
| 0.157303
| 0.123596
| 0.111236
| 0.807865
| 0.792697
| 0.792697
| 0.792697
| 0.791573
| 0.744382
| 0
| 0.016619
| 0.36055
| 3,858
| 156
| 78
| 24.730769
| 0.704905
| 0
| 0
| 0.612613
| 0
| 0
| 0.044335
| 0.011926
| 0
| 0
| 0
| 0
| 0.09009
| 1
| 0.198198
| false
| 0
| 0.027027
| 0.009009
| 0.342342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a9d81d3edfc79b4a914235da1af2cca0a6486ae8
| 10,550
|
py
|
Python
|
pymatflow/cp2k/base/dft_real_time_propagation.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/cp2k/base/dft_real_time_propagation.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/cp2k/base/dft_real_time_propagation.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# ==================================
# ==================================
class cp2k_dft_real_time_propagation_print_current_each:
def __init__(self):
self.params = {
}
self.status = False
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t\t\t\t&EACH\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
fout.write("\t\t\t\t\t&END EACH\n")
def set_params(self, params):
#
for item in params:
if len(item.split("-")) == 6:
self.params[item.split("-")[-1]] = params[item]
else:
pass
class cp2k_dft_real_time_propagation_print_current:
def __init__(self):
self.params = {
}
self.status = False
self.each = cp2k_dft_real_time_propagation_print_current_each()
# basic setting
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t\t\t&CURRENT\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
if self.each.status == True:
self.each.to_input(fout)
fout.write("\t\t\t\t&END CURRENT\n")
def set_params(self, params):
#
for item in params:
if len(item.split("-")) == 5:
self.params[item.split("-")[-1]] = params[item]
elif item.split("-")[4] == "EACH":
self.each.set_params({item: params[item]})
else:
pass
class cp2k_dft_real_time_propagation_print_program_run_info_each:
def __init__(self):
self.params = {
}
self.status = False
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t\t\t\t&EACH\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
fout.write("\t\t\t\t\t&END EACH\n")
def set_params(self, params):
#
for item in params:
if len(item.split("-")) == 6:
self.params[item.split("-")[-1]] = params[item]
else:
pass
class cp2k_dft_real_time_propagation_print_program_run_info:
def __init__(self):
self.params = {
}
self.status = False
self.each = cp2k_dft_real_time_propagation_print_program_run_info_each()
# basic setting
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t\t\t&PROGRAM_RUN_INFO\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
if self.each.status == True:
self.each.to_input(fout)
fout.write("\t\t\t\t&END PROGRAM_RUN_INFO\n")
def set_params(self, params):
#
for item in params:
if len(item.split("-")) == 5:
self.params[item.split("-")[-1]] = params[item]
elif item.split("-")[4] == "EACH":
self.each.set_params({item: params[item]})
else:
pass
class cp2k_dft_real_time_propagation_print_restart_each:
    """&EACH subsection of DFT/REAL_TIME_PROPAGATION/PRINT/RESTART."""

    def __init__(self):
        # keyword -> value pairs, filled on demand via set_params
        self.params = {}
        # the parent section only emits this block while status is True
        self.status = False

    def to_input(self, fout):
        """Write this &EACH section to fout (an open, writable text stream)."""
        fout.write("\t\t\t\t\t&EACH\n")
        for key, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (key, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only '-'-joined keys with exactly 6 components."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[key]
class cp2k_dft_real_time_propagation_print_restart:
    """&RESTART section of DFT/REAL_TIME_PROPAGATION/PRINT."""

    def __init__(self):
        self.params = {}
        self.status = False
        # nested &EACH subsection
        self.each = cp2k_dft_real_time_propagation_print_restart_each()

    def to_input(self, fout):
        """Write this section (and an active &EACH child) to fout."""
        fout.write("\t\t\t\t&RESTART\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (key, str(value)))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END RESTART\n")

    def set_params(self, params):
        """Keep 5-component '-'-joined keys; forward EACH keys to the child."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[key]
            elif parts[4] == "EACH":
                self.each.set_params({key: params[key]})
class cp2k_dft_real_time_propagation_print_restart_history_each:
    """&EACH subsection of DFT/REAL_TIME_PROPAGATION/PRINT/RESTART_HISTORY."""

    def __init__(self):
        # keyword -> value pairs, filled on demand via set_params
        self.params = {}
        # the parent section only emits this block while status is True
        self.status = False

    def to_input(self, fout):
        """Write this &EACH section to fout (an open, writable text stream)."""
        fout.write("\t\t\t\t\t&EACH\n")
        for key, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (key, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only '-'-joined keys with exactly 6 components."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[key]
class cp2k_dft_real_time_propagation_print_restart_history:
    """&RESTART_HISTORY section of DFT/REAL_TIME_PROPAGATION/PRINT."""
    def __init__(self):
        self.params = {
        }
        self.status = False
        # nested &EACH subsection
        self.each = cp2k_dft_real_time_propagation_print_restart_history_each()
        # basic setting
    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&RESTART_HISTORY\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        # Bug fix: the original wrote "&END RESTART_HISTORY" without a
        # trailing "\n" (every sibling section writes one), gluing the next
        # input line onto the closing tag and corrupting the CP2K input file.
        fout.write("\t\t\t\t&END RESTART_HISTORY\n")
    def set_params(self, params):
        """5-component keys belong here; EACH keys go to the nested section."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_real_time_propagation_print:
    """&PRINT section of DFT/REAL_TIME_PROPAGATION with its subsections."""

    def __init__(self):
        self.params = {}
        self.status = False
        # nested printable subsections
        self.current = cp2k_dft_real_time_propagation_print_current()
        self.program_run_info = cp2k_dft_real_time_propagation_print_program_run_info()
        self.restart = cp2k_dft_real_time_propagation_print_restart()
        self.restart_history = cp2k_dft_real_time_propagation_print_restart_history()

    def to_input(self, fout):
        """Write the &PRINT section and every active subsection to fout."""
        fout.write("\t\t\t&PRINT\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (key, str(value)))
        # children emit themselves in declaration order when activated
        for child in (self.current, self.program_run_info, self.restart,
                      self.restart_history):
            if child.status:
                child.to_input(fout)
        fout.write("\t\t\t&END PRINT\n")

    def set_params(self, params):
        """Keep 4-component keys; dispatch deeper keys by their 4th component."""
        children = {
            "CURRENT": self.current,
            "PROGRAM_RUN_INFO": self.program_run_info,
            "RESTART": self.restart,
            "RESTART_HISTORY": self.restart_history,
        }
        for key in params:
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = params[key]
            elif parts[3] in children:
                children[parts[3]].set_params({key: params[key]})
class cp2k_dft_real_time_propagation:
    """&REAL_TIME_PROPAGATION section of the CP2K &DFT input."""

    def __init__(self):
        # keyword -> value; None entries are skipped when writing the input
        self.params = {
            "ACCURACY_REFINEMENT": None,
            "APPLY_DELTA_PULSE": None,
            "ASPC_ORDER": None,
            "PERIODIC": None,
            "PROPAGATOR": None,
            "MAX_EXP": None,
            "MAX_ITER": None,
            "EPS_ITER": None,
        }
        self.status = False
        self.printout = cp2k_dft_real_time_propagation_print()

    def to_input(self, fout):
        """Write the section (and an active &PRINT child) to fout."""
        fout.write("\t\t&REAL_TIME_PROPAGATION\n")
        for key, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t%s %s\n" % (key, str(value)))
        if self.printout.status:
            self.printout.to_input(fout)
        fout.write("\t\t&END REAL_TIME_PROPAGATION\n")

    def set_params(self, params):
        """Keep 3-component keys; forward PRINT keys to the printout child."""
        for key in params:
            parts = key.split("-")
            if len(parts) == 3:
                self.params[parts[-1]] = params[key]
            elif parts[2] == "PRINT":
                self.printout.set_params({key: params[key]})
| 32.262997
| 88
| 0.500853
| 1,304
| 10,550
| 3.865031
| 0.059049
| 0.040873
| 0.043452
| 0.035714
| 0.880556
| 0.873016
| 0.857143
| 0.831548
| 0.806746
| 0.781151
| 0
| 0.00725
| 0.359336
| 10,550
| 326
| 89
| 32.361963
| 0.738423
| 0.04891
| 0
| 0.672646
| 0
| 0
| 0.083527
| 0.010996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134529
| false
| 0.044843
| 0
| 0
| 0.179372
| 0.09417
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e71fb5d3829f66c9f9e645d14a5d352e4e80e2e3
| 47
|
py
|
Python
|
database_connection/__init__.py
|
kkwanyang/database_connection
|
046a2384c7cb00b9edb817d36dfe1ee6514f1215
|
[
"MIT"
] | null | null | null |
database_connection/__init__.py
|
kkwanyang/database_connection
|
046a2384c7cb00b9edb817d36dfe1ee6514f1215
|
[
"MIT"
] | null | null | null |
database_connection/__init__.py
|
kkwanyang/database_connection
|
046a2384c7cb00b9edb817d36dfe1ee6514f1215
|
[
"MIT"
] | null | null | null |
from .database_connection import db_connection
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7651f3cbd2844053de042988cec59ed7d292a95
| 57
|
py
|
Python
|
12.py
|
a0345/A1
|
073858356d21c9c7317f1cc7df96004af9bf721e
|
[
"MIT"
] | null | null | null |
12.py
|
a0345/A1
|
073858356d21c9c7317f1cc7df96004af9bf721e
|
[
"MIT"
] | null | null | null |
12.py
|
a0345/A1
|
073858356d21c9c7317f1cc7df96004af9bf721e
|
[
"MIT"
] | null | null | null |
# Minimal demo: print a greeting, a computed sum, and the full equation.
print("Hello World")
print(5 + 4)
print(5, "+", 4, "=", 5 + 4)
| 19
| 23
| 0.54386
| 11
| 57
| 2.818182
| 0.454545
| 0.193548
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0.105263
| 57
| 3
| 23
| 19
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
99e19718bf443a1af734587b78796ba4c2b3732b
| 94
|
py
|
Python
|
apps/kpi/views.py
|
taliasman/kitsune
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
apps/kpi/views.py
|
taliasman/kitsune
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
[
"BSD-3-Clause"
] | null | null | null |
apps/kpi/views.py
|
taliasman/kitsune
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
[
"BSD-3-Clause"
] | null | null | null |
import jingo
def dashboard(request):
    """Render the KPI dashboard page for the given request."""
    template = 'kpi/dashboard.html'
    return jingo.render(request, template)
| 15.666667
| 54
| 0.744681
| 12
| 94
| 5.833333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138298
| 94
| 5
| 55
| 18.8
| 0.864198
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
99f8fa4e80f78b79de78b933a7f224f242ca9dbe
| 1,289
|
py
|
Python
|
unitorch/cli/models/__init__.py
|
fuliucansheng/UniTorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 2
|
2022-02-05T08:52:00.000Z
|
2022-03-27T07:01:34.000Z
|
unitorch/cli/models/__init__.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | null | null | null |
unitorch/cli/models/__init__.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 1
|
2022-03-27T07:01:13.000Z
|
2022-03-27T07:01:13.000Z
|
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
from unitorch.cli.models.modeling_utils import (
BaseInputs,
BaseOutputs,
BaseTargets,
ListInputs,
LossOutputs,
EmbeddingOutputs,
ClassificationOutputs,
ClassificationTargets,
GenerationOutputs,
GenerationTargets,
DetectionOutputs,
DetectionTargets,
SegmentationOutputs,
SegmentationTargets,
)
from unitorch.cli.models.modeling_utils import (
general_model_decorator,
generation_model_decorator,
detection_model_decorator,
segmentation_model_decorator,
)
# import model classes & process functions
import unitorch.cli.models.processing_utils
import unitorch.cli.models.bart
import unitorch.cli.models.bert
import unitorch.cli.models.clip
import unitorch.cli.models.deberta
import unitorch.cli.models.detectron2
import unitorch.cli.models.mass
import unitorch.cli.models.mbart
import unitorch.cli.models.prophetnet
import unitorch.cli.models.roberta
import unitorch.cli.models.xprophetnet
import unitorch.cli.models.unilm
import unitorch.cli.models.vlp
import unitorch.cli.models.infoxlm
import unitorch.cli.models.senet
import unitorch.cli.models.vit
import unitorch.cli.models.vit_mae
import unitorch.cli.models.swin
import unitorch.cli.models.detr
| 27.425532
| 48
| 0.806051
| 150
| 1,289
| 6.846667
| 0.38
| 0.224927
| 0.347614
| 0.425511
| 0.12853
| 0.077897
| 0.077897
| 0
| 0
| 0
| 0
| 0.000883
| 0.1218
| 1,289
| 46
| 49
| 28.021739
| 0.90636
| 0.078355
| 0
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.512195
| 0
| 0.512195
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41b4e76f546fd8efcd434ae1c2549dad50dddf7f
| 33
|
py
|
Python
|
qset_core/logging/__init__.py
|
adragolov/qset-core
|
ca2beb9d1a530b75f8f93194649c9d9c3e8d6ac1
|
[
"MIT"
] | null | null | null |
qset_core/logging/__init__.py
|
adragolov/qset-core
|
ca2beb9d1a530b75f8f93194649c9d9c3e8d6ac1
|
[
"MIT"
] | null | null | null |
qset_core/logging/__init__.py
|
adragolov/qset-core
|
ca2beb9d1a530b75f8f93194649c9d9c3e8d6ac1
|
[
"MIT"
] | null | null | null |
from .setup import setup_logging
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41cd1b255c3dddf11f8234d255ddd35574c031b8
| 3,642
|
py
|
Python
|
pset_conditionals/rps/tests/test_p2.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 5
|
2019-04-08T20:05:37.000Z
|
2019-12-04T20:48:45.000Z
|
pset_conditionals/rps/tests/test_p2.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 8
|
2019-04-15T15:16:05.000Z
|
2022-02-12T10:33:32.000Z
|
pset_conditionals/rps/tests/test_p2.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 2
|
2019-04-10T00:14:42.000Z
|
2020-02-26T20:35:21.000Z
|
import io
import pytest
import sys
from unittest import TestCase
from unittest.mock import patch
@pytest.mark.describe('Play RPS w/Computer')
class TestPrint(TestCase):
    """Exercise the student's rock-paper-scissors script `p2`.

    Each test patches `p2.random.randint` so both players' picks are fixed,
    deletes any cached `p2` module, then re-imports it so its top-level code
    runs again with the patched randomness, and finally checks what the
    script printed.  Encoding used by the tests: 1 = rock, 2 = paper,
    3 = scissors (inferred from the `.it` descriptions — confirm against p2).
    """
    # [player 1 pick, player 2 pick] consumed one-by-one by set_pvals
    vals = [1, 3]
    def set_pvals(self):
        """Return a side-effect callable that yields self.vals in order."""
        vals = self.vals
        def ret(*args, **kwargs):
            # pop the next scripted value on every randint() call
            nonlocal vals
            r = vals[0]
            vals = vals[1:]
            return r
        return ret
    @pytest.mark.it('if p1 and p2 are equal then print 0')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_tie(self, mock_randint, mock_stdout):
        # both players always pick 1 -> tie
        mock_randint.return_value = 1
        # drop the cached module so `import p2` re-executes the script
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "0" in stdout_sanitized
    # The remaining tests follow the same pattern with different scripted
    # pick pairs and expected printed winner.
    @pytest.mark.it('if p1 is r and p2 is s, print 1')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_rs(self, mock_randint, mock_stdout):
        self.vals = [1, 3]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "1" in stdout_sanitized
    @pytest.mark.it('if p1 is r and p2 is p, print 2')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_rp(self, mock_randint, mock_stdout):
        self.vals = [1, 2]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "2" in stdout_sanitized
    @pytest.mark.it('if p1 is s and p2 is p, print 1')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_sp(self, mock_randint, mock_stdout):
        self.vals = [3, 2]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "1" in stdout_sanitized
    @pytest.mark.it('if p1 is s and p2 is r, print 2')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_sr(self, mock_randint, mock_stdout):
        self.vals = [3, 1]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "2" in stdout_sanitized
    @pytest.mark.it('if p1 is p and p2 is r, print 1')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_pr(self, mock_randint, mock_stdout):
        self.vals = [2, 1]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "1" in stdout_sanitized
    @pytest.mark.it('if p1 is p and p2 is s, print 2')
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('p2.random.randint')
    def test_output_ps(self, mock_randint, mock_stdout):
        self.vals = [2, 3]
        mock_randint.side_effect = self.set_pvals()
        if sys.modules.get('p2'):
            del sys.modules['p2']
        import p2
        stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
        assert "2" in stdout_sanitized
| 32.230088
| 67
| 0.611203
| 513
| 3,642
| 4.189084
| 0.138402
| 0.071661
| 0.039088
| 0.045603
| 0.864123
| 0.842252
| 0.842252
| 0.842252
| 0.747324
| 0.747324
| 0
| 0.026898
| 0.254805
| 3,642
| 112
| 68
| 32.517857
| 0.764923
| 0
| 0
| 0.586957
| 0
| 0
| 0.131247
| 0
| 0
| 0
| 0
| 0
| 0.076087
| 1
| 0.097826
| false
| 0
| 0.130435
| 0
| 0.271739
| 0.076087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
68ccd0e5944058edc8c19799b389a2fa3633ad26
| 7,783
|
py
|
Python
|
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_environment_bundled_app_mutations.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 60
|
2018-09-26T15:46:00.000Z
|
2021-10-10T02:37:14.000Z
|
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_environment_bundled_app_mutations.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 1,706
|
2018-09-26T16:11:22.000Z
|
2021-08-20T13:37:59.000Z
|
packages/gtmapi/lmsrvlabbook/tests/snapshots/snap_test_environment_bundled_app_mutations.py
|
griffinmilsap/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 11
|
2019-03-14T13:23:51.000Z
|
2022-01-25T01:29:16.000Z
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# NOTE(review): auto-generated snapshottest data (see header) — edit the
# tests and regenerate rather than hand-editing these dictionaries.
snapshots = Snapshot()
# add_bundled_app 1: fresh labbook, no bundled apps yet
snapshots['TestBundledAppMutations.test_add_bundled_app 1'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcA=='
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBw'
        }
    }
}
# add_bundled_app 2: mutation response after adding "my app"
snapshots['TestBundledAppMutations.test_add_bundled_app 2'] = {
    'data': {
        'setBundledApp': {
            'clientMutationId': None,
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'my app',
                        'command': 'python /opt/app.py',
                        'description': 'a cool app to do things',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwJm15IGFwcA==',
                        'port': 9999
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcA=='
            }
        }
    }
}
# add_bundled_app 3: labbook query reflects the added app
snapshots['TestBundledAppMutations.test_add_bundled_app 3'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'my app',
                        'command': 'python /opt/app.py',
                        'description': 'a cool app to do things',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwJm15IGFwcA==',
                        'port': 9999
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcA=='
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBw'
        }
    }
}
# add_bundled_app 4: mutation response after updating the same app
snapshots['TestBundledAppMutations.test_add_bundled_app 4'] = {
    'data': {
        'setBundledApp': {
            'clientMutationId': None,
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'my app',
                        'command': 'python /opt/app2.py',
                        'description': 'a cooler app to do things',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwJm15IGFwcA==',
                        'port': 9900
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcA=='
            }
        }
    }
}
# add_bundled_app 5: labbook query reflects the updated app
snapshots['TestBundledAppMutations.test_add_bundled_app 5'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'my app',
                        'command': 'python /opt/app2.py',
                        'description': 'a cooler app to do things',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwJm15IGFwcA==',
                        'port': 9900
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcA=='
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBw'
        }
    }
}
# remove_bundled_app 1: three apps present before removal
snapshots['TestBundledAppMutations.test_remove_bundled_app 1'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'dash app 1',
                        'command': 'python /mnt/labbook/code/dash1.py',
                        'description': 'my example bundled app 1',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMQ==',
                        'port': 9999
                    },
                    {
                        'appName': 'dash app 2',
                        'command': 'python /mnt/labbook/code/dash2.py',
                        'description': 'my example bundled app 2',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMg==',
                        'port': 8822
                    },
                    {
                        'appName': 'dash app 3',
                        'command': 'python /mnt/labbook/code/dash3.py',
                        'description': 'my example bundled app 3',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMw==',
                        'port': 9966
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcC0y'
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBwLTI='
        }
    }
}
# remove_bundled_app 2: mutation response — app 2 removed, 1 and 3 remain
snapshots['TestBundledAppMutations.test_remove_bundled_app 2'] = {
    'data': {
        'removeBundledApp': {
            'clientMutationId': None,
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'dash app 1',
                        'command': 'python /mnt/labbook/code/dash1.py',
                        'description': 'my example bundled app 1',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMQ==',
                        'port': 9999
                    },
                    {
                        'appName': 'dash app 3',
                        'command': 'python /mnt/labbook/code/dash3.py',
                        'description': 'my example bundled app 3',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMw==',
                        'port': 9966
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcC0y'
            }
        }
    }
}
# remove_bundled_app 3: labbook query confirms apps 1 and 3 remain
snapshots['TestBundledAppMutations.test_remove_bundled_app 3'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'dash app 1',
                        'command': 'python /mnt/labbook/code/dash1.py',
                        'description': 'my example bundled app 1',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMQ==',
                        'port': 9999
                    },
                    {
                        'appName': 'dash app 3',
                        'command': 'python /mnt/labbook/code/dash3.py',
                        'description': 'my example bundled app 3',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTImZGFzaCBhcHAgMw==',
                        'port': 9966
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcC0y'
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBwLTI='
        }
    }
}
# start_bundled_app 1: single app configured with an 'echo test' command
snapshots['TestBundledAppMutations.test_start_bundled_app 1'] = {
    'data': {
        'labbook': {
            'environment': {
                'bundledApps': [
                    {
                        'appName': 'dash app 1',
                        'command': 'echo test',
                        'description': 'my example bundled app 1',
                        'id': 'QnVuZGxlZEFwcDpkZWZhdWx0JnRlc3QtYXBwLTEmZGFzaCBhcHAgMQ==',
                        'port': 9999
                    }
                ],
                'id': 'RW52aXJvbm1lbnQ6ZGVmYXVsdCZ0ZXN0LWFwcC0x'
            },
            'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtYXBwLTE='
        }
    }
}
# start_bundled_app 2: removing a nonexistent app yields a GraphQL error
snapshots['TestBundledAppMutations.test_start_bundled_app 2'] = {
    'data': {
        'removeBundledApp': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 11,
                    'line': 3
                }
            ],
            'message': 'App dash app 2 does not exist. Cannot remove.',
            'path': [
                'removeBundledApp'
            ]
        }
    ]
}
| 33.83913
| 89
| 0.430425
| 423
| 7,783
| 7.836879
| 0.20331
| 0.054299
| 0.108597
| 0.065158
| 0.868778
| 0.841931
| 0.784314
| 0.73997
| 0.706787
| 0.706787
| 0
| 0.046986
| 0.458564
| 7,783
| 229
| 90
| 33.9869
| 0.739677
| 0.007966
| 0
| 0.537383
| 0
| 0
| 0.423426
| 0.238274
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009346
| 0
| 0.009346
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec45caf957d4f300843a5fdb041b9fb90bd17c3f
| 353
|
py
|
Python
|
great_expectations/rule_based_profiler/parameter_builder/__init__.py
|
harvard-vpal/great_expectations
|
1cd0aa7a3f392d7af3f01e3226392e0583275d66
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/parameter_builder/__init__.py
|
harvard-vpal/great_expectations
|
1cd0aa7a3f392d7af3f01e3226392e0583275d66
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/parameter_builder/__init__.py
|
harvard-vpal/great_expectations
|
1cd0aa7a3f392d7af3f01e3226392e0583275d66
|
[
"Apache-2.0"
] | null | null | null |
from .metric_parameter_builder import MetricParameterBuilder
from .multi_batch_parameter_builder import MultiBatchParameterBuilder
from .numeric_metric_range_multi_batch_parameter_builder import (
NumericMetricRangeMultiBatchParameterBuilder,
)
from .simple_dateformat_string_parameter_builder import (
SimpleDateFormatStringParameterBuilder,
)
| 39.222222
| 69
| 0.892351
| 31
| 353
| 9.677419
| 0.516129
| 0.213333
| 0.293333
| 0.173333
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07932
| 353
| 8
| 70
| 44.125
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6b726d0e58215477e7d76bab77e271f23c462b1b
| 24
|
py
|
Python
|
app/Matrix_Capsules_EM/model/__init__.py
|
wudidaizi/RAVEN
|
10d126930ed31056e55803da4f8d606cde2b56d2
|
[
"MIT"
] | 97
|
2018-04-23T03:56:05.000Z
|
2021-09-28T11:45:20.000Z
|
app/Matrix_Capsules_EM/model/__init__.py
|
wudidaizi/RAVEN
|
10d126930ed31056e55803da4f8d606cde2b56d2
|
[
"MIT"
] | 13
|
2018-05-02T02:32:39.000Z
|
2020-07-04T04:16:29.000Z
|
app/Matrix_Capsules_EM/model/__init__.py
|
wudidaizi/RAVEN
|
10d126930ed31056e55803da4f8d606cde2b56d2
|
[
"MIT"
] | 34
|
2018-04-24T08:43:15.000Z
|
2021-11-02T14:38:49.000Z
|
from .capsules import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6bc4ddfd1a2efbf4e4d013baaac7c1f249acd4fc
| 1,978
|
py
|
Python
|
tests/test_png_plte.py
|
kosta-pag/segno
|
5e65842800a77ba86d5c7c565f75e1f8b8755104
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_png_plte.py
|
kosta-pag/segno
|
5e65842800a77ba86d5c7c565f75e1f8b8755104
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_png_plte.py
|
kosta-pag/segno
|
5e65842800a77ba86d5c7c565f75e1f8b8755104
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2022 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Tests if the PNG serializer does not add more colors than needed.
See also issue <https://github.com/heuer/segno/issues/62>
"""
from __future__ import unicode_literals, absolute_import
import io
import pytest
import segno
def test_plte():
    """Redundant finder/version colors must not enlarge the PNG palette."""
    qr = segno.make_qr('test')
    assert qr.version < 7
    dark = 'red'
    with_extras = io.BytesIO()
    plain = io.BytesIO()
    qr.save(with_extras, kind='png', dark=dark, finder_dark=dark, version_dark='green')
    qr.save(plain, kind='png', dark=dark)
    assert with_extras.getvalue() == plain.getvalue()
def test_plte2():
    """Same check as test_plte (kept for parity with the original suite)."""
    qr = segno.make_qr('test')
    assert qr.version < 7
    dark = 'red'
    with_extras = io.BytesIO()
    plain = io.BytesIO()
    qr.save(with_extras, kind='png', dark=dark, finder_dark=dark, version_dark='green')
    qr.save(plain, kind='png', dark=dark)
    assert with_extras.getvalue() == plain.getvalue()
def test_plte3():
    """Same check as test_plte (kept for parity with the original suite)."""
    qr = segno.make_qr('test')
    assert qr.version < 7
    dark = 'red'
    with_extras = io.BytesIO()
    plain = io.BytesIO()
    qr.save(with_extras, kind='png', dark=dark, finder_dark=dark, version_dark='green')
    qr.save(plain, kind='png', dark=dark)
    assert with_extras.getvalue() == plain.getvalue()
def test_plte_micro():
    """Micro QR: redundant alignment color must not enlarge the palette."""
    qr = segno.make_micro('RAIN')
    dark = 'red'
    with_extras = io.BytesIO()
    plain = io.BytesIO()
    qr.save(with_extras, kind='png', dark=dark, finder_dark=dark, alignment_dark='green')
    qr.save(plain, kind='png', dark=dark)
    assert with_extras.getvalue() == plain.getvalue()
def test_plte_micro2():
    """Micro QR: redundant dark-module color must not enlarge the palette."""
    qr = segno.make_micro('RAIN')
    dark = 'red'
    with_extras = io.BytesIO()
    plain = io.BytesIO()
    qr.save(with_extras, kind='png', dark=dark, finder_dark=dark, dark_module='green')
    qr.save(plain, kind='png', dark=dark)
    assert with_extras.getvalue() == plain.getvalue()
# Allow running this module directly (python test_png_plte.py) as a shortcut
# for invoking pytest on just this file.
if __name__ == '__main__':
    pytest.main([__file__])
| 26.72973
| 84
| 0.652679
| 305
| 1,978
| 4.003279
| 0.236066
| 0.104832
| 0.0819
| 0.12285
| 0.738739
| 0.738739
| 0.738739
| 0.738739
| 0.738739
| 0.738739
| 0
| 0.029229
| 0.187058
| 1,978
| 73
| 85
| 27.09589
| 0.7301
| 0.115774
| 0
| 0.734694
| 0
| 0
| 0.056517
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0.102041
| false
| 0
| 0.081633
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4033092a8379c5f78bdf04e683e7c963fed0f41
| 83
|
py
|
Python
|
stager/pages/password_tooltip.py
|
rorymurdock/stager
|
331b4eaa174ac6c31c724c02c93c7b8e635ea788
|
[
"Apache-2.0"
] | 2
|
2022-02-23T05:57:18.000Z
|
2022-03-07T02:46:40.000Z
|
stager/pages/password_tooltip.py
|
rorymurdock/stager
|
331b4eaa174ac6c31c724c02c93c7b8e635ea788
|
[
"Apache-2.0"
] | 10
|
2022-02-25T04:33:38.000Z
|
2022-02-25T06:46:59.000Z
|
stager/pages/password_tooltip.py
|
rorymurdock/stager
|
331b4eaa174ac6c31c724c02c93c7b8e635ea788
|
[
"Apache-2.0"
] | null | null | null |
from stager.utils.constants import PASSWORD_TOOLTIP_ID
# NOTE(review): this module appears to expose the tooltip's element id as its
# page NAME — confirm against how stager registers pages.
NAME = PASSWORD_TOOLTIP_ID
| 20.75
| 54
| 0.86747
| 12
| 83
| 5.666667
| 0.75
| 0.441176
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 83
| 4
| 55
| 20.75
| 0.906667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
2e86d174c2896cd04fc2fc2b330a9f1c8cd94437
| 23,608
|
py
|
Python
|
main/views.py
|
The-Nightwing/Suitor
|
27c4e7829d7951430deb7ada1599a5c74a2102e1
|
[
"MIT"
] | null | null | null |
main/views.py
|
The-Nightwing/Suitor
|
27c4e7829d7951430deb7ada1599a5c74a2102e1
|
[
"MIT"
] | null | null | null |
main/views.py
|
The-Nightwing/Suitor
|
27c4e7829d7951430deb7ada1599a5c74a2102e1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.db import connection
import pandas as pd
from pathlib import Path
import os
from django.shortcuts import HttpResponse
from main.helpers import *
from main.data import *
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
def index(request):
    """Render the landing page."""
    return render(request, 'main/index-1.html', {})
def login(request):
    """Render the login form."""
    return render(request, 'main/login.html', {})
def contact_us(request):
    """Render the contact page."""
    return render(request, 'main/contact-us.html', {})
def about_us(request):
    """Render the about page."""
    return render(request, 'main/about-us.html', {})
def services(request):
    """Render the services page."""
    return render(request, 'main/services.html', {})
def loginaccess(request):
    """Authenticate a posted username/password and route to a role dashboard.

    Roles are inferred from the username's first character ('I'/'Y'/'A'/'O')
    or the literal name 'Harvey'; credentials are checked against the
    module-level user_data mapping and the username is persisted via
    writeinfile for later views to read back with readfile.
    NOTE(review): passwords are compared in plaintext and the SQL below is
    built with str.format (injection risk) — should be hashed + parameterized.
    """
    if request.POST['username'][0]=='I':
        writeinfile(request.POST['username'].strip())
        if user_data[request.POST['username'].strip()]==request.POST['password'].strip():
            return render(request, 'main/customer.html',{})
    # NOTE(review): plain `if` (not elif) — an 'I' user with a bad password
    # falls through and is re-checked against the 'Y'/'A'/'O' chain below.
    if request.POST['username'][0]=='Y':
        writeinfile(request.POST['username'].strip())
        if user_data[request.POST['username'].strip()]==request.POST['password'].strip():
            return render(request, 'main/customer_client_company.html',{})
    elif request.POST['username'][0]=='A':
        writeinfile(request.POST['username'].strip())
        if user_data[request.POST['username'].strip()]==request.POST['password'].strip():
            with connection.cursor() as cursor:
                # lawyers are split into paralegal vs. lawyer dashboards
                query="select positionAtFirm from Lawyer where userID='{}'"
                query = query.format(readfile())
                cursor.execute(query)
                data = cursor.fetchall()
                print(data)
                if data[0][0]=='Paralegal':
                    return render(request,'main/paralegal.html',{})
                else:
                    return render(request, 'main/Lawyer.html',{})
    elif request.POST['username'][0]=='O':
        writeinfile(request.POST['username'].strip())
        if user_data[request.POST['username'].strip()]==request.POST['password'].strip():
            return render(request, 'main/other_staff.html',{})
    elif request.POST['username']=='Harvey':
        writeinfile(request.POST['username'].strip())
        if user_data[request.POST['username'].strip()]==request.POST['password'].strip():
            return render(request,'main/managing_partner.html',{})
    # fallback: credentials did not match any role
    return render(request,'main/user1.html',{})
def paralegal(request):
    """Dispatch the paralegal dashboard's query buttons (q1..q5).

    q1: own Lawyer row; q2: own calendar events; q3: all cases with client,
    lawyer and opposition details; q4: all visible legal documents;
    q5: the meeting-request form.  Each query materialises a SQL view and
    renders the result as a table via getdf.
    NOTE(review): q1/q2 interpolate readfile() output with str.format —
    injection risk; parameterized queries would be safer.
    """
    if request.POST.get("q1"):
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW myDetails AS SELECT * FROM Lawyer
            WHERE userID = "{}";"""
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myDetails;")
            data = cursor.fetchall()
            print(data)
            context={}
            columns = ['userID','firstName','middleName','lastName','dateOfBirth','gender','charges','casesWon','casesLost','casesSettled','experience','emailID','phoneNumber','positionAtFirm','avgTimePerCase','streetName','city','pincode','state','specialization','clientRating']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    #query2
    elif request.POST.get("q2"):
        context = {}
        with connection.cursor() as cursor:
            query = "create or replace VIEW myEvents AS select * from Calendar where userID = '{}';"
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myEvents;")
            data = cursor.fetchall()
            columns=['userID','whentt','description']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """create or replace view allCases as
            select h.caseID, c.plaintiff, c.lastDateOfActivity, c.flair, c.dateOfFiling, c.duration, c.status, ic.userID as ClientID, ic.firstName as CFirstName, ic.lastName as CLastName, ic.emailID as CEmailID, ic.isClient, ic.city as CCity, l.userID as LawyerID, l.firstName as LFirstName, l.lastName as LLastName, l.emailID as LEmailID, l.positionAtFirm, l.specialization, l.city as LCity, o.oppositionID, o.firstName as OFirstName, o.lastName as OLastName from Lawyer l, Handles h, LegalCases c, HasA ch, IndividualClients ic, Opposition o, Against a
            where l.userID = h.userID and h.caseID = c.caseID and ch.userID = ic.userID and a.oppositionID = o.oppositionID and a.caseID = c.caseID;"""
            cursor.execute(query)
            cursor.execute("select * from allCases;")
            data = cursor.fetchall()
            # NOTE(review): the view yields far more columns than listed here;
            # getdf presumably truncates/labels only these — confirm.
            columns = ['caseID', 'plaintiff', 'lastDateOfActivity', 'flair', 'dateOfFiling', 'duration', 'status', 'userID']
            obj = getdf(context,columns,data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q4"):
        context = {}
        with connection.cursor() as cursor:
            query = """create or replace view allLegalDocs as
            select d.docID, d.createdOn, d.dateLastModified, d.type, c.caseID, c.lastdateofactivity, c.flair, c.status, c.plaintiff from LegalDocuments d, LegalCases c
            where d.caseID = c.caseID and d.visibility = 1;"""
            cursor.execute(query)
            cursor.execute("select * from allLegalDocs;")
            data = cursor.fetchall()
            columns = ['docID', 'createdOn', 'dateLastModified', 'type', 'caseID', 'lastDateofActivity', 'flair', 'status','plaintiff']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q5"):
        return render(request,'main/meeting_form.html',{})
    # no recognised button -> back to the role landing page
    return render(request,'main/user1.html',{})
def customer_client(request):
    """Dashboard view for company clients: runs the query selected via POST.

    POST flags:
        q1 -- show this client company's own details.
        q3 -- show every case this client company is involved in.

    Returns the rendered ``data.html`` with a ``table`` context entry, or the
    main dashboard when no known flag is present.
    """
    if request.POST.get("q1"):
        context = {}
        with connection.cursor() as cursor:
            # NOTE(security): readfile() is interpolated straight into the
            # view DDL (placeholders cannot be stored inside a view
            # definition) -- its value must come from a trusted source.
            query = """create or replace view myDetailsClientCompany as
            select * from ClientCompanies
            where userID = "{}";"""
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myDetailsClientCompany;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName', 'budget',
                       'emailID', 'phoneNumber', 'streetName', 'city', 'pincode',
                       'state', 'isClient', 'fax', 'companyName', 'gstIN']
            # Bug fix: the table built by getdf() was previously discarded and
            # data.html was rendered without any table context.
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """create or replace view allMyCasesClientCompanies as
            select h.caseID, c.plaintiff, c.lastDateOfActivity, c.flair, c.dateOfFiling, c.duration, c.status, l.userID as LawyerID, l.firstName as LFirstName, l.lastName as LLastName, l.emailID as LEmailID, l.positionAtFirm, l.specialization, l.city as LCity, o.oppositionID, o.firstName as OFirstName, o.lastName as OLastName from Lawyer l, Handles h, LegalCases c, HasA ch, ClientCompanies ic, Opposition o, Against a
            where l.userID = h.userID and h.caseID = c.caseID and ch.userID = ic.userID and a.oppositionID = o.oppositionID and a.caseID = c.caseID;
            """
            cursor.execute(query)
            cursor.execute("select * from allMyCasesClientCompanies;")
            data = cursor.fetchall()
            columns = ['caseID', 'plaintiff', 'lastDateOfActivity', 'flair',
                       'dateOfFiling', 'duration', 'status', 'userID']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    # Bug fix: previously the function fell off the end and returned None
    # (an invalid Django response) for any other request.
    return render(request, 'main/user1.html', {})
def customer(request):
    """Dashboard view for individual clients: runs the query selected via POST.

    POST flags:
        q1 -- own profile details      q2 -- own calendar events
        q3 -- all of this client's cases
        q4 -- bills for this client's cases
        q5 -- lawyer-search form       q6 -- meeting-scheduling form

    Returns the rendered ``data.html`` (with a ``table`` context entry), a
    form template, or the main dashboard when no known flag is present.
    """
    # q1: own profile details
    if request.POST.get("q1"):
        context = {}
        with connection.cursor() as cursor:
            # NOTE(security): readfile() is interpolated straight into the
            # view DDL; its value must come from a trusted source.
            query = """create or replace view myDetailsClient as
            select * from IndividualClients
            where userID = "{}";"""
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myDetailsClient;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName',
                       'dateOfBirth', 'budget', 'emailID', 'phoneNumber',
                       'streetName', 'city', 'pincode', 'state', 'isClient']
            # Bug fix: the table built by getdf() was previously discarded and
            # data.html was rendered without any table context, unlike every
            # other branch of this view.
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    # q2: own calendar events
    elif request.POST.get("q2"):
        context = {}
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW myEventsClient AS
            select * from Calendar
            where userID = "{}";"""
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myEventsClient;")
            data = cursor.fetchall()
            columns = ['userID', 'whentt', 'description']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    # q3: every case this client is involved in
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """create or replace view allMyCasesClient as
            select h.caseID, c.plaintiff, c.lastDateOfActivity, c.flair, c.dateOfFiling, c.duration, c.status, l.userID as LawyerID, l.firstName as LFirstName, l.lastName as LLastName, l.emailID as LEmailID, l.positionAtFirm, l.specialization, l.city as LCity, o.oppositionID, o.firstName as OFirstName, o.lastName as OLastName from Lawyer l, Handles h, LegalCases c, HasA ch, IndividualClients ic, Opposition o, Against a
            where l.userID = h.userID and h.caseID = c.caseID and ch.userID = ic.userID and a.oppositionID = o.oppositionID and a.caseID = c.caseID;
            """
            cursor.execute(query)
            cursor.execute("select * from allMyCasesClient;")
            data = cursor.fetchall()
            columns = ['caseID', 'plaintiff', 'lastDateOfActivity', 'flair',
                       'dateOfFiling', 'duration', 'status', 'userID']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    # q4: financial transactions tied to this client's cases
    elif request.POST.get("q4"):
        context = {}
        with connection.cursor() as cursor:
            query = """create or replace view myBillsClient as
            select f.transactionID, f.dateOfPayment, f.description, f.amount, c.caseID, c.flair, c.status from FinancialTransactions f, Invest i, HasA h, LegalCases c
            where f.transactionID = i.transactionid and i.caseID = h.caseID and h.caseID = c.caseID and h.userID = "{}";"""
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myBillsClient;")
            data = cursor.fetchall()
            columns = ['transactionID', 'dateOfPayment', 'description',
                       'amount', 'caseID', 'flair', 'status']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q5"):
        return render(request, 'main/form_lawyer.html', {})
    elif request.POST.get("q6"):
        return render(request, 'main/meeting_form.html', {})
    return render(request, 'main/user1.html', {})
def user_search_lawyer_query(request):
    """Find the lawyers best matching the client's search criteria.

    Reads specialization, minimum client rating, minimum experience, maximum
    average time per case and maximum charges from the POSTed form, rebuilds
    the ``BestSuitedLawyer`` view accordingly, and renders the matches.
    """
    specialization = request.POST['specialization']
    clientRating = request.POST['clientRating']
    experience = request.POST['Experience']
    avgtime = request.POST['avgTimePerCase']
    charges = request.POST['charges']
    context = {}
    with connection.cursor() as cursor:
        # Security fix: these values come straight from the request, so they
        # are passed as query parameters instead of being str.format()-ed into
        # the SQL text (the DB client escapes them before the view definition
        # is stored).  Both the raw specialization and its space-stripped
        # variant are accepted, as before.
        query = """Create or Replace view BestSuitedLawyer as
        select Lawyer.firstname, Lawyer.lastname, Lawyer.userID from Lawyer
        where (specialization = %s or specialization = %s)
          and experience >= %s and avgTimePerCase <= %s
          and charges <= %s and clientRating >= %s
          and casesWon div casesLost >= 0;
        """
        params = [specialization, specialization.replace(" ", ""),
                  experience, avgtime, charges, clientRating]
        cursor.execute(query, params)
        cursor.execute("select * from BestSuitedLawyer;")
        data = cursor.fetchall()
        columns = ['firstName', 'lastName', 'userID']
        obj = getdf(context, columns, data)
    return render(request, 'data.html', {'table': obj})
def lawyer(request):
    """Dashboard view for lawyers: runs the query selected via POST.

    POST flags:
        q1 -- own calendar events          q2 -- cases this lawyer handles
        q3 -- own profile details          q4 -- summary of all lawyers
        q5 -- publicly visible documents   q6 -- this lawyer's individual clients
        q7 -- record one more case won     q8 -- meeting-scheduling form

    Returns the rendered ``data.html`` with a ``table`` context entry, a
    redirect (q7), or the main dashboard when no known flag is present.
    """
    if request.POST.get("q1"):
        context = {}
        with connection.cursor() as cursor:
            # NOTE(security): readfile() is interpolated straight into the
            # view DDL (placeholders cannot be stored inside a view
            # definition); its value must come from a trusted source.
            query = """
            CREATE OR REPLACE VIEW LawyerEvents AS
            select * from Calendar
            where userID = "{}";
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from LawyerEvents;")
            data = cursor.fetchall()
            columns = ['userID', 'whentt', 'description']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q2"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            CREATE OR REPLACE VIEW LawyerCases AS
            select LegalCases.caseID, plaintiff, lastDateOfActivity, flair, dateOfFiling, duration, LegalCases.status
            from Handles inner join LegalCases
            on LegalCases.caseID=Handles.caseID and Handles.userID="{}";
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from LawyerCases;")
            data = cursor.fetchall()
            columns = ['caseID', 'plaintiff', 'lastDateOfActivity', 'flair',
                       'dateOfFiling', 'duration', 'status']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            create or replace view LawyerDeets as
            select * from Lawyer where userId="{}";
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from LawyerDeets;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName', 'dateOfBirth',
                       'gender', 'charges', 'casesWon', 'casesLost', 'casesSettled',
                       'experience', 'emailID', 'phoneNumber', 'positionAtFirm',
                       'avgTimePerCase', 'streetName', 'city', 'pincode', 'state',
                       'specialization', 'clientRating']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q4"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            create or replace view otherlawyers as
            select firstname, lastname, emailId, specialization, experience, casesLost, casesSettled, avgTimePerCase, clientRating from Lawyer;
            """
            cursor.execute(query)
            cursor.execute("select * from otherlawyers;")
            data = cursor.fetchall()
            columns = ['firstname', 'lastname', 'emailId', 'specialization',
                       'experience', 'casesLost', 'casesSettled',
                       'avgTimePerCase', 'clientRating']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    # Consistency fix: q5/q6/q7 used bare `if` where the rest of the chain
    # used `elif` (behavior is unchanged since every branch returns).
    elif request.POST.get("q5"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            create or replace view visibleDocs as
            select d.docID, d.createdOn, d.dateLastModified, d.type, c.caseID, c.lastdateofactivity, c.flair, c.status, c.plaintiff from LegalDocuments d, LegalCases c
            where d.caseID = c.caseID and d.visibility = 1;
            """
            cursor.execute(query)
            cursor.execute("select * from visibleDocs;")
            data = cursor.fetchall()
            columns = ['docID', 'createdOn', 'dateLastModified', 'type', 'caseID',
                       'lastdateofactivity', 'flair', 'status', 'plaintiff']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q6"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            CREATE OR REPLACE VIEW IndividualsAsClients AS
            select * from IndividualClients where userID in (
            select HasA.userID
            from Handles inner join Lawyer
            on Handles.userID=Lawyer.userID and Lawyer.userID="{}"
            inner join HasA
            on HasA.caseID=Handles.caseID);
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from IndividualsAsClients;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName',
                       'dateOfBirth', 'budget', 'emailID', 'phoneNumber',
                       'streetName', 'city', 'pincode', 'state', 'isClient']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q7"):
        with connection.cursor() as cursor:
            # Security fix: pass the user id as a query parameter instead of
            # str.format()-ing it into the UPDATE statement.
            cursor.execute(
                "update Lawyer set casesWon = casesWon + 1 where userID = %s;",
                [readfile()],
            )
        return HttpResponseRedirect('login')
    elif request.POST.get("q8"):
        return render(request, 'main/meeting_form.html', {})
    return render(request, 'main/user1.html', {})
def otherstaff(request):
    """Dashboard view for non-lawyer staff: runs the query selected via POST.

    POST flags:
        q1 -- own profile details      q2 -- own calendar events
        q3 -- all financial transactions
        q5 -- meeting-scheduling form

    Returns the rendered ``data.html`` with a ``table`` context entry, a form
    template, or the main dashboard when no known flag is present.
    """
    if request.POST.get("q1"):
        context = {}
        with connection.cursor() as cursor:
            # NOTE(security): readfile() is interpolated straight into the
            # view DDL; its value must come from a trusted source.
            query = """create or replace view myDetailsStaff as
            select * from OtherStaff
            where userID = "{}";
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myDetailsStaff;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName',
                       'dateOfBirth', 'gender', 'salary', 'experience',
                       'emailID', 'phoneNumber', 'positionAtFirm',
                       'streetName', 'city', 'pincode', 'state']
            # Bug fix: the table built by getdf() was previously discarded and
            # data.html was rendered without any table context.
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q2"):
        context = {}
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW myEventsStaff AS
            select * from Calendar
            where userID = "{}";
            """
            query = query.format(readfile())
            cursor.execute(query)
            cursor.execute("select * from myEventsStaff;")
            data = cursor.fetchall()
            columns = ['userID', 'whentt', 'description']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW allFinancialTrans AS
            select * from FinancialTransactions ;
            """
            cursor.execute(query)
            cursor.execute("select * from allFinancialTrans;")
            data = cursor.fetchall()
            columns = ['transactionID', 'dateOfPayment', 'description',
                       'amount', 'type']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q5"):
        return render(request, 'main/meeting_form.html', {})
    # Bug fix: previously the function fell off the end and returned None
    # (an invalid Django response) for any other request.
    return render(request, 'main/user1.html', {})
def managing_partner(request):
    """Dashboard view for the firm's managing partner.

    POST flags:
        q1 -- managing partner's profile   q2 -- managing partner's events
        q3 -- all financial transactions   q4 -- lawyer(s) with best won/lost ratio
        q5 -- highest-rated lawyer         q6 -- meeting-scheduling form

    Returns the rendered ``data.html`` with a ``table`` context entry, a form
    template, or the main dashboard when no known flag is present.
    """
    if request.POST.get("q1"):
        context = {}
        with connection.cursor() as cursor:
            # "O21a0b2d6K" is presumably the managing partner's fixed user id
            # -- TODO confirm and consider moving it to a named constant.
            query = """create or replace view myManagingPartner as
            select * from OtherStaff
            where userID = "O21a0b2d6K";
            """
            cursor.execute(query)
            cursor.execute("select * from myManagingPartner;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName',
                       'dateOfBirth', 'gender', 'salary', 'experience',
                       'emailID', 'phoneNumber', 'positionAtFirm',
                       'streetName', 'city', 'pincode', 'state']
            # Bug fix: the table built by getdf() was previously discarded and
            # data.html was rendered without any table context.
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q2"):
        context = {}
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW myEventsManagement AS
            select * from Calendar
            where userID = "O21a0b2d6K";
            """
            cursor.execute(query)
            cursor.execute("select * from myEventsManagement;")
            data = cursor.fetchall()
            columns = ['userID', 'whentt', 'description']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q3"):
        context = {}
        with connection.cursor() as cursor:
            query = """CREATE OR REPLACE VIEW allFinancialTrans AS
            select * from FinancialTransactions ;
            """
            cursor.execute(query)
            cursor.execute("select * from allFinancialTrans;")
            data = cursor.fetchall()
            columns = ['transactionID', 'dateOfPayment', 'description',
                       'amount', 'type']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q4"):
        context = {}
        with connection.cursor() as cursor:
            # Lawyers whose rounded won/lost ratio equals the best ratio firm-wide.
            query = """CREATE OR REPLACE VIEW ChooseLawyerRatio AS
            Select * From Lawyer where round(casesWon/casesLost) in
            (Select max(Ratio) from
            (select userID, round(casesWon/casesLost) as Ratio from Lawyer) as latest);
            """
            cursor.execute(query)
            cursor.execute("select * from ChooseLawyerRatio;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'middleName', 'lastName', 'dateOfBirth',
                       'gender', 'charges', 'casesWon', 'casesLost', 'casesSettled',
                       'experience', 'emailID', 'phoneNumber', 'positionAtFirm',
                       'avgTimePerCase', 'streetName', 'city', 'pincode', 'state',
                       'specialization', 'clientRating']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q5"):
        context = {}
        with connection.cursor() as cursor:
            query = """
            CREATE OR REPLACE VIEW ChooseLawyerRating AS
            Select Distinct userID, firstName, lastName From Lawyer where clientRating in
            (Select max(clientRating) from Lawyer)
            limit 1;
            """
            cursor.execute(query)
            cursor.execute("select * from ChooseLawyerRating;")
            data = cursor.fetchall()
            columns = ['userID', 'firstName', 'lastName']
            obj = getdf(context, columns, data)
            return render(request, 'data.html', {'table': obj})
    elif request.POST.get("q6"):
        return render(request, 'main/meeting_form.html', {})
    # Bug fix: previously the function fell off the end and returned None
    # (an invalid Django response) for any other request.
    return render(request, 'main/user1.html', {})
def meeting_form(request):
    """Create a calendar entry for the current user from the meeting form.

    Reads ``time`` and ``description`` from POST, (re)creates the
    ``myEventsManagement`` view restricted to the current user, inserts the
    new event through it, and redirects to the login page.
    """
    time = request.POST['time']
    description = request.POST['description']
    user_id = readfile()  # call once; the original read the file twice
    with connection.cursor() as cursor:
        # NOTE(security): the user id is interpolated straight into the view
        # DDL (placeholders cannot be stored inside a view definition); its
        # value must come from a trusted source.
        query = """
        CREATE OR REPLACE VIEW myEventsManagement AS
        select * from Calendar
        where userID = "{}";
        """
        cursor.execute(query.format(user_id))
        # Security fix: the INSERT previously str.format()-ed the raw POST
        # fields into the SQL text; pass them as query parameters instead.
        # Inserting through this single-table view writes through to Calendar.
        cursor.execute(
            "insert into myEventsManagement values(%s, %s, %s);",
            [user_id, time, description],
        )
    return redirect('login')
| 40.218058
| 554
| 0.603863
| 2,416
| 23,608
| 5.891142
| 0.086921
| 0.045598
| 0.061407
| 0.04328
| 0.801237
| 0.780018
| 0.749385
| 0.737511
| 0.725076
| 0.694794
| 0
| 0.003434
| 0.25987
| 23,608
| 586
| 555
| 40.286689
| 0.811137
| 0.00288
| 0
| 0.667411
| 0
| 0.026786
| 0.431297
| 0.026937
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.011161
| 0.022321
| 0.011161
| 0.160714
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf368f2a9863a298a5f602bcae3e5f40f3b4ac23
| 70
|
py
|
Python
|
scripts/list_devices.py
|
sodaplayer/approxeng.input
|
c08dd789d8435a73e776422ad50ffeecb1d7dd2f
|
[
"Apache-2.0"
] | null | null | null |
scripts/list_devices.py
|
sodaplayer/approxeng.input
|
c08dd789d8435a73e776422ad50ffeecb1d7dd2f
|
[
"Apache-2.0"
] | null | null | null |
scripts/list_devices.py
|
sodaplayer/approxeng.input
|
c08dd789d8435a73e776422ad50ffeecb1d7dd2f
|
[
"Apache-2.0"
] | 1
|
2020-06-14T04:45:06.000Z
|
2020-06-14T04:45:06.000Z
|
# Tiny utility script: print every input controller/device that
# approxeng.input can detect on this host.
from approxeng.input.controllers import print_devices
print_devices()
| 23.333333
| 53
| 0.871429
| 9
| 70
| 6.555556
| 0.777778
| 0.40678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 70
| 3
| 54
| 23.333333
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
cf497aec85b14b0fe24b926d7687cdd1d79afb07
| 205
|
py
|
Python
|
decomon/metrics/__init__.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | 11
|
2021-11-03T12:09:50.000Z
|
2022-02-20T21:42:13.000Z
|
decomon/metrics/__init__.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | 1
|
2022-02-18T13:40:46.000Z
|
2022-02-18T13:40:46.000Z
|
decomon/metrics/__init__.py
|
airbus/decomon
|
f3668fbd8edd0def4e23aa0634eebfec58349c35
|
[
"MIT"
] | null | null | null |
from .metric import build_formal_adv_model, build_formal_upper_model, build_formal_adv_check_model
from .loss import build_crossentropy_model, build_asymptotic_crossentropy_model, build_radius_robust_model
| 102.5
| 106
| 0.917073
| 30
| 205
| 5.666667
| 0.466667
| 0.235294
| 0.164706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053659
| 205
| 2
| 106
| 102.5
| 0.876289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf5e77a388c5c299d880013799a5f6d9e84a0cb2
| 27
|
py
|
Python
|
plugins/gzip_plugin/__init__.py
|
blinskey/www.linskey.org
|
815c2b7b966fe7c263c0038d4ac3ef9040d8fc80
|
[
"0BSD"
] | null | null | null |
plugins/gzip_plugin/__init__.py
|
blinskey/www.linskey.org
|
815c2b7b966fe7c263c0038d4ac3ef9040d8fc80
|
[
"0BSD"
] | null | null | null |
plugins/gzip_plugin/__init__.py
|
blinskey/www.linskey.org
|
815c2b7b966fe7c263c0038d4ac3ef9040d8fc80
|
[
"0BSD"
] | null | null | null |
from .gzip_plugin import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8d8b05e18a546b7ad5a035918f6cc91dac0d662
| 33,838
|
py
|
Python
|
wouso/interface/activity/tests.py
|
ruxandraS/wouso
|
5adb0a547e6b25c0141da9e8805e683d653804ef
|
[
"Apache-2.0"
] | 117
|
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/interface/activity/tests.py
|
ruxandraS/wouso
|
5adb0a547e6b25c0141da9e8805e683d653804ef
|
[
"Apache-2.0"
] | 229
|
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/interface/activity/tests.py
|
iulianR/wouso
|
7fe93e503a3672380cf0db84118da9fc3194ae2e
|
[
"Apache-2.0"
] | 96
|
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from datetime import datetime, timedelta
from wouso.core.magic.models import Artifact, Spell, SpellHistory
from wouso.core.magic.manager import MagicManager
from wouso.core.tests import WousoTest
from wouso.core import scoring, signals
from wouso.core.scoring.models import Coin
from wouso.games.qotd.models import QotdGame
from wouso.games.challenge.models import ChallengeGame, ChallengeUser, Challenge
from wouso.interface.apps.messaging.models import Message, MessagingUser
from achievements import consecutive_days_seen, consecutive_qotd_correct, consecutive_chall_won, challenge_count, \
refused_challenges, get_challenge_time, unique_users_pm, wrong_first_qotd, get_chall_score, \
challenges_played_today, check_for_god_mode, spell_count, spent_gold, gold_amount, \
Achievements
from models import Activity
class AchievementTest(WousoTest):
    """Tests for login-streak and time-of-day achievements.

    Each test seeds `Activity` rows directly (bypassing the normal activity
    pipeline) and then either checks the streak helper directly or fires the
    `signals.addActivity` signal to trigger achievement evaluation.
    """

    def test_login_with_multiple_seens(self):
        """
        Multiple seens every day for more than 14 days in a row.
        """
        player = self._get_player()
        # 100 'seen' rows spaced 16h apart cover >14 consecutive days.
        for i in range(100):
            timestamp = datetime.now() - timedelta(hours=i*16)
            Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)
        self.assertGreaterEqual(consecutive_days_seen(player, datetime.now()), 14)

    def test_login_10(self):
        """
        One seen every day for 14 days in a row.
        """
        player = self._get_player()
        for i in range(14):
            timestamp = datetime.now() + timedelta(days=-i)
            Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)
        self.assertEqual(consecutive_days_seen(player, datetime.now()), 14)

    def test_login_10_less(self):
        """
        Multiple seens every day for less than 14 days in a row.
        """
        player = self._get_player()
        for i in range(20):
            timestamp = datetime.now() - timedelta(hours=i*7)
            Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)
        self.assertLess(consecutive_days_seen(player, datetime.now()), 14)

    def test_login_10_wrong(self):
        """A gap on day 5 resets the streak: only 5 consecutive days count."""
        player = self._get_player()
        for i in range(14):
            timestamp = datetime.now() + timedelta(days=-i)
            if i == 5:
                continue  # deliberately skip one day to break the streak
            Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)
        self.assertEqual(consecutive_days_seen(player, datetime.now()), 5)

    def test_login_10_activity(self):
        """Logging in via the client after 13 seeded days grants ach-login-10."""
        Artifact.objects.create(group=None, name='ach-login-10')
        player = self._get_player()
        for i in range(1, 14):
            timestamp = datetime.now() + timedelta(days=-i)
            a = Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)
        # A real login + page hit generates today's 'seen' and evaluates achievements.
        self.client.login(username=player.user.username, password='test')
        self.client.get('/hub/')
        self.assertTrue(player.magic.has_modifier('ach-login-10'))

    def test_early_bird_not(self):
        """Too few qualifying early-morning seens: no ach-early-bird."""
        player = self._get_player()
        Artifact.objects.create(group=None, name='ach-early-bird')
        for i in range(1,2):
            Activity.objects.create(timestamp=datetime(2012,9,17,6,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        for i in range(1,4):
            Activity.objects.create(timestamp=datetime(2012,9,17,5,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        signals.addActivity.send(sender=None, timestamp=datetime(2012,9,17,5,0,0),
                                 user_from=player,
                                 user_to=player,
                                 action='seen',
                                 game=None)
        self.assertFalse(player.magic.has_modifier('ach-early-bird'))

    def test_early_bird_set(self):
        """Enough 6 AM seens plus the signal grant ach-early-bird."""
        player = self._get_player()
        Artifact.objects.create(group=None, name='ach-early-bird')
        for i in range(1,4):
            Activity.objects.create(timestamp=datetime(2012,9,17,6,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        signals.addActivity.send(sender=None, timestamp=datetime(2012,9,17,6,0,0),
                                 user_from=player,
                                 user_to=player,
                                 action='seen',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-early-bird'))

    def test_night_owl_not(self):
        """Seens at the wrong hours: no ach-night-owl."""
        player = self._get_player()
        Artifact.objects.create(group=None, name='ach-night-owl')
        for i in range(1,3):
            Activity.objects.create(timestamp=datetime(2012,9,17,6,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        for i in range(1,4):
            Activity.objects.create(timestamp=datetime(2012,9,17,5,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        signals.addActivity.send(sender=None, timestamp=datetime(2012,9,17,4,0,0),
                                 user_from=player,
                                 user_to=player,
                                 action='seen',
                                 game=None)
        self.assertFalse(player.magic.has_modifier('ach-night-owl'))

    def test_night_owl_set(self):
        """Enough 4 AM seens plus the signal grant ach-night-owl."""
        player = self._get_player()
        Artifact.objects.create(group=None, name='ach-night-owl')
        for i in range(1,4):
            Activity.objects.create(timestamp=datetime(2012,9,17,4,0,0),
                                    user_from=player, user_to=player, action='seen', public=False)
        signals.addActivity.send(sender=None, timestamp=datetime(2012,9,17,4,0,0),
                                 user_from=player,
                                 user_to=player,
                                 action='seen',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-night-owl'))
class QotdAchievementTest(WousoTest):
    """Tests for the question-of-the-day correct-answer streak achievement."""

    def test_10_qotd_3ok(self):
        """Three consecutive correct answers yield a streak of 3."""
        player = self._get_player()
        for i in range(3):
            timestamp=datetime.now() + timedelta(days=-i)
            a = Activity.objects.create(timestamp=timestamp, user_from=player, user_to=player, action='qotd-correct',message_string=str(i),public=True)
        self.assertEqual(consecutive_qotd_correct(player),3)

    def test_10_qotd_1wrong(self):
        """A wrong answer on day 5 breaks the streak; only the most recent
        run of 4 correct answers counts."""
        player = self._get_player()
        for i in range(10):
            timestamp=datetime.now() - timedelta(days=-i)
            if i == 5:
                a = Activity.objects.create(timestamp=timestamp, user_from=player, user_to=player, action='qotd-wrong',message_string=str(i),public=True)
            else:
                a = Activity.objects.create(timestamp=timestamp, user_from=player, user_to=player, action='qotd-correct',message_string=str(i),public=True)
        self.assertEqual(consecutive_qotd_correct(player),4)

    def test_10_qotd_get_ach(self):
        """Ten consecutive correct answers plus the signal grant ach-qotd-10."""
        Artifact.objects.create(group=None, name='ach-qotd-10')
        player = self._get_player()
        for i in range(10):
            timestamp=datetime.now() + timedelta(days=-i)
            a = Activity.objects.create(timestamp=timestamp, user_from=player, user_to=player, action='qotd-correct',message_string=str(i),public=True)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player,
                                 action='qotd-correct',
                                 game=QotdGame.get_instance())
        self.assertTrue(player.magic.has_modifier('ach-qotd-10'))
class ChallengeAchievementTest(WousoTest):
def test_chall_10_won(self):
player = self._get_player()
for i in range(1, 11):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-won',
public=True)
self.assertEqual(consecutive_chall_won(player), 10)
def test_chall_10_won_wrong_draw(self):
player = self._get_player()
for i in range(1, 10):
timestamp = datetime.now() + timedelta(days=-i)
if i == 5:
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-draw',
public=True)
else:
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-won',
public=True)
self.assertEqual(consecutive_chall_won(player), 4)
def test_chall_10_won_wrong_lost(self):
player1 = self._get_player()
player2 = self._get_player(2)
for i in range(1, 10):
timestamp = datetime.now() + timedelta(days=-i)
if i == 5:
a = Activity.objects.create(timestamp=timestamp,
user_from=player2, user_to=player1, action='chall-won',
public=True)
else:
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-won',
public=True)
self.assertEqual(consecutive_chall_won(player1), 4)
def test_chall_10_won_activity(self):
Artifact.objects.create(group=None, name='ach-chall-won-10')
player = self._get_player()
for i in range(1, 10):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-won',
public=True)
self.assertFalse(player.magic.has_modifier('ach-chall-won-10'))
signals.addActivity.send(sender=None, user_from=player,
user_to=player,
action='chall-won',
game=ChallengeGame.get_instance())
self.assertTrue(player.magic.has_modifier('ach-chall-won-10'))
def test_chall_30(self):
player = self._get_player()
for i in range(1, 31):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-won',
public=True)
self.assertEqual(challenge_count(player), 30)
def test_chall_100_draw_lost(self):
player1 = self._get_player()
player2 = self._get_player(2)
for i in range(1, 101):
timestamp = datetime.now() + timedelta(days=-i)
if (i % 5) == 0:
a = Activity.objects.create(timestamp=timestamp,
user_from=player2, user_to=player1, action='chall-won',
public=True)
elif (i % 7) == 0:
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-draw',
public=True)
else:
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-won',
public=True)
self.assertEqual(challenge_count(player1), 100)
def test_chall_100_activity(self):
Artifact.objects.create(group=None, name='ach-chall-100')
player = self._get_player()
for i in range(1, 100):
timestamp = datetime.now() + timedelta(days=-i)
if i % 5 == 0:
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-draw',
public=True)
else:
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-won',
public=True)
signals.addActivity.send(sender=None, user_from=player,
user_to=player,
action='chall-won',
game=ChallengeGame.get_instance())
self.assertTrue(player.magic.has_modifier('ach-chall-100'))
def test_defeated_better_player_activity(self):
Artifact.objects.create(group=None, name='ach-chall-def-big')
player1 = self._get_player()
player2 = self._get_player(2)
player2.level_no = 4
player2.save()
for i in range(1,5):
signals.addActivity.send(sender=None, user_from=player1,
user_to=player2,
action='chall-won',
game=ChallengeGame.get_instance())
self.assertFalse(player1.magic.has_modifier('ach-chall-def-big'))
signals.addActivity.send(sender=None, user_from=player1,
user_to=player2,
action='chall-won',
game=ChallengeGame.get_instance())
self.assertTrue(player1.magic.has_modifier('ach-chall-def-big'))
def test_this_is_sparta_correct(self):
player = self._get_player()
for i in range(1, 7):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player, user_to=player, action='chall-refused',
public=True)
self.assertEqual(refused_challenges(player), 6)
def test_this_is_sparta_activity_not_given(self):
Artifact.objects.create(group=None, name='ach-this-is-sparta')
player1 = self._get_player()
player2 = self._get_player(2)
first_seen = datetime.now() + timedelta(days=-10)#10 days since first login
Activity.objects.create(timestamp=first_seen,
user_from=player1, user_to=player1, action='seen',
public=False)
for i in range(1, 7):
timestamp = datetime.now() + timedelta(days=-i)
if (i % 4) == 0:
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-refused',
public=True)
else:
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-lost',
public=True)
#send signal to enable achievement validation
signals.addActivity.send(sender=None, user_from=player1,
user_to=player2,
action='chall-refused',
game=ChallengeGame.get_instance())
#False due to refused challenge
self.assertFalse(player1.magic.has_modifier('ach-this-is-sparta'))
def test_this_is_sparta_activity_not_enough_challenges(self):
Artifact.objects.create(group=None, name='ach-this-is-sparta')
player1 = self._get_player()
player2 = self._get_player(2)
first_seen = datetime.now() + timedelta(days=-10)#10 days since first login
Activity.objects.create(timestamp=first_seen,
user_from=player1, user_to=player1, action='seen',
public=False)
for i in range(1, 3):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-lost',
public=True)
#send signal to enable achievement validation
signals.addActivity.send(sender=None, user_from=player1,
user_to=player2,
action='chall-won',
game=ChallengeGame.get_instance())
#False due to not enough challenges played
self.assertFalse(player1.magic.has_modifier('ach-this-is-sparta'))
def test_this_is_sparta_activity_not_enough_time(self):
Artifact.objects.create(group=None, name='ach-this-is-sparta')
player1 = self._get_player()
player2 = self._get_player(2)
first_seen = datetime.now() + timedelta(days=-6)#only 6 days have passed
Activity.objects.create(timestamp=first_seen,
user_from=player1, user_to=player1, action='seen',
public=False)
for i in range(1, 5):
timestamp = datetime.now() + timedelta(days=-i)
a = Activity.objects.create(timestamp=timestamp,
user_from=player1, user_to=player2, action='chall-lost',
public=True)
#send signal to enable achievement validation
signals.addActivity.send(sender=None, user_from=player1,
user_to=player2,
action='chall-won',
game=ChallengeGame.get_instance())
#achievement condition earned
self.assertFalse(player1.magic.has_modifier('ach-this-is-sparta'))
def test_this_is_sparta_activity_passed(self):
    """Achievement granted once enough time and challenges accumulate."""
    Artifact.objects.create(group=None, name='ach-this-is-sparta')
    player1 = self._get_player()
    player2 = self._get_player(2)
    # First login exactly 7 days ago -- just enough time.
    Activity.objects.create(timestamp=datetime.now() - timedelta(days=7),
                            user_from=player1, user_to=player1,
                            action='seen', public=False)
    for day in range(1, 5):
        Activity.objects.create(timestamp=datetime.now() - timedelta(days=day),
                                user_from=player1, user_to=player2,
                                action='chall-lost', public=True)
    # Send the signal that triggers achievement validation.
    signals.addActivity.send(sender=None, user_from=player1,
                             user_to=player2, action='chall-won',
                             game=ChallengeGame.get_instance())
    self.assertTrue(player1.magic.has_modifier('ach-this-is-sparta'))
def test_challenges_played_today(self):
    """challenges_played_today counts both won and lost challenges."""
    player = self._get_player()
    for i in range(1, 10):
        # Every fourth challenge is a loss; all still count as played.
        outcome = "chall-lost" if i % 4 == 0 else "chall-won"
        Activity.objects.create(timestamp=datetime.now(),
                                user_from=player, user_to=player,
                                action=outcome, public=True)
    self.assertEqual(challenges_played_today(player), 9)
def test_challenges_played_today_activity(self):
    """Playing enough challenges in one day earns ach-chall-10-a-day."""
    player = self._get_player()
    Artifact.objects.create(group=None, name='ach-chall-10-a-day')
    for i in range(1, 10):
        outcome = "chall-lost" if i % 4 == 0 else "chall-won"
        Activity.objects.create(timestamp=datetime.now(),
                                user_from=player, user_to=player,
                                action=outcome, public=True)
    # The tenth challenge arrives via the signal and triggers validation.
    signals.addActivity.send(sender=None, user_from=player,
                             user_to=player, action='chall-won',
                             game=ChallengeGame.get_instance())
    self.assertTrue(player.magic.has_modifier('ach-chall-10-a-day'))
class PopularityTest(WousoTest):
    """Tests for the popularity achievement (unique PM senders)."""

    def setUp(self):
        Message.disable_check()

    def tearDown(self):
        Message.enable_check()

    def test_popularity_5_pm_1(self):
        """Many messages from one sender count as a single unique user."""
        player = self._get_player().get_extension(MessagingUser)
        moment = datetime.now() - timedelta(minutes=1)
        for _ in range(10):
            Message.objects.create(timestamp=moment, sender=player,
                                   receiver=player, subject="a", text="b")
        self.assertEqual(unique_users_pm(player, 3), 1)

    def test_popularity_5_pm_2(self):
        """Two distinct senders yield two unique users."""
        player = self._get_player().get_extension(MessagingUser)
        moment = datetime.now() - timedelta(minutes=1)
        Message.objects.create(timestamp=moment, sender=player,
                               receiver=player, subject="a", text="b")
        other = self._get_player(2).get_extension(MessagingUser)
        Message.objects.create(timestamp=moment, sender=other,
                               receiver=player, subject="a", text="b")
        self.assertEqual(unique_users_pm(player, 3), 2)

    def test_popularity_5_pm_3(self):
        """Five unique senders inside the 30-minute window earn the artifact."""
        Artifact.objects.create(group=None, name='ach-popularity')
        user_to = self._get_player(100).get_extension(MessagingUser)
        for i in range(10):
            sender = self._get_player(i).get_extension(MessagingUser)
            # Senders 0-3 fall inside the 30-minute window, 4-9 outside it.
            minutes_ago = 10 if i <= 3 else 35
            Message.objects.create(
                timestamp=datetime.now() - timedelta(minutes=minutes_ago),
                sender=sender, receiver=user_to, subject="a", text="b")
        # A fresh message from the last sender makes a fifth in-window sender
        # and fires the message-sent signal.
        Message.send(sender=sender, receiver=user_to, subject="a", text="b")
        self.assertEqual(unique_users_pm(user_to, 30), 5)
        self.assertTrue(user_to.magic.has_modifier('ach-popularity'))
class NotificationsTest(WousoTest):
    def test_ach_notification(self):
        """Earning an achievement sends exactly one notification message."""
        player = self._get_player()
        # NOTE: the artifact name keeps the original 'ach-notfication'
        # spelling used consistently by this test.
        Artifact.objects.create(group=None, name='ach-notfication')
        Achievements.earn_achievement(player, 'ach-notfication')
        self.assertEqual(len(Message.objects.all()), 1)
class FlawlessVictoryTest(WousoTest):
    """Tests for the flawless-victory achievement."""

    def setUp(self):
        super(FlawlessVictoryTest, self).setUp()
        self.user_from = self._get_player(1)
        self.user_to = self._get_player(2)
        self.chall_user1 = self.user_from.get_extension(ChallengeUser)
        self.chall_user2 = self.user_to.get_extension(ChallengeUser)
        scoring.setup_scoring()
        self.chall = Challenge.create(user_from=self.chall_user1,
                                      user_to=self.chall_user2,
                                      ignore_questions=True)

    def _score(self, participant, value):
        """Persist ``value`` as ``participant``'s challenge score."""
        participant.score = value
        participant.save()

    def test_scorring(self):
        """get_chall_score reports the highest score in the challenge."""
        self._score(self.chall.user_from, 100)
        self._score(self.chall.user_to, 200)
        self.assertEqual(get_chall_score(dict(id=self.chall.id)), 200)
        self._score(self.chall.user_from, 300)
        self.assertEqual(get_chall_score(dict(id=self.chall.id)), 300)
        self._score(self.chall.user_to, 500)
        self.assertEqual(get_chall_score(dict(id=self.chall.id)), 500)

    def test_ach_fake(self):
        """Achievement requires the winning score to reach the threshold."""
        Artifact.objects.create(group=None, name='ach-flawless-victory')
        player = self._get_player()
        self._score(self.chall.user_from, 100)
        self._score(self.chall.user_to, 200)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player,
                                 arguments=dict(id=self.chall.id),
                                 action="chall-won", game=None)
        self.assertFalse(player.magic.has_modifier('ach-flawless-victory'))
        self._score(self.chall.user_from, 500)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player,
                                 arguments=dict(id=self.chall.id),
                                 action="chall-won", game=None)
        self.assertTrue(player.magic.has_modifier('ach-flawless-victory'))

    def test_ach_real(self):
        """Playing the challenge end-to-end grants the achievement."""
        Artifact.objects.create(group=None, name='ach-flawless-victory')
        self._score(self.chall.user_from, 500)
        self._score(self.chall.user_to, 200)
        self.assertFalse(self.user_from.magic.has_modifier('ach-flawless-victory'))
        self.chall.played()
        self.assertTrue(self.user_from.magic.has_modifier('ach-flawless-victory'))
class WinFastTest(WousoTest):
    """Tests for the win-fast achievement."""

    def setUp(self):
        super(WinFastTest, self).setUp()
        challenger = self._get_player(1).get_extension(ChallengeUser)
        challenged = self._get_player(2).get_extension(ChallengeUser)
        scoring.setup_scoring()
        self.chall = Challenge.create(user_from=challenger,
                                      user_to=challenged,
                                      ignore_questions=True)

    def test_get_time(self):
        """get_challenge_time returns the time taken by user_from."""
        part_from, part_to = self.chall.user_from, self.chall.user_to
        part_from.seconds_took = 30
        part_from.score = 500
        part_from.save()
        part_to.seconds_took = 80
        part_to.score = 0
        part_to.save()
        self.chall.winner = part_from.user
        self.chall.save()
        self.assertEqual(get_challenge_time(dict(id=self.chall.id)), 30)
        part_from.seconds_took = 180
        part_from.save()
        part_to.seconds_took = 20
        part_to.save()
        self.assertEqual(get_challenge_time(dict(id=self.chall.id)), 180)

    def test_ach(self):
        """A fast win (30 seconds) earns ach-win-fast."""
        Artifact.objects.create(group=None, name='ach-win-fast')
        player = self._get_player()
        part_from, part_to = self.chall.user_from, self.chall.user_to
        part_from.seconds_took = 30
        part_from.score = 400
        part_from.save()
        part_to.seconds_took = 80
        part_to.score = 300
        part_to.save()
        self.chall.winner = part_from.user
        self.chall.save()
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player,
                                 arguments=dict(id=self.chall.id),
                                 action="chall-won",
                                 game=ChallengeGame.get_instance())
        self.assertTrue(player.magic.has_modifier('ach-win-fast'))
class SpellAchievement(WousoTest):
    """Tests for spell-related achievements."""

    def test_spell_count(self):
        """A cast spell is reflected by is_spelled and spell_count."""
        player = self._get_player()
        spell = Spell.objects.create(name="test", title="", description="",
                                     image=None, percents=100, type='s')
        player.magic.add_spell(spell)
        player.magic.cast_spell(spell, player, datetime.now() + timedelta(days=3))
        self.assertTrue(player.magic.is_spelled)
        # Fix: was assertTrue(spell_count(player), 1) -- the ``1`` was taken
        # as the failure *message*, so the count was never actually checked.
        self.assertEqual(spell_count(player), 1)

    def test_spell_count_activity(self):
        """Casting five spells earns ach-spell-5."""
        Artifact.objects.create(group=None, name='ach-spell-5')
        player = self._get_player()
        for i in range(1, 6):
            spell = Spell.objects.create(name="test" + str(i), title="",
                                         description="", image=None,
                                         percents=100)
            player.magic.add_spell(spell)
            player.magic.cast_spell(spell, player,
                                    datetime.now() + timedelta(days=i))
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action="cast", game=None)
        self.assertTrue(player.magic.has_modifier('ach-spell-5'))

    def test_gold_spent(self):
        """Buying a spell is reflected by spent_gold."""
        player = self._get_player()
        spell = Spell.objects.create(name="test", title="", description="",
                                     image=None, percents=100, type='s',
                                     price=25)
        SpellHistory.objects.create(type='b', user_from=player, user_to=player,
                                    date=datetime.now(), spell=spell)
        # Fix: was assertTrue(spent_gold(player), 25) -- see test_spell_count.
        self.assertEqual(spent_gold(player), 25)

    def test_gold_spent_activity(self):
        """Spending enough gold earns ach-spent-gold."""
        Artifact.objects.create(group=None, name='ach-spent-gold')
        player = self._get_player()
        spell = Spell.objects.create(name="test", title="", description="",
                                     image=None, percents=100, type='s',
                                     price=600)
        SpellHistory.objects.create(type='b', user_from=player, user_to=player,
                                    date=datetime.now(), spell=spell)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action='spell-buy',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-spent-gold'))

    def test_used_all_spells_activity(self):
        """Using every available spell earns ach-use-all-spells."""
        Artifact.objects.create(group=None, name='ach-use-all-spells')
        player = self._get_player()
        spell = Spell.objects.create(name="test", title="", description="",
                                     image=None, percents=100, type='s',
                                     price=600)
        SpellHistory.objects.create(type='u', user_from=player, user_to=player,
                                    date=datetime.now(), spell=spell)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action='cast',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-use-all-spells'))

    def test_used_all_mass_spells_activity(self):
        """Using every mass spell earns ach-use-all-mass."""
        Artifact.objects.create(group=None, name='ach-use-all-mass')
        player = self._get_player()
        spell = Spell.objects.create(name="test", title="", description="",
                                     image=None, percents=100, type='s',
                                     price=600, mass=True)
        SpellHistory.objects.create(type='u', user_from=player, user_to=player,
                                    date=datetime.now(), spell=spell)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action='cast',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-use-all-mass'))
class LevelUpTest(WousoTest):
    def test_level_ach(self):
        """Reaching levels 5 and 10 awards the matching achievements."""
        Artifact.objects.create(group=None, name='ach-level-5')
        Artifact.objects.create(group=None, name='ach-level-10')
        coin = Coin.add('gold')
        player = self._get_player()
        for level, modifier in ((5, 'ach-level-5'), (10, 'ach-level-10')):
            player.level_no = level
            player.save()
            # Any activity triggers achievement validation.
            signals.addActivity.send(sender=None, user_from=player,
                                     user_to=player, action='gold-won',
                                     game=None)
            self.assertTrue(player.magic.has_modifier(modifier))
class GoldTest(WousoTest):
    def test_gold_amount(self):
        """gold_amount reflects the player's scored gold."""
        player = self._get_player()
        gold = Coin.add('gold')
        scoring.score_simple(player, gold, amount=100)
        self.assertEqual(gold_amount(player), 100)

    def test_gold_amount_ach(self):
        """Accumulating enough gold earns ach-gold-300."""
        Artifact.objects.create(group=None, name='ach-gold-300')
        player = self._get_player()
        gold = Coin.add('gold')
        scoring.score_simple(player, gold, amount=500)
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action='gold-won',
                                 game=None)
        self.assertTrue(player.magic.has_modifier('ach-gold-300'))
class GodModeTest(WousoTest):
    """Tests for check_for_god_mode and the god-mode achievement."""

    def test_check_for_god_mode1(self):
        """Five consecutive days of correct qotd answers qualify."""
        player = self._get_player()
        timestamp = datetime.now()
        for _ in range(5):
            timestamp -= timedelta(days=1)
            Activity.objects.create(timestamp=timestamp, user_from=player,
                                    user_to=player, action='qotd-correct')
        self.assertTrue(check_for_god_mode(player, 5, 0))

    def test_check_for_god_mode2(self):
        """A single wrong qotd answer breaks the streak."""
        player = self._get_player()
        timestamp = datetime.now()
        for day in range(5):
            timestamp -= timedelta(days=1)
            if day == 3:
                Activity.objects.create(timestamp=timestamp, user_from=player,
                                        user_to=player, action='qotd-wrong')
                continue
            Activity.objects.create(timestamp=timestamp, user_from=player,
                                    user_to=player, action='qotd-correct')
        self.assertFalse(check_for_god_mode(player, 5, 0))

    def test_check_for_god_mode3(self):
        """qotd and challenge streaks qualify and earn ach-god-mode-on."""
        player = self._get_player()
        player2 = self._get_player(1)
        timestamp = datetime.now()
        for _ in range(5):
            timestamp -= timedelta(days=1)
            Activity.objects.create(timestamp=timestamp, user_from=player,
                                    user_to=player2, action='chall-won')
            Activity.objects.create(timestamp=timestamp, user_from=player,
                                    user_to=player, action='qotd-correct')
        self.assertTrue(check_for_god_mode(player, 5, 5))
        Artifact.objects.create(group=None, name='ach-god-mode-on')
        signals.addActivity.send(sender=None, user_from=player,
                                 user_to=player, action='seen', game=None)
        self.assertTrue(player.magic.has_modifier('ach-god-mode-on'))

    def test_check_for_god_mode4(self):
        """The streak is rejected (presumably the chall-won recorded
        *against* the player on day 3 disqualifies -- compare mode3)."""
        player = self._get_player()
        player2 = self._get_player(1)
        timestamp = datetime.now()
        for day in range(5):
            timestamp -= timedelta(days=1)
            Activity.objects.create(timestamp=timestamp, user_from=player,
                                    user_to=player, action='chall-correct')
            if day == 3:
                # Fix: removed a no-op trailing ``continue`` that followed
                # this statement at the end of the loop body.
                Activity.objects.create(timestamp=timestamp, user_from=player2,
                                        user_to=player, action='chall-won')
        self.assertFalse(check_for_god_mode(player, 5, 0))
| 45.17757
| 155
| 0.602813
| 4,008
| 33,838
| 4.923403
| 0.061128
| 0.042568
| 0.040186
| 0.046521
| 0.862211
| 0.833122
| 0.811534
| 0.796078
| 0.777581
| 0.73182
| 0
| 0.022578
| 0.282729
| 33,838
| 748
| 156
| 45.237968
| 0.79045
| 0.016224
| 0
| 0.630819
| 0
| 0
| 0.046133
| 0
| 0
| 0
| 0
| 0
| 0.096308
| 1
| 0.085072
| false
| 0.00321
| 0.017657
| 0
| 0.120385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8eda5a96235358bc7952ff3c08db7502765063d
| 24
|
py
|
Python
|
noveldownloader/biquge/__init__.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-05-19T11:54:26.000Z
|
2019-05-19T12:03:49.000Z
|
noveldownloader/biquge/__init__.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 1
|
2020-11-27T07:55:15.000Z
|
2020-11-27T07:55:15.000Z
|
noveldownloader/biquge/__init__.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-01-17T15:01:28.000Z
|
2019-09-20T09:32:17.000Z
|
from .novel import Novel
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b26d2c73cad6a94f630d26da198ca7922b27828
| 1,782
|
py
|
Python
|
python/code_challenges/fifo_animal_shelter/test_fifo_animal_shelter.py
|
brendanwelzien/data-structures-and-algorithms
|
0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5
|
[
"MIT"
] | null | null | null |
python/code_challenges/fifo_animal_shelter/test_fifo_animal_shelter.py
|
brendanwelzien/data-structures-and-algorithms
|
0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5
|
[
"MIT"
] | 1
|
2020-11-10T01:31:39.000Z
|
2020-11-10T01:31:39.000Z
|
python/code_challenges/fifo_animal_shelter/test_fifo_animal_shelter.py
|
brendanwelzien/data-structures-and-algorithms
|
0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5
|
[
"MIT"
] | null | null | null |
import pytest
from fifo_animal_shelter import Queue
def test_empty_shelter():
    """A freshly created shelter has nothing at its front."""
    shelter = Queue()
    assert shelter.front == None
def test_enqueue_once():
    """A single enqueued animal becomes the front of the queue."""
    shelter = Queue()
    shelter.enqueue("dog")
    assert shelter.front.animal_type == "dog"
def test_enqueue_multiple_dog_first():
    """The first enqueued animal (a dog) stays at the front."""
    shelter = Queue()
    for animal in ("dog", "cat", "dog", "cat"):
        shelter.enqueue(animal)
    assert shelter.front.animal_type == "dog"
def test_enqueue_multiple_cat_first():
    """The first enqueued animal (a cat) stays at the front."""
    shelter = Queue()
    for animal in ("cat", "dog", "dog", "cat"):
        shelter.enqueue(animal)
    assert shelter.front.animal_type == "cat"
def test_dequeue_from_empty():
    """Dequeuing from an empty shelter reports that none are available."""
    shelter = Queue()
    assert shelter.dequeue("cat") == "No animals available"
def test_dequeue_existing_cat():
    """Dequeuing a preference that exists in the queue returns it."""
    shelter = Queue()
    for animal in ("dog", "dog", "cat", "dog"):
        shelter.enqueue(animal)
    assert shelter.dequeue("cat") == "cat"
def test_dequeue_invalid_preference():
    """An unsupported preference yields the sentinel string 'Null'."""
    shelter = Queue()
    for _ in range(4):
        shelter.enqueue("dog")
    assert shelter.dequeue("pig") == "Null"
def test_dequeue_missing_cat():
    """A valid preference absent from the queue yields 'Null'."""
    shelter = Queue()
    for _ in range(4):
        shelter.enqueue("dog")
    assert shelter.dequeue("cat") == "Null"
| 24.410959
| 38
| 0.665544
| 203
| 1,782
| 5.699507
| 0.157635
| 0.254105
| 0.235091
| 0.24892
| 0.836647
| 0.734659
| 0.6465
| 0.636128
| 0.521175
| 0.45981
| 0
| 0
| 0.207632
| 1,782
| 72
| 39
| 24.75
| 0.819405
| 0
| 0
| 0.777778
| 0
| 0
| 0.064534
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 1
| 0.126984
| false
| 0
| 0.031746
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9985265605c86fb1b9b853ad9dec499965855404
| 156
|
py
|
Python
|
sbaas/analysis/visualization/__init__.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | 1
|
2017-05-13T04:35:08.000Z
|
2017-05-13T04:35:08.000Z
|
sbaas/analysis/visualization/__init__.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | null | null | null |
sbaas/analysis/visualization/__init__.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | 2
|
2017-02-23T19:32:38.000Z
|
2020-01-14T19:13:05.000Z
|
from .visualization_query import visualization_query
from .visualization_execute import visualization_execute
from .visualization_io import visualization_io
| 52
| 56
| 0.910256
| 18
| 156
| 7.555556
| 0.333333
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070513
| 156
| 3
| 57
| 52
| 0.937931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41db9030414a547bee748d7b1b921cfa05cc8ed9
| 174
|
py
|
Python
|
ramsey/attention.py
|
dirmeier/ramsey
|
1bf843616bc4d530d0f48841511d20101b524935
|
[
"Apache-2.0"
] | 1
|
2022-03-30T00:00:36.000Z
|
2022-03-30T00:00:36.000Z
|
ramsey/attention.py
|
dirmeier/ramsey
|
1bf843616bc4d530d0f48841511d20101b524935
|
[
"Apache-2.0"
] | 2
|
2021-12-27T12:54:38.000Z
|
2022-01-03T16:41:02.000Z
|
ramsey/attention.py
|
dirmeier/ramsey
|
1bf843616bc4d530d0f48841511d20101b524935
|
[
"Apache-2.0"
] | null | null | null |
from ramsey._src.attention.attention import Attention
from ramsey._src.attention.multihead_attention import MultiHeadAttention
__all__ = ["Attention", "MultiHeadAttention"]
| 34.8
| 72
| 0.844828
| 18
| 174
| 7.777778
| 0.444444
| 0.142857
| 0.185714
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074713
| 174
| 4
| 73
| 43.5
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5120ca3898364efafca34722120b6137c56cb9f5
| 24,849
|
py
|
Python
|
scaffoldgraph/prioritization/generic_rules.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 121
|
2019-12-12T15:30:16.000Z
|
2022-02-28T02:00:54.000Z
|
scaffoldgraph/prioritization/generic_rules.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 8
|
2020-04-04T15:37:26.000Z
|
2021-11-17T07:30:31.000Z
|
scaffoldgraph/prioritization/generic_rules.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 28
|
2019-12-16T11:58:53.000Z
|
2021-11-19T09:57:46.000Z
|
"""
scaffoldgraph.prioritization.generic_rules
Generic rules for defining custom rulesets* for prioritizing
scaffolds during scaffold tree construction.
*scaffoldgraph.prioritization.prioritization_ruleset.ScaffoldRuleSet
Rule Prefix definitions:
------------------------
SCP - Scaffold property (parent scaffold)
RRP - Removed ring property
RSP - Property of ring system of removed ring before removal
"""
from rdkit.Chem import MolFromSmarts
from itertools import chain, compress
from abc import abstractmethod
from scaffoldgraph.core.fragment import collect_linker_atoms
from .prioritization_rules import BaseScaffoldFilterRule
__all__ = [
'SCPNumLinkerBonds',
'SCPDelta',
'SCPAbsDelta',
'SCPNumAromaticRings',
'SCPNumHetAtoms',
'SCPNumNAtoms',
'SCPNumOAtoms',
'SCPNumSAtoms',
'SCPNumXAtoms',
'RRPRingSize',
'RRPLinkerLength',
'RRPHetAtomLinked',
'RRPLinkerLengthX',
'RRPNumHetAtoms',
'RRPNumNAtoms',
'RRPNumOAtoms',
'RRPNumSAtoms',
'RRPNumXAtoms',
'RRPRingSizeX',
'RSPAbsDelta',
'RSPDelta',
'RSPNumAromaticRings',
'RSPNumHetAtoms',
'RSPNumNAtoms',
'RSPNumOAtoms',
'RSPNumRings',
'RSPNumSAtoms',
'RSPNumXAtoms',
'Tiebreaker'
]
class _MinMaxScaffoldFilterRule(BaseScaffoldFilterRule):
    """Abstract base class for generic rules where 'min' or 'max' filtering
    can be specified.

    This class is designed to be used for defining a set of generic rules
    to simplify the creation of custom rulesets for scaffold prioritization.

    See Also
    --------
    BaseScaffoldFilterRule
    """

    _f = {'min', 'max'}

    def __init__(self, min_max='min'):
        """
        Parameters
        ----------
        min_max : {'min', 'max'}, optional
            If 'min' use a minimum property based filtering. If 'max'
            use a maximum property based filtering. The default is
            'min'.
        """
        # Fix: message was an f-string with no placeholders.
        assert min_max in self._f, 'function must be min or max'
        self.func = min if min_max == 'min' else max

    def filter(self, child, parents):
        """Filter a set of parent scaffolds using a defined condition.

        Parameters
        ----------
        child : scaffoldgraph.core.Scaffold
            The child scaffold from which the parent scaffolds were obtained.
        parents : iterable
            An iterable of all parent scaffolds generated by a fragmenter.
        """
        props = [self.get_property(child, s) for s in parents]
        target = self.func(props)
        # Fix: simplified ``[True if p == val else False ...]``.
        return list(compress(parents, [p == target for p in props]))

    @abstractmethod
    def get_property(self, child, parent):
        """Return the property value used to rank ``parent`` (abstract)."""
        raise NotImplementedError()

    @property
    def name(self):
        """str: name of the rule (the class name)."""
        # Fix: removed a pointless '{}'.format(...) wrapper.
        return self.__class__.__name__
class SCPNumLinkerBonds(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by their number of acyclic linker bonds.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest acyclic linker bonds,
        'max' those with the most.
    """

    acyc_linker_smarts = MolFromSmarts('*!@!=!#*')

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        # Count matches of the acyclic single-bond SMARTS pattern.
        return len(parent.mol.GetSubstructMatches(self.acyc_linker_smarts))
class SCPDelta(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by their delta value.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the smallest delta value,
        'max' those with the largest.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        ring_count = parent.rings.count
        bond_ids = list(chain(*parent.rings.bond_rings))
        # Bonds appearing in more than one ring (fused/bridged bonds).
        shared = len(bond_ids) - len(set(bond_ids))
        return shared - (ring_count - 1)
class SCPAbsDelta(SCPDelta):
    """Prioritize parent scaffolds by their absolute delta value.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the smallest absolute delta value,
        'max' those with the largest.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        return abs(super().get_property(child, parent))
class SCPNumHetAtoms(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by their number of heteroatoms.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest heteroatoms,
        'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        # Heteroatom: anything that is neither hydrogen (1) nor carbon (6).
        return sum(1 for a in parent.atoms
                   if a.GetAtomicNum() not in (1, 6))
class SCPNumAromaticRings(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by their number of aromatic rings.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest aromatic rings,
        'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        return [ring.aromatic for ring in parent.rings].count(True)
class _SCPAtomicNumRule(_MinMaxScaffoldFilterRule):
    """Shared base: count atoms of a given atomic number in the parent."""

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max)
        self.atomic_num = atomic_num  # atomic number to count

    def get_property(self, child, parent):
        atomic_nums = [atom.GetAtomicNum() for atom in parent.atoms]
        return atomic_nums.count(self.atomic_num)
class SCPNumOAtoms(_SCPAtomicNumRule):
    """Prioritize parent scaffolds by their number of oxygen atoms.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest oxygen atoms,
        'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 8)  # 8 = atomic number of oxygen
class SCPNumNAtoms(_SCPAtomicNumRule):
    """Prioritize parent scaffolds by their number of nitrogen atoms.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest nitrogen atoms,
        'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 7)  # 7 = atomic number of nitrogen
class SCPNumSAtoms(_SCPAtomicNumRule):
    """Prioritize parent scaffolds by their number of sulphur atoms.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest sulphur atoms,
        'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 16)  # 16 = atomic number of sulphur
class SCPNumXAtoms(_SCPAtomicNumRule):
    """Prioritize parent scaffolds by the count of atoms with a chosen
    atomic number.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds with the fewest matching atoms,
        'max' those with the most.
    atomic_num : int
        Atomic number of the element to count.
    """

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max, atomic_num)
class RRPRingSize(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by the size of the removed ring.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring is smallest,
        'max' those whose removed ring is largest.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        # The removed ring is looked up on the child via the parent's index.
        return child.rings[parent.removed_ring_idx].size
class RRPNumHetAtoms(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by the removed ring's heteroatom count.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring had the fewest
        heteroatoms, 'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        ring = child.rings[parent.removed_ring_idx]
        # Heteroatom: anything that is neither hydrogen (1) nor carbon (6).
        return sum(1 for a in ring.atoms
                   if a.GetAtomicNum() not in (1, 6))
class _RRPAtomicNumRule(_MinMaxScaffoldFilterRule):
    """Shared base: count atoms of a given atomic number in the removed ring."""

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max)
        self.atomic_num = atomic_num  # atomic number to count

    def get_property(self, child, parent):
        ring = child.rings[parent.removed_ring_idx]
        atomic_nums = [atom.GetAtomicNum() for atom in ring.atoms]
        return atomic_nums.count(self.atomic_num)
class RRPNumOAtoms(_RRPAtomicNumRule):
    """Prioritize parent scaffolds by the removed ring's oxygen count.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring had the fewest oxygen
        atoms, 'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 8)  # 8 = atomic number of oxygen
class RRPNumNAtoms(_RRPAtomicNumRule):
    """Prioritize parent scaffolds by the removed ring's nitrogen count.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring had the fewest nitrogen
        atoms, 'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 7)  # 7 = atomic number of nitrogen
class RRPNumSAtoms(_RRPAtomicNumRule):
    """Prioritize parent scaffolds by the removed ring's sulphur count.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring had the fewest sulphur
        atoms, 'max' those with the most.
    """

    def __init__(self, min_max):
        super().__init__(min_max, 16)  # 16 = atomic number of sulphur
class RRPNumXAtoms(_RRPAtomicNumRule):
    """Prioritize parent scaffolds by the removed ring's count of atoms
    with a chosen atomic number.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring had the fewest matching
        atoms, 'max' those with the most.
    atomic_num : int
        Atomic number of the element to count.
    """

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max, atomic_num)
class RRPHetAtomLinked(_MinMaxScaffoldFilterRule):
    """Prioritize parent scaffolds by whether the removed ring's linker
    attaches to a ring heteroatom at either end.

    Parameters
    ----------
    min_max : {'min', 'max'}
        'min' keeps scaffolds whose removed ring's linker is not attached
        to a heteroatom, 'max' those whose linker is.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        linker, attachment_atoms = set(), set()
        ring = child.rings[parent.removed_ring_idx]
        for attachment in ring.get_attachment_points():
            attachment_atoms.update(collect_linker_atoms(
                child.mol.GetAtomWithIdx(attachment), linker, False
            ))
        # True when any atom at an attachment point is neither H nor C.
        return any(child.atoms[idx].GetAtomicNum() not in (1, 6)
                   for idx in attachment_atoms)
class RRPRingSizeX(RRPRingSize):
    """Prioritize scaffolds by whether the removed ring has the
    specified size X.

    'min' keeps scaffolds where the removed ring's size != X;
    'max' keeps scaffolds where the removed ring's size == X.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    size : int
        Ring size for prioritization.
    """

    def __init__(self, min_max, size):
        super().__init__(min_max)
        self.size = size

    def get_property(self, child, parent):
        """Return True if the removed ring's size equals ``self.size``."""
        return super().get_property(child, parent) == self.size
class RRPLinkerLength(_MinMaxScaffoldFilterRule):
    """Prioritize scaffolds by the size of the removed ring's linker.

    'min' keeps scaffolds where the ring with the smallest linker was
    removed; 'max' keeps those where the ring with the largest linker
    was removed.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return the number of atoms in the removed ring's linker."""
        linker_atoms = set()
        removed = child.rings[parent.removed_ring_idx]
        # collect_linker_atoms accumulates into linker_atoms in place
        for point in removed.get_attachment_points():
            collect_linker_atoms(
                child.mol.GetAtomWithIdx(point), linker_atoms, False
            )
        return len(linker_atoms)
class RRPLinkerLengthX(RRPLinkerLength):
    """Prioritize scaffolds by whether the removed ring's linker has
    the specified size X.

    'min' keeps scaffolds where the linker size != X;
    'max' keeps scaffolds where the linker size == X.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    length : int
        Linker size for prioritization.
    """

    def __init__(self, min_max, length):
        super().__init__(min_max)
        self.length = length

    def get_property(self, child, parent):
        """Return True if the linker length equals ``self.length``."""
        return super().get_property(child, parent) == self.length
class RSPDelta(_MinMaxScaffoldFilterRule):
    """Prioritize scaffolds by the delta value of the removed ring's
    ring system.

    'min' keeps scaffolds where the ring with the smallest ring-system
    delta value was removed; 'max' keeps those with the largest.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return (#duplicated ring-bond indices) - (#rings - 1) for
        the removed ring's ring system."""
        system = child.rings[parent.removed_ring_idx].get_ring_system()
        bond_ids = list(chain.from_iterable(ring.bix for ring in system))
        # bonds listed more than once are shared between rings
        num_shared = len(bond_ids) - len(set(bond_ids))
        return num_shared - (system.num_rings - 1)
class RSPAbsDelta(RSPDelta):
    """Prioritize scaffolds by the absolute delta value of the removed
    ring's ring system.

    'min' keeps scaffolds where the ring with the smallest absolute
    ring-system delta was removed; 'max' keeps those with the largest.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return the absolute value of the ring-system delta."""
        delta = super().get_property(child, parent)
        return abs(delta)
class RSPNumRings(_MinMaxScaffoldFilterRule):
    """Prioritize scaffolds by the size (number of rings) of the
    removed ring's ring system.

    'min' keeps scaffolds where the ring with the smallest ring system
    was removed; 'max' keeps those with the largest.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return the number of rings in the removed ring's system."""
        ring = child.rings[parent.removed_ring_idx]
        return ring.get_ring_system().num_rings
class RSPNumAromaticRings(_MinMaxScaffoldFilterRule):
    """Prioritize scaffolds by the number of aromatic rings in the
    removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    aromatic rings was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return the count of aromatic rings in the ring system."""
        system = child.rings[parent.removed_ring_idx].get_ring_system()
        aromatic_flags = [ring.aromatic for ring in system]
        return aromatic_flags.count(True)
class RSPNumHetAtoms(_MinMaxScaffoldFilterRule):
    """Prioritize scaffolds by the number of heteroatoms in the
    removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    heteroatoms was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        super().__init__(min_max)

    def get_property(self, child, parent):
        """Return the count of atoms that are neither H nor C in the
        removed ring's ring system."""
        system = child.rings[parent.removed_ring_idx].get_ring_system()
        return sum(
            1 for atom in system.atoms
            if atom.GetAtomicNum() not in (1, 6)
        )
class _RSPAtomicNumRule(_MinMaxScaffoldFilterRule):
    """Shared base for rules that count atoms of one atomic number in
    the removed ring's ring system."""

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max)
        # element (atomic number) counted by get_property
        self.atomic_num = atomic_num

    def get_property(self, child, parent):
        """Return the count of ring-system atoms whose atomic number
        equals ``self.atomic_num``."""
        system = child.rings[parent.removed_ring_idx].get_ring_system()
        return sum(
            1 for atom in system.atoms
            if atom.GetAtomicNum() == self.atomic_num
        )
class RSPNumNAtoms(_RSPAtomicNumRule):
    """Prioritize scaffolds by the number of nitrogen atoms in the
    removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    nitrogen atoms was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        # 7 = atomic number of nitrogen
        super().__init__(min_max, 7)
class RSPNumOAtoms(_RSPAtomicNumRule):
    """Prioritize scaffolds by the number of oxygen atoms in the
    removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    oxygen atoms was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        # 8 = atomic number of oxygen
        super().__init__(min_max, 8)
class RSPNumSAtoms(_RSPAtomicNumRule):
    """Prioritize scaffolds by the number of sulphur atoms in the
    removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    sulphur atoms was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    """

    def __init__(self, min_max):
        # 16 = atomic number of sulphur
        super().__init__(min_max, 16)
class RSPNumXAtoms(_RSPAtomicNumRule):
    """Prioritize scaffolds by the number of atoms with atomic number
    X in the removed ring's ring system.

    'min' keeps scaffolds where the ring whose system has the fewest
    such atoms was removed; 'max' keeps those with the most.

    Parameters
    ----------
    min_max : {'min', 'max'}
        Specify 'min' or 'max' to define the function used to
        prioritize scaffolds based on the returned property.
    atomic_num : int
        Atomic number for prioritization.
    """

    def __init__(self, min_max, atomic_num):
        super().__init__(min_max, atomic_num)
class Tiebreaker(BaseScaffoldFilterRule):
    """Tie-breaker rule (alphabetical).

    When several scaffolds survive all previous rules, keep only the
    one whose canonical SMILES sorts first.
    """

    def filter(self, child, parents):
        """Return a single-element list holding the parent with the
        lexicographically smallest SMILES string."""
        # min() with a key is equivalent to sorted(...)[0] here
        return [min(parents, key=lambda parent: parent.smiles)]

    @property
    def name(self):
        """Name of this rule (the class name)."""
        return self.__class__.__name__
| 29.688172
| 93
| 0.666506
| 3,164
| 24,849
| 5.061315
| 0.076485
| 0.046459
| 0.110154
| 0.061696
| 0.796553
| 0.781129
| 0.758711
| 0.740977
| 0.716873
| 0.686025
| 0
| 0.001281
| 0.245885
| 24,849
| 836
| 94
| 29.723684
| 0.853301
| 0.536641
| 0
| 0.520325
| 0
| 0
| 0.042401
| 0
| 0
| 0
| 0
| 0
| 0.004065
| 1
| 0.227642
| false
| 0
| 0.020325
| 0.020325
| 0.48374
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8501defe20298f2235f21b0e44752688024b99a1
| 27,067
|
py
|
Python
|
sedinet_infer.py
|
ericslevenson/SediNet
|
666ffaa5edc9b83d860aecaab309b12fc55600e9
|
[
"MIT"
] | null | null | null |
sedinet_infer.py
|
ericslevenson/SediNet
|
666ffaa5edc9b83d860aecaab309b12fc55600e9
|
[
"MIT"
] | null | null | null |
sedinet_infer.py
|
ericslevenson/SediNet
|
666ffaa5edc9b83d860aecaab309b12fc55600e9
|
[
"MIT"
] | null | null | null |
## Written by Daniel Buscombe,
## MARDA Science
## daniel@mardascience.com
##> Release v1.3 (July 2020)
from sedinet_models import *
###===================================================
def run_training_siso_simo(vars, train_csvfile, test_csvfile, name, res_folder,
                           mode, greyscale, dropout, numclass, scale):
    """
    Generate, train and evaluate a SediNet model.

    If ``numclass`` > 0 a categorical model is built and trained;
    otherwise a continuous (single-input, single/multi-output) model
    is trained for the variables in ``vars``. Global configuration
    (BATCH_SIZE, VALID_BATCH_SIZE, DO_AUG, ...) comes from
    ``sedinet_models``.

    Parameters
    ----------
    vars : list of str
        Names of the response variables (CSV columns) to model.
    train_csvfile, test_csvfile : str
        Paths to the CSV files describing the training / testing sets.
    name : str
        Prefix used for output file names.
    res_folder : str
        Folder where model files and plots are moved when finished.
    mode : str
        Tag passed through to training; used in output file names.
    greyscale : bool
        Whether imagery is treated as greyscale.
    dropout : float
        Dropout rate passed to the model builders.
    numclass : int
        Number of classes; 0 selects the continuous model.
    scale : bool
        If True, fit one RobustScaler per variable on the combined
        train+test values.
    """
    if numclass > 0:
        # map integer class indices to string labels
        ID_MAP = dict(zip(np.arange(numclass),
                          [str(k) for k in range(numclass)]))

    ## randomly selects imagery for training and testing sets, making
    ## sure both sets have at least 3 examples of each category
    train_idx, train_df = get_df(train_csvfile)
    test_idx, test_df = get_df(test_csvfile)

    ## create a sedinet model
    if numclass > 0:
        SM = make_cat_sedinet(ID_MAP, dropout, greyscale)
    else:
        SM = make_sedinet_siso_simo(vars, greyscale, dropout)

    # fit one scaler per variable; fit_transform fits the scaler in
    # place on the combined train+test values (the transform result is
    # deliberately discarded)
    CS = []
    if scale:
        for var in vars:
            cs = RobustScaler()  ## alternative = MinMaxScaler()
            cs.fit_transform(
                np.r_[train_df[var].values, test_df[var].values].reshape(-1, 1)
            )
            CS.append(cs)
            del cs

    ##==============================================
    ## train model
    if numclass == 0:
        if isinstance(BATCH_SIZE, list):
            # one model per (batch size, valid batch size) pair
            SMs = []
            weights_path = []
            for batch_size, valid_batch_size in zip(BATCH_SIZE,
                                                    VALID_BATCH_SIZE):
                sm, wp = train_sedinet_siso_simo(SM, train_df, test_df,
                                                 train_idx, test_idx, name,
                                                 vars, mode, greyscale, CS,
                                                 dropout, batch_size,
                                                 valid_batch_size,
                                                 res_folder, scale)
                SMs.append(sm)
                weights_path.append(wp)
                gc.collect()
        else:
            SM, weights_path = train_sedinet_siso_simo(SM, train_df, test_df,
                                                       train_idx, test_idx,
                                                       name, vars, mode,
                                                       greyscale, CS, dropout,
                                                       BATCH_SIZE,
                                                       VALID_BATCH_SIZE,
                                                       res_folder, scale)
    else:
        if isinstance(BATCH_SIZE, list):
            SMs = []
            weights_path = []
            for batch_size, valid_batch_size in zip(BATCH_SIZE,
                                                    VALID_BATCH_SIZE):
                sm, wp = train_sedinet_cat(SM, train_df, test_df, train_idx,
                                           test_idx, ID_MAP, vars, greyscale,
                                           name, mode, batch_size,
                                           valid_batch_size, res_folder)
                SMs.append(sm)
                weights_path.append(wp)
                gc.collect()
        else:
            SM, weights_path = train_sedinet_cat(SM, train_df, test_df,
                                                 train_idx, test_idx, ID_MAP,
                                                 vars, greyscale, name, mode,
                                                 BATCH_SIZE, VALID_BATCH_SIZE,
                                                 res_folder)
        K.clear_session()

    ##==============================================
    ## test model
    if numclass == 0:
        if isinstance(BATCH_SIZE, list):
            predict_test_train_siso_simo(train_df, test_df, train_idx,
                                         test_idx, vars, SMs, weights_path,
                                         name, mode, greyscale, CS,
                                         dropout, scale, DO_AUG)
        else:
            predict_test_train_siso_simo(train_df, test_df, train_idx,
                                         test_idx, vars, SM, weights_path,
                                         name, mode, greyscale, CS,
                                         dropout, scale, DO_AUG)
    else:
        if isinstance(BATCH_SIZE, list):
            predict_test_train_cat(train_df, test_df, train_idx, test_idx,
                                   vars[0], SMs, list(ID_MAP.keys()),
                                   weights_path, greyscale, name, DO_AUG)
        else:
            predict_test_train_cat(train_df, test_df, train_idx, test_idx,
                                   vars[0], SM, list(ID_MAP.keys()),
                                   weights_path, greyscale, name, DO_AUG)

    K.clear_session()

    ##===================================
    ## move model files and plots to the results folder
    tidy(name, res_folder)
# df = train_df
# indices=train_idx[:10]
# for_training=True
###==================================
def train_sedinet_cat(SM, train_df, test_df, train_idx, test_idx,
                      ID_MAP, vars, greyscale, name, mode, batch_size,
                      valid_batch_size, res_folder):
    """
    Train a categorical implementation of SediNet.

    Builds image generators, derives the weights file name from the
    run configuration, then either loads pre-existing weights (from
    the working directory or ``res_folder``) or trains the model with
    a best-only checkpoint and an exponentially decaying learning
    rate, saving history plot and model JSON next to the weights.

    Returns
    -------
    tuple
        ``(SM, weights_path)`` — the model and the path to its
        weights file.
    """
    ## create training and validation generators;
    ## only the training generator is augmented (DO_AUG)
    train_gen = get_data_generator_1image(train_df, train_idx, True, ID_MAP,
                                          vars[0], batch_size, greyscale,
                                          DO_AUG)
    valid_gen = get_data_generator_1image(test_df, test_idx, True, ID_MAP,
                                          vars[0], valid_batch_size,
                                          greyscale, False)

    # build the weights file name from the run configuration
    # (previously four duplicated branches; the strings are identical)
    shallow_tag = "_shallow" if SHALLOW is True else ""
    aug_tag = "_aug" if DO_AUG is True else "_noaug"
    weights_path = (name + "_" + mode + "_batch" + str(batch_size) +
                    "_im" + str(IM_HEIGHT) + "_" + str(IM_WIDTH) +
                    shallow_tag + "_" + vars[0] + "_" + CAT_LOSS +
                    aug_tag + ".hdf5")

    if os.path.exists(weights_path):
        # weights already present in the working directory: skip training
        SM.load_weights(weights_path)
        print("==========================================")
        print("Loading weights that already exist: %s" % (weights_path))
        print("Skipping model training")
    elif os.path.exists(res_folder + os.sep + weights_path):
        # weights already present in the results folder: skip training
        weights_path = res_folder + os.sep + weights_path
        SM.load_weights(weights_path)
        print("==========================================")
        print("Loading weights that already exist: %s" % (weights_path))
        print("Skipping model training")
    else:
        try:  # plotting the model requires pydot/graphviz; skip quietly
            plot_model(SM, weights_path.replace('.hdf5', '_model.png'),
                       show_shapes=True, show_layer_names=True)
        except:
            pass

        print("=========================================")
        print("[INFORMATION] schematic of the model has been written out to: " +
              weights_path.replace('.hdf5', '_model.png'))
        print("[INFORMATION] weights will be written out to: " + weights_path)

        # keep only the best weights (minimum validation loss)
        model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
                                           verbose=1, save_best_only=True,
                                           mode='min',
                                           save_weights_only=True)

        ## non-adaptive exponentially decreasing learning rate
        exponential_decay_fn = exponential_decay(MAX_LR, NUM_EPOCHS)
        lr_scheduler = LearningRateScheduler(exponential_decay_fn)
        callbacks_list = [model_checkpoint, lr_scheduler]

        ## train the model
        history = SM.fit(train_gen,
                         steps_per_epoch=len(train_idx) // batch_size,
                         epochs=NUM_EPOCHS,
                         callbacks=callbacks_list,
                         validation_data=valid_gen,
                         validation_steps=len(test_idx) // valid_batch_size)

        ## plot the loss and accuracy as a function of epoch
        plot_train_history_1var(history)
        plt.savefig(weights_path.replace('.hdf5', '_history.png'),
                    dpi=300, bbox_inches='tight')
        plt.close('all')

        # serialize model to JSON to use later to predict
        model_json = SM.to_json()
        with open(weights_path.replace('.hdf5', '.json'), "w") as json_file:
            json_file.write(model_json)

    return SM, weights_path
###===================================================
def train_sedinet_siso_simo(SM, train_df, test_df, train_idx, test_idx, name,
                            vars, mode, greyscale, CS, dropout, batch_size,
                            valid_batch_size, res_folder, scale):
    """
    Train a continuous (siso/simo) implementation of SediNet.

    Builds image generators, derives the weights file name from the
    run configuration, then either loads pre-existing weights (from
    the working directory or ``res_folder``) or trains the model with
    a best-only checkpoint and an exponentially decaying learning
    rate, saving scalers, summary report, history plot and model JSON
    next to the weights.

    Returns
    -------
    tuple
        ``(SM, weights_path)`` — the model and the path to its
        weights file.
    """
    ## create training and validation generators;
    ## only the training generator is augmented (DO_AUG)
    train_gen = get_data_generator_Nvars_siso_simo(train_df, train_idx, True,
                                                   vars, batch_size,
                                                   greyscale, CS, DO_AUG)
    valid_gen = get_data_generator_Nvars_siso_simo(test_df, test_idx, True,
                                                   vars, valid_batch_size,
                                                   greyscale, CS, False)

    # string saying how many variables, for the output files
    varstring = str(len(vars)) + 'vars'

    # build the weights file name from the run configuration
    # (previously eight duplicated branches; the strings are identical)
    shallow_tag = "_shallow" if SHALLOW is True else ""
    aug_tag = "_aug" if DO_AUG is True else "_noaug"
    scale_tag = "_scale" if scale is True else ""
    weights_path = (name + "_" + mode + "_batch" + str(batch_size) +
                    "_im" + str(IM_HEIGHT) + "_" + str(IM_WIDTH) +
                    shallow_tag + "_" + varstring + "_" + CONT_LOSS +
                    aug_tag + scale_tag + ".hdf5")

    # if weights already exist, load them and skip training
    if os.path.exists(weights_path):
        SM.load_weights(weights_path)
        print("==========================================")
        print("Loading weights that already exist: %s" % (weights_path))
        print("Skipping model training")
    # likewise if they already exist in res_folder
    elif os.path.exists(res_folder + os.sep + weights_path):
        weights_path = res_folder + os.sep + weights_path
        SM.load_weights(weights_path)
        print("==========================================")
        print("Loading weights that already exist: %s" % (weights_path))
        print("Skipping model training")
    else:  # train
        # dump fitted scalers (if any) so prediction can invert scaling
        if CS:
            joblib.dump(CS, weights_path.replace('.hdf5', '_scaler.pkl'))

        try:  # plot the model if pydot/graphviz installed
            plot_model(SM, weights_path.replace('.hdf5', '_model.png'),
                       show_shapes=True, show_layer_names=True)
            print("[INFORMATION] model schematic written to: " +
                  weights_path.replace('.hdf5', '_model.png'))
        except:
            pass

        print("==========================================")
        print("[INFORMATION] weights will be written out to: " + weights_path)

        # keep only the best weights (minimum validation loss)
        model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
                                           verbose=1, save_best_only=True,
                                           mode='min',
                                           save_weights_only=True)

        try:  # write a summary of the model to a txt file
            report_path = weights_path.replace('.hdf5', '') + '_report.txt'
            with open(report_path, 'w') as fh:
                # pass the file handle in as a lambda to make it callable
                SM.summary(print_fn=lambda x: fh.write(x + '\n'))
            print("[INFORMATION] model summary written to: " + report_path)
            with open(report_path, 'r') as fh:
                tmp = fh.readlines()
            print("===============================================")
            print("Total parameters: %s" %
                  (''.join(tmp).split('Total params:')[-1].split('\n')[0]))
            print("===============================================")
        except:
            pass

        ## non-adaptive exponentially decreasing learning rate
        exponential_decay_fn = exponential_decay(MAX_LR, NUM_EPOCHS)
        lr_scheduler = LearningRateScheduler(exponential_decay_fn)
        callbacks_list = [model_checkpoint, lr_scheduler]

        ## train the model
        history = SM.fit(train_gen,
                         steps_per_epoch=len(train_idx) // batch_size,
                         epochs=NUM_EPOCHS,
                         callbacks=callbacks_list,
                         validation_data=valid_gen,
                         validation_steps=len(test_idx) // valid_batch_size)

        ## plot the loss and accuracy as a function of epoch
        if len(vars) == 1:
            plot_train_history_1var_mae(history)
        else:
            plot_train_history_Nvar(history, vars, len(vars))
        plt.savefig(weights_path.replace('.hdf5', '_history.png'), dpi=300,
                    bbox_inches='tight')
        plt.close('all')

        # serialize model to JSON to use later to predict
        model_json = SM.to_json()
        with open(weights_path.replace('.hdf5', '.json'), "w") as json_file:
            json_file.write(model_json)

    return SM, weights_path
#
# ###===================================================
# def run_training_miso_mimo(vars, train_csvfile, test_csvfile, name, res_folder,
# mode, greyscale, auxin, dropout):
# """
# This function generates, trains and evaluates a sedinet model for
# continuous prediction
# """
# ##======================================
# ## this randomly selects imagery for training and testing imagery sets
# ## while also making sure that both training and tetsing sets
# ## have at least 3 examples of each category
# train_idx, train_df = get_df(train_csvfile)
# test_idx, test_df = get_df(test_csvfile)
#
# ##==============================================
# ## create a sedinet model to estimate category
# cnn = make_sedinet_miso_mimo(False, dropout)
#
# CS = []
# for var in vars:
# cs = RobustScaler() #MinMaxScaler()
# cs.fit_transform(
# np.r_[train_df[var].values, test_df[var].values].reshape(-1,1)
# )
# CS.append(cs)
# del cs
#
# CSaux = []
# cs = RobustScaler() #MinMaxScaler()
# cs.fit_transform(
# np.r_[train_df[auxin].values, test_df[auxin].values].reshape(-1,1)
# )
# CSaux.append(cs)
# del cs
#
# ##==============================================
# ## train model
# if type(BATCH_SIZE)==list:
# # SM, weights_path = train_sedinet_miso_mimo(cnn, train_df, test_df,
# # train_idx, test_idx, name, vars,
# # auxin, mode, greyscale,
# # CS, CSaux)
# SMs = []; weights_path = []
# for batch_size, valid_batch_size in zip(BATCH_SIZE, VALID_BATCH_SIZE):
# sm, wp = train_sedinet_miso_mimo(cnn, train_df, test_df,
# train_idx, test_idx, name,
# vars, auxin, mode, greyscale, CS, CSaux,
# batch_size, valid_batch_size)
# SMs.append(sm)
# weights_path.append(wp)
# else:
# SM, weights_path = train_sedinet_miso_mimo(cnn, train_df, test_df,
# train_idx, test_idx, name, vars,
# auxin, mode, greyscale,
# CS, CSaux)
#
# if type(BATCH_SIZE)==list:
# # test model
# predict_test_train_miso_mimo(train_df, test_df, train_idx, test_idx, vars,
# auxin, SMs, weights_path, name, mode,
# greyscale, CS, CSaux)
#
# else:
# predict_test_train_miso_mimo(train_df, test_df, train_idx, test_idx, vars,
# auxin, SM, weights_path, name, mode,
# greyscale, CS, CSaux)
#
# K.clear_session()
#
# ##==============================================
# ## move model files and plots to the results folder
# tidy(res_folder)#, name)
#
# ###===================================================
# def train_sedinet_miso_mimo(cnn, train_df, test_df, train_idx, test_idx,
# name, vars, auxin, mode, greyscale, CS, CSaux):
# """
# This function trains an implementation of sedinet
# """
#
# dense_neurons = 4
#
# ##==============================================
# ## create training and testing file generators,
# # set the weights path, plot the model, and create
# # a callback list for model training
# varstring = ''.join([str(k)+'_' for k in vars])
# weights_path = name+"_"+auxin+"_"+mode+"_batch"+str(BATCH_SIZE)+"_"+\
# varstring+"_checkpoint.hdf5"
#
# # Create the MLP and CNN models
# mlp = make_mlp(1) #dense_neurons
#
# # Create the input to the final set of layers as the output of both the MLP and CNN
# combinedInput = concatenate([mlp.output, cnn.output])
#
# # The final fully-connected layer head will have two dense layers
# # (one relu and one sigmoid)
# x = Dense(dense_neurons, activation="relu")(combinedInput)
# x = Dense(1, activation="sigmoid")(x)
#
# ## The final model accepts numerical data on the MLP input and
# ## images on the CNN input, outputting a single value
# outputs = []
# for var in vars:
# outputs.append(Dense(units=1, activation='linear', name=var+'_output')(x) )
#
# loss = dict(zip([k+"_output" for k in vars], ['mse' for k in vars]))
# metrics = dict(zip([k+"_output" for k in vars], ['mae' for k in vars]))
#
# # our final model will accept categorical/numerical data on the MLP
# # input and images on the CNN input
# SM = Model(inputs=[mlp.input, cnn.input], outputs=outputs)
#
# SM.compile(optimizer=OPT, loss=loss, metrics=metrics)
#
# try:
# plot_model(SM, weights_path.replace('.hdf5', '_model.png'),
# show_shapes=True, show_layer_names=True)
# print("[INFORMATION] model schematic written to: "+\
# weights_path.replace('.hdf5', '_model.png'))
# except:
# pass
#
# print("==========================================")
# print("[INFORMATION] weights will be written out to: "+weights_path)
#
#
# try:
# with open(weights_path.replace('.hdf5','') + '_report.txt','w') as fh:
# # Pass the file handle in as a lambda function to make it callable
# SM.summary(print_fn=lambda x: fh.write(x + '\n'))
# fh.close()
# print("[INFORMATION] model summary written to: "+\
# weights_path.replace('.hdf5','') + '_report.txt')
# with open(weights_path.replace('.hdf5','') + '_report.txt','r') as fh:
# tmp = fh.readlines()
# print("===============================================")
# print("Total parameters: %s" % (''.join(tmp).split('Total params:')[-1].split('\n')[0]))
# fh.close()
# print("===============================================")
# except:
# pass
#
#
# reduceloss_plat = ReduceLROnPlateau(monitor='val_loss', factor=FACTOR, patience=STOP_PATIENCE,
# verbose=1, mode='auto', min_delta=MIN_DELTA,
# cooldown=5, min_lr=MIN_LR)
#
# earlystop = EarlyStopping(monitor="val_loss", mode="auto",
# patience=STOP_PATIENCE)
#
# model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
# verbose=1,
# save_best_only=True, mode='min',
# save_weights_only = True)
#
#
# callbacks_list = [model_checkpoint, reduceloss_plat, earlystop]
#
# #aux_mean = train_df[auxin].mean()
# #aux_std = train_df[auxin].std()
#
# train_gen = get_data_generator_Nvars_miso_mimo(train_df, train_idx, True,
# vars, auxin, BATCH_SIZE,
# greyscale, CS, CSaux)
# valid_gen = get_data_generator_Nvars_miso_mimo(test_df, test_idx, True,
# vars, auxin, VALID_BATCH_SIZE,
# greyscale, CS, CSaux)
#
# ##==============================================
# ## train the model
# history = SM.fit(train_gen,
# steps_per_epoch=len(train_idx)//BATCH_SIZE,
# epochs=NUM_EPOCHS,
# callbacks=callbacks_list,
# validation_data=valid_gen,
# validation_steps=len(test_idx)//VALID_BATCH_SIZE)
# #use_multiprocessing=True,
#
# ###===================================================
# ## Plot the loss and accuracy as a function of epoch
# if len(vars)==1:
# plot_train_history_1var_mae(history)
# else:
# plot_train_history_Nvar(history, vars, len(vars))
#
# varstring = ''.join([str(k)+'_' for k in vars])
# plt.savefig(weights_path.replace('.hdf5', '_history.png'),
# dpi=300, bbox_inches='tight')
# plt.close('all')
#
# # serialize model to JSON to use later to predict
# model_json = SM.to_json()
# with open(weights_path.replace('.hdf5','.json'), "w") as json_file:
# json_file.write(model_json)
#
# ## do some garbage collection
# #gc.collect()
#
# return SM, weights_path
| 44.154976
| 120
| 0.52721
| 2,955
| 27,067
| 4.561421
| 0.115398
| 0.062838
| 0.027005
| 0.031011
| 0.849544
| 0.830996
| 0.813413
| 0.78218
| 0.775206
| 0.767639
| 0
| 0.00554
| 0.306499
| 27,067
| 612
| 121
| 44.227124
| 0.712535
| 0.479625
| 0
| 0.655738
| 0
| 0
| 0.106394
| 0.025384
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012295
| false
| 0.016393
| 0.004098
| 0
| 0.02459
| 0.094262
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
850e17929b067ae405a15eee34684255d6729377
| 18
|
py
|
Python
|
test/mock_resources/src/catkin_test/d/src/d/__init__.py
|
kgeorgiev93/catkin
|
2645a33ed36516910dfb3409ebbb36d9f907f53e
|
[
"BSD-3-Clause"
] | 742
|
2017-07-05T02:49:36.000Z
|
2022-03-30T12:55:43.000Z
|
test/mock_resources/src/catkin_test/d/src/d/__init__.py
|
kgeorgiev93/catkin
|
2645a33ed36516910dfb3409ebbb36d9f907f53e
|
[
"BSD-3-Clause"
] | 73
|
2017-07-06T12:50:51.000Z
|
2022-03-07T08:07:07.000Z
|
test/mock_resources/src/catkin_test/d/src/d/__init__.py
|
kgeorgiev93/catkin
|
2645a33ed36516910dfb3409ebbb36d9f907f53e
|
[
"BSD-3-Clause"
] | 425
|
2017-07-04T22:03:29.000Z
|
2022-03-29T06:59:06.000Z
|
# Use the function form so this module imports on both Python 2 and
# Python 3; the statement form `print "IMPORTING"` is a SyntaxError
# on Python 3. Output is identical.
print("IMPORTING")
| 9
| 17
| 0.777778
| 2
| 18
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 18
| 1
| 18
| 18
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
518e4d10b8ec1c01d79f3fd9ad79c4b1bcddaff7
| 4,509
|
py
|
Python
|
runners/runner.py
|
jpatel888/deep-screens
|
f20ff402aed202982353ad12d1a8b9480c4eff59
|
[
"Apache-2.0"
] | null | null | null |
runners/runner.py
|
jpatel888/deep-screens
|
f20ff402aed202982353ad12d1a8b9480c4eff59
|
[
"Apache-2.0"
] | null | null | null |
runners/runner.py
|
jpatel888/deep-screens
|
f20ff402aed202982353ad12d1a8b9480c4eff59
|
[
"Apache-2.0"
] | null | null | null |
from base.base_run import BaseRun
from tqdm import tqdm
import numpy as np
class Runner(BaseRun):
def __init__(self, sess, model, data, config, logger, figure):
super(Runner, self).__init__(sess, model, data, config, logger, figure)
def train_epoch(self, epoch_num):
"""
Runs training and logging on train images
:param epoch_num: current iteration of all training data passed
:return:
"""
loop = tqdm(range(self.config.run.num_iter_per_train_epoch), desc="Running Train Epoch " + str(epoch_num))
losses, l2_losses, sigmoid_losses = [], [], []
for _ in loop:
loss, l2_loss, sigmoid_loss, input_image, label, logit = self.train_step()
losses.append(loss)
l2_losses.append(l2_loss)
sigmoid_losses.append(sigmoid_loss)
loss = np.mean(losses)
l2_loss = np.mean(l2_losses)
sigmoid_loss = np.mean(sigmoid_losses)
cur_it = self.model.global_step_tensor.eval(self.sess)
summaries_dict = {
self.config.exp_name + '_loss': loss,
self.config.exp_name + '_l2_loss': l2_loss,
self.config.exp_name + '_sigmoid_loss': sigmoid_loss
}
if epoch_num % 1 == 0:
self.figure.draw_figure((input_image, label, logit), cur_it, summarizer="train", tag="images")
self.logger.summarize(cur_it, summaries_dict=summaries_dict, summarizer="train")
self.model.save(self.sess)
def test_epoch(self, epoch_num):
"""
Runs testing and logging on test images
:param epoch_num: current iteration of all training data passed
:return:
"""
loop = tqdm(range(self.config.run.num_iter_per_test_epoch), desc="Running Test Epoch " + str(epoch_num))
losses, l2_losses, sigmoid_losses = [], [], []
for _ in loop:
loss, l2_loss, sigmoid_loss, input_image, label, logit = self.test_step()
losses.append(loss)
l2_losses.append(l2_loss)
sigmoid_losses.append(sigmoid_loss)
loss = np.mean(losses)
l2_loss = np.mean(l2_losses)
sigmoid_loss = np.mean(sigmoid_losses)
cur_it = self.model.global_step_tensor.eval(self.sess)
summaries_dict = {
self.config.exp_name + '_loss': loss,
self.config.exp_name + '_l2_loss': l2_loss,
self.config.exp_name + '_sigmoid_loss': sigmoid_loss
}
if epoch_num % 1 == 0:
self.figure.draw_figure((input_image, label, logit), cur_it, summarizer="test", tag="images")
self.logger.summarize(cur_it, summaries_dict=summaries_dict, summarizer="test")
self.model.save(self.sess)
def train_step(self):
"""
Runs one step of training
:return: loss, input image, and model output
"""
batch_x, batch_y = next(self.data.next_batch(self.config.model.batch_size, 'train'))
feed_dict = {self.model.input: batch_x, self.model.y: batch_y}
optimizer, loss, l2_loss, sigmoid_loss, results = self.sess.run([self.model.train_step,
self.model.loss,
self.model.l2_loss,
self.model.sigmoid_cross_entropy_loss,
self.model.post_processed],
feed_dict=feed_dict)
return loss, l2_loss, sigmoid_loss, batch_x[0], batch_y[0], results[0]
def test_step(self):
"""
Runs one step of testing
:return: loss, input_image, model output
"""
batch_x, batch_y = next(self.data.next_batch(self.config.model.batch_size, 'test'))
feed_dict = {self.model.input: batch_x, self.model.y: batch_y}
loss, l2_loss, sigmoid_loss, results = self.sess.run([self.model.loss,
self.model.l2_loss,
self.model.sigmoid_cross_entropy_loss,
self.model.post_processed],
feed_dict=feed_dict)
return loss, l2_loss, sigmoid_loss, batch_x[0], batch_y[0], results[0]
| 49.01087
| 114
| 0.558217
| 534
| 4,509
| 4.449438
| 0.168539
| 0.064394
| 0.03367
| 0.042929
| 0.85564
| 0.837963
| 0.77399
| 0.77399
| 0.77399
| 0.77399
| 0
| 0.010807
| 0.343313
| 4,509
| 91
| 115
| 49.549451
| 0.791624
| 0.080727
| 0
| 0.626866
| 0
| 0
| 0.032468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.044776
| 0
| 0.164179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51941a84b21267e5d8fc8f471875b422877f9169
| 146
|
py
|
Python
|
Projects Cousera/PythonDataStructures/String is a sequence.py
|
teksaulo/My-projects
|
3f328c81d12bc77f5374c4a73b92184732d9038c
|
[
"MIT"
] | null | null | null |
Projects Cousera/PythonDataStructures/String is a sequence.py
|
teksaulo/My-projects
|
3f328c81d12bc77f5374c4a73b92184732d9038c
|
[
"MIT"
] | null | null | null |
Projects Cousera/PythonDataStructures/String is a sequence.py
|
teksaulo/My-projects
|
3f328c81d12bc77f5374c4a73b92184732d9038c
|
[
"MIT"
] | null | null | null |
fruit = 'Banana'
letter = fruit[1]
print(letter)
# 0 is the first letter
fruit = 'Banana'
letter = fruit[0]
print(letter)
print(len('banana')*7)
| 14.6
| 23
| 0.684932
| 23
| 146
| 4.347826
| 0.478261
| 0.33
| 0.34
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.150685
| 146
| 9
| 24
| 16.222222
| 0.774194
| 0.143836
| 0
| 0.571429
| 0
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
51cc59dc08e2d2da2bf0ef5457e842207f58d2e5
| 26
|
py
|
Python
|
python_src/success_polly_speech/__init__.py
|
SUCCESS-MURI/success_polly_speech
|
131d80dab1d9d295103590ceee2b0325327aab5f
|
[
"MIT"
] | null | null | null |
python_src/success_polly_speech/__init__.py
|
SUCCESS-MURI/success_polly_speech
|
131d80dab1d9d295103590ceee2b0325327aab5f
|
[
"MIT"
] | null | null | null |
python_src/success_polly_speech/__init__.py
|
SUCCESS-MURI/success_polly_speech
|
131d80dab1d9d295103590ceee2b0325327aab5f
|
[
"MIT"
] | null | null | null |
from polly_speech import *
| 26
| 26
| 0.846154
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51fc1aca6aa7bb7a2733067c94cf52c5795ae1b6
| 59
|
py
|
Python
|
test.py
|
Helper1010/Teste3
|
b00fb503a07c0790831748f78f88103271b85107
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Helper1010/Teste3
|
b00fb503a07c0790831748f78f88103271b85107
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Helper1010/Teste3
|
b00fb503a07c0790831748f78f88103271b85107
|
[
"Apache-2.0"
] | null | null | null |
from fun import zero
def teste():
assert zero (8) == 0
| 14.75
| 24
| 0.627119
| 10
| 59
| 3.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.254237
| 59
| 4
| 24
| 14.75
| 0.795455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfa2a81e1e3f0143652b01a9289fb7a2c3304795
| 46
|
py
|
Python
|
templates/Web/_composition/Flask/Project.Flask/backend/app.py
|
Tryweirder/WebTemplateStudio
|
3db53b6099510eb269e68c809fc9a073f2f662c6
|
[
"MIT"
] | 2,105
|
2019-05-06T23:16:48.000Z
|
2022-03-29T03:54:21.000Z
|
templates/Web/_composition/Flask/Project.Flask/backend/app.py
|
Tryweirder/WebTemplateStudio
|
3db53b6099510eb269e68c809fc9a073f2f662c6
|
[
"MIT"
] | 736
|
2019-05-09T17:25:37.000Z
|
2022-03-02T04:12:05.000Z
|
templates/Web/_composition/Flask/Project.Flask/backend/app.py
|
Tryweirder/WebTemplateStudio
|
3db53b6099510eb269e68c809fc9a073f2f662c6
|
[
"MIT"
] | 229
|
2019-05-07T21:44:03.000Z
|
2022-02-15T14:22:11.000Z
|
from Param_SourceName_Snake.server import app
| 23
| 45
| 0.891304
| 7
| 46
| 5.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfd2c0920e1f2c3b6dfc0247ec352209cdffb647
| 33
|
py
|
Python
|
amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/ami_handlers/ami_store/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/ami_handlers/ami_store/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/ami_handlers/ami_store/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .IAmiStore import IAmiStore
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cffceb86a811174b92331ad4a5d58cc8ce023087
| 85
|
py
|
Python
|
src/hub/dataload/sources/geno2mp/__init__.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 39
|
2017-07-01T22:34:39.000Z
|
2022-03-15T22:25:59.000Z
|
src/hub/dataload/sources/geno2mp/__init__.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 105
|
2017-06-28T17:26:06.000Z
|
2022-03-17T17:49:53.000Z
|
src/hub/dataload/sources/geno2mp/__init__.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 15
|
2015-10-15T20:46:50.000Z
|
2021-07-12T19:17:49.000Z
|
from .geno2mp_upload import Geno2MPUploader
from .geno2mp_dump import Geno2MPDumper
| 21.25
| 43
| 0.870588
| 10
| 85
| 7.2
| 0.7
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.105882
| 85
| 3
| 44
| 28.333333
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5c6235e08a7161286ea9f405ccf58f245d018fc0
| 132
|
py
|
Python
|
adslproxy/__init__.py
|
ruoshuifuping/AdslProxyPool
|
271aec68432509911a19b0c11777309a81f21fc9
|
[
"MIT"
] | 2
|
2017-07-17T11:00:55.000Z
|
2018-03-15T09:56:53.000Z
|
adslproxy/__init__.py
|
ruoshuifuping/AdslProxyPool
|
271aec68432509911a19b0c11777309a81f21fc9
|
[
"MIT"
] | null | null | null |
adslproxy/__init__.py
|
ruoshuifuping/AdslProxyPool
|
271aec68432509911a19b0c11777309a81f21fc9
|
[
"MIT"
] | 1
|
2018-11-22T10:03:14.000Z
|
2018-11-22T10:03:14.000Z
|
__version__ = '0.9.9'
from adslproxy.db import RedisClient
from adslproxy.api import server
def version():
return __version__
| 16.5
| 36
| 0.765152
| 18
| 132
| 5.166667
| 0.666667
| 0.27957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.159091
| 132
| 7
| 37
| 18.857143
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0.038168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5ccb6fbdc2ee5d905976b290d705d1e4b5b2ce51
| 29
|
py
|
Python
|
sgpublish/exporter/ui/publish/__init__.py
|
vfxetc/sgpublish
|
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
|
[
"BSD-3-Clause"
] | 3
|
2018-03-19T03:58:08.000Z
|
2020-09-30T17:47:16.000Z
|
sgpublish/exporter/ui/publish/__init__.py
|
vfxetc/sgpublish
|
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
|
[
"BSD-3-Clause"
] | null | null | null |
sgpublish/exporter/ui/publish/__init__.py
|
vfxetc/sgpublish
|
f6dcdb7d727ca78bc29ce76b91f13962628bfea1
|
[
"BSD-3-Clause"
] | 2
|
2017-07-04T19:29:47.000Z
|
2019-07-19T01:15:43.000Z
|
from .generic import Widget
| 9.666667
| 27
| 0.793103
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 2
| 28
| 14.5
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a5235598e8c0eba4a27f566858ab17f638887a4
| 178
|
py
|
Python
|
services/web/config.py
|
mbronk/github-microservices
|
c3da356240e4feaa2e3dea580fb592dc82482744
|
[
"MIT"
] | 5
|
2019-07-16T16:18:22.000Z
|
2019-10-06T01:55:02.000Z
|
services/web/config.py
|
mbronk/github-microservices
|
c3da356240e4feaa2e3dea580fb592dc82482744
|
[
"MIT"
] | 6
|
2019-07-16T17:45:04.000Z
|
2019-07-28T20:52:37.000Z
|
services/web/config.py
|
mbronk/github-microservices
|
c3da356240e4feaa2e3dea580fb592dc82482744
|
[
"MIT"
] | 6
|
2019-07-16T16:27:46.000Z
|
2019-07-16T18:39:18.000Z
|
import os
class BaseConfig:
"""Base configuration"""
DEBUG = False
TESTING = False
GITHUB_MANAGER_MICROSERVICES_IP=os.environ['GITHUB_MANAGER_MICROSERVICES_IP']
| 22.25
| 81
| 0.747191
| 20
| 178
| 6.35
| 0.7
| 0.204724
| 0.409449
| 0.440945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 178
| 7
| 82
| 25.428571
| 0.858108
| 0.101124
| 0
| 0
| 0
| 0
| 0.201299
| 0.201299
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7a79c423b9f983557398eaaaf8c35a2c945bb164
| 87
|
py
|
Python
|
shapes/__init__.py
|
nalbarr/hello-shapes-python
|
438cc4207fc9d8bf2019a821a3c2fbab7f4c432d
|
[
"MIT"
] | null | null | null |
shapes/__init__.py
|
nalbarr/hello-shapes-python
|
438cc4207fc9d8bf2019a821a3c2fbab7f4c432d
|
[
"MIT"
] | null | null | null |
shapes/__init__.py
|
nalbarr/hello-shapes-python
|
438cc4207fc9d8bf2019a821a3c2fbab7f4c432d
|
[
"MIT"
] | null | null | null |
### Shapes module
###
from .shapes import Square, Triangle
from .helpers import square
| 17.4
| 36
| 0.747126
| 11
| 87
| 5.909091
| 0.636364
| 0.369231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 4
| 37
| 21.75
| 0.878378
| 0.149425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f815ac55f39d11f6ae75876c52ee031b09a92ce
| 80
|
py
|
Python
|
applitools/logger.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 11
|
2016-04-20T21:21:37.000Z
|
2020-04-27T19:46:56.000Z
|
applitools/logger.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 15
|
2017-01-11T04:58:31.000Z
|
2019-09-13T18:00:35.000Z
|
applitools/logger.py
|
applitools/eyes.selenium.python
|
3a09a3372a3a8915b3c97ee54fc223580c45c0a3
|
[
"Apache-2.0"
] | 15
|
2016-03-23T22:06:39.000Z
|
2020-06-14T09:11:58.000Z
|
from applitools.core.logger import * # noqa
from applitools.core import logger
| 26.666667
| 44
| 0.8
| 11
| 80
| 5.818182
| 0.545455
| 0.4375
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 80
| 2
| 45
| 40
| 0.927536
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8fb2b0e2e2fbfd24896898d40a62007a6c2d3b82
| 2,156
|
py
|
Python
|
leetcode/72.edit-distance.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 177
|
2017-08-21T08:57:43.000Z
|
2020-06-22T03:44:22.000Z
|
leetcode/72.edit-distance.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 2
|
2018-09-06T13:39:12.000Z
|
2019-06-03T02:54:45.000Z
|
leetcode/72.edit-distance.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 23
|
2017-08-23T06:01:28.000Z
|
2020-04-20T03:17:36.000Z
|
# f[i][j] = min(
# f[i - 1][j] + 1,
# f[i][j - 1] + 1,
# f[i - 1][j - 1] + 1, where s1[i - 1] != s2[j - 1]
# f[i - 1][j - 1] where s1[i - 1] == s2[j - 1]
# )
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
m = len(word1)
n = len(word2)
table = [[float('inf') for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
table[i][j] = abs(i - j)
continue
table[i][j] = min(table[i][j], table[i - 1][j] + 1) # delete
table[i][j] = min(table[i][j], table[i][j - 1] + 1) # insert
if word1[i - 1] == word2[j - 1]:
table[i][j] = min(table[i][j], table[i - 1][j - 1]) # last equal
else:
table[i][j] = min(table[i][j], table[i - 1][j - 1] + 1) # replace
return table[m][n]
# sliding array
class Solution2(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
m = len(word1)
n = len(word2)
k = 2
table = [[float('inf') for _ in range(n + 1)] for _ in range(k)]
for i in range(m + 1):
for j in range(n + 1):
table[i % k][j] = float('inf')
if i == 0 or j == 0:
table[i % k][j] = abs(i - j)
continue
table[i % k][j] = min(table[i % k][j], table[(i - 1) % k][j] + 1) # delete
table[i % k][j] = min(table[i % k][j], table[i % k][j - 1] + 1) # insert
if word1[i - 1] == word2[j - 1]:
table[i % k][j] = min(table[i % k][j], table[(i - 1) % k][j - 1]) # last equal
else:
table[i % k][j] = min(table[i % k][j], table[(i - 1) % k][j - 1] + 1) # replace
return table[m % k][n]
| 32.179104
| 99
| 0.375232
| 314
| 2,156
| 2.563694
| 0.146497
| 0.201242
| 0.095652
| 0.109317
| 0.919255
| 0.885714
| 0.873292
| 0.756522
| 0.686957
| 0.658385
| 0
| 0.056525
| 0.442022
| 2,156
| 66
| 100
| 32.666667
| 0.612635
| 0.154917
| 0
| 0.5
| 0
| 0
| 0.005205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ffdc83e59a124d7d65731bde9307b3370fdaf20
| 596
|
py
|
Python
|
raiden/network/proxies/__init__.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
raiden/network/proxies/__init__.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
raiden/network/proxies/__init__.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
# isort:skip_file
from raiden.network.proxies.discovery import Discovery # NOQA
from raiden.network.proxies.token import Token # NOQA
from raiden.network.proxies.token_network_registry import TokenNetworkRegistry # NOQA
from raiden.network.proxies.token_network import TokenNetwork # NOQA
from raiden.network.proxies.secret_registry import SecretRegistry # NOQA
from raiden.network.proxies.payment_channel import PaymentChannel # NOQA
from raiden.network.proxies.token_network_registry import TokenNetworkRegistry # NOQA
from raiden.network.proxies.user_deposit import UserDeposit # NOQA
| 59.6
| 86
| 0.845638
| 75
| 596
| 6.6
| 0.293333
| 0.161616
| 0.274747
| 0.387879
| 0.616162
| 0.50303
| 0.436364
| 0.412121
| 0.412121
| 0.412121
| 0
| 0
| 0.097315
| 596
| 9
| 87
| 66.222222
| 0.920074
| 0.092282
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
890b97c26b9fe2b1c2fe154ac91cb4c4c705610a
| 30
|
py
|
Python
|
sRemo/__init__.py
|
zunda-pixel/sRemo
|
43ab04580cc85c1070445c9e5a0b240205d0dd67
|
[
"MIT"
] | null | null | null |
sRemo/__init__.py
|
zunda-pixel/sRemo
|
43ab04580cc85c1070445c9e5a0b240205d0dd67
|
[
"MIT"
] | null | null | null |
sRemo/__init__.py
|
zunda-pixel/sRemo
|
43ab04580cc85c1070445c9e5a0b240205d0dd67
|
[
"MIT"
] | null | null | null |
from .sRemoAPI import sRemoAPI
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
64e01884cc262ee9e29e704a95e62245588c5349
| 24
|
py
|
Python
|
foundation/rest/__init__.py
|
tbone255/foundation
|
ca76fdd9b5345fead2d200f829eb67ba77bc865e
|
[
"MIT"
] | null | null | null |
foundation/rest/__init__.py
|
tbone255/foundation
|
ca76fdd9b5345fead2d200f829eb67ba77bc865e
|
[
"MIT"
] | null | null | null |
foundation/rest/__init__.py
|
tbone255/foundation
|
ca76fdd9b5345fead2d200f829eb67ba77bc865e
|
[
"MIT"
] | null | null | null |
from .viewsets import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f07be191a02f3415187adc7e0fd45abfb631fbc
| 81
|
py
|
Python
|
ilpexp/system/popper/__init__.py
|
logic-and-learning-lab/Popper-experiments
|
94d7499e32c3c9b01da5fd53cddef8a8afa8d509
|
[
"MIT"
] | 3
|
2022-01-30T09:51:17.000Z
|
2022-03-13T20:04:09.000Z
|
ilpexp/system/popper/__init__.py
|
logic-and-learning-lab/Popper-experiments
|
94d7499e32c3c9b01da5fd53cddef8a8afa8d509
|
[
"MIT"
] | 5
|
2022-01-30T09:38:12.000Z
|
2022-01-31T08:34:49.000Z
|
ilpexp/system/popper/__init__.py
|
logic-and-learning-lab/Popper-experiments
|
94d7499e32c3c9b01da5fd53cddef8a8afa8d509
|
[
"MIT"
] | null | null | null |
from .popper import Popper, PopperTrainSettings, BASIC_POPPER, generate_bias_file
| 81
| 81
| 0.876543
| 10
| 81
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 81
| 1
| 81
| 81
| 0.906667
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f085f71db4bbc22f0220f5b69daf0767cd3714e
| 984
|
py
|
Python
|
isiscb/isisdata/migrations/0040_auto_20160701_1946.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 4
|
2016-01-25T20:35:33.000Z
|
2020-04-07T15:39:52.000Z
|
isiscb/isisdata/migrations/0040_auto_20160701_1946.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 41
|
2015-08-19T17:34:41.000Z
|
2022-03-11T23:19:01.000Z
|
isiscb/isisdata/migrations/0040_auto_20160701_1946.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 2
|
2020-11-25T20:18:18.000Z
|
2021-06-24T15:15:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0039_auto_20160701_1900'),
]
operations = [
migrations.AlterField(
model_name='historicalperson',
name='personal_name_first',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='historicalperson',
name='personal_name_last',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='person',
name='personal_name_first',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='person',
name='personal_name_last',
field=models.CharField(max_length=255, blank=True),
),
]
| 28.114286
| 63
| 0.597561
| 93
| 984
| 6.064516
| 0.387097
| 0.141844
| 0.177305
| 0.205674
| 0.719858
| 0.719858
| 0.719858
| 0.719858
| 0.611702
| 0.611702
| 0
| 0.041607
| 0.291667
| 984
| 34
| 64
| 28.941176
| 0.767575
| 0.021341
| 0
| 0.714286
| 0
| 0
| 0.155047
| 0.023933
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f10a8da3d99dfd2ffcb2c0d358674670aff4557
| 111
|
py
|
Python
|
finviz/__init__.py
|
vonSpok/finviz
|
c369e78d4ed22557de66774fc3ff3905836603d5
|
[
"MIT"
] | null | null | null |
finviz/__init__.py
|
vonSpok/finviz
|
c369e78d4ed22557de66774fc3ff3905836603d5
|
[
"MIT"
] | null | null | null |
finviz/__init__.py
|
vonSpok/finviz
|
c369e78d4ed22557de66774fc3ff3905836603d5
|
[
"MIT"
] | null | null | null |
from finviz.main_func import Stock
from finviz.portfolio import Portfolio
from finviz.screener import Screener
| 27.75
| 38
| 0.864865
| 16
| 111
| 5.9375
| 0.5
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 111
| 3
| 39
| 37
| 0.959596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f59730fe591bb7c8b7dad23ad97dbf1c6bf4222
| 105
|
py
|
Python
|
project_template/project_settings_local.sample.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 52
|
2016-09-13T03:50:58.000Z
|
2022-02-23T16:25:08.000Z
|
project_template/project_settings_local.sample.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 304
|
2016-08-11T14:17:30.000Z
|
2020-07-22T13:35:18.000Z
|
project_template/project_settings_local.sample.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 12
|
2016-09-21T18:46:35.000Z
|
2021-02-15T19:37:50.000Z
|
# This file is ignored by VCS.
from project_settings import *
# Override the default project settings.
| 17.5
| 40
| 0.771429
| 15
| 105
| 5.333333
| 0.866667
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180952
| 105
| 5
| 41
| 21
| 0.930233
| 0.638095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
56b3d0d3f5529b9d83610667882014cae9bdb8e7
| 30
|
py
|
Python
|
back/models/decoders/__init__.py
|
nizhib/portrait-demo
|
b4dc80187824a07c3562c1580a1a7c4f2f4ecc93
|
[
"MIT"
] | 47
|
2019-04-10T07:27:58.000Z
|
2022-02-03T10:13:23.000Z
|
back/models/decoders/__init__.py
|
nizhib/portrait-demo
|
b4dc80187824a07c3562c1580a1a7c4f2f4ecc93
|
[
"MIT"
] | 3
|
2021-09-08T01:46:13.000Z
|
2022-03-12T00:19:09.000Z
|
back/models/decoders/__init__.py
|
nizhib/portrait-demo
|
b4dc80187824a07c3562c1580a1a7c4f2f4ecc93
|
[
"MIT"
] | 17
|
2019-04-01T23:01:57.000Z
|
2021-06-29T13:23:05.000Z
|
from .unet import UNetDecoder
| 15
| 29
| 0.833333
| 4
| 30
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
56c26d2f603b334293c024a1c6d2a029358de796
| 160
|
py
|
Python
|
crosswalk/exceptions.py
|
cofin/django-crosswalk
|
349ebbd5676d3ef3ccf889ec3849b2f1cff4be32
|
[
"MIT"
] | 4
|
2019-04-08T23:24:30.000Z
|
2021-12-22T16:42:12.000Z
|
crosswalk/exceptions.py
|
cofin/django-crosswalk
|
349ebbd5676d3ef3ccf889ec3849b2f1cff4be32
|
[
"MIT"
] | 12
|
2017-12-18T04:27:14.000Z
|
2021-06-10T18:05:46.000Z
|
crosswalk/exceptions.py
|
cofin/django-crosswalk
|
349ebbd5676d3ef3ccf889ec3849b2f1cff4be32
|
[
"MIT"
] | 3
|
2019-08-12T14:36:04.000Z
|
2020-10-17T20:54:09.000Z
|
from django.core.exceptions import ValidationError
class ReservedKeyError(ValidationError):
pass
class NestedAttributesError(ValidationError):
pass
| 16
| 50
| 0.8125
| 14
| 160
| 9.285714
| 0.714286
| 0.292308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 160
| 9
| 51
| 17.777778
| 0.942029
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
56c461f77855157a72b0d0aa43b803caafd947d9
| 32
|
py
|
Python
|
vector_tile21/__init__.py
|
georgejhunt/python-mbtiles
|
d9d320aa1d5c2b47bd6aa5fe3699227dd893639e
|
[
"MIT"
] | null | null | null |
vector_tile21/__init__.py
|
georgejhunt/python-mbtiles
|
d9d320aa1d5c2b47bd6aa5fe3699227dd893639e
|
[
"MIT"
] | null | null | null |
vector_tile21/__init__.py
|
georgejhunt/python-mbtiles
|
d9d320aa1d5c2b47bd6aa5fe3699227dd893639e
|
[
"MIT"
] | null | null | null |
from .vector_tile_pb2 import *
| 10.666667
| 30
| 0.78125
| 5
| 32
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.15625
| 32
| 2
| 31
| 16
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
56cd4cd9c0bb1a6bcb415fc2a5aaf7e8130b4c2f
| 153
|
py
|
Python
|
horizon/openstack_dashboard/dashboards/cdn/cdn_monitor_report/constants.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
horizon/openstack_dashboard/dashboards/cdn/cdn_monitor_report/constants.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
horizon/openstack_dashboard/dashboards/cdn/cdn_monitor_report/constants.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'yanjiajia'
INFO_TEMPLATE_NAME = 'cdn/cdn_monitor_report/index.html'
INFO_DETAIL_TEMPLATE_NAME = 'cdn/cdn_monitor_report/detail_table.html'
| 38.25
| 70
| 0.843137
| 22
| 153
| 5.227273
| 0.545455
| 0.208696
| 0.26087
| 0.313043
| 0.53913
| 0.53913
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 153
| 3
| 71
| 51
| 0.798611
| 0
| 0
| 0
| 0
| 0
| 0.535948
| 0.477124
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
56fc97d5ce456d60eb962129c5d489b219bdc88a
| 44
|
py
|
Python
|
problem_21/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_21/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_21/__init__.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
from .problem_21 import rooms, rooms_sorted
| 22
| 43
| 0.840909
| 7
| 44
| 5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0.113636
| 44
| 1
| 44
| 44
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7117715ac4b95a9cc4606c0ed6f96a30c7ebd861
| 48
|
py
|
Python
|
app/__init__.py
|
rubaalibrahim/Nawafea
|
40ef7437605d5b40b19d337564153f1586cbbf30
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
rubaalibrahim/Nawafea
|
40ef7437605d5b40b19d337564153f1586cbbf30
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
rubaalibrahim/Nawafea
|
40ef7437605d5b40b19d337564153f1586cbbf30
|
[
"MIT"
] | null | null | null |
"""Main application package"""
from . import app
| 24
| 30
| 0.729167
| 6
| 48
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 2
| 31
| 24
| 0.833333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
858cd6179b007e01c58b4110c4435081f033f260
| 81
|
py
|
Python
|
supervisord_dependent_startup/__main__.py
|
bendikro/ordered-startup-supervisord
|
736256fcfafc9a8a738544393d4293cb15db2761
|
[
"Apache-2.0"
] | 54
|
2018-03-02T16:27:23.000Z
|
2022-02-21T14:39:12.000Z
|
supervisord_dependent_startup/__main__.py
|
bendikro/ordered-startup-supervisord
|
736256fcfafc9a8a738544393d4293cb15db2761
|
[
"Apache-2.0"
] | 10
|
2018-05-19T06:03:37.000Z
|
2021-10-07T14:43:04.000Z
|
supervisord_dependent_startup/__main__.py
|
bendikro/ordered-startup-supervisord
|
736256fcfafc9a8a738544393d4293cb15db2761
|
[
"Apache-2.0"
] | 16
|
2018-03-24T19:59:03.000Z
|
2022-02-18T03:19:51.000Z
|
# Package entry point (this file is the package's __main__.py): executed via
# `python -m supervisord_dependent_startup`; delegates to the sibling module's
# run() function.
from . import supervisord_dependent_startup
supervisord_dependent_startup.run()
| 20.25
| 43
| 0.876543
| 9
| 81
| 7.444444
| 0.666667
| 0.597015
| 0.80597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 81
| 3
| 44
| 27
| 0.893333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a489717b932754b2d93fdf73d221bb07d0164c4a
| 21
|
py
|
Python
|
lib/__init__.py
|
user3301/DummyFileGenerator
|
c36466788ff07bede010d5b496a1fe478ce33d8b
|
[
"MIT"
] | 1
|
2018-04-16T03:27:00.000Z
|
2018-04-16T03:27:00.000Z
|
lib/__init__.py
|
user3301/DummyFileGenerator
|
c36466788ff07bede010d5b496a1fe478ce33d8b
|
[
"MIT"
] | null | null | null |
lib/__init__.py
|
user3301/DummyFileGenerator
|
c36466788ff07bede010d5b496a1fe478ce33d8b
|
[
"MIT"
] | null | null | null |
from lib.core import*
| 21
| 21
| 0.809524
| 4
| 21
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 21
| 1
| 21
| 21
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4a615609ba1dfa2b7d4e905722be29c908bccdc
| 58
|
py
|
Python
|
python/sequences/__init__.py
|
jwg4/oeis_misc
|
eb4ffc3c093179e5d4c4fd7f0a290d2b332031be
|
[
"MIT"
] | null | null | null |
python/sequences/__init__.py
|
jwg4/oeis_misc
|
eb4ffc3c093179e5d4c4fd7f0a290d2b332031be
|
[
"MIT"
] | null | null | null |
python/sequences/__init__.py
|
jwg4/oeis_misc
|
eb4ffc3c093179e5d4c4fd7f0a290d2b332031be
|
[
"MIT"
] | null | null | null |
from .simple import A000326
from .A294381 import A294381
| 14.5
| 28
| 0.810345
| 8
| 58
| 5.875
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.367347
| 0.155172
| 58
| 3
| 29
| 19.333333
| 0.591837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4a7573ae36ecddbeb1472bac3ed2615a87e4e06
| 65
|
py
|
Python
|
frappe/integrations/doctype/ldap_settings/test_ldap_settings.py
|
ebymathew5225/frappe
|
880d824b77d2a6392a5d8ae9ea7db22199513c91
|
[
"MIT"
] | null | null | null |
frappe/integrations/doctype/ldap_settings/test_ldap_settings.py
|
ebymathew5225/frappe
|
880d824b77d2a6392a5d8ae9ea7db22199513c91
|
[
"MIT"
] | 6
|
2020-03-24T17:30:01.000Z
|
2022-02-10T19:13:10.000Z
|
frappe/integrations/doctype/ldap_settings/test_ldap_settings.py
|
ebymathew5225/frappe
|
880d824b77d2a6392a5d8ae9ea7db22199513c91
|
[
"MIT"
] | null | null | null |
import unittest
class TestLDAPSettings(unittest.TestCase):
    """Placeholder test case for the LDAP Settings doctype; no tests yet."""
    pass
| 16.25
| 42
| 0.846154
| 7
| 65
| 7.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 65
| 4
| 43
| 16.25
| 0.932203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f1071cd71aa0cb57f5541fb95496c76954d3f211
| 46
|
py
|
Python
|
intake_pangaeapy/__init__.py
|
ESM-VFC/intake_pangaeapy
|
e07f7a3f0c1cf26082a1bc7133b89fe27fba3e3c
|
[
"MIT"
] | null | null | null |
intake_pangaeapy/__init__.py
|
ESM-VFC/intake_pangaeapy
|
e07f7a3f0c1cf26082a1bc7133b89fe27fba3e3c
|
[
"MIT"
] | 2
|
2020-04-30T08:11:39.000Z
|
2020-09-13T10:23:53.000Z
|
intake_pangaeapy/__init__.py
|
ESM-VFC/intake_pangaeapy
|
e07f7a3f0c1cf26082a1bc7133b89fe27fba3e3c
|
[
"MIT"
] | 1
|
2020-05-20T09:38:27.000Z
|
2020-05-20T09:38:27.000Z
|
from .pangaeapy_driver import PangaeapySource
| 23
| 45
| 0.891304
| 5
| 46
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f16991d5f25eea857b6e61390ad7b50efb301e19
| 31
|
py
|
Python
|
segmentify/semantic/__init__.py
|
kne42/segmentify
|
cdacf55be64d066958d0114c0748141203708a06
|
[
"BSD-3-Clause"
] | 26
|
2019-07-29T21:52:08.000Z
|
2022-03-30T16:47:12.000Z
|
segmentify/semantic/__init__.py
|
joaomamede/segmentify
|
bd57cfcc94ad2f6dfcb080ae786f410e044659c4
|
[
"BSD-3-Clause"
] | 24
|
2019-07-25T20:38:43.000Z
|
2021-02-09T21:53:55.000Z
|
segmentify/semantic/__init__.py
|
joaomamede/segmentify
|
bd57cfcc94ad2f6dfcb080ae786f410e044659c4
|
[
"BSD-3-Clause"
] | 11
|
2019-06-18T22:37:34.000Z
|
2021-12-14T05:35:24.000Z
|
from .main import fit, predict
| 15.5
| 30
| 0.774194
| 5
| 31
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 1
| 31
| 31
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74c2cadd413a55ef65f805bc938d1b257d2d72f7
| 1,024
|
py
|
Python
|
converter/TweeUtilities/Nodes/__init__.py
|
nerdymcnerdyson/pythonPlay
|
af9ab8db6d5818184d662342835ecccc90d8f5aa
|
[
"Apache-2.0"
] | null | null | null |
converter/TweeUtilities/Nodes/__init__.py
|
nerdymcnerdyson/pythonPlay
|
af9ab8db6d5818184d662342835ecccc90d8f5aa
|
[
"Apache-2.0"
] | null | null | null |
converter/TweeUtilities/Nodes/__init__.py
|
nerdymcnerdyson/pythonPlay
|
af9ab8db6d5818184d662342835ecccc90d8f5aa
|
[
"Apache-2.0"
] | null | null | null |
from TweeUtilities.Nodes.Utilities import *
from TweeUtilities.Nodes.NodeBase import *
from TweeUtilities.Nodes.NodeRegExes import *
from TweeUtilities.Nodes.Action import *
from TweeUtilities.Nodes.Category import *
from TweeUtilities.Nodes.ChoiceNode import *
from TweeUtilities.Nodes.EndSilentlyNode import *
from TweeUtilities.Nodes.ConditionalNodes import *
from TweeUtilities.Nodes.EitherNode import *
from TweeUtilities.Nodes.LinkNode import *
from TweeUtilities.Nodes.SetNode import *
from TweeUtilities.Nodes.SilentlyNode import *
from TweeUtilities.Nodes.TextNode import *
from TweeUtilities.Nodes.WaypointNode import *
# class SequenceNodeTemplate:
# def __init__(self):
# #node variables here
# super().__init__()
# self.type = SequenceNodeType.null
# #factory method.. returns instance of class or None
# @classmethod
# def tryIsNodeType():
# return None
# #instance method
# def javascriptOutputString():
# return ''
| 23.272727
| 57
| 0.735352
| 102
| 1,024
| 7.303922
| 0.401961
| 0.319463
| 0.413423
| 0.488591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186523
| 1,024
| 43
| 58
| 23.813953
| 0.894358
| 0.329102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
740d6d742a5e37566cc970400d16ade8e81ccdff
| 9,608
|
py
|
Python
|
learn_basic_math.py
|
iliankostadinov/forKalin
|
979dd304ea764b7646bbc4b73778a4445ed4f06a
|
[
"Apache-2.0"
] | null | null | null |
learn_basic_math.py
|
iliankostadinov/forKalin
|
979dd304ea764b7646bbc4b73778a4445ed4f06a
|
[
"Apache-2.0"
] | null | null | null |
learn_basic_math.py
|
iliankostadinov/forKalin
|
979dd304ea764b7646bbc4b73778a4445ed4f06a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Sum, substracion numbers"""
import tkinter as tk
import random
class OneLine():
    """Visualize one line for sum of two numbers."""

    def __init__(self, window, row_num=0):
        """Build the widgets for one addition exercise row.

        window: parent Tk container.
        row_num: grid row this exercise occupies.
        """
        self.first_num = random.randint(1, 10)
        self.second_num = random.randint(1, 10)
        # NOTE(review): font=(600) is just the int 600, not a tuple; Tk
        # tolerates it — TODO confirm the intended font spec.
        self.num1 = tk.Label(window, text=self.first_num, font=(600))
        self.num1.grid(row=row_num, column=0, padx=10, pady=10)
        self.plus = tk.Label(window, text="+")
        self.plus.grid(row=row_num, column=1, padx=10, pady=10)
        self.num2 = tk.Label(window, text=self.second_num, font=(600))
        self.num2.grid(row=row_num, column=2, padx=10, pady=10)
        self.equal = tk.Label(window, text="=", font=(600))
        self.equal.grid(row=row_num, column=3, padx=10, pady=10)
        self.imp_field = tk.Entry(window, font=(600))
        self.imp_field.grid(row=row_num, column=4)

    def check_sum(self):
        """Executed when check button pressed.

        Returns True and colors the entry green when the typed answer
        equals first_num + second_num; otherwise colors it red, clears
        the entry and returns False.  Empty or non-numeric input is
        treated as a wrong answer instead of raising ValueError.
        """
        try:
            num = int(self.imp_field.get())
        except ValueError:
            num = None  # not a number -> falls through to the wrong-answer path
        if num == self.first_num + self.second_num:
            self.imp_field.configure({"background": "green"})
            return True
        self.imp_field.configure({"background": "red"})
        self.imp_field.delete(0, "end")
        return False
class OneLineSubstr():
    """Visualize one line for subtracting two numbers."""

    def __init__(self, window, row_num=0):
        """Build the widgets for one subtraction exercise row.

        window: parent Tk container.
        row_num: grid row this exercise occupies.
        """
        self.first_num = random.randint(1, 20)
        self.second_num = random.randint(1, 20)
        # Swap so the difference is never negative.
        if self.first_num < self.second_num:
            self.first_num, self.second_num = self.second_num, self.first_num
        self.num1 = tk.Label(window, text=self.first_num, font=(600))
        self.num1.grid(row=row_num, column=0, padx=10, pady=10)
        self.plus = tk.Label(window, text="--", font=("Arial", 13, "bold"))
        self.plus.grid(row=row_num, column=1, padx=10, pady=10)
        self.num2 = tk.Label(window, text=self.second_num, font=(600))
        self.num2.grid(row=row_num, column=2, padx=10, pady=10)
        self.equal = tk.Label(window, text="=", font=(600))
        self.equal.grid(row=row_num, column=3, padx=10, pady=10)
        self.imp_field = tk.Entry(window, font=(600))
        self.imp_field.grid(row=row_num, column=4)

    def check_substr(self):
        """Executed when check button pressed.

        Returns True and colors the entry green when the typed answer
        equals first_num - second_num; otherwise colors it red, clears
        the entry and returns False.  Empty or non-numeric input is
        treated as a wrong answer instead of raising ValueError.
        """
        try:
            num = int(self.imp_field.get())
        except ValueError:
            num = None  # not a number -> falls through to the wrong-answer path
        if num == self.first_num - self.second_num:
            self.imp_field.configure({"background": "green"})
            return True
        self.imp_field.configure({"background": "red"})
        self.imp_field.delete(0, "end")
        return False
class OneLineUnknown():
    """Visualize one line for finding the unknown addend (x + a = b)."""

    def __init__(self, window, row_num=0):
        """Build the widgets for one unknown-addend exercise row.

        window: parent Tk container.
        row_num: grid row this exercise occupies.

        The entry field sits in column 2 (between '+' and '='), so the
        child fills in the missing addend: first_num + x = second_num.
        """
        self.first_num = random.randint(1, 20)
        self.second_num = random.randint(1, 20)
        # Swap so second_num >= first_num and the unknown is non-negative.
        if self.first_num > self.second_num:
            self.first_num, self.second_num = self.second_num, self.first_num
        self.num1 = tk.Label(window, text=self.first_num, font=(600))
        self.num1.grid(row=row_num, column=0, padx=10, pady=10)
        self.plus = tk.Label(window, text="+", font=("Arial", 13, "bold"))
        self.plus.grid(row=row_num, column=1, padx=10, pady=10)
        self.num2 = tk.Label(window, text=self.second_num, font=(600))
        self.num2.grid(row=row_num, column=4, padx=10, pady=10)
        self.equal = tk.Label(window, text="=", font=(600))
        self.equal.grid(row=row_num, column=3, padx=10, pady=10)
        self.imp_field = tk.Entry(window, font=(600))
        self.imp_field.grid(row=row_num, column=2)

    def check_unknown(self):
        """Executed when check button pressed.

        Returns True and colors the entry green when the typed answer
        equals second_num - first_num; otherwise colors it red, clears
        the entry and returns False.  Empty or non-numeric input is
        treated as a wrong answer instead of raising ValueError.
        """
        try:
            num = int(self.imp_field.get())
        except ValueError:
            num = None  # not a number -> falls through to the wrong-answer path
        if num == self.second_num - self.first_num:
            self.imp_field.configure({"background": "green"})
            return True
        self.imp_field.configure({"background": "red"})
        self.imp_field.delete(0, "end")
        return False
class OneLineUnknownMinus():
    """Visualize one line for finding the unknown subtrahend (a -- x = b)."""

    def __init__(self, window, row_num=0):
        """Build the widgets for one unknown-subtrahend exercise row.

        window: parent Tk container.
        row_num: grid row this exercise occupies.

        The entry field sits in column 2 (between '--' and '='), so the
        child fills in the missing number: first_num -- x = second_num.
        """
        self.first_num = random.randint(1, 20)
        self.second_num = random.randint(1, 20)
        # Swap so the expected difference is non-negative.
        if self.first_num < self.second_num:
            self.first_num, self.second_num = self.second_num, self.first_num
        self.num1 = tk.Label(window, text=self.first_num, font=(600))
        self.num1.grid(row=row_num, column=0, padx=10, pady=10)
        self.plus = tk.Label(window, text="--", font=("Arial", 13, "bold"))
        self.plus.grid(row=row_num, column=1, padx=10, pady=10)
        self.num2 = tk.Label(window, text=self.second_num, font=(600))
        self.num2.grid(row=row_num, column=4, padx=10, pady=10)
        self.equal = tk.Label(window, text="=", font=(600))
        self.equal.grid(row=row_num, column=3, padx=10, pady=10)
        self.imp_field = tk.Entry(window, font=(600))
        self.imp_field.grid(row=row_num, column=2)

    def check_unknown_minus(self):
        """Executed when check button pressed.

        Returns True and colors the entry green when the typed answer
        equals first_num - second_num; otherwise colors it red, clears
        the entry and returns False.  Empty or non-numeric input is
        treated as a wrong answer instead of raising ValueError.
        """
        try:
            num = int(self.imp_field.get())
        except ValueError:
            num = None  # not a number -> falls through to the wrong-answer path
        if num == self.first_num - self.second_num:
            self.imp_field.configure({"background": "green"})
            return True
        self.imp_field.configure({"background": "red"})
        self.imp_field.delete(0, "end")
        return False
if __name__ == "__main__":
ALL_LINES = [("fir_line", 1), ("sec_line", 2), ("thrid_line", 3),
("four_l", 4), ("five", 5), ("six", 6), ("sev", 7),
("eight", 8), ("nine", 9), ("ten", 10)]
ALL_LINES_COMB = [("fir_line", 1), ("sec_line", 2), ("thrid_line", 3),
("four_l", 4), ("five", 5), ("six", 6), ("sev", 7),
("eight", 8), ("nine", 9), ("ten", 10), ("ele", 11),
("twelve", 12), ("thirteen", 13), ("fourteen", 14),
("fifteen", 15), ("sixteen", 16)]
root_win = tk.Tk()
root_win.title("Задачи за Калин")
root_win.geometry('1800x900')
def summ_fun():
"""Creating name object for drawing summ examples"""
for name, number in ALL_LINES:
name = OneLine(root_win, number)
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_sum)
check_but.grid(row=number, column=5)
def substr_fun():
"""Creating name object for drawing substrac examples"""
for name, number in ALL_LINES:
name = OneLineSubstr(root_win, number)
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_substr)
check_but.grid(row=number, column=5)
def unknow_fun():
"""Creating name object for drawing unknown examples"""
for name, number in ALL_LINES:
name = OneLineUnknown(root_win, number)
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_unknown)
check_but.grid(row=number, column=5)
def unknow_minus_fun():
"""Creating name object for drawing unknown examples"""
for name, number in ALL_LINES:
name = OneLineUnknownMinus(root_win, number)
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_unknown_minus)
check_but.grid(row=number, column=5)
def combine_fun():
"""Create name object for drawing combine examples"""
func_list = [OneLine, OneLineSubstr, OneLineUnknown, OneLineUnknownMinus]
for name, number in ALL_LINES_COMB:
func_name = random.choice(func_list)
name = func_name(root_win, number)
print(func_name)
print(isinstance(func_name, OneLine))
if func_name == OneLine:
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_sum)
check_but.grid(row=number, column=5)
if func_name == OneLineSubstr:
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_substr)
check_but.grid(row=number, column=5)
if func_name == OneLineUnknown:
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_unknown)
check_but.grid(row=number, column=5)
if func_name == OneLineUnknownMinus:
check_but = tk.Button(root_win, text="ПРОВЕРИ", font=(600),
command=name.check_unknown_minus)
check_but.grid(row=number, column=5)
SUMM_BUT = tk.Button(root_win, text="СЪБИРАНЕ", font=(60),
command=summ_fun)
SUMM_BUT.grid(row=0, column=1)
SUBSTR_BUT = tk.Button(root_win, text="ИЗВАЖДАНЕ", font=(60),
command=substr_fun)
SUBSTR_BUT.grid(row=0, column=2)
UNKNOWN_BUT = tk.Button(root_win, text="НЕИЗВЕСТНО", font=(60),
command=unknow_fun)
UNKNOWN_BUT.grid(row=0, column=3)
COMB_BUT = tk.Button(root_win, text="КОМБИНИРАНИ", font=(60),
command=combine_fun)
COMB_BUT.grid(row=0, column=4)
UNKNOWN_MINUS_BUT = tk.Button(root_win, text="НЕИЗВЕСТНО С МИНУС",
font=(60), command=unknow_minus_fun)
UNKNOWN_MINUS_BUT.grid(row=0, column=5)
root_win.mainloop()
| 45.107981
| 81
| 0.588364
| 1,292
| 9,608
| 4.206656
| 0.113777
| 0.04379
| 0.05299
| 0.047838
| 0.830359
| 0.814719
| 0.781233
| 0.769457
| 0.752162
| 0.74885
| 0
| 0.039841
| 0.268526
| 9,608
| 212
| 82
| 45.320755
| 0.733495
| 0.063072
| 0
| 0.637427
| 0
| 0
| 0.050498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076023
| false
| 0
| 0.011696
| 0
| 0.157895
| 0.011696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.