hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
920ebfe6550083cb71000c7277acc5baedc761c8
| 129
|
py
|
Python
|
libs/presenters/flashcardPackSettingsPresenter.py
|
Subjuntivo/The-Vocab
|
899abbab57976a892753776849abf9e000d2bef0
|
[
"BSD-2-Clause"
] | 1
|
2021-11-07T17:51:38.000Z
|
2021-11-07T17:51:38.000Z
|
libs/presenters/flashcardPackSettingsPresenter.py
|
Subjuntivo/The-Vocab
|
899abbab57976a892753776849abf9e000d2bef0
|
[
"BSD-2-Clause"
] | null | null | null |
libs/presenters/flashcardPackSettingsPresenter.py
|
Subjuntivo/The-Vocab
|
899abbab57976a892753776849abf9e000d2bef0
|
[
"BSD-2-Clause"
] | 2
|
2021-11-07T17:51:53.000Z
|
2021-11-23T16:55:16.000Z
|
from libs.model.flashcardModel import FlashcardModel
class FlashcardPackSettingPresenter():
def __init_(self):
pass
| 21.5
| 52
| 0.767442
| 12
| 129
| 8
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170543
| 129
| 6
| 53
| 21.5
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a610a1e22be4f0404c999f9e4cda65b2f1a2836e
| 35
|
py
|
Python
|
plugins/DataBase/__init__.py
|
pr0stre1/tbot
|
90aacc1e9b8ae2cc323974b0872fa8b496a2ecb3
|
[
"MIT"
] | null | null | null |
plugins/DataBase/__init__.py
|
pr0stre1/tbot
|
90aacc1e9b8ae2cc323974b0872fa8b496a2ecb3
|
[
"MIT"
] | 1
|
2022-03-30T18:56:14.000Z
|
2022-03-30T18:56:14.000Z
|
plugins/DataBase/__init__.py
|
pr0stre1/tbot
|
90aacc1e9b8ae2cc323974b0872fa8b496a2ecb3
|
[
"MIT"
] | null | null | null |
from plugins.DataBase import mongo
| 17.5
| 34
| 0.857143
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a655fabfed25e55170987e6fdc27e579824f24f0
| 40
|
py
|
Python
|
src/ui/__init__.py
|
s-graveyard/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | 1
|
2018-02-14T17:02:37.000Z
|
2018-02-14T17:02:37.000Z
|
src/ui/__init__.py
|
SanjayGubaju/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | null | null | null |
src/ui/__init__.py
|
SanjayGubaju/PencilUi
|
f75bac419fb161edd28f225f4b35bced38e3ac8c
|
[
"Unlicense"
] | null | null | null |
# Contains
# Application, Canvas, Ruler
| 13.333333
| 28
| 0.75
| 4
| 40
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 40
| 2
| 29
| 20
| 0.882353
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a663ac40efb1bf7f106624b03ad7e0fd8dec1136
| 142
|
py
|
Python
|
ex/ex-39.py
|
LiR4/EX-python
|
0d80b81a4622f127ec397aa21e7703ca4b786ab8
|
[
"MIT"
] | 2
|
2021-11-11T19:40:12.000Z
|
2021-12-01T16:37:15.000Z
|
ex/ex-39.py
|
LiR4/ex-python
|
0d80b81a4622f127ec397aa21e7703ca4b786ab8
|
[
"MIT"
] | null | null | null |
ex/ex-39.py
|
LiR4/ex-python
|
0d80b81a4622f127ec397aa21e7703ca4b786ab8
|
[
"MIT"
] | null | null | null |
def area():
return larg * comp
larg = float(input('informe a largura '))
comp = float(input('informe o comprimento '))
print(area())
| 23.666667
| 46
| 0.647887
| 19
| 142
| 4.842105
| 0.684211
| 0.217391
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 142
| 6
| 47
| 23.666667
| 0.807018
| 0
| 0
| 0
| 0
| 0
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.2
| 0.4
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
a68976fff2b91c38ea5386cb6de90db1fb16279d
| 81,610
|
py
|
Python
|
FlowOpt/flow_operation.py
|
ga1008/flow_operate
|
2c86a99fec97bb75dbe5107a6e76d61581568542
|
[
"MIT"
] | null | null | null |
FlowOpt/flow_operation.py
|
ga1008/flow_operate
|
2c86a99fec97bb75dbe5107a6e76d61581568542
|
[
"MIT"
] | null | null | null |
FlowOpt/flow_operation.py
|
ga1008/flow_operate
|
2c86a99fec97bb75dbe5107a6e76d61581568542
|
[
"MIT"
] | null | null | null |
import json
import os
import random
import re
import time
from argparse import ArgumentParser, RawTextHelpFormatter
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import pyautogui
import redis
import requests
from BaseColor.base_colors import green, blue, hgreen, hred, red, hblue, yellow
from PIL import Image
from skimage import draw
from skimage.feature import match_template
from FlowOpt.tools.file_lock import FLock
from FlowOpt.tools.time_format import tell_the_datetime, tell_timestamp, waiting
class ImageTool(object):
def __init__(self):
self.threshold_value = 90
self._image_show = None
self.color = {
'red': [255, 0, 0],
'yellow': [255, 255, 0],
'green': [0, 255, 0],
'cyan': [0, 255, 255],
'blue': [0, 0, 255],
'magenta': [255, 0, 255],
'white': [255, 255, 255],
'silver': [192, 192, 192],
'gray': [128, 128, 128],
'black': [0, 0, 0],
}
def locate(self,
template_path,
template_resize=1.0,
img_path=None,
locate_center=True,
threshold_value=None,
as_gray=False,
as_binary=False, img_shape_times=1.0, return_score_only=False, screenshot_region=None
):
if threshold_value:
self.threshold_value = threshold_value
if img_path:
img_array = self._load_img(img_path, as_gray=as_gray, as_binary=as_binary, shape_times=img_shape_times)
self._image_show = self._load_img(img_path, as_gray=as_gray, as_binary=as_binary,
shape_times=img_shape_times)
else:
img_array = self._get_screen_shot(
as_gray=as_gray, as_binary=as_binary, shape_times=img_shape_times, region=screenshot_region
)
template_array = self._load_img(template_path, as_gray=as_gray, as_binary=as_binary,
shape_times=template_resize)
result = self._get_result_score(template_array=template_array, image_array=img_array)
score = (round(result.max(), 4) if result is not None else 0) * 100
if return_score_only:
return {"score": score, "template_path": template_path}
if score and score > self.threshold_value:
ij = np.unravel_index(np.argmax(result), result.shape)
if not as_gray:
c, x, y = ij[::-1]
tem_h, tem_w, tc = template_array.shape
ih, iw, ic = img_array.shape
else:
x, y = ij[::-1]
tem_h, tem_w = template_array.shape
ih, iw = img_array.shape
x, y = int(x), int(y)
center = [int(x + tem_w / 2), int(y + tem_h / 2)]
print(f"[ {green(tell_the_datetime())} ]\n "
f" matching image: [ {blue(img_path or 'ScreenShot')} ]\n "
f" using template: [ {blue(template_path)} ]\n "
f" >>> locate success! score: {hgreen(score)}\n")
self._draw_box(x, y, tem_h, tem_w, ih, iw, 2, color="red")
return center if locate_center else [int(x), int(y)]
else:
print(f"[ {green(tell_the_datetime())} ]\n "
f" matching image: [ {blue(img_path or 'ScreenShot')} ]\n "
f" using template: [ {blue(template_path)} ]\n "
f" >>> score not pass! score: {hred(score)}\n")
def patch_locate(self, template_path_list,
template_resize=1.0,
img_path=None,
locate_center=True,
threshold_value=None, as_gray=False,
as_binary=False, img_shape_times=1.0, screenshot_region=None):
if threshold_value:
self.threshold_value = threshold_value
if img_path:
img_array = self._load_img(img_path, as_gray=as_gray, as_binary=as_binary, shape_times=img_shape_times)
self._image_show = self._load_img(img_path, as_gray=as_gray, as_binary=as_binary,
shape_times=img_shape_times)
else:
img_array = self._get_screen_shot(
as_gray=as_gray, as_binary=as_binary, shape_times=img_shape_times, region=screenshot_region
)
for template_path in template_path_list:
template_array = self._load_img(template_path, as_gray=as_gray, as_binary=as_binary,
shape_times=template_resize)
result = self._get_result_score(template_array=template_array, image_array=img_array)
score = (round(result.max(), 4) if result is not None else 0) * 100
if score and score > self.threshold_value:
ij = np.unravel_index(np.argmax(result), result.shape)
if not as_gray:
c, x, y = ij[::-1]
tem_h, tem_w, tc = template_array.shape
ih, iw, ic = img_array.shape
else:
x, y = ij[::-1]
tem_h, tem_w = template_array.shape
ih, iw = img_array.shape
x, y = int(x), int(y)
center = [int(x + tem_w / 2), int(y + tem_h / 2)]
print(f"[ {green(tell_the_datetime())} ]\n "
f" matching image: [ {blue(img_path or 'ScreenShot')} ]\n "
f" using template: [ {blue(template_path)} ]\n "
f" >>> locate success! score: {hgreen(score)}\n")
self._draw_box(x, y, tem_h, tem_w, ih, iw, 2, color="red")
return center if locate_center else [int(x), int(y)]
else:
print(f"[ {green(tell_the_datetime())} ]\n "
f" matching image: [ {blue(img_path or 'ScreenShot')} ]\n "
f" using template: [ {blue(template_path)} ]\n "
f" >>> score not pass! score: {hred(score)}\n")
def patch_locate_color(self, template_paths, img_path=None, color_tolerance=None, color_purity=None,
screenshot_region=None, img_shape_times=None):
if not img_shape_times:
img_shape_times = 1.0
if img_path:
img_color_map = self._load_img_color(img_path)
self._image_show = self._load_img(img_path, as_gray=False, as_binary=False, shape_times=img_shape_times)
else:
img_color_map = self._get_screen_shot(
region=screenshot_region, load_as_color_map=True, shape_times=img_shape_times
)
color_tolerance = 0 if not color_tolerance else color_tolerance
color_purity = 1 if not color_purity else color_purity
all_positions = []
for template_path in template_paths:
template_color_map = self._load_img_color(template_path)
max_min_r = [0, 255]
max_min_g = [0, 255]
max_min_b = [0, 255]
for x_y, rgb in template_color_map.items():
r, g, b = rgb
if r > max_min_r[0]:
max_min_r[0] = r
elif r < max_min_r[1]:
max_min_r[1] = r
if g > max_min_g[0]:
max_min_g[0] = g
elif g < max_min_g[1]:
max_min_g[1] = g
if b > max_min_b[0]:
max_min_b[0] = b
elif b < max_min_b[1]:
max_min_b[1] = b
max_min_r = [max_min_r[0] + color_tolerance, max_min_r[1] - color_tolerance]
max_min_r = [max_min_r[0] if max_min_r[0] < 255 else 255, max_min_r[1] if max_min_r[1] > 0 else 0]
max_min_g = [max_min_g[0] + color_tolerance, max_min_g[1] - color_tolerance]
max_min_g = [max_min_g[0] if max_min_g[0] < 255 else 255, max_min_g[1] if max_min_g[1] > 0 else 0]
max_min_b = [max_min_b[0] + color_tolerance, max_min_b[1] - color_tolerance]
max_min_b = [max_min_b[0] if max_min_b[0] < 255 else 255, max_min_b[1] if max_min_b[1] > 0 else 0]
color_zones = []
for x_y, rgb in img_color_map.items():
r, g, b = rgb
in_zone_conditions = [
max_min_r[0] > r > max_min_r[1],
max_min_g[0] > g > max_min_g[1],
max_min_b[0] > b > max_min_b[1],
]
if all(in_zone_conditions):
color_zones.append(x_y)
start_lines = []
start_column = []
for cz_point in color_zones:
x, y = cz_point.split('-')
in_line_sta = False
for line_set in start_lines:
if cz_point in line_set:
line_set.add(f"{int(x) + 1}-{y}")
in_line_sta = True
if not in_line_sta:
start_lines.append({cz_point, f"{int(x) + 1}-{y}"})
in_col_sta = False
for col_set in start_column:
if cz_point in col_set:
col_set.add(f"{x}-{int(y) + 1}")
in_col_sta = True
if not in_col_sta:
start_column.append({cz_point, f"{x}-{int(y) + 1}"})
v_center_pts = set()
for l_set in start_lines:
if len(l_set) > color_purity:
ld = {int(x.split("-")[0]): x for x in l_set}
all_value = [x for x in ld.keys()]
all_value.sort()
center_pt = ld.get(all_value[int(len(all_value) / 2)])
v_center_pts.add(center_pt)
spx, spy = [int(i) for i in ld.get(all_value[0]).split("-")]
epx, epy = [int(i) for i in ld.get(all_value[-1]).split("-")]
self.draw_color(px=spx, py=spy, color=[255, 0, 0])
self.draw_color(px=epx, py=epy, color=[255, 0, 0])
h_center_pts = set()
for c_set in start_column:
if len(c_set) > color_purity:
cd = {int(x.split("-")[1]): x for x in c_set}
all_value = [x for x in cd.keys()]
all_value.sort()
center_pt = cd.get(all_value[int(len(all_value) / 2)])
h_center_pts.add(center_pt)
spx, spy = [int(i) for i in cd.get(all_value[0]).split("-")]
epx, epy = [int(i) for i in cd.get(all_value[-1]).split("-")]
self.draw_color(px=spx, py=spy, color=[255, 0, 0])
self.draw_color(px=epx, py=epy, color=[255, 0, 0])
final_center_pts = set()
possible_pts = set()
possible_extent = 3
for vp in v_center_pts:
if vp in h_center_pts and vp not in possible_pts:
vpx, vpy = [int(i) for i in vp.split("-")]
for i in range(-possible_extent, possible_extent + 1):
for j in range(-possible_extent, possible_extent + 1):
possible_pts.add(f"{vpx + i}-{vpy + j}")
final_center_pts.add(vp)
for hp in h_center_pts:
if hp in v_center_pts and hp not in possible_pts:
hpx, hpy = [int(i) for i in hp.split("-")]
for i in range(-possible_extent, possible_extent + 1):
for j in range(-possible_extent, possible_extent + 1):
possible_pts.add(f"{hpx + i}-{hpy + j}")
final_center_pts.add(hp)
for fp in final_center_pts:
dx, dy = [int(i) for i in fp.split("-")]
self._draw_cross(x=dx, y=dy, color='yellow', weight=2)
fpt = []
if screenshot_region and isinstance(screenshot_region, list):
for fp in final_center_pts:
fpx, fpy = [int(i) for i in fp.split("-")]
nx = screenshot_region[0] + fpx
ny = screenshot_region[1] + fpy
fpt.append([nx, ny])
else:
fpt = [[int(i) for i in j.split("-")] for j in final_center_pts]
all_positions += fpt
return all_positions
def locate_color(self, template_path, img_path=None, color_tolerance=None, color_purity=None,
screenshot_region=None, img_shape_times=None):
if not img_shape_times:
img_shape_times = 1.0
if img_path:
img_color_map = self._load_img_color(img_path)
self._image_show = self._load_img(img_path, as_gray=False, as_binary=False, shape_times=img_shape_times)
else:
img_color_map = self._get_screen_shot(
region=screenshot_region, load_as_color_map=True, shape_times=img_shape_times
)
color_tolerance = 0 if not color_tolerance else color_tolerance
color_purity = 1 if not color_purity else color_purity
template_color_map = self._load_img_color(template_path)
max_min_r = [0, 255]
max_min_g = [0, 255]
max_min_b = [0, 255]
for x_y, rgb in template_color_map.items():
r, g, b = rgb
if r > max_min_r[0]:
max_min_r[0] = r
elif r < max_min_r[1]:
max_min_r[1] = r
if g > max_min_g[0]:
max_min_g[0] = g
elif g < max_min_g[1]:
max_min_g[1] = g
if b > max_min_b[0]:
max_min_b[0] = b
elif b < max_min_b[1]:
max_min_b[1] = b
max_min_r = [max_min_r[0] + color_tolerance, max_min_r[1] - color_tolerance]
max_min_r = [max_min_r[0] if max_min_r[0] < 255 else 255, max_min_r[1] if max_min_r[1] > 0 else 0]
max_min_g = [max_min_g[0] + color_tolerance, max_min_g[1] - color_tolerance]
max_min_g = [max_min_g[0] if max_min_g[0] < 255 else 255, max_min_g[1] if max_min_g[1] > 0 else 0]
max_min_b = [max_min_b[0] + color_tolerance, max_min_b[1] - color_tolerance]
max_min_b = [max_min_b[0] if max_min_b[0] < 255 else 255, max_min_b[1] if max_min_b[1] > 0 else 0]
color_zones = []
for x_y, rgb in img_color_map.items():
r, g, b = rgb
in_zone_conditions = [
max_min_r[0] > r > max_min_r[1],
max_min_g[0] > g > max_min_g[1],
max_min_b[0] > b > max_min_b[1],
]
if all(in_zone_conditions):
color_zones.append(x_y)
start_lines = []
start_column = []
for cz_point in color_zones:
x, y = cz_point.split('-')
in_line_sta = False
for line_set in start_lines:
if cz_point in line_set:
line_set.add(f"{int(x) + 1}-{y}")
in_line_sta = True
if not in_line_sta:
start_lines.append({cz_point, f"{int(x) + 1}-{y}"})
in_col_sta = False
for col_set in start_column:
if cz_point in col_set:
col_set.add(f"{x}-{int(y) + 1}")
in_col_sta = True
if not in_col_sta:
start_column.append({cz_point, f"{x}-{int(y) + 1}"})
v_center_pts = set()
for l_set in start_lines:
if len(l_set) > color_purity:
ld = {int(x.split("-")[0]): x for x in l_set}
all_value = [x for x in ld.keys()]
all_value.sort()
center_pt = ld.get(all_value[int(len(all_value) / 2)])
v_center_pts.add(center_pt)
spx, spy = [int(i) for i in ld.get(all_value[0]).split("-")]
epx, epy = [int(i) for i in ld.get(all_value[-1]).split("-")]
self.draw_color(px=spx, py=spy, color=[255, 0, 0])
self.draw_color(px=epx, py=epy, color=[255, 0, 0])
h_center_pts = set()
for c_set in start_column:
if len(c_set) > color_purity:
cd = {int(x.split("-")[1]): x for x in c_set}
all_value = [x for x in cd.keys()]
all_value.sort()
center_pt = cd.get(all_value[int(len(all_value) / 2)])
h_center_pts.add(center_pt)
spx, spy = [int(i) for i in cd.get(all_value[0]).split("-")]
epx, epy = [int(i) for i in cd.get(all_value[-1]).split("-")]
self.draw_color(px=spx, py=spy, color=[255, 0, 0])
self.draw_color(px=epx, py=epy, color=[255, 0, 0])
final_center_pts = set()
possible_pts = set()
possible_extent = 3
for vp in v_center_pts:
if vp in h_center_pts and vp not in possible_pts:
vpx, vpy = [int(i) for i in vp.split("-")]
for i in range(-possible_extent, possible_extent + 1):
for j in range(-possible_extent, possible_extent + 1):
possible_pts.add(f"{vpx + i}-{vpy + j}")
final_center_pts.add(vp)
for hp in h_center_pts:
if hp in v_center_pts and hp not in possible_pts:
hpx, hpy = [int(i) for i in hp.split("-")]
for i in range(-possible_extent, possible_extent + 1):
for j in range(-possible_extent, possible_extent + 1):
possible_pts.add(f"{hpx + i}-{hpy + j}")
final_center_pts.add(hp)
for fp in final_center_pts:
dx, dy = [int(i) for i in fp.split("-")]
self._draw_cross(x=dx, y=dy, color='yellow', weight=2)
fpt = []
if screenshot_region and isinstance(screenshot_region, list):
for fp in final_center_pts:
fpx, fpy = [int(i) for i in fp.split("-")]
nx = screenshot_region[0] + fpx
ny = screenshot_region[1] + fpy
fpt.append([nx, ny])
else:
fpt = [[int(i) for i in j.split("-")] for j in final_center_pts]
return fpt
def _get_screen_shot(self, as_gray=False, as_binary=False, shape_times=1.0, region=None, load_as_color_map=False):
if not region:
img_obj = pyautogui.screenshot()
else:
region = region if isinstance(region, tuple) else tuple(region)
img_obj = pyautogui.screenshot(region=region) # (0, 0, 300, 400)
if load_as_color_map:
tmp_array = self._load_img(img_obj, as_gray=as_gray, as_binary=as_binary, shape_times=shape_times)
self._image_show = tmp_array
return self._load_img_color(img_obj)
else:
tmp_array = self._load_img(img_obj, as_gray=as_gray, as_binary=as_binary, shape_times=shape_times)
self._image_show = tmp_array
return tmp_array
def _draw_box(self, x, y, th, tw, ih, iw, weight=1, color='red'):
self._image_show = np.array(Image.fromarray(self._image_show).convert("RGB"))
for Y in range(y, y + weight):
for X in range(x, x + tw + weight):
if Y > ih:
Y = ih
if X > iw:
X = iw
self.draw_color(X, Y, color=self.color.get(color))
for Y in range(y, y + th):
for X in range(x + tw, x + tw + weight):
Y = Y if Y <= ih else ih
X = X if X <= iw else iw
self.draw_color(X, Y, color=self.color.get(color))
for Y in range(y + th, y + th + weight):
for X in range(x, x + tw + weight):
Y = Y if Y <= ih else ih
X = X if X <= iw else iw
self.draw_color(X, Y, color=self.color.get(color))
for Y in range(y, y + th):
for X in range(x, x + weight):
Y = Y if Y <= ih else ih
X = X if X <= iw else iw
self.draw_color(X, Y, color=self.color.get(color))
def _draw_cross(self, x, y, weight=1, color='red'):
self._image_show = np.array(Image.fromarray(self._image_show).convert("RGB"))
extent_pts = {f'{x}-{y}'}
for i in range(-weight, weight + 1):
extent_pts.add(
f"{x + i}-{y}"
)
extent_pts.add(
f"{x}-{y + i}"
)
for pt in extent_pts:
x, y = [int(x) for x in pt.split("-")]
self.draw_color(px=x, py=y, color=self.color.get(color))
def draw_color(self, px, py, color=None):
if color is None:
color = [255, 255, 255]
draw_y = np.array([py, py, py + 1, py + 1])
draw_x = np.array([px, px + 1, px + 1, px])
rr, cc = draw.polygon(draw_y, draw_x)
draw.set_color(self._image_show, [rr, cc], color)
@staticmethod
def _get_result_score(template_array, image_array):
result = None
try:
result = match_template(image_array, template_array)
# result = match_template(template_array, image_array)
except ValueError as e:
print('sth wrong when matching the template : {}'.format(e))
finally:
return result
@staticmethod
def _load_img(file_path, as_gray=False, as_binary=False, shape_times=None):
convert_to = 'RGB'
if as_gray:
convert_to = 'L'
if as_binary:
convert_to = '1'
if isinstance(file_path, str):
img = Image.open(file_path).convert(convert_to)
else:
img = file_path.convert(convert_to)
img = img.resize((int(x * shape_times) for x in img.size)) if shape_times else img
img = np.array(img)
return img
@staticmethod
def _load_img_color(file_path):
if isinstance(file_path, str):
img = Image.open(file_path).convert('RGB')
else:
img = file_path.convert('RGB')
cur_size_x, cur_size_y = img.size
color_map = {}
for y in range(cur_size_y):
for x in range(cur_size_x):
color_map[f"{x}-{y}"] = img.getpixel((x, y))
return color_map
@staticmethod
def _load_img_color_as_iter(file_path):
if isinstance(file_path, str):
img = Image.open(file_path).convert('RGB')
else:
img = file_path.convert('RGB')
cur_size_x, cur_size_y = img.size
for y in range(cur_size_y):
for x in range(cur_size_x):
yield list(img.getpixel((x, y)))
@staticmethod
def load_image_from_url(url):
if re.findall('^https?://', url):
res = requests.request("GET", url)
img = res.content
else:
if not re.findall('^/', url):
base_path = os.getcwd()
path = os.path.join(base_path, url)
else:
path = url
with open(path, 'rb') as rf:
img = rf.read()
bio = BytesIO()
bio.write(img)
return bio
def show(self):
if self._image_show is not None:
plt.imshow(self._image_show, plt.cm.gray)
plt.show()
class FlowTool(object):
def __init__(self, operate_list, project_name=None):
"""
step by step
:param operate_list:
[{
"name": "search image and click",
"method": "SearchClick",
"icon_path": "/root/... .../image.png",
"match_options": {
"threshold_value": 90,
"as_gray": True,
"as_binary": False
"img_shape_times": 1.0
}
"speed": "fast", # "slow", "mid"
"pre_delay": None,
"sub_delay": 2,
},{
"name": "search image with multi icons, if one of them matched, then click",
"method": "MulSearchClick",
"icon_paths": ["/root/... .../image1.png", "/root/... .../image2.png", ...],
"match_options": {
"threshold_value": 90,
"as_gray": True,
"as_binary": False
"img_shape_times": 1.0
}
"speed": "fast", # "slow", "mid"
"pre_delay": None,
"sub_delay": 2,
},
{
"name": "open chrome and enter url",
"method": "EnterUrl",
"url": "http://www.xxx.com",
"speed": "fast",
"pre_delay": None,
"sub_delay": 2,
},
{
"name": "wait the icon show",
"method": "WaitIcon",
"icon_path": "/root/... .../icon.png",
"match_options": {
"threshold_value": 90,
"as_gray": True,
"as_binary": False
"img_shape_times": 1.0
}
"interval": 1,
"after_showed": "NextStep", # "Return"
"time_out": 120,
"if_timeout": "End", # "NextStep", "Return", "JumpToStep4"
},
{
"name": "wait until the icon gone",
"method": "WaitIconGone",
"icon_path": "/root/... .../icon.png",
"match_options": {
"threshold_value": 90,
"as_gray": True,
"as_binary": False
"img_shape_times": 1.0
}
"interval": 1,
"after_gone": "NextStep", # "Return"
"time_out": 120,
"if_timeout": "End", # "NextStep", "Return", "JumpToStep5"
},
{
"name": "save data to a file with vim",
"method": "SaveWithVim",
"save_path": "/root/... .../icon.json",
},
{
"name": "terminal opera",
"method": "TermCommand",
"Command": "redis-cli -p xxxx rpush GrCookies 'diahwdioawafdoanwf;ona;owdaow'",
},
{
"name": "move mouse to a position and click",
"method": "Click",
"position": "TopLeft", # "TopRight", "BottomLeft", "BottomRight", or [1000, 1000],
"pre_delay": None,
"sub_delay": 2,
},
...]
"""
self.project_name = project_name if project_name else f"Project_{tell_the_datetime(compact_mode=True, date_sep='_')}"
self.operate_list = operate_list
self.it = ImageTool()
self.default_match_opt = {
"template_resize": 1.0,
"threshold_value": 90,
"as_gray": True,
"as_binary": False,
"img_shape_times": 1.0,
}
self.default_redis_params = {
"host": 'localhost',
"port": 6379,
"db": 0,
"decode_responses": True
}
self.base_path = os.path.split(os.path.abspath(__file__))[0]
self.default_chrome_icon = os.path.join(self.base_path, "resource/icons/chrome_icon.png")
self.screen_width, self.screen_height = pyautogui.size()
self.ms_dic = dict()
self.step_call_times = dict()
self.total_steps = 0
self.resources = dict()
self.methods = self._method_map()
self._ready_steps()
def _method_map(self):
return {
"SearchClick": self._search_and_click,
"SearchDrag": self._search_and_drag,
"MulSearchClick": self._multi_search_and_click,
"MulSearchDrag": self._multi_search_and_drag,
"EnterUrl": self._open_chrome_and_enter_url,
"WaitIcon": self._wait_icon_show,
"WaitIconGone": self._wait_icon_gone,
"SaveWithVim": self._save_data_with_vim,
"TermCommand": self._terminal_operations,
"Click": self._mouse_click,
"HotKey": self._hot_key,
"InputABC": self._input_abc,
"Drag": self._mouse_drag,
}
def _ready_steps(self):
print("steps: ")
count = 1
for step_data in self.operate_list:
self.ms_dic[count] = step_data
self.step_call_times[count] = 0
data_list = step_data.get('data_list')
if data_list:
data_list_type = step_data.get('data_list_type') or 'array'
data_sep = step_data.get('data_sep')
if data_list_type == 'array':
pass
elif data_list_type == 'file':
data_list_from_file = []
for f_name in data_list:
with open(f_name, 'r') as rf:
data_list_from_file += [x for x in rf.read().split('\n')]
data_list = data_list_from_file
elif data_list_type == 'redis':
redis_params = step_data.get("data_list_redis_params") or self.default_redis_params
redis_params['decode_responses'] = True
cli = redis.Redis(**redis_params)
data_list_from_redis = []
for key in data_list:
data_list_from_redis += cli.lrange(key, 0, -1)
cli.delete(key)
data_list = data_list_from_redis
if data_sep:
new_list = []
for data in data_list:
name, value = data.split(data_sep)
new_list.append({"name": name, "value": value})
else:
new_list = []
for data in data_list:
new_list.append({"name": "", "value": data})
self.resources[count] = new_list
print(f" [ {green(count)} ] -- [ {green(step_data.get('name'))} ]")
count += 1
self.total_steps = len(self.operate_list)
def _get_resources(self, cur_step, take_method):
ips = self.resources.get(cur_step)
if "pop" not in take_method:
if "only" in take_method:
resource = self.resources[cur_step][0]
elif "order" in take_method:
resource = self.resources[cur_step][self.step_call_times.get(cur_step, 1) - 1]
elif "all" in take_method:
return self.resources[cur_step]
else:
resource = random.choice(self.resources[cur_step])
name = resource.get('name')
value = resource.get('value')
else:
if "all" in take_method:
return self.resources.pop(cur_step)
elif "order" in take_method:
pop_index = 0
else:
pop_index = random.randint(0, len(ips) - 1)
resource = self.resources[cur_step].pop(pop_index)
name = resource.get('name')
value = resource.get('value')
return name, value
    def _search_and_click(self, params):
        """Locate a template image on screen and click it.

        The template path (and optional flow name) for this step come
        from the step's resource pool.  The search runs either by shape
        matching (default, ``self.it.locate``) or by color matching
        (``self.it.locate_color``).  On a hit the cursor is moved to the
        match (offset by ``deviation``) and clicked ``click_times``
        times, unless ``search_only`` is truthy.

        Returns a routing dict ``{'next': <step>, 'pack': {...}}``; the
        pack carries the hit position and flow name for later steps.
        On a miss, ``not_locate`` decides: "exit" aborts, "jumpN" goes
        to step N, "backN" goes N steps back (clamped at 0), anything
        else advances N steps (default 1).
        """
        match_options = params.get("match_options")
        speed = params.get("speed") or "fast"
        search_method = params.get("search_method") or "shape"
        not_locate = params.get("not_locate") or "exit"  # "exit", "next1", "jump1"
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        deviation = params.get("deviation") or [0, 0]
        click_times = int(params.get("click_times") or 1)
        click_sep = params.get("click_sep") or 0.2
        search_only = params.get("search_only") or False
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = params.get('take_method') or 'order'
        flow_name, icon_path = self._get_resources(cur_step, take_method)
        flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
        # digits embedded in not_locate, e.g. "jump3" -> ["3"]
        jump_step = re.findall(r'\d+', not_locate)
        time.sleep(int(pre_delay))
        check_region = match_options.get("check_region")
        if search_method == 'color':
            choice_method = match_options.get("choice_method") or 'random'
            icon_positions = self.it.locate_color(
                template_path=icon_path,
                color_tolerance=int(match_options.get('color_tolerance', 0)) or 0,
                color_purity=int(match_options.get('color_purity', 1)) or 1,
                screenshot_region=check_region,
                img_shape_times=float(match_options.get('img_shape_times', 1.0)) or 1.0,
            ) or [[]]  # fall back to one empty (falsy) position when nothing matched
            if choice_method == 'random':
                icon_position = random.choice(icon_positions)
            else:
                icon_position = icon_positions[0]
        else:
            match_options = match_options if isinstance(match_options, dict) else self.default_match_opt
            icon_position = self.it.locate(
                template_path=icon_path,
                threshold_value=match_options.get('threshold_value'),
                as_gray=match_options.get('as_gray'),
                as_binary=match_options.get('as_binary'),
                img_shape_times=match_options.get('img_shape_times'),
                screenshot_region=check_region,
            )
        if icon_position:
            if check_region and isinstance(check_region, list):
                # hits inside a sub-region are region-relative;
                # translate back to full-screen coordinates
                icon_position = [icon_position[0] + check_region[0], icon_position[1] + check_region[1]]
            icon_position = [icon_position[0] + deviation[0], icon_position[1] + deviation[1]]
            if not search_only:
                delay = self._speed(speed)
                self._delay_move(*icon_position, delay=delay)
                for i in range(click_times):
                    pyautogui.click()
                    time.sleep(click_sep)
            time.sleep(sub_delay)
            return {'next': cur_step + 1, "pack": {"position": icon_position, "flow_name": flow_name}}
        else:
            pack = params.get('pack', {})
            pack["flow_name"] = flow_name
            if not_locate.lower() == "exit":
                print(f"System exit because can not locate template: \n {icon_path}")
                raise KeyboardInterrupt
            elif "jump" in not_locate.lower():
                # absolute jump: "jumpN" -> step N (0 when no digits given)
                jump_step = jump_step[0] if jump_step else 0
                return {'next': int(jump_step), "pack": pack}
            elif "back" in not_locate.lower():
                # relative jump backwards, clamped at step 0
                jump_step = jump_step[0] if jump_step else 1
                jump_step = int(params.get('cur_step', 1)) - int(jump_step)
                jump_step = jump_step if jump_step >= 0 else 0
                return {'next': jump_step, "pack": pack}
            else:
                # default: advance N steps (1 when no digits given)
                jump_step = jump_step[0] if jump_step else 1
                return {'next': cur_step + int(jump_step), "pack": pack}
    def _search_and_drag(self, params):
        """Locate a template image on screen and drag from/to it.

        The search part mirrors ``_search_and_click``.  Once the
        template is found, either drag from ``start_position`` to the
        match, or from the match to ``end_position``; with neither set,
        drag from the current cursor position to the match.

        Returns a routing dict ``{'next': <step>, 'pack': {...}}``; on a
        miss ``not_locate`` routes exactly like ``_search_and_click``.
        """
        match_options = params.get("match_options")
        speed = params.get("speed") or "fast"
        search_method = params.get("search_method") or "shape"
        not_locate = params.get("not_locate") or "exit"  # "exit", "next1", "jump1"
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        deviation = params.get("deviation") or [0, 0]
        start_position = params.get("start_position")  # ["pre_step", 300]/[100, 200]
        end_position = params.get("end_position")  # ["pre_step", "pre_step"]/[100, 200]
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = params.get('take_method') or 'all'
        flow_name, icon_path = self._get_resources(cur_step, take_method)
        flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
        jump_step = re.findall(r'\d+', not_locate)
        time.sleep(int(pre_delay))
        check_region = match_options.get("check_region")
        if search_method == 'color':
            choice_method = match_options.get("choice_method") or 'random'
            icon_positions = self.it.locate_color(
                template_path=icon_path,
                color_tolerance=int(match_options.get('color_tolerance', 0)) or 0,
                color_purity=int(match_options.get('color_purity', 1)) or 1,
                screenshot_region=check_region,
                img_shape_times=float(match_options.get('img_shape_times', 1.0)) or 1.0,
            ) or [[]]  # one empty (falsy) position when nothing matched
            if choice_method == 'random':
                icon_position = random.choice(icon_positions)
            else:
                icon_position = icon_positions[0]
        else:
            match_options = match_options if isinstance(match_options, dict) else self.default_match_opt
            icon_position = self.it.locate(
                template_path=icon_path,
                threshold_value=match_options.get('threshold_value'),
                as_gray=match_options.get('as_gray'),
                as_binary=match_options.get('as_binary'),
                img_shape_times=match_options.get('img_shape_times'),
                screenshot_region=check_region,
            )
        if icon_position:
            if check_region and isinstance(check_region, list):
                # region-relative hit -> full-screen coordinates
                icon_position = [icon_position[0] + check_region[0], icon_position[1] + check_region[1]]
            icon_position = [icon_position[0] + deviation[0], icon_position[1] + deviation[1]]
            delay = self._speed(speed)
            if start_position:
                # NOTE(review): only the "pre_step" keyword is honoured here;
                # a numeric start coordinate is replaced by the icon position
                # on that axis -- confirm this is intended.
                sx, sy = start_position
                if isinstance(sx, str) and "pre_step" in sx:
                    sx = params.get('pack', {}).get('position', [])[0]
                else:
                    sx = icon_position[0]
                if isinstance(sy, str) and "pre_step" in sy:
                    sy = params.get('pack', {}).get('position', [0, ])[1]
                else:
                    sy = icon_position[1]
                self._delay_move(sx, sy, delay=0.5)
                self._delay_drag(*icon_position, delay=delay)
            elif end_position:
                ex, ey = end_position
                if isinstance(ex, str) and "pre_step" in ex:
                    ex = params.get('pack', {}).get('position', [])[0]
                else:
                    ex = icon_position[0]
                if isinstance(ey, str) and "pre_step" in ey:
                    ey = params.get('pack', {}).get('position', [0, ])[1]
                else:
                    ey = icon_position[1]
                self._delay_move(*icon_position, delay=0.5)
                self._delay_drag(ex, ey, delay=delay)
            else:
                # no explicit endpoints: drag from wherever the cursor is
                self._delay_drag(*icon_position, delay=delay)
            time.sleep(sub_delay)
            return {'next': cur_step + 1, "pack": {"position": icon_position, "flow_name": flow_name}}
        else:
            pack = params.get('pack', {})
            pack["flow_name"] = flow_name
            if not_locate.lower() == "exit":
                print(f"System exit because can not locate template: \n {icon_path}")
                raise KeyboardInterrupt
            elif "jump" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 0
                return {'next': int(jump_step), "pack": pack}
            elif "back" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 1
                jump_step = int(params.get('cur_step', 1)) - int(jump_step)
                jump_step = jump_step if jump_step >= 0 else 0
                return {'next': jump_step, "pack": pack}
            else:
                jump_step = jump_step[0] if jump_step else 1
                return {'next': cur_step + int(jump_step), "pack": pack}
    def _multi_search_and_click(self, params):
        """Search for several templates at once and click the hit.

        All of this step's resources are treated as template paths and
        handed to the batch locate helpers (``patch_locate`` /
        ``patch_locate_color``).  Clicking and routing mirror
        ``_search_and_click``.
        """
        match_options = params.get("match_options")
        not_locate = params.get("not_locate") or "next1"  # jump1
        speed = params.get("speed") or "fast"
        search_method = params.get("search_method") or "shape"
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        jump_step = re.findall(r'\d+', not_locate)
        deviation = params.get("deviation") or [0, 0]
        click_times = int(params.get("click_times") or 1)
        click_sep = params.get("click_sep") or 0.2
        search_only = params.get("search_only") or False
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = "all"  # always read the whole resource list (non-destructive)
        icon_paths = self._get_resources(cur_step, take_method)
        icon_paths = [x.get("value") for x in icon_paths]
        flow_name = params.get('pack', {}).get('flow_name', "")
        time.sleep(int(pre_delay))
        check_region = match_options.get("check_region")
        if search_method == 'color':
            choice_method = match_options.get("choice_method") or 'random'
            icon_positions = self.it.patch_locate_color(
                template_paths=icon_paths,
                color_tolerance=int(match_options.get('color_tolerance', 0)) or 0,
                color_purity=int(match_options.get('color_purity', 1)) or 1,
                screenshot_region=check_region,
                img_shape_times=float(match_options.get('img_shape_times', 1.0)) or 1.0,
            ) or [[]]  # one empty (falsy) position when nothing matched
            if choice_method == 'random':
                icon_position = random.choice(icon_positions)
            else:
                icon_position = icon_positions[0]
        else:
            match_options = match_options if isinstance(match_options, dict) else self.default_match_opt
            icon_position = self.it.patch_locate(
                template_path_list=icon_paths,
                threshold_value=match_options.get('threshold_value'),
                as_gray=match_options.get('as_gray'),
                as_binary=match_options.get('as_binary'),
                img_shape_times=match_options.get('img_shape_times'),
                screenshot_region=check_region,
            )
        if icon_position:
            if check_region and isinstance(check_region, list):
                # region-relative hit -> full-screen coordinates
                icon_position = [icon_position[0] + check_region[0], icon_position[1] + check_region[1]]
            icon_position = [icon_position[0] + deviation[0], icon_position[1] + deviation[1]]
            if not search_only:
                delay = self._speed(speed)
                self._delay_move(*icon_position, delay=delay)
                for i in range(click_times):
                    pyautogui.click()
                    time.sleep(click_sep)
            time.sleep(sub_delay)
            return {'next': cur_step + 1, "pack": {"position": icon_position, "flow_name": flow_name}}
        else:
            pack = params.get('pack', {})
            pack["flow_name"] = flow_name
            not_locate = not_locate.lower()
            if not_locate == "exit":
                print(f"System exit because can not locate template: \n {icon_paths}")
                raise KeyboardInterrupt
            elif "jump" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 0
                return {'next': int(jump_step), "pack": pack}
            elif "back" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 1
                jump_step = int(params.get('cur_step', 1)) - int(jump_step)
                jump_step = jump_step if jump_step >= 0 else 0
                return {'next': jump_step, "pack": pack}
            else:
                jump_step = jump_step[0] if jump_step else 1
                return {'next': cur_step + int(jump_step), "pack": pack}
    def _multi_search_and_drag(self, params):
        """Search for several templates at once and drag from/to the hit.

        Batch variant of ``_search_and_drag``: every resource of this
        step is a template path.  Non-"pre_step" axis values fall back
        to the current cursor coordinate (unlike ``_search_and_drag``,
        which falls back to the icon position).
        """
        match_options = params.get("match_options")
        speed = params.get("speed") or "fast"
        search_method = params.get("search_method") or "shape"
        not_locate = params.get("not_locate") or "exit"  # "exit", "next1", "jump1"
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        deviation = params.get("deviation") or [0, 0]
        start_position = params.get("start_position")  # ["pre_step", 300]/[100, 200]
        end_position = params.get("end_position")  # ["pre_step", "pre_step"]/[100, 200]
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = "all"  # whole resource list, non-destructive
        icon_paths = self._get_resources(cur_step, take_method)
        icon_paths = [x.get("value") for x in icon_paths]
        flow_name = params.get('pack', {}).get('flow_name', "")
        jump_step = re.findall(r'\d+', not_locate)
        time.sleep(int(pre_delay))
        check_region = match_options.get("check_region")
        if search_method == 'color':
            choice_method = match_options.get("choice_method") or 'random'
            icon_positions = self.it.patch_locate_color(
                template_paths=icon_paths,
                color_tolerance=int(match_options.get('color_tolerance', 0)) or 0,
                color_purity=int(match_options.get('color_purity', 1)) or 1,
                screenshot_region=check_region,
                img_shape_times=float(match_options.get('img_shape_times', 1.0)) or 1.0,
            ) or [[]]  # one empty (falsy) position when nothing matched
            if choice_method == 'random':
                icon_position = random.choice(icon_positions)
            else:
                icon_position = icon_positions[0]
        else:
            # NOTE(review): unlike the sibling handlers there is no fallback
            # to self.default_match_opt when match_options is not a dict --
            # confirm whether that is intentional.
            icon_position = self.it.patch_locate(
                template_path_list=icon_paths,
                threshold_value=match_options.get('threshold_value'),
                as_gray=match_options.get('as_gray'),
                as_binary=match_options.get('as_binary'),
                img_shape_times=match_options.get('img_shape_times'),
                screenshot_region=check_region,
            )
        if icon_position:
            if check_region and isinstance(check_region, list):
                # region-relative hit -> full-screen coordinates
                icon_position = [icon_position[0] + check_region[0], icon_position[1] + check_region[1]]
            icon_position = [icon_position[0] + deviation[0], icon_position[1] + deviation[1]]
            delay = self._speed(speed)
            cur_x, cur_y = [x for x in pyautogui.position()]
            if start_position:
                sx, sy = start_position
                if isinstance(sx, str) and "pre_step" in sx:
                    sx = params.get('pack', {}).get('position', [])[0]
                else:
                    sx = cur_x
                if isinstance(sy, str) and "pre_step" in sy:
                    sy = params.get('pack', {}).get('position', [0, ])[1]
                else:
                    sy = cur_y
                self._delay_move(sx, sy, delay=0.5)
                self._delay_drag(*icon_position, delay=delay)
            elif end_position:
                ex, ey = end_position
                if isinstance(ex, str) and "pre_step" in ex:
                    ex = params.get('pack', {}).get('position', [])[0]
                else:
                    ex = cur_x
                if isinstance(ey, str) and "pre_step" in ey:
                    ey = params.get('pack', {}).get('position', [0, ])[1]
                else:
                    ey = cur_y
                self._delay_move(*icon_position, delay=0.5)
                self._delay_drag(ex, ey, delay=delay)
            else:
                # no explicit endpoints: drag from current cursor to the hit
                self._delay_drag(*icon_position, delay=delay)
            time.sleep(sub_delay)
            return {'next': cur_step + 1, "pack": {"position": icon_position, "flow_name": flow_name}}
        else:
            pack = params.get('pack', {})
            pack["flow_name"] = flow_name
            if not_locate.lower() == "exit":
                print(f"System exit because can not locate template: \n {icon_paths}")
                raise KeyboardInterrupt
            elif "jump" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 0
                return {'next': int(jump_step), "pack": pack}
            elif "back" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 1
                jump_step = int(params.get('cur_step', 1)) - int(jump_step)
                jump_step = jump_step if jump_step >= 0 else 0
                return {'next': jump_step, "pack": pack}
            else:
                jump_step = jump_step[0] if jump_step else 1
                return {'next': cur_step + int(jump_step), "pack": pack}
    def _open_chrome_and_enter_url(self, params):
        """Click the Chrome icon, focus the address bar and open a URL.

        The URL comes from this step's resource pool.  When no existing
        ``chrome_icon`` path is configured, ``self.default_chrome_icon``
        is used.  On a missed icon, ``not_locate`` routes like the other
        handlers.
        """
        not_locate = params.get("not_locate") or "next1"  # jump1
        chrome_icon = params.get("chrome_icon")
        speed = params.get("speed") or "fast"
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 5
        jump_step = re.findall(r'\d+', not_locate)
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = params.get('take_method') or 'order'
        flow_name, url = self._get_resources(cur_step, take_method)
        flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
        time.sleep(pre_delay)
        if not chrome_icon or not os.path.exists(chrome_icon):
            chrome_icon = self.default_chrome_icon
        chrome_position = self.it.locate(
            template_path=chrome_icon,
            as_gray=True,
        )
        if chrome_position:
            self._delay_move(*chrome_position)
            time.sleep(0.1)
            pyautogui.click()
            time.sleep(0.3)
            # ctrl+l focuses the browser address bar
            pyautogui.hotkey('ctrl', 'l')
            self._delay_write(url, name=flow_name, delay_for_each=self._speed(speed))
            pyautogui.press('enter')
            time.sleep(sub_delay)
            return {'next': cur_step + 1, "pack": {"position": chrome_position, "flow_name": flow_name}}
        else:
            not_locate = not_locate.lower()
            pack = params.get('pack', {})
            pack["flow_name"] = flow_name
            if not_locate == "exit":
                print(f"System exit because can not locate chrome icon: \n {chrome_icon}")
                raise KeyboardInterrupt
            elif "jump" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 0
                return {'next': int(jump_step), "pack": pack}
            elif "back" in not_locate.lower():
                jump_step = jump_step[0] if jump_step else 1
                jump_step = int(params.get('cur_step', 1)) - int(jump_step)
                jump_step = jump_step if jump_step >= 0 else 0
                return {'next': jump_step, "pack": pack}
            else:
                jump_step = jump_step[0] if jump_step else 1
                return {'next': cur_step + int(jump_step), "pack": pack}
def _wait_icon_show(self, params):
"""
"icon_path": "/root/... .../icon.png",
"interval": 1,
"after_showed": "NextStep", # "ReturnPosition"
"time_out": 120,
"if_timeout": "End", # "NextStep", "JumpToStep4"
"match_options": {
"threshold_value": 90,
"as_gray": True,
"as_binary": False
"img_shape_times": 1.0
}
:return:
"""
match_options = params.get("match_options")
interval = float(params.get("interval", 1)) or 1.0
after_showed = params.get("after_showed") or "next1"
time_out = int(params.get("time_out", 120)) or 120
if_timeout = params.get("if_timeout") or "exit"
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'order'
flow_name, icon_path = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
match_options = match_options if isinstance(match_options, dict) else self.default_match_opt
show_sta = False
times_start = time.time()
icon_position = [0, 0]
while True:
if time.time() - times_start > time_out:
break
icon_position = self.it.locate(
template_path=icon_path,
template_resize=match_options.get("template_resize"),
threshold_value=match_options.get('threshold_value'),
as_gray=match_options.get('as_gray'),
img_shape_times=match_options.get('img_shape_times'),
)
if icon_position:
show_sta = True
break
time.sleep(interval)
jump_step = re.findall(r'\d+', after_showed)
if show_sta:
jump_step = jump_step[0] if jump_step else 1
r_dic = {'next': cur_step + int(jump_step), 'pack': {'position': icon_position, "flow_name": flow_name}}
return r_dic
else:
timeout_jump = re.findall(r'\d+', if_timeout)
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if if_timeout == 'exit':
print(red("\nSys out because icon not found!"))
print(f" [ {red(icon_path)} ]\n [ {tell_the_datetime()} ]")
raise KeyboardInterrupt
elif "jump" in if_timeout.lower():
timeout_jump = timeout_jump[0] if timeout_jump else 0
return {'next': int(timeout_jump), "pack": pack}
elif "back" in if_timeout.lower():
timeout_jump = timeout_jump[0] if timeout_jump else 1
timeout_jump = int(params.get('cur_step', 1)) - int(timeout_jump)
timeout_jump = timeout_jump if timeout_jump >= 0 else 0
return {'next': timeout_jump, "pack": pack}
else:
timeout_jump = timeout_jump[0] if timeout_jump else 1
return {'next': cur_step + int(timeout_jump), "pack": pack}
def _wait_icon_gone(self, params):
match_options = params.get("match_options")
interval = float(params.get("interval", 1)) or 1
after_gone = params.get("after_gone") or "next1"
time_out = int(params.get("time_out", 120)) or 120
if_timeout = params.get("if_timeout") or "exit"
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'order'
flow_name, icon_path = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
match_options = match_options if isinstance(match_options, dict) else self.default_match_opt
gone_sta = False
times_start = time.time()
icon_position = [0, 0]
count = 0
while True:
if count > 1 and count % 10 == 0:
print(f"icon still exist: \n {icon_path}")
if time.time() - times_start > time_out:
break
icon_position = self.it.locate(
template_path=icon_path,
template_resize=match_options.get("template_resize"),
threshold_value=match_options.get('threshold_value'),
as_gray=match_options.get('as_gray'),
img_shape_times=match_options.get('img_shape_times'),
)
if not icon_position:
gone_sta = True
break
time.sleep(interval)
count += 1
jump_step = re.findall(r'\d+', after_gone)
if gone_sta:
jump_step = jump_step[0] if jump_step else 1
return {'next': int(params.get('cur_step', 1)) + int(jump_step),
'pack': {'position': icon_position, "flow_name": flow_name}}
else:
timeout_jump = re.findall(r'\d+', if_timeout)
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if if_timeout == 'exit':
print(red("\nSys out because timeout when waiting icon gone!"))
print(f" [ {red(icon_path)} ]\n [ {tell_the_datetime()} ]")
raise KeyboardInterrupt
elif "jump" in if_timeout.lower():
timeout_jump = timeout_jump[0] if timeout_jump else 0
return {'next': int(timeout_jump), "pack": pack}
elif "back" in if_timeout.lower():
timeout_jump = timeout_jump[0] if timeout_jump else 1
timeout_jump = int(params.get('cur_step', 1)) - int(timeout_jump)
timeout_jump = timeout_jump if timeout_jump >= 0 else 0
return {'next': timeout_jump, "pack": pack}
else:
timeout_jump = timeout_jump[0] if timeout_jump else 1
return {'next': int(params.get('cur_step', 1)) + int(timeout_jump), "pack": pack}
    def _save_data_with_vim(self, params):
        """Write the clipboard into a file by driving vim in a terminal.

        Opens a terminal (ctrl+alt+t), runs ``vim <file_full_path>``,
        sends g/g/d + shift+G (presumably clearing the buffer -- TODO
        confirm), enters insert mode and pastes with ctrl+shift+v.  It
        then polls for the ``vim_insert_end.png`` marker on screen and
        proceeds once it is no longer located, writes ``:wq`` and closes
        the terminal with ctrl+shift+q.
        """
        file_full_path = params.get("file_full_path")
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        after = params.get("after") or 'next1'
        flow_name = params.get('pack', {}).get('flow_name', "")
        time.sleep(pre_delay)
        pyautogui.hotkey('ctrl', 'alt', 't')
        time.sleep(0.7)
        self._delay_write(f"vim {file_full_path}", name=flow_name, delay_for_each=0.01)
        time.sleep(0.3)
        pyautogui.press('enter')
        time.sleep(0.1)
        pyautogui.press(['g', 'g', 'd'])
        pyautogui.hotkey('shift', 'G')
        time.sleep(0.1)
        pyautogui.press('i')
        time.sleep(0.5)
        pyautogui.hotkey('ctrl', 'shift', 'v')
        # loop while the marker icon is still located on screen;
        # exits when self.it.locate returns a falsy result
        inserting_vim = True
        while inserting_vim:
            time.sleep(0.5)
            inserting_vim = self.it.locate(
                template_path=os.path.join(self.base_path, 'resource/icons/vim_insert_end.png'),
                threshold_value=95,
                as_gray=True,
                # img_shape_times=1.0
            )
        pyautogui.press('esc')
        time.sleep(0.1)
        # shift+; produces ':' to enter vim command mode
        pyautogui.hotkey('shift', ';')
        time.sleep(0.1)
        self._delay_write("wq", delay_for_each=0.01)
        time.sleep(0.1)
        pyautogui.press('enter')
        time.sleep(0.2)
        pyautogui.hotkey('ctrl', 'shift', 'q')
        time.sleep(sub_delay)
        jump_step = re.findall(r'\d+', after.lower())
        pack = params.get('pack', {})
        pack["flow_name"] = flow_name
        if 'next' in after:
            jump_step = jump_step[0] if jump_step else 1
            return {'next': int(params.get('cur_step', 1)) + int(jump_step), "pack": pack}
        elif "back" in after.lower():
            jump_step = jump_step[0] if jump_step else 1
            jump_step = int(params.get('cur_step', 1)) - int(jump_step)
            jump_step = jump_step if jump_step >= 0 else 0
            return {'next': jump_step, "pack": pack}
        else:
            jump_step = jump_step[0] if jump_step else 0
            return {'next': int(jump_step), "pack": pack}
    def _terminal_operations(self, params):
        """Open a terminal and run one shell command from the resources.

        If a password prompt (template icon) is detected on screen after
        the command, the configured ``root_password`` is typed in;
        otherwise the user is asked to enter it manually and we wait for
        the prompt icon to disappear.
        """
        root_password = params.get("root_password")
        after = params.get("after") or 'next1'
        pre_delay = params.get("pre_delay") or 0
        sub_delay = params.get("sub_delay") or 0
        cur_step = int(params.get('cur_step', 1))
        self.step_call_times[cur_step] += 1
        take_method = params.get('take_method') or 'order'
        flow_name, cmd = self._get_resources(cur_step, take_method)
        flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
        time.sleep(pre_delay)
        pyautogui.hotkey('ctrl', 'alt', 't')
        time.sleep(0.7)
        self._delay_write(f"{cmd}", name=flow_name, delay_for_each=0.01)
        time.sleep(0.3)
        pyautogui.press('enter')
        if self.it.locate(
                template_path=os.path.join(self.base_path, 'resource/icons/terminal_input_password.png'),
                as_gray=True,
        ):
            if root_password:
                self._delay_write(f"{root_password}", name=flow_name, delay_for_each=0.01)
                time.sleep(0.3)
                pyautogui.press('enter')
            else:
                print("please input password!")
                # NOTE(review): whether _wait_icon_gone honours the
                # "icon_path" key or reads its template from the step
                # resource pool depends on its implementation -- verify.
                self._wait_icon_gone({
                    "icon_path": os.path.join(self.base_path, 'resource/icons/terminal_input_password.png'),
                    "match_options": {'as_gray': True},
                    "time_out": 1000000}
                )
        time.sleep(sub_delay)
        jump_step = re.findall(r'\d+', after.lower())
        pack = params.get('pack', {})
        pack["flow_name"] = flow_name
        if 'next' in after:
            jump_step = jump_step[0] if jump_step else 1
            return {'next': int(params.get('cur_step', 1)) + int(jump_step), "pack": pack}
        elif "back" in after.lower():
            jump_step = jump_step[0] if jump_step else 1
            jump_step = int(params.get('cur_step', 1)) - int(jump_step)
            jump_step = jump_step if jump_step >= 0 else 0
            return {'next': jump_step, "pack": pack}
        else:
            jump_step = jump_step[0] if jump_step else 0
            return {'next': int(jump_step), "pack": pack}
def _hot_key(self, params):
pre_delay = params.get("pre_delay") or 0
sub_delay = params.get("sub_delay") or 0
after = params.get("after") or 'next1'
time.sleep(pre_delay)
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'order'
flow_name, key_list = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
if len(key_list) > 1:
pyautogui.hotkey(*key_list)
else:
pyautogui.press(*key_list)
time.sleep(sub_delay)
jump_step = re.findall(r'\d+', after.lower())
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if 'next' in after:
jump_step = jump_step[0] if jump_step else 1
return {'next': cur_step + int(jump_step), "pack": pack}
elif "back" in after.lower():
jump_step = jump_step[0] if jump_step else 1
jump_step = int(params.get('cur_step', 1)) - int(jump_step)
jump_step = jump_step if jump_step >= 0 else 0
return {'next': jump_step, "pack": pack}
else:
jump_step = jump_step[0] if jump_step else 0
return {'next': int(jump_step), "pack": pack}
def _input_abc(self, params):
pre_delay = params.get("pre_delay") or 0
sub_delay = params.get("sub_delay") or 0
after = params.get("after") or 'next1'
time.sleep(pre_delay)
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'only'
flow_name, words = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
self._delay_write(f"{words}", name=flow_name, delay_for_each=0.01)
time.sleep(sub_delay)
jump_step = re.findall(r'\d+', after.lower())
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if 'next' in after:
jump_step = jump_step[0] if jump_step else 1
return {'next': cur_step + int(jump_step), "pack": pack}
elif "back" in after.lower():
jump_step = jump_step[0] if jump_step else 1
jump_step = int(params.get('cur_step', 1)) - int(jump_step)
jump_step = jump_step if jump_step >= 0 else 0
return {'next': jump_step, "pack": pack}
else:
jump_step = jump_step[0] if jump_step else 0
return {'next': int(jump_step), "pack": pack}
def _mouse_click(self, params):
"""
position: ["left/center/right/pre_step", "top/center/bottom/pre_step"], or [1000, 1000],
:return:
"""
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'only'
click_side = params.get('click_side') or 'left'
click_times = params.get('click_times') or 1
click_sep = params.get('click_sep') or 0.2
flow_name, position = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
pre_delay = int(params.get("pre_delay", 0)) or 0
sub_delay = int(params.get("sub_delay", 1)) or 1
after = params.get("after") or 'next1'
time.sleep(pre_delay)
cur_x, cur_y = pyautogui.position()
pre_position = params.get('pack', {}).get('position') or [cur_x, cur_y]
click_point = self._point_format(position=position, pre_position=pre_position)
self._delay_move(*click_point)
for i in range(click_times):
if click_side == "left":
pyautogui.click()
elif click_side == "middle":
pyautogui.middleClick()
else:
pyautogui.rightClick()
time.sleep(click_sep)
time.sleep(sub_delay)
jump_step = re.findall(r'\d+', after.lower())
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if 'next' in after:
jump_step = jump_step[0] if jump_step else 1
return {'next': cur_step + int(jump_step), "pack": pack}
elif "back" in after.lower():
jump_step = jump_step[0] if jump_step else 1
jump_step = int(params.get('cur_step', 1)) - int(jump_step)
jump_step = jump_step if jump_step >= 0 else 0
return {'next': jump_step, "pack": pack}
else:
jump_step = jump_step[0] if jump_step else 0
return {'next': int(jump_step), "pack": pack}
def _mouse_drag(self, params):
"""
position: [["left/center/right/pre_step", "top/center/bottom/pre_step"], [1000, 1000]]
:return:
"""
cur_step = int(params.get('cur_step', 1))
self.step_call_times[cur_step] += 1
take_method = params.get('take_method') or 'only'
flow_name, position = self._get_resources(cur_step, take_method)
flow_name = flow_name if flow_name else params.get('pack', {}).get('flow_name', "")
drag_speed = int(params.get("drag_speed", 0)) or 0.5
pre_delay = int(params.get("pre_delay", 0)) or 0
sub_delay = int(params.get("sub_delay", 1)) or 1
after = params.get("after") or 'next1'
time.sleep(pre_delay)
cur_x, cur_y = pyautogui.position()
pre_position = params.get('pack', {}).get('position') or [cur_x, cur_y]
start_point = position[0]
end_point = position[-1]
start_point = self._point_format(position=start_point, pre_position=pre_position)
end_point = self._point_format(position=end_point, pre_position=pre_position)
self._delay_move(*start_point, delay=0.2)
self._delay_drag(*end_point, delay=drag_speed)
time.sleep(sub_delay)
jump_step = re.findall(r'\d+', after.lower())
pack = params.get('pack', {})
pack["flow_name"] = flow_name
if 'next' in after:
jump_step = jump_step[0] if jump_step else 1
return {'next': cur_step + int(jump_step), "pack": pack}
elif "back" in after.lower():
jump_step = jump_step[0] if jump_step else 1
jump_step = int(params.get('cur_step', 1)) - int(jump_step)
jump_step = jump_step if jump_step >= 0 else 0
return {'next': jump_step, "pack": pack}
else:
jump_step = jump_step[0] if jump_step else 0
return {'next': int(jump_step), "pack": pack}
def _point_format(self, position, pre_position):
cur_x, cur_y = pyautogui.position()
cpx = cur_x
cpy = cur_y
px = position[0]
py = position[-1]
if isinstance(px, int):
cpx = px
elif isinstance(px, str):
if "left" in px:
cpx = 1
elif "right" in px:
cpx = self.screen_width - 1
elif "center" in px:
cpx = int(self.screen_width / 2)
elif "pre_step" in px:
cpx = pre_position[0]
if isinstance(py, int):
cpy = py
elif isinstance(py, str):
if "top" in py:
cpy = 1
elif "bottom" in py:
cpy = self.screen_height - 1
elif "center" in py:
cpy = int(self.screen_height / 2)
elif "pre_step" in py:
cpy = pre_position[0]
final_point = [cpx, cpy]
return final_point
@staticmethod
def _speed(speed):
if isinstance(speed, int) or isinstance(speed, float):
return speed
if speed == 'fast':
delay = 0.5
elif speed == 'mid':
delay = 1
else:
delay = 2
return delay
@staticmethod
def _delay_move(x, y, delay=0.5):
pyautogui.moveTo(x, y, duration=delay, tween=pyautogui.easeInOutQuad)
@staticmethod
def _delay_drag(x, y, delay=2):
x1 = random.randint(-20, 20) + x
y1 = random.randint(-5, 5) + y
delay1 = round(delay / 3 * 2, 1)
delay = delay - delay1 - 0.1
pyautogui.mouseDown()
pyautogui.moveTo(x1, y1, duration=delay1, tween=pyautogui.easeInOutBounce)
time.sleep(0.1)
pyautogui.moveTo(x, y, duration=delay, tween=pyautogui.easeInOutBounce)
pyautogui.mouseUp()
@staticmethod
def _delay_write(words, name='', delay_for_each=0.1):
if "[NAME]" in words:
words = words.replace("[NAME]", name)
pyautogui.write(words, interval=delay_for_each)
    def start(self):
        """Run the mission loop.

        Starting at step 1, look up each step's config, inject the
        previous step's ``pack`` (position / flow name) into the params,
        dispatch to the handler registered in ``self.methods`` and
        follow the ``next`` step number it returns.  Stops when the
        current step has no resources left, when the step number runs
        past the configured steps, or on KeyboardInterrupt (which the
        handlers also raise as their abort signal).
        """
        step = 1
        pre_pack = {}
        try:
            while True:
                if not self.resources.get(step):
                    print(red("process done! one or more resource used up"))
                    break
                step_data = self.ms_dic.get(int(step))
                if step_data:
                    name = step_data.get("name")
                    method = step_data.get("method")
                    params = step_data
                    params['cur_step'] = step
                    if pre_pack:
                        # carry position / flow_name from the previous handler
                        params.update(pre_pack)
                    print(f"running step: [ {hgreen(step)} ] -- [ {name} ]")
                    run_result = self.methods.get(method)(params=params)
                    step = run_result.pop('next')
                    pre_pack = run_result
                    print(f"next step [ {hgreen(step)} ]")
                else:
                    print("all process done!")
                    break
        except KeyboardInterrupt:
            print(red(f"[ {tell_the_datetime()} ] sys exit!"))
def load_mission_from_json(jf_path):
    """Parse a JSON step file and run it through FlowTool."""
    with open(jf_path, 'r') as rf:
        steps = json.load(rf)
    try:
        FlowTool(operate_list=steps).start()
    except pyautogui.FailSafeException:
        print(red("sys exist because you move the mouse to corner"))
        exit(1)
    except KeyboardInterrupt:
        exit(1)
def start_missions():
    """CLI entry point: run a JSON-defined mission once or in a loop.

    Parses the json step file path plus optional --loop / --interval /
    --start_time / --end_time, then executes load_mission_from_json under
    a file lock, either once or repeatedly until end_time passes.
    """
    dp = ' 自动化流程小工具,如果还不清楚怎么使用,请参考 README.md。\n' \
         ' https://github.com/ga1008/flow_operate\n\n' \
         ' 子工具: \n' \
         ' 定位屏幕图像: ilocate [-h]\n' \
         ' 定位屏幕颜色: clocate [-h]'
    # da = "---> "
    da = ""  # optional prefix for every help line (currently disabled)
    parser = ArgumentParser(description=dp, formatter_class=RawTextHelpFormatter, add_help=True)
    parser.add_argument("json_file", type=str, help=f'{da}json format step file path, see README.md')
    parser.add_argument("-l", "--loop", dest="loop", default=False, action='store_true',
                        help=f'{da}is loop operation? ')
    parser.add_argument("-i", "--interval", type=float, dest="interval",
                        default=0.0, help=f'{da}interval seconds between loops')
    parser.add_argument("-s", "--start_time", type=str, dest="start_time", default=None,
                        help=f'{da}when to start, default NOW')
    parser.add_argument("-e", "--end_time", type=str, dest="end_time", default=None,
                        help=f'{da}when to end, default FOREVER')
    args = parser.parse_args()
    json_file = args.json_file
    loop = args.loop
    start_time = args.start_time or tell_the_datetime()
    # Default end: ~900 years from now, i.e. effectively "forever".
    end_time = args.end_time or tell_the_datetime(time_stamp=(time.time() + 3600 * 24 * 365 * 900))
    if not os.path.exists(json_file):
        print(hred(f"File Not Exists!\n {json_file}"))
        exit(1)
    # File lock so two mission runs cannot drive the mouse concurrently.
    fl = FLock()
    if not loop:
        print(f"running mission with json file [ {hblue(1)} ]: \n {blue(json_file)}")
        fl.acquire()
        load_mission_from_json(json_file)
        fl.release()
        print("mission complete!")
    else:
        start_sec = tell_timestamp(start_time)
        end_sec = tell_timestamp(end_time)
        count = 1
        wait_sec = start_sec - time.time()
        if wait_sec > 0:
            print(f"waiting start time [ {red(start_time)} ] ...")
            time.sleep(wait_sec)
        while True:
            now_sec = time.time()
            if now_sec > end_sec:
                print("end time")
                print("mission complete!")
                break
            print(f"running mission with json file [ {hblue(count)} ]: \n {blue(json_file)}")
            print(f"mission will end at time: [ {red(end_time)} ]")
            fl.acquire()
            load_mission_from_json(json_file)
            fl.release()
            # NOTE(review): this sleeps `interval` seconds AND then calls
            # waiting(reset_time=interval, ...), which looks like the same
            # pause applied twice - confirm waiting()'s semantics before
            # removing either.
            print(f"waiting [ {red(args.interval)} ] to next loop ...")
            time.sleep(args.interval)
            waiting(
                reset_time=args.interval,
                warning=f"waiting [ {red(args.interval)} ] to next loop ...",
                stop_wait_warning=f"[ {tell_the_datetime()} ] mission start again!"
            )
            count += 1
def locate_image():
    """CLI entry point: locate a template image on screen or in an image.

    Parses template path plus resize/threshold/gray/binary options, waits
    the optional start delay, runs ImageTool.locate and shows the result.
    Only the garbled English help strings were repaired; flags, defaults
    and behavior are unchanged.
    """
    dp = ' 自动化流程小工具的定位屏幕图像方法,如果还不清楚怎么使用,请参考 README.md。\n' \
         ' https://github.com/ga1008/flow_operate'
    # da = "---> "
    da = ""  # optional prefix for every help line (currently disabled)
    parser = ArgumentParser(description=dp, formatter_class=RawTextHelpFormatter, add_help=True)
    parser.add_argument("template_image_path", type=str, help=f'{da}the template image path')
    parser.add_argument("-tr", "--template_resize", type=float, dest="template_resize",
                        default=1.0, help=f'{da}resize the template to 1.5/0.7/2 times...')
    parser.add_argument("-th", "--threshold_value", type=int, dest="threshold_value",
                        default=90, help=f'{da} int type, 0-100')
    parser.add_argument("-ag", "--as_gray", dest="as_gray", action='store_true',
                        default=False, help=f'{da} turn the image to gray, it is faster than color matching')
    parser.add_argument("-ab", "--as_binary", dest="as_binary", action='store_true',
                        default=False, help=f'{da} turn the image to white or black mode, '
                                            f'even faster, but the match may fail in most cases')
    parser.add_argument("-ip", "--image_path", type=str, dest="image_path",
                        default=None, help=f'{da}the image waiting to be matched, if you do not input this param, '
                                           f'the program will automatically take a screenshot')
    parser.add_argument("-ir", "--image_resize", type=float, dest="image_resize",
                        default=1.0, help=f'{da}resize the image')
    parser.add_argument("-ssr", "--screenshot_region", type=str, dest="screenshot_region",
                        default=None, help=f'{da}screenshot region, '
                                           f'require 4 nums sep by ",": left,top,width,high like 0,0,1920,1080')
    parser.add_argument("-d", "--delay", type=float, dest="delay",
                        default=0.0, help=f'{da}delay seconds to start')
    args = parser.parse_args()
    it = ImageTool()
    print("searching ...")
    delay = args.delay
    print(f"delay [ {red(delay)} ] seconds ...")
    time.sleep(delay)
    # Pull the four region numbers out of the free-form "l,t,w,h" string.
    sr = args.screenshot_region
    ssr = tuple([int(x) for x in re.findall(r'\d+', sr)]) if sr else None
    it.locate(
        template_path=args.template_image_path,
        template_resize=args.template_resize,
        threshold_value=args.threshold_value,
        as_gray=args.as_gray,
        as_binary=args.as_binary,
        img_path=args.image_path,
        img_shape_times=args.image_resize,
        screenshot_region=ssr
    )
    it.show()
def locate_color():
    """CLI entry point: locate regions matching a template color on screen.

    Parses the color-template path plus tolerance/purity options, waits
    the optional start delay, runs ImageTool.locate_color, prints any
    matched positions and shows the result. Only the garbled English
    user-facing strings were repaired ("defference", "wait tobe match");
    flags, defaults and behavior are unchanged.
    """
    dp = ' 自动化流程小工具的定位屏幕颜色的方法,如果还不清楚怎么使用,请参考 README.md。\n' \
         ' https://github.com/ga1008/flow_operate'
    # da = "---> "
    da = ""  # optional prefix for every help line (currently disabled)
    parser = ArgumentParser(description=dp, formatter_class=RawTextHelpFormatter, add_help=True)
    parser.add_argument("template_color_img_path", type=str, help=f'{da}the color template image path')
    parser.add_argument("-ct", "--color_tolerance", type=int, dest="color_tolerance",
                        default=0, help=f'{da}the tolerance of matching img color, do not over 127')
    parser.add_argument("-cp", "--color_purity", type=int, dest="color_purity",
                        default=1, help=f'{da}the purity of matching color, default 1, do not lower then 1')
    parser.add_argument("-ip", "--image_path", type=str, dest="image_path",
                        default=None, help=f'{da}the image waiting to be matched, if you do not input this param, '
                                           f'the program will automatically take a screenshot')
    parser.add_argument("-ir", "--image_resize", type=float, dest="image_resize",
                        default=1.0, help=f'{da}resize the image')
    parser.add_argument("-ssr", "--screenshot_region", type=str, dest="screenshot_region",
                        default=None, help=f'{da}screenshot region, '
                                           f'require 4 nums sep by ",": left,top,width,high like 0,0,1920,1080')
    parser.add_argument("-d", "--delay", type=float, dest="delay",
                        default=0.0, help=f'{da}delay seconds to start')
    args = parser.parse_args()
    it = ImageTool()
    print("searching ...")
    delay = args.delay
    print(f"delay [ {red(delay)} ] seconds ...")
    time.sleep(delay)
    # Pull the four region numbers out of the free-form "l,t,w,h" string.
    sr = args.screenshot_region
    ssr = tuple([int(x) for x in re.findall(r'\d+', sr)]) if sr else None
    located_pts = it.locate_color(
        template_path=args.template_color_img_path,
        img_path=args.image_path,
        img_shape_times=args.image_resize,
        screenshot_region=ssr,
        color_tolerance=args.color_tolerance,
        color_purity=args.color_purity
    )
    if located_pts:
        print("positions:")
        for pt in located_pts:
            print(f" {yellow(pt)}")
    else:
        print("cannot locate anything, "
              "maybe you can change the 'color_tolerance' [-ct] or 'color_purity' [-cp] options to see different results")
    it.show()
if __name__ == '__main__':
    # Ad-hoc manual test harness: exercises ImageTool.locate_color against
    # developer-local files. The hard-coded paths below only exist on the
    # original author's machine - this block is for interactive debugging,
    # not for normal use (use the CLI entry points above instead).
    it = ImageTool()
    # time.sleep(1)
    # tlc = it.locate(
    #     template_path="/home/ga/Guardian/For-TiZi/BossZP/boss_zp/boss_zp/resource/process_img/t000.png",
    #     template_resize=1.0,
    #     as_gray=True,
    #     as_binary=False,
    #     threshold_value=90,
    #     img_shape_times=1.0,
    # )
    # it.show()
    it.locate_color(
        template_path="/home/ga/Guardian/For-TiZi/BossZP/boss_zp/boss_zp/resource/test_color_format/t01.jpg",
        img_path="/home/ga/Guardian/For-TiZi/BossZP/boss_zp/boss_zp/resource/test_color_format/i02.jpg",
        color_tolerance=20,
        color_purity=10
    )
    it.show()
    # jfp = "/home/ga/Guardian/For-TiZi/LaGou/lg_web/flow_get_cookies.json"
    # load_mission_from_json(jfp)
| 44.766868
| 125
| 0.540363
| 10,441
| 81,610
| 3.969735
| 0.054688
| 0.040147
| 0.016503
| 0.022003
| 0.767033
| 0.741749
| 0.727997
| 0.719504
| 0.703629
| 0.698441
| 0
| 0.018822
| 0.340522
| 81,610
| 1,822
| 126
| 44.791438
| 0.751301
| 0.05514
| 0
| 0.672436
| 0
| 0.003205
| 0.114676
| 0.010536
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026923
| false
| 0.005769
| 0.011538
| 0.000641
| 0.080769
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a68a7ed49ddbc9a8c19497f3c719374163ae280e
| 190
|
py
|
Python
|
fiftyone/types/__init__.py
|
FLIR/fiftyone
|
eeed8bc9dbdada0530036ae5b3afbbe7ab423ce3
|
[
"Apache-2.0"
] | 1,130
|
2020-08-12T13:19:04.000Z
|
2022-03-31T19:54:31.000Z
|
fiftyone/types/__init__.py
|
FLIR/fiftyone
|
eeed8bc9dbdada0530036ae5b3afbbe7ab423ce3
|
[
"Apache-2.0"
] | 844
|
2020-08-11T20:11:38.000Z
|
2022-03-31T14:59:15.000Z
|
fiftyone/types/__init__.py
|
FLIR/fiftyone
|
eeed8bc9dbdada0530036ae5b3afbbe7ab423ce3
|
[
"Apache-2.0"
] | 161
|
2020-08-24T01:46:09.000Z
|
2022-03-30T07:02:32.000Z
|
"""
FiftyOne types.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
# pylint: disable=wildcard-import,unused-wildcard-import
from .dataset_types import *
| 19
| 56
| 0.715789
| 23
| 190
| 5.826087
| 0.695652
| 0.149254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08284
| 0.110526
| 190
| 9
| 57
| 21.111111
| 0.710059
| 0.794737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a6915faede74bf3e5f83d67f528846254039e8e5
| 47
|
py
|
Python
|
notebooks/exercises/day 2/hello_world.py
|
anafink/advanced_python_2021-22_HD
|
d52c47a554757f67b836c2e5388ea5c0d74a30b5
|
[
"CC0-1.0"
] | null | null | null |
notebooks/exercises/day 2/hello_world.py
|
anafink/advanced_python_2021-22_HD
|
d52c47a554757f67b836c2e5388ea5c0d74a30b5
|
[
"CC0-1.0"
] | null | null | null |
notebooks/exercises/day 2/hello_world.py
|
anafink/advanced_python_2021-22_HD
|
d52c47a554757f67b836c2e5388ea5c0d74a30b5
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python 3
print("Hellö world!")
| 11.75
| 23
| 0.659574
| 8
| 47
| 3.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.12766
| 47
| 3
| 24
| 15.666667
| 0.731707
| 0.468085
| 0
| 0
| 0
| 0
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a6bbc158ae947e6eff8236c3c568c4f1d79009dc
| 47
|
py
|
Python
|
mission/finite_state_machine/src/sm_classes/__init__.py
|
theBadMusician/Vortex-AUV
|
a2450f295b1288c0914f9505512bd8f34869b62c
|
[
"MIT"
] | 1
|
2021-03-11T19:16:50.000Z
|
2021-03-11T19:16:50.000Z
|
mission/finite_state_machine/src/sm_classes/__init__.py
|
theBadMusician/Vortex-AUV
|
a2450f295b1288c0914f9505512bd8f34869b62c
|
[
"MIT"
] | null | null | null |
mission/finite_state_machine/src/sm_classes/__init__.py
|
theBadMusician/Vortex-AUV
|
a2450f295b1288c0914f9505512bd8f34869b62c
|
[
"MIT"
] | null | null | null |
from gate_search_state import GateSearchState
| 15.666667
| 45
| 0.893617
| 6
| 47
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 2
| 46
| 23.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a6bde389ecf31a6f4d85c4511658d67e354d1df8
| 7,467
|
py
|
Python
|
11 - Extra-- sonos snips voice app/snipssonos/use_cases/play/music.py
|
RedaMastouri/marvis
|
75e90a66d0746f12ba6231a4cab16ab40b42928e
|
[
"MIT"
] | 1
|
2021-12-29T08:44:34.000Z
|
2021-12-29T08:44:34.000Z
|
11 - Extra-- sonos snips voice app/snipssonos/use_cases/play/music.py
|
RedaMastouri/marvis
|
75e90a66d0746f12ba6231a4cab16ab40b42928e
|
[
"MIT"
] | null | null | null |
11 - Extra-- sonos snips voice app/snipssonos/use_cases/play/music.py
|
RedaMastouri/marvis
|
75e90a66d0746f12ba6231a4cab16ab40b42928e
|
[
"MIT"
] | null | null | null |
from snipssonos.shared.use_case import UseCase
from snipssonos.shared.response_object import ResponseFailure
from snipssonos.use_cases.play.track import PlayTrackUseCase
from snipssonos.use_cases.play.artist import PlayArtistUseCase
from snipssonos.use_cases.play.album import PlayAlbumUseCase
from snipssonos.use_cases.play.playlist import PlayPlaylistUseCase
import logging
logger = logging.getLogger(__name__)
class PlayMusicUseCase(UseCase):
    """Top-level 'play music' use case.

    Inspects which entities (track / album / artist / playlist) are present
    on the request and delegates to the most specific Play*UseCase, all of
    which share the same four injected services.
    """

    def __init__(self, device_discovery_service, music_search_service, music_playback_service, feedback_service):
        self.device_discovery_service = device_discovery_service
        self.music_search_service = music_search_service
        self.music_playback_service = music_playback_service
        self.feedback_service = feedback_service

    def process_request(self, request_object):
        """Resolve the sub use case for this request and run it."""
        # Normalize falsy entity values ('' / None) to None.
        track_name = request_object.track_name if request_object.track_name else None
        artist_name = request_object.artist_name if request_object.artist_name else None
        album_name = request_object.album_name if request_object.album_name else None
        playlist_name = request_object.playlist_name if request_object.playlist_name else None
        sub_use_case = self.extract_sub_use_case_from_parameters(track_name, artist_name, album_name, playlist_name)
        return sub_use_case.process_request(request_object)

    def extract_sub_use_case_from_parameters(self, track_name, artist_name, album_name, playlist_name):
        """Pick the sub use case by the most specific entity present.

        Replaces the previous 15-branch copy-paste table with the priority
        rule it encoded: track > album > artist > playlist; nothing present
        -> invalid use case. Log labels and `extra` dicts are now built
        uniformly from the entities present, fixing the table's
        inconsistencies (a "track:" extra-key typo, a lowercase
        'song-artist-playlist' label, and None values leaking into extras).
        """
        # (extra key, log label, value, use case class), most specific first.
        entities = (
            ('track', 'Song', track_name, PlayTrackUseCase),
            ('album', 'Album', album_name, PlayAlbumUseCase),
            ('artist', 'Artist', artist_name, PlayArtistUseCase),
            ('playlist', 'Playlist', playlist_name, PlayPlaylistUseCase),
        )
        extra = {key: value for key, _, value, _ in entities if value}
        labels = [label for _, label, value, _ in entities if value]
        # The first present entity decides the use case class.
        for _, _, value, use_case_class in entities:
            if value:
                logger.info('Use case selected : %s' % '-'.join(labels), extra=extra)
                return use_case_class(self.device_discovery_service, self.music_search_service,
                                      self.music_playback_service, self.feedback_service)
        logger.info('Use case selected : InvalidUseCase')
        return PlayMusicInvalidUseCase()
class PlayMusicInvalidUseCase(UseCase):
    # Fallback use case selected when the request carries no track, album,
    # artist or playlist; always responds with a resource-error failure.
    def process_request(self, request_object):
        # Empty message: callers only inspect the failure type here.
        return ResponseFailure.build_resource_error("")
| 64.930435
| 173
| 0.709388
| 898
| 7,467
| 5.583519
| 0.062361
| 0.107499
| 0.102114
| 0.088153
| 0.810531
| 0.77483
| 0.738333
| 0.700239
| 0.672916
| 0.669525
| 0
| 0
| 0.220303
| 7,467
| 114
| 174
| 65.5
| 0.861216
| 0
| 0
| 0.356322
| 0
| 0
| 0.095889
| 0.006294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0
| 0.08046
| 0.011494
| 0.356322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a6dd41d5869224c1f8592390be3751cfd8b00200
| 4,481
|
py
|
Python
|
assignment 3/mat/test_mat.py
|
dhruvgairola/linearAlgebra-coursera
|
20109133b9e53a7a38cbd17d8ca1fa1316bbf0d3
|
[
"MIT"
] | 6
|
2015-09-18T02:07:21.000Z
|
2020-04-22T17:05:11.000Z
|
test_mat.py
|
tri2sing/LinearAlgebraPython
|
f3dde94f02f146089607eb520ebd4467becb5f9e
|
[
"Apache-2.0"
] | null | null | null |
test_mat.py
|
tri2sing/LinearAlgebraPython
|
f3dde94f02f146089607eb520ebd4467becb5f9e
|
[
"Apache-2.0"
] | 10
|
2015-09-05T03:54:00.000Z
|
2020-04-21T12:56:40.000Z
|
"""
>>> from mat import Mat
>>> from vec import Vec
>>> from GF2 import one
No operations should mutate the input matrices, except setitem.
For getitem(M,k):
>>> M = Mat(({1,3,5}, {'a'}), {(1,'a'):4, (5,'a'): 2})
>>> M[1,'a']
4
>>> M[3,'a']
0
Make sure your operations work on other fields, like GF(2).
>>> M = Mat((set(range(1000)), {'e',' '}), {(500, ' '): one, (255, 'e'): 0})
>>> M[500, ' ']
one
>>> M[500, 'e']
0
>>> M[255, 'e']
0
>>> M == Mat((set(range(1000)), {'e',' '}), {(500, ' '): one, (255, 'e'): 0})
True
For setitem(M,k,val)
>>> M = Mat(({'a','b','c'}, {5}), {('a', 5):3, ('b', 5):7})
>>> M['b', 5] = 9
>>> M['c', 5] = 13
>>> M == Mat(({'a','b','c'}, {5}), {('a', 5):3, ('b', 5):9, ('c',5):13})
True
Make sure your operations work with bizarre and unordered keys.
>>> N = Mat(({((),), 7}, {True, False}), {})
>>> N[(7, False)] = 1
>>> N[(((),), True)] = 2
>>> N == Mat(({((),), 7}, {True, False}), {(7,False):1, (((),), True):2})
True
For add(A, B):
>>> A1 = Mat(({3, 6}, {'x','y'}), {(3,'x'):-2, (6,'y'):3})
>>> A2 = Mat(({3, 6}, {'x','y'}), {(3,'y'):4})
>>> B = Mat(({3, 6}, {'x','y'}), {(3,'x'):-2, (3,'y'):4, (6,'y'):3})
>>> A1 + A2 == B
True
>>> A2 + A1 == B
True
>>> A1 == Mat(({3, 6}, {'x','y'}), {(3,'x'):-2, (6,'y'):3})
True
>>> zero = Mat(({3,6}, {'x','y'}), {})
>>> B + zero == B
True
>>> C1 = Mat(({1,3}, {2,4}), {(1,2):2, (3,4):3})
>>> C2 = Mat(({1,3}, {2,4}), {(1,4):1, (1,2):4})
>>> D = Mat(({1,3}, {2,4}), {(1,2):6, (1,4):1, (3,4):3})
>>> C1 + C2 == D
True
For scalar_mul(M, x):
>>> M = Mat(({1,3,5}, {2,4}), {(1,2):4, (5,4):2, (3,4):3})
>>> 0*M == Mat(({1, 3, 5}, {2, 4}), {})
True
>>> 1*M == M
True
>>> 0.25*M == Mat(({1,3,5}, {2,4}), {(1,2):1.0, (5,4):0.5, (3,4):0.75})
True
>>> M = Mat(({1,2,3},{4,5,6}), {(1,4):one, (3,5):one, (2,5): 0})
>>> one * M == Mat(({1,2,3},{4,5,6}), {(1,4):one, (3,5):one, (2,5): 0})
True
>>> 0 * M == Mat(({1,2,3},{4,5,6}), {})
True
For equal(A, B):
>>> Mat(({'a','b'}, {0,1}), {('a',1):0}) == Mat(({'a','b'}, {0,1}), {('b',1):0})
True
>>> A = Mat(({'a','b'}, {0,1}), {('a',1):2, ('b',0):1})
>>> B = Mat(({'a','b'}, {0,1}), {('a',1):2, ('b',0):1, ('b',1):0})
>>> C = Mat(({'a','b'}, {0,1}), {('a',1):2, ('b',0):1, ('b',1):5})
>>> A == B
True
>>> A == C
False
>>> A == Mat(({'a','b'}, {0,1}), {('a',1):2, ('b',0):1})
True
For transpose(M):
>>> M = Mat(({0,1}, {0,1}), {(0,1):3, (1,0):2, (1,1):4})
>>> M.transpose() == Mat(({0,1}, {0,1}), {(0,1):2, (1,0):3, (1,1):4})
True
>>> M = Mat(({'x','y','z'}, {2,4}), {('x',4):3, ('x',2):2, ('y',4):4, ('z',4):5})
>>> Mt = Mat(({2,4}, {'x','y','z'}), {(4,'x'):3, (2,'x'):2, (4,'y'):4, (4,'z'):5})
>>> M.transpose() == Mt
True
For vector_matrix_mul(v, M):
>>> v1 = Vec({1, 2, 3}, {1: 1, 2: 8})
>>> M1 = Mat(({1, 2, 3}, {1, 2, 3}), {(1, 2): 2, (2, 1):-1, (3, 1): 1, (3, 3): 7})
>>> v1*M1 == Vec({1, 2, 3},{1: -8, 2: 2, 3: 0})
True
>>> v1 == Vec({1, 2, 3}, {1: 1, 2: 8})
True
>>> M1 == Mat(({1, 2, 3}, {1, 2, 3}), {(1, 2): 2, (2, 1):-1, (3, 1): 1, (3, 3): 7})
True
>>> v2 = Vec({'a','b'}, {})
>>> M2 = Mat(({'a','b'}, {0, 2, 4, 6, 7}), {})
>>> v2*M2 == Vec({0, 2, 4, 6, 7},{})
True
For matrix_vector_mul(M, v):
>>> N1 = Mat(({1, 3, 5, 7}, {'a', 'b'}), {(1, 'a'): -1, (1, 'b'): 2, (3, 'a'): 1, (3, 'b'):4, (7, 'a'): 3, (5, 'b'):-1})
>>> u1 = Vec({'a', 'b'}, {'a': 1, 'b': 2})
>>> N1*u1 == Vec({1, 3, 5, 7},{1: 3, 3: 9, 5: -2, 7: 3})
True
>>> N1 == Mat(({1, 3, 5, 7}, {'a', 'b'}), {(1, 'a'): -1, (1, 'b'): 2, (3, 'a'): 1, (3, 'b'):4, (7, 'a'): 3, (5, 'b'):-1})
True
>>> u1 == Vec({'a', 'b'}, {'a': 1, 'b': 2})
True
>>> N2 = Mat(({('a', 'b'), ('c', 'd')}, {1, 2, 3, 5, 8}), {})
>>> u2 = Vec({1, 2, 3, 5, 8}, {})
>>> N2*u2 == Vec({('a', 'b'), ('c', 'd')},{})
True
For matrix_matrix_mul(A, B):
>>> A = Mat(({0,1,2}, {0,1,2}), {(1,1):4, (0,0):0, (1,2):1, (1,0):5, (0,1):3, (0,2):2})
>>> B = Mat(({0,1,2}, {0,1,2}), {(1,0):5, (2,1):3, (1,1):2, (2,0):0, (0,0):1, (0,1):4})
>>> A*B == Mat(({0,1,2}, {0,1,2}), {(0,0):15, (0,1):12, (1,0):25, (1,1):31})
True
>>> C = Mat(({0,1,2}, {'a','b'}), {(0,'a'):4, (0,'b'):-3, (1,'a'):1, (2,'a'):1, (2,'b'):-2})
>>> D = Mat(({'a','b'}, {'x','y'}), {('a','x'):3, ('a','y'):-2, ('b','x'):4, ('b','y'):-1})
>>> C*D == Mat(({0,1,2}, {'x','y'}), {(0,'y'):-5, (1,'x'):3, (1,'y'):-2, (2,'x'):-5})
True
>>> M = Mat(({0, 1}, {'a', 'c', 'b'}), {})
>>> N = Mat(({'a', 'c', 'b'}, {(1, 1), (2, 2)}), {})
>>> M*N == Mat(({0,1}, {(1,1), (2,2)}), {})
True
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28.724359
| 121
| 0.349476
| 932
| 4,481
| 1.664163
| 0.099785
| 0.05158
| 0.023211
| 0.027079
| 0.423598
| 0.333978
| 0.319149
| 0.290135
| 0.236622
| 0.206963
| 0
| 0.15023
| 0.174068
| 4,481
| 155
| 122
| 28.909677
| 0.268846
| 0.982593
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a6ea78943de1b4b185da77b58d7ec61d1bf3854d
| 128
|
py
|
Python
|
DjangoSearchView/sample/admin.py
|
Arisophy/django-searchview
|
0898e171417366cf85666288ae9e2e44c173853f
|
[
"MIT"
] | 3
|
2021-01-12T19:27:11.000Z
|
2021-09-27T11:53:06.000Z
|
DjangoSearchView/sample/admin.py
|
Arisophy/django-searchview
|
0898e171417366cf85666288ae9e2e44c173853f
|
[
"MIT"
] | null | null | null |
DjangoSearchView/sample/admin.py
|
Arisophy/django-searchview
|
0898e171417366cf85666288ae9e2e44c173853f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Musician,Album
admin.site.register(Musician)
admin.site.register(Album)
| 16
| 34
| 0.8125
| 18
| 128
| 5.777778
| 0.555556
| 0.173077
| 0.326923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101563
| 128
| 7
| 35
| 18.285714
| 0.904348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5b4cdd92a07f18dde1a6a0fbdaf86774b550c153
| 44
|
py
|
Python
|
tests/components/media_player/__init__.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/media_player/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/media_player/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The tests for Media player platforms."""
| 22
| 43
| 0.704545
| 6
| 44
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.815789
| 0.840909
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5b976355a0229fb483054070a174c59cc5a7ee85
| 157
|
py
|
Python
|
tests/pbraiders/pages/contacts/__init__.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | 1
|
2021-03-30T14:41:29.000Z
|
2021-03-30T14:41:29.000Z
|
tests/pbraiders/pages/contacts/__init__.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | null | null | null |
tests/pbraiders/pages/contacts/__init__.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from .abstract import ContactPageAbstract
from .new import ContactNewPage
from .contact import ContactPage
from .contacts import ContactsPage
| 26.166667
| 41
| 0.840764
| 19
| 157
| 6.947368
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.11465
| 157
| 5
| 42
| 31.4
| 0.942446
| 0.076433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5bab2becc6a79ffed32430259b0d8fc91e2c0ffd
| 1,190
|
py
|
Python
|
tests/test_events.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 34
|
2017-06-12T18:50:36.000Z
|
2021-11-29T01:59:07.000Z
|
tests/test_events.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 25
|
2017-12-07T13:35:29.000Z
|
2022-03-10T01:27:58.000Z
|
tests/test_events.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 4
|
2019-05-05T03:19:00.000Z
|
2021-10-06T13:12:05.000Z
|
from lemoncheesecake.events import AsyncEventManager, SyncEventManager, Event
class MyEvent(Event):
    # Minimal Event subclass carrying one payload value for these tests.
    def __init__(self, val):
        super(MyEvent, self).__init__()
        self.val = val
def test_async_fire():
    """An event fired inside handle_events() reaches a subscribed handler."""
    i_got_called = []

    def handler(event):
        i_got_called.append(event.val)

    eventmgr = AsyncEventManager()
    eventmgr.register_event(MyEvent)
    eventmgr.subscribe_to_event(MyEvent, handler)
    # Async delivery: the handler runs by the time handle_events() exits.
    with eventmgr.handle_events():
        eventmgr.fire(MyEvent(42))
    assert i_got_called
def test_sync_fire():
    """A sync manager delivers the event to the handler during fire()."""
    i_got_called = []

    def handler(event):
        i_got_called.append(event.val)

    eventmgr = SyncEventManager()
    eventmgr.register_event(MyEvent)
    eventmgr.subscribe_to_event(MyEvent, handler)
    # No handle_events() context needed for synchronous delivery.
    eventmgr.fire(MyEvent(42))
    assert i_got_called
def test_unsubscribe():
    """After unsubscribing, a fired event must NOT reach the handler."""
    i_got_called = []

    def handler(event):
        i_got_called.append(event.val)

    eventmgr = AsyncEventManager()
    eventmgr.register_event(MyEvent)
    eventmgr.subscribe_to_event(MyEvent, handler)
    eventmgr.unsubscribe_from_event(MyEvent, handler)
    with eventmgr.handle_events():
        eventmgr.fire(MyEvent(42))
    assert not i_got_called
| 27.045455
| 77
| 0.710924
| 143
| 1,190
| 5.608392
| 0.230769
| 0.044888
| 0.112219
| 0.081047
| 0.749377
| 0.749377
| 0.749377
| 0.749377
| 0.749377
| 0.749377
| 0
| 0.00625
| 0.193277
| 1,190
| 43
| 78
| 27.674419
| 0.829167
| 0
| 0
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.2
| false
| 0
| 0.028571
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5bad851bd1e0f1e40c2b00ca2de847f1ad38a9c4
| 242
|
py
|
Python
|
arknights_mower/solvers/__init__.py
|
yuanyan3060/arknights-mower
|
599b96e02590a435dc50bdef450b45c851654c4f
|
[
"MIT"
] | 1
|
2021-09-11T04:11:15.000Z
|
2021-09-11T04:11:15.000Z
|
arknights_mower/solvers/__init__.py
|
yuanyan3060/arknights-mower
|
599b96e02590a435dc50bdef450b45c851654c4f
|
[
"MIT"
] | null | null | null |
arknights_mower/solvers/__init__.py
|
yuanyan3060/arknights-mower
|
599b96e02590a435dc50bdef450b45c851654c4f
|
[
"MIT"
] | null | null | null |
from .base_construct import BaseConstructSolver
from .credit import CreditSolver
from .mission import MissionSolver
from .operation import OpeSolver
from .recruit import RecruitSolver
from .shop import ShopSolver
from .mail import MailSolver
| 30.25
| 47
| 0.855372
| 29
| 242
| 7.103448
| 0.586207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 242
| 7
| 48
| 34.571429
| 0.962617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5bb3f25856b53d7ca503349a97603fad448dbbbe
| 17
|
py
|
Python
|
gl/f1.py
|
wizelab8/SmartMirror
|
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
|
[
"MIT"
] | null | null | null |
gl/f1.py
|
wizelab8/SmartMirror
|
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
|
[
"MIT"
] | null | null | null |
gl/f1.py
|
wizelab8/SmartMirror
|
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
|
[
"MIT"
] | null | null | null |
print("I am f1")
| 8.5
| 16
| 0.588235
| 4
| 17
| 2.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.176471
| 17
| 1
| 17
| 17
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5bda38e56d3953e284ffba9772e5c13de786ed42
| 411
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/toolresults/v1beta3/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/toolresults/v1beta3/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/toolresults/v1beta3/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
"""Common imports for generated toolresults client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudsdk.third_party.apitools.base.py import *
from googlecloudsdk.third_party.apis.toolresults.v1beta3.toolresults_v1beta3_client import *
from googlecloudsdk.third_party.apis.toolresults.v1beta3.toolresults_v1beta3_messages import *
__path__ = pkgutil.extend_path(__path__, __name__)
| 37.363636
| 94
| 0.846715
| 49
| 411
| 6.693878
| 0.510204
| 0.219512
| 0.210366
| 0.256098
| 0.45122
| 0.45122
| 0.45122
| 0.45122
| 0.45122
| 0.45122
| 0
| 0.020997
| 0.072993
| 411
| 10
| 95
| 41.1
| 0.839895
| 0.214112
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5be7d1f2212ba4ee4532fd56a7b48cfc837784e7
| 64
|
py
|
Python
|
src/pyzhuyin/__init__.py
|
rku1999/python-zhuyin
|
82c31bb89871e7567853406471c5849a9c2034f1
|
[
"MIT"
] | 2
|
2022-01-13T14:56:37.000Z
|
2022-01-13T16:09:26.000Z
|
src/pyzhuyin/__init__.py
|
rku1999/python-zhuyin
|
82c31bb89871e7567853406471c5849a9c2034f1
|
[
"MIT"
] | null | null | null |
src/pyzhuyin/__init__.py
|
rku1999/python-zhuyin
|
82c31bb89871e7567853406471c5849a9c2034f1
|
[
"MIT"
] | null | null | null |
from pyzhuyin.convert import pinyin_to_zhuyin, zhuyin_to_pinyin
| 32
| 63
| 0.890625
| 10
| 64
| 5.3
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 64
| 1
| 64
| 64
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5bf510f6fb5cf03dafd5845955ea4043dbeaa935
| 319
|
py
|
Python
|
liegroups/__init__.py
|
lvzhaoyang/liegroups
|
b3ccad280a3b615ad33c5f15c35d9325b8d3a5be
|
[
"MIT"
] | 1
|
2021-11-03T02:20:30.000Z
|
2021-11-03T02:20:30.000Z
|
liegroups/__init__.py
|
lvzhaoyang/liegroups
|
b3ccad280a3b615ad33c5f15c35d9325b8d3a5be
|
[
"MIT"
] | null | null | null |
liegroups/__init__.py
|
lvzhaoyang/liegroups
|
b3ccad280a3b615ad33c5f15c35d9325b8d3a5be
|
[
"MIT"
] | null | null | null |
"""Special Euclidean and Special Orthogonal Lie groups."""
from liegroups.numpy import SO2
from liegroups.numpy import SE2
from liegroups.numpy import SO3
from liegroups.numpy import SE3
try:
import liegroups.torch
except:
pass
__author__ = "Lee Clement"
__email__ = "lee.clement@robotics.utias.utoronto.ca"
| 21.266667
| 58
| 0.777429
| 43
| 319
| 5.581395
| 0.604651
| 0.216667
| 0.3
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014652
| 0.144201
| 319
| 14
| 59
| 22.785714
| 0.864469
| 0.163009
| 0
| 0
| 0
| 0
| 0.187739
| 0.145594
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.1
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
751487484194ccd39b5acd69444477c81836ab3c
| 7,475
|
py
|
Python
|
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/diagnostics.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2020-10-23T14:40:09.000Z
|
2020-10-23T14:40:09.000Z
|
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/diagnostics.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/diagnostics.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2021-05-27T20:38:45.000Z
|
2021-05-27T20:38:45.000Z
|
from collections import OrderedDict, defaultdict
from typing import List, Union
import numpy as np
from rlkit.envs.contextual.contextual_env import (
ContextualDiagnosticsFn,
Path,
Context,
Diagnostics,
)
from rlkit.launchers.experiments.vitchyr.probabilistic_goal_reaching.env import (
NormalizeAntFullPositionGoalEnv
)
from rlkit.misc.eval_util import create_stats_ordered_dict
class AntFullPositionGoalEnvDiagnostics(ContextualDiagnosticsFn):
def __init__(
self,
desired_goal_key: str,
achieved_goal_key: str,
success_threshold,
normalize_env: Union[None, NormalizeAntFullPositionGoalEnv] = None,
):
self._desired_goal_key = desired_goal_key
self._achieved_goal_key = achieved_goal_key
self.success_threshold = success_threshold
self.normalize_env = normalize_env
if normalize_env:
self.qpos_weights = normalize_env.qpos_weights
else:
self.qpos_weights = None
def __call__(self, paths: List[Path],
contexts: List[Context]) -> Diagnostics:
goals = [c[self._desired_goal_key] for c in contexts]
achieved_goals = [
np.array([o[self._achieved_goal_key] for o in path['observations']])
for path in paths
]
statistics = OrderedDict()
stat_to_lists = defaultdict(list)
for achieved, goal in zip(achieved_goals, goals):
difference = achieved - goal
xy_difference = difference[..., :2]
orientation_difference = difference[..., 3:7]
joint_difference = difference[..., 7:]
if self.qpos_weights is not None:
stat_to_lists['normalized/total/distance'].append(
np.linalg.norm(difference, axis=-1)
)
stat_to_lists['normalized/xy/distance'].append(
np.linalg.norm(xy_difference, axis=-1)
)
stat_to_lists['normalized/orientation/distance'].append(
np.linalg.norm(orientation_difference, axis=-1)
)
stat_to_lists['normalized/joint/distance'].append(
np.linalg.norm(joint_difference, axis=-1)
)
stat_to_lists['normalized/xy/success'].append(
np.linalg.norm(xy_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['normalized/orientation/success'].append(
np.linalg.norm(orientation_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['normalized/joint/success'].append(
np.linalg.norm(joint_difference, axis=-1)
<= self.success_threshold
)
difference = (achieved - goal) / self.qpos_weights
xy_difference = difference[..., :2]
orientation_difference = difference[..., 3:7]
joint_difference = difference[..., 7:]
stat_to_lists['total/distance'].append(
np.linalg.norm(difference, axis=-1)
)
stat_to_lists['xy/distance'].append(
np.linalg.norm(xy_difference, axis=-1)
)
stat_to_lists['orientation/distance'].append(
np.linalg.norm(orientation_difference, axis=-1)
)
stat_to_lists['joint/distance'].append(
np.linalg.norm(joint_difference, axis=-1)
)
stat_to_lists['xy/success'].append(
np.linalg.norm(xy_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['orientation/success'].append(
np.linalg.norm(orientation_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['joint/success'].append(
np.linalg.norm(joint_difference, axis=-1)
<= self.success_threshold
)
for stat_name, stat_list in stat_to_lists.items():
statistics.update(create_stats_ordered_dict(
stat_name,
stat_list,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'{}/final'.format(stat_name),
[s[-1:] for s in stat_list],
always_show_all_stats=True,
exclude_max_min=True,
))
return statistics
class HopperFullPositionGoalEnvDiagnostics(ContextualDiagnosticsFn):
def __init__(
self,
desired_goal_key: str,
achieved_goal_key: str,
success_threshold,
):
self._desired_goal_key = desired_goal_key
self._achieved_goal_key = achieved_goal_key
self.success_threshold = success_threshold
def __call__(self, paths: List[Path],
contexts: List[Context]) -> Diagnostics:
goals = [c[self._desired_goal_key] for c in contexts]
achieved_goals = [
np.array([o[self._achieved_goal_key] for o in path['observations']])
for path in paths
]
statistics = OrderedDict()
stat_to_lists = defaultdict(list)
for achieved, goal in zip(achieved_goals, goals):
difference = achieved - goal
x_difference = difference[..., :1]
y_difference = difference[..., 1:2]
z_difference = difference[..., 2:3]
joint_difference = difference[..., 3:6]
stat_to_lists['x/distance'].append(
np.linalg.norm(x_difference, axis=-1)
)
stat_to_lists['y/distance'].append(
np.linalg.norm(y_difference, axis=-1)
)
stat_to_lists['z/distance'].append(
np.linalg.norm(z_difference, axis=-1)
)
stat_to_lists['joint/distance'].append(
np.linalg.norm(joint_difference, axis=-1)
)
stat_to_lists['x/success'].append(
np.linalg.norm(x_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['y/success'].append(
np.linalg.norm(y_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['z/success'].append(
np.linalg.norm(z_difference, axis=-1)
<= self.success_threshold
)
stat_to_lists['joint/success'].append(
np.linalg.norm(joint_difference, axis=-1)
<= self.success_threshold
)
for stat_name, stat_list in stat_to_lists.items():
statistics.update(create_stats_ordered_dict(
stat_name,
stat_list,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'{}/final'.format(stat_name),
[s[-1:] for s in stat_list],
always_show_all_stats=True,
exclude_max_min=True,
))
return statistics
class SawyerPickAndPlaceEnvAchievedFromObs(object):
def __init__(self, key):
self._key = key
def __call__(self, observations):
return observations[self._key][..., 1:]
| 38.530928
| 81
| 0.564682
| 758
| 7,475
| 5.271768
| 0.135884
| 0.039039
| 0.071572
| 0.099099
| 0.781031
| 0.76977
| 0.764264
| 0.759259
| 0.718218
| 0.695195
| 0
| 0.008094
| 0.338863
| 7,475
| 193
| 82
| 38.73057
| 0.800486
| 0
| 0
| 0.588889
| 0
| 0
| 0.053913
| 0.023813
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.033333
| 0.005556
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7523ee31213487ecfbdd82f7d5704da659e224b7
| 61
|
py
|
Python
|
pyids/ids.py
|
kliegr/pyIDS
|
83e12503dc7b2680b35bfb377bc13521a54237bc
|
[
"MIT"
] | 1
|
2021-12-18T01:00:16.000Z
|
2021-12-18T01:00:16.000Z
|
pyids/ids.py
|
kliegr/pyIDS
|
83e12503dc7b2680b35bfb377bc13521a54237bc
|
[
"MIT"
] | null | null | null |
pyids/ids.py
|
kliegr/pyIDS
|
83e12503dc7b2680b35bfb377bc13521a54237bc
|
[
"MIT"
] | null | null | null |
from .data_structures import IDS, mine_CARs, mine_IDS_ruleset
| 61
| 61
| 0.868852
| 10
| 61
| 4.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 61
| 1
| 61
| 61
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
75468149c4867222bb4105c5ad3977dddf26d2d0
| 36
|
py
|
Python
|
python/testData/completion/py874.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/py874.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/py874.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
import root.nested_mod
root.<caret>
| 12
| 22
| 0.805556
| 6
| 36
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 2
| 23
| 18
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
754b429ad291e61da3977b562fee3da57437374b
| 221
|
py
|
Python
|
vega/search_space/networks/pytorch/customs/__init__.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
vega/search_space/networks/pytorch/customs/__init__.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/customs/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
from .adelaide import AdelaideFastNAS
from .mtm_sr import MtMSR
from .deepfm import DeepFactorizationMachineModel
from .autogate import AutoGateModel
from .autogroup import AutoGroupModel
from .simplecnn import SimpleCnn
| 31.571429
| 49
| 0.864253
| 25
| 221
| 7.6
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108597
| 221
| 6
| 50
| 36.833333
| 0.964467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7558e6fdaee877f31c7e022c6fb065e169cdd1ec
| 66
|
py
|
Python
|
tests/assets/projekt/projekt.py
|
Lufedi/reaper
|
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
|
[
"Apache-2.0"
] | 106
|
2015-07-21T16:18:26.000Z
|
2022-03-31T06:45:34.000Z
|
tests/assets/projekt/projekt.py
|
Kowndinya2000/enhanced_repo_reaper
|
744f794ba53bde5667b3b0f99b07273d0e32a495
|
[
"Apache-2.0"
] | 21
|
2015-07-11T03:48:28.000Z
|
2022-01-18T12:57:30.000Z
|
tests/assets/projekt/projekt.py
|
Kowndinya2000/enhanced_repo_reaper
|
744f794ba53bde5667b3b0f99b07273d0e32a495
|
[
"Apache-2.0"
] | 26
|
2015-07-22T22:38:21.000Z
|
2022-03-14T10:11:56.000Z
|
def projekt():
# Single line comment
print('RepoReapers')
| 16.5
| 25
| 0.651515
| 7
| 66
| 6.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 66
| 3
| 26
| 22
| 0.843137
| 0.287879
| 0
| 0
| 0
| 0
| 0.244444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
755b7e0ffc1fe73e7a3ef09f907fdd611a3d419b
| 191
|
py
|
Python
|
uniauth/exceptions.py
|
alranel/uniAuth
|
1d9dd044b7c3722d40162fc116d536fe3dfd5c7b
|
[
"Apache-2.0"
] | null | null | null |
uniauth/exceptions.py
|
alranel/uniAuth
|
1d9dd044b7c3722d40162fc116d536fe3dfd5c7b
|
[
"Apache-2.0"
] | null | null | null |
uniauth/exceptions.py
|
alranel/uniAuth
|
1d9dd044b7c3722d40162fc116d536fe3dfd5c7b
|
[
"Apache-2.0"
] | 1
|
2020-01-09T08:57:28.000Z
|
2020-01-09T08:57:28.000Z
|
class MetadataNotFound(Exception):
pass
class MetadataCorruption(Exception):
pass
class NotYetImplemented(Exception):
pass
class SPConfigurationMissing(Exception):
pass
| 12.733333
| 40
| 0.759162
| 16
| 191
| 9.0625
| 0.4375
| 0.358621
| 0.372414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17801
| 191
| 14
| 41
| 13.642857
| 0.923567
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
f342706daa8837946773c0644d288fc687eba864
| 370
|
py
|
Python
|
python/testData/highlighting/docstring.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/highlighting/docstring.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 11
|
2017-02-27T22:35:32.000Z
|
2021-12-24T08:07:40.000Z
|
python/testData/highlighting/docstring.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
__doc__= <info descr="null">"""<info descr="null">:param</info> v: """</info>
def <info descr="null">foo</info>(<info descr="null">a</info>, <info descr="null">v</info>):
"""
<info descr="null">:param</info> a:
<info descr="null">:param</info> v:
"""
pass
foo.<info descr="null">__doc__</info>= <info descr="null">"""<info descr="null">:param</info> v: """</info>
| 37
| 107
| 0.602703
| 56
| 370
| 3.839286
| 0.178571
| 0.418605
| 0.604651
| 0.334884
| 0.581395
| 0.47907
| 0.372093
| 0.372093
| 0.372093
| 0.372093
| 0
| 0
| 0.091892
| 370
| 10
| 107
| 37
| 0.639881
| 0
| 0
| 0
| 0
| 0
| 0.33218
| 0.179931
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.25
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
f347bfe54927a1729b2777b6837c13ea7c95ad2e
| 245
|
py
|
Python
|
torchtoolbox/data/__init__.py
|
deeplearningforfun/torch-tools
|
17aaa513ef72dbac8af88977ff11840aa2d6a2f4
|
[
"BSD-3-Clause"
] | 353
|
2019-10-05T16:55:51.000Z
|
2022-03-30T00:03:38.000Z
|
torchtoolbox/data/__init__.py
|
KAKAFEIcoffee/torch-toolbox
|
e3dc040dcfe33aec247a3139e72426bca73cda96
|
[
"BSD-3-Clause"
] | 14
|
2019-12-12T04:24:47.000Z
|
2021-10-31T07:02:54.000Z
|
torchtoolbox/data/__init__.py
|
KAKAFEIcoffee/torch-toolbox
|
e3dc040dcfe33aec247a3139e72426bca73cda96
|
[
"BSD-3-Clause"
] | 49
|
2019-10-05T16:57:24.000Z
|
2022-01-20T08:08:37.000Z
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(pistonyang@gmail.com)
from .utils import *
from .lmdb_dataset import *
from .datasets import *
from .dataprefetcher import DataPreFetcher
from .dynamic_data_provider import *
from .sampler import *
| 27.222222
| 44
| 0.75102
| 30
| 245
| 6.033333
| 0.633333
| 0.220994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004739
| 0.138776
| 245
| 8
| 45
| 30.625
| 0.853081
| 0.261224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f3585ecec309265826e0aec25d25d2369aa3803d
| 47
|
py
|
Python
|
FlaskTaskr/run.py
|
nipunsadvilkar/web-dev-deliberate-practice
|
7074f646cee3b3729ab80d4a51072e4df23aedf7
|
[
"MIT"
] | null | null | null |
FlaskTaskr/run.py
|
nipunsadvilkar/web-dev-deliberate-practice
|
7074f646cee3b3729ab80d4a51072e4df23aedf7
|
[
"MIT"
] | null | null | null |
FlaskTaskr/run.py
|
nipunsadvilkar/web-dev-deliberate-practice
|
7074f646cee3b3729ab80d4a51072e4df23aedf7
|
[
"MIT"
] | null | null | null |
from flasktaskr import app
app.run(debug=True)
| 15.666667
| 26
| 0.808511
| 8
| 47
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 2
| 27
| 23.5
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f3648aa70d65d4f8432e182435176345d46d40c9
| 445
|
py
|
Python
|
Ex 107 + 108.py
|
brunobendel/Exercicios-python-Pycharm
|
145ded6cb5533aeef1b89f0bce20f0a90e37216c
|
[
"MIT"
] | null | null | null |
Ex 107 + 108.py
|
brunobendel/Exercicios-python-Pycharm
|
145ded6cb5533aeef1b89f0bce20f0a90e37216c
|
[
"MIT"
] | null | null | null |
Ex 107 + 108.py
|
brunobendel/Exercicios-python-Pycharm
|
145ded6cb5533aeef1b89f0bce20f0a90e37216c
|
[
"MIT"
] | null | null | null |
from uteis import moedas
#Programa principal
p = float(input('Digite qual valor você usará no modulo moedas: R$'))
print(f'O valor {moedas.real(p)} mais 10% é {moedas.real(moedas.aumentar(p,10))}')
print(f'O valor {moedas.real(p)} menos 20% é {moedas.real(moedas.diminuir(p,20))}')
print(f'O valor é {moedas.real(p)} e o dobro é {moedas.real(moedas.dobro(p))} ')
print(f'O valor {moedas.real(p)} e a metade é {moedas.real(moedas.metade(p))}')
| 49.444444
| 83
| 0.701124
| 83
| 445
| 3.759036
| 0.385542
| 0.25641
| 0.176282
| 0.153846
| 0.221154
| 0.221154
| 0.221154
| 0
| 0
| 0
| 0
| 0.020305
| 0.114607
| 445
| 8
| 84
| 55.625
| 0.771574
| 0.040449
| 0
| 0
| 0
| 0.666667
| 0.783529
| 0.312941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f36c41247c8277a0f0f6d78e21381d975c3fa710
| 68
|
py
|
Python
|
discopusher/handlers/__init__.py
|
Tina-otoge/discopusher
|
0301b15e00d52e3b1734ee25bbd42498e1d4f709
|
[
"MIT"
] | 1
|
2020-12-25T15:08:08.000Z
|
2020-12-25T15:08:08.000Z
|
discopusher/handlers/__init__.py
|
Tina-otoge/discopusher
|
0301b15e00d52e3b1734ee25bbd42498e1d4f709
|
[
"MIT"
] | null | null | null |
discopusher/handlers/__init__.py
|
Tina-otoge/discopusher
|
0301b15e00d52e3b1734ee25bbd42498e1d4f709
|
[
"MIT"
] | null | null | null |
from .twitter import TwitterHandler
from .pixiv import PixivHandler
| 22.666667
| 35
| 0.852941
| 8
| 68
| 7.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 36
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f37a90c52a9b12672090c917a36e8007bff26313
| 42
|
py
|
Python
|
tests/__init__.py
|
ElmofiedBoby/Caprover-API
|
a10e88f6dde17ccc32d159f83f1dd99d781aafca
|
[
"MIT"
] | 2
|
2021-06-15T10:08:58.000Z
|
2021-08-09T16:56:17.000Z
|
tests/__init__.py
|
ElmofiedBoby/Caprover-API
|
a10e88f6dde17ccc32d159f83f1dd99d781aafca
|
[
"MIT"
] | 2
|
2021-08-11T19:22:04.000Z
|
2021-09-08T16:33:57.000Z
|
tests/__init__.py
|
ElmofiedBoby/Caprover-API
|
a10e88f6dde17ccc32d159f83f1dd99d781aafca
|
[
"MIT"
] | 3
|
2021-06-25T15:03:09.000Z
|
2021-10-13T07:36:18.000Z
|
"""Unit test package for caprover_api."""
| 21
| 41
| 0.714286
| 6
| 42
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.783784
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3aef6c3ca3a19fa17e96acd950cceb901d81588
| 510
|
py
|
Python
|
berth/vcs/base.py
|
joealcorn/berth.cc
|
9cba1355d49705a13ae58cfdffa26ee6a3fb9e31
|
[
"MIT"
] | null | null | null |
berth/vcs/base.py
|
joealcorn/berth.cc
|
9cba1355d49705a13ae58cfdffa26ee6a3fb9e31
|
[
"MIT"
] | null | null | null |
berth/vcs/base.py
|
joealcorn/berth.cc
|
9cba1355d49705a13ae58cfdffa26ee6a3fb9e31
|
[
"MIT"
] | null | null | null |
from django.conf import settings
class InvalidRevision(Exception):
pass
class VCS(object):
'''
Defines the public API for all subclasses
'''
repo_clone_dir = settings.REPO_CLONE_DIR
def __init__(self, project):
self.project = project
self.checkout_dir = project.get_checkout_directory()
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def checkout(self, ref):
raise NotImplementedError
| 18.888889
| 60
| 0.678431
| 56
| 510
| 5.982143
| 0.571429
| 0.214925
| 0.071642
| 0.185075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25098
| 510
| 26
| 61
| 19.615385
| 0.876963
| 0.080392
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.071429
| 0.071429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
f3b1adc127a3446fcef7adf32191cd2757c464f6
| 46
|
py
|
Python
|
MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q45.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q45.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
MotunrayoKoyejo/Phase 1/Python Basic 1/Day6/Q45.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
from subprocess import call
call(["ls", "-l"])
| 23
| 27
| 0.673913
| 7
| 46
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 2
| 28
| 23
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f3b5988b05250ba5d3c96560895a1d93a3aa29a3
| 146
|
py
|
Python
|
PythonBrasil_exer/sequencial/input_tempF_output_tempC.py
|
Lionofpride/Dev_utilits
|
eb7963922d6d244d1ee0a03e57e86c2f3e564de5
|
[
"MIT"
] | null | null | null |
PythonBrasil_exer/sequencial/input_tempF_output_tempC.py
|
Lionofpride/Dev_utilits
|
eb7963922d6d244d1ee0a03e57e86c2f3e564de5
|
[
"MIT"
] | null | null | null |
PythonBrasil_exer/sequencial/input_tempF_output_tempC.py
|
Lionofpride/Dev_utilits
|
eb7963922d6d244d1ee0a03e57e86c2f3e564de5
|
[
"MIT"
] | null | null | null |
tempF = input('Informe a temperatura em graus Fahrenheit ')
c = float(tempF) -32
c = c * 5 / 9
print(f'A temperatura em graus Celsius é {c}ºC')
| 24.333333
| 60
| 0.678082
| 26
| 146
| 3.807692
| 0.692308
| 0.242424
| 0.282828
| 0.383838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.19863
| 146
| 5
| 61
| 29.2
| 0.811966
| 0
| 0
| 0
| 0
| 0
| 0.554795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3c17b2afb9fd9c0046d0021b02fd39b6d6f9661
| 156
|
py
|
Python
|
setup.py
|
devanshshukla99/pytest-intercept
|
e0b79c874206864d7cda3d487e263495138ca7bd
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T03:52:55.000Z
|
2022-01-24T03:52:55.000Z
|
setup.py
|
devanshshukla99/pytest-intercept
|
e0b79c874206864d7cda3d487e263495138ca7bd
|
[
"BSD-3-Clause"
] | 3
|
2021-05-23T00:05:02.000Z
|
2021-06-24T10:24:52.000Z
|
setup.py
|
devanshshukla99/pytest-intercept
|
e0b79c874206864d7cda3d487e263495138ca7bd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup
setup(use_scm_version={"write_to": os.path.join("pytest_intercept_remote", "_version.py")})
| 19.5
| 91
| 0.762821
| 24
| 156
| 4.708333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 156
| 7
| 92
| 22.285714
| 0.795775
| 0.128205
| 0
| 0
| 0
| 0
| 0.311111
| 0.17037
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
341f60dbb5480569a78bd62dd38318fa49b3c883
| 4,140
|
py
|
Python
|
AUSH/model/attack_model/baseline.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
AUSH/model/attack_model/baseline.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
AUSH/model/attack_model/baseline.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 10:46
# @Author : chensi
# @File : baseline_new.py
# @Software : PyCharm
# @Desciption : None
import numpy as np
import math
class BaselineAttack:
def __init__(self, attack_num, filler_num, n_items, target_id,
global_mean, global_std, item_means, item_stds, r_max, r_min, fixed_filler_indicator=None):
#
self.attack_num = attack_num
self.filler_num = filler_num
self.n_items = n_items
self.target_id = target_id
self.global_mean = global_mean
self.global_std = global_std
self.item_means = item_means
self.item_stds = item_stds
self.r_max = r_max
self.r_min = r_min
self.fixed_filler_indicator = fixed_filler_indicator
def RandomAttack(self):
filler_candis = list(set(range(self.n_items)) - {self.target_id})
fake_profiles = np.zeros(shape=[self.attack_num, self.n_items], dtype=float)
# target
fake_profiles[:, self.target_id] = self.r_max
# fillers
for i in range(self.attack_num):
if self.fixed_filler_indicator is None:
fillers = np.random.choice(filler_candis, size=self.filler_num, replace=False)
else:
fillers = np.where(np.array(self.fixed_filler_indicator[i])== 1)[0]
ratings = np.random.normal(loc=self.global_mean, scale=self.global_std, size=self.filler_num)
for f_id, r in zip(fillers, ratings):
fake_profiles[i][f_id] = max(math.exp(-5), min(self.r_max, r))
return fake_profiles
def BandwagonAttack(self, selected_ids):
filler_candis = list(set(range(self.n_items)) - set([self.target_id] + selected_ids))
fake_profiles = np.zeros(shape=[self.attack_num, self.n_items], dtype=float)
# target & selected patch
fake_profiles[:, [self.target_id] + selected_ids] = self.r_max
# fillers
for i in range(self.attack_num):
if self.fixed_filler_indicator is None:
fillers = np.random.choice(filler_candis, size=self.filler_num, replace=False)
else:
fillers = np.where(np.array(self.fixed_filler_indicator[i])== 1)[0]
ratings = np.random.normal(loc=self.global_mean, scale=self.global_std, size=self.filler_num)
for f_id, r in zip(fillers, ratings):
fake_profiles[i][f_id] = max(math.exp(-5), min(self.r_max, r))
return fake_profiles
def AverageAttack(self):
filler_candis = list(set(range(self.n_items)) - {self.target_id})
fake_profiles = np.zeros(shape=[self.attack_num, self.n_items], dtype=float)
# target
fake_profiles[:, self.target_id] = self.r_max
# fillers
fn_normal = lambda iid: np.random.normal(loc=self.item_means[iid], scale=self.item_stds[iid], size=1)[0]
for i in range(self.attack_num):
if self.fixed_filler_indicator is None:
fillers = np.random.choice(filler_candis, size=self.filler_num, replace=False)
else:
fillers = np.where(np.array(self.fixed_filler_indicator[i])== 1)[0]
ratings = map(fn_normal, fillers)
for f_id, r in zip(fillers, ratings):
fake_profiles[i][f_id] = max(math.exp(-5), min(self.r_max, r))
return fake_profiles
def SegmentAttack(self, selected_ids):
filler_candis = list(set(range(self.n_items)) - set([self.target_id] + selected_ids))
fake_profiles = np.zeros(shape=[self.attack_num, self.n_items], dtype=float)
# target & selected patch
fake_profiles[:, [self.target_id] + selected_ids] = self.r_max
# fillers
for i in range(self.attack_num):
if self.fixed_filler_indicator is None:
fillers = np.random.choice(filler_candis, size=self.filler_num, replace=False)
else:
fillers = np.where(np.array(self.fixed_filler_indicator[i])== 1)[0]
fake_profiles[i][fillers] = self.r_min
return fake_profiles
| 43.578947
| 112
| 0.629469
| 575
| 4,140
| 4.290435
| 0.16
| 0.077827
| 0.089177
| 0.087556
| 0.744224
| 0.728415
| 0.728415
| 0.728415
| 0.728415
| 0.728415
| 0
| 0.00813
| 0.257246
| 4,140
| 94
| 113
| 44.042553
| 0.794146
| 0.057246
| 0
| 0.656716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.029851
| 0
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
344310ef1265de0274f57cac2fca8c98d41f784d
| 36
|
py
|
Python
|
deepvo/networks/common.py
|
msaroufim/deepvo
|
78f7a7add8a8ab99d15adbc4fbdb2baf1d41bec9
|
[
"MIT"
] | null | null | null |
deepvo/networks/common.py
|
msaroufim/deepvo
|
78f7a7add8a8ab99d15adbc4fbdb2baf1d41bec9
|
[
"MIT"
] | null | null | null |
deepvo/networks/common.py
|
msaroufim/deepvo
|
78f7a7add8a8ab99d15adbc4fbdb2baf1d41bec9
|
[
"MIT"
] | null | null | null |
def flownet_v1_s(input):
pass
| 7.2
| 24
| 0.666667
| 6
| 36
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.25
| 36
| 4
| 25
| 9
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
cab42284e62337cd8b5530610cb19b8caed8289f
| 98
|
py
|
Python
|
stateflow/__init__.py
|
frolenkov-nikita/django-stateflow
|
0a5d75e42606b662c7d510b5d5ef7cb996cf185b
|
[
"BSD-3-Clause"
] | null | null | null |
stateflow/__init__.py
|
frolenkov-nikita/django-stateflow
|
0a5d75e42606b662c7d510b5d5ef7cb996cf185b
|
[
"BSD-3-Clause"
] | null | null | null |
stateflow/__init__.py
|
frolenkov-nikita/django-stateflow
|
0a5d75e42606b662c7d510b5d5ef7cb996cf185b
|
[
"BSD-3-Clause"
] | null | null | null |
from stateclass import Flow, DjangoTransition, DjangoState
from statefields import StateFlowField
| 32.666667
| 58
| 0.877551
| 10
| 98
| 8.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 98
| 2
| 59
| 49
| 0.977273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cadf6de9a57a6cbf2329d6653406b44d70181b9c
| 293
|
py
|
Python
|
config.py
|
LCBRU/requests
|
2635cf91eaa9c131283c13bdcd29906a125a0f7b
|
[
"MIT"
] | null | null | null |
config.py
|
LCBRU/requests
|
2635cf91eaa9c131283c13bdcd29906a125a0f7b
|
[
"MIT"
] | null | null | null |
config.py
|
LCBRU/requests
|
2635cf91eaa9c131283c13bdcd29906a125a0f7b
|
[
"MIT"
] | null | null | null |
import os
from lbrc_flask.config import BaseConfig, BaseTestConfig
class Config(BaseConfig):
FILE_UPLOAD_DIRECTORY = os.environ["FILE_UPLOAD_DIRECTORY"]
class TestConfig(BaseTestConfig):
FILE_UPLOAD_DIRECTORY = os.getenv("TEST_FILE_UPLOAD_DIRECTORY", Config.FILE_UPLOAD_DIRECTORY)
| 26.636364
| 97
| 0.822526
| 36
| 293
| 6.361111
| 0.444444
| 0.218341
| 0.414847
| 0.183406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102389
| 293
| 10
| 98
| 29.3
| 0.870722
| 0
| 0
| 0
| 0
| 0
| 0.16041
| 0.16041
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cae4efaa53e814fd87e811da4d303751d4e390f5
| 28
|
py
|
Python
|
ws/config.py
|
usc-isi-i2/mydig-webservice
|
9628f72fed9f33d0fe341c3d8c3324cb198aae74
|
[
"MIT"
] | 2
|
2018-12-19T16:41:50.000Z
|
2021-11-11T20:52:25.000Z
|
ws/config.py
|
research-software-company/mydig-webservice
|
9628f72fed9f33d0fe341c3d8c3324cb198aae74
|
[
"MIT"
] | 55
|
2017-06-09T15:53:56.000Z
|
2018-04-16T23:53:30.000Z
|
ws/config.py
|
research-software-company/mydig-webservice
|
9628f72fed9f33d0fe341c3d8c3324cb198aae74
|
[
"MIT"
] | 12
|
2017-08-06T19:49:44.000Z
|
2020-02-16T07:12:09.000Z
|
from config_docker import *
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1b495875fe8400c3fa2498b474512475b67c71cd
| 102
|
py
|
Python
|
src/backend/command/controller/xml_parser.py
|
kairu-ms/aaz-dev-tools
|
233a70253487ebbc8347bdd1851e07c2a745104f
|
[
"MIT"
] | null | null | null |
src/backend/command/controller/xml_parser.py
|
kairu-ms/aaz-dev-tools
|
233a70253487ebbc8347bdd1851e07c2a745104f
|
[
"MIT"
] | 2
|
2021-12-21T03:49:53.000Z
|
2021-12-29T07:32:31.000Z
|
src/backend/command/controller/xml_parser.py
|
kairu-ms/aaz-dev-tools
|
233a70253487ebbc8347bdd1851e07c2a745104f
|
[
"MIT"
] | 1
|
2021-11-18T09:07:11.000Z
|
2021-11-18T09:07:11.000Z
|
import xml.etree.cElementTree as ElementTree
class XmlParser:
def __init__(self):
pass
| 12.75
| 44
| 0.705882
| 12
| 102
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 102
| 7
| 45
| 14.571429
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
1b8f8a14b251671bc8ceadedddcdd574bcc27aa9
| 98
|
py
|
Python
|
ooipy/hydrophone/__init__.py
|
Molkree/ooipy
|
097b9f275de15343735a9e4c416fca14b15ed9f4
|
[
"MIT"
] | 3
|
2020-12-01T21:42:43.000Z
|
2022-01-22T01:02:58.000Z
|
ooipy/hydrophone/__init__.py
|
Molkree/ooipy
|
097b9f275de15343735a9e4c416fca14b15ed9f4
|
[
"MIT"
] | 50
|
2020-10-08T22:33:15.000Z
|
2022-01-21T23:05:31.000Z
|
ooipy/hydrophone/__init__.py
|
Molkree/ooipy
|
097b9f275de15343735a9e4c416fca14b15ed9f4
|
[
"MIT"
] | 4
|
2021-02-01T19:21:53.000Z
|
2021-07-21T22:29:21.000Z
|
# TODO: this file ensures that the module 'basic' can be imported by calling ooipy.acoustic.basic
| 49
| 97
| 0.785714
| 16
| 98
| 4.8125
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 1
| 98
| 98
| 0.927711
| 0.969388
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1ba3ba28b82ef9fc9dff6394cffb0120985e43de
| 87
|
py
|
Python
|
optimization/metrics/__init__.py
|
adamw00000/PU-joint-CCCP-DCCP-2021
|
7b97351af1884a886d1d26a848a3ced517f9feef
|
[
"MIT"
] | null | null | null |
optimization/metrics/__init__.py
|
adamw00000/PU-joint-CCCP-DCCP-2021
|
7b97351af1884a886d1d26a848a3ced517f9feef
|
[
"MIT"
] | null | null | null |
optimization/metrics/__init__.py
|
adamw00000/PU-joint-CCCP-DCCP-2021
|
7b97351af1884a886d1d26a848a3ced517f9feef
|
[
"MIT"
] | 1
|
2022-03-26T10:53:19.000Z
|
2022-03-26T10:53:19.000Z
|
from optimization.metrics.metrics import approximation_error, c_error, auc, alpha_error
| 87
| 87
| 0.873563
| 12
| 87
| 6.083333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 87
| 1
| 87
| 87
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
59f0d092ed916181c1798dae9f849d3eb7383f26
| 95
|
py
|
Python
|
133:A - HQ9+/script.py
|
treeindev/CodeForces
|
b3bcc332e0330a6588f021ff766737a996577147
|
[
"MIT"
] | null | null | null |
133:A - HQ9+/script.py
|
treeindev/CodeForces
|
b3bcc332e0330a6588f021ff766737a996577147
|
[
"MIT"
] | null | null | null |
133:A - HQ9+/script.py
|
treeindev/CodeForces
|
b3bcc332e0330a6588f021ff766737a996577147
|
[
"MIT"
] | null | null | null |
e = list(input())
print( "YES" if e.count("H")>0 or e.count("Q")>0 or e.count("9")>0 else "NO")
| 47.5
| 77
| 0.568421
| 22
| 95
| 2.454545
| 0.636364
| 0.333333
| 0.148148
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0.126316
| 95
| 2
| 77
| 47.5
| 0.60241
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
59f2c4a0dec0b99c655b246ff7956ab660f1eef4
| 35
|
py
|
Python
|
tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/__init__.py
|
PieterBlomme/proteus
|
d467be25b20f0e3e3f69e81588f08f0dda436d49
|
[
"MIT"
] | 8
|
2021-02-02T20:39:27.000Z
|
2022-02-12T07:42:06.000Z
|
tools/templating/__init__.py
|
PieterBlomme/proteus
|
d467be25b20f0e3e3f69e81588f08f0dda436d49
|
[
"MIT"
] | 1
|
2020-12-29T10:49:58.000Z
|
2020-12-29T10:49:58.000Z
|
tools/templating/proteus/tools/templating/templates/proteus.models.{{cookiecutter.package_name}}/__init__.py
|
PieterBlomme/proteus
|
d467be25b20f0e3e3f69e81588f08f0dda436d49
|
[
"MIT"
] | null | null | null |
# Do not remove, needed for pytest
| 17.5
| 34
| 0.742857
| 6
| 35
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 1
| 35
| 35
| 0.928571
| 0.914286
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9421824a5a05e635c07b59a0d29a9e084577dace
| 4,844
|
py
|
Python
|
algorithms/recombination.py
|
jmtomczak/reversible-de
|
e8ff137a72ce99526c39a5838a61c4addfaac641
|
[
"MIT"
] | 8
|
2020-02-10T12:02:56.000Z
|
2022-02-24T14:28:27.000Z
|
algorithms/recombination.py
|
jmtomczak/reversible-de
|
e8ff137a72ce99526c39a5838a61c4addfaac641
|
[
"MIT"
] | null | null | null |
algorithms/recombination.py
|
jmtomczak/reversible-de
|
e8ff137a72ce99526c39a5838a61c4addfaac641
|
[
"MIT"
] | 3
|
2020-07-02T13:12:10.000Z
|
2021-08-18T02:49:27.000Z
|
import numpy as np
from utils.distributions import bernoulli
# ----------------------------------------------------------------------------------------------------------------------
class Recombination(object):
def __init__(self):
pass
def recombination(self, x):
pass
# ----------------------------------------------------------------------------------------------------------------------
class DifferentialRecombination(Recombination):
def __init__(self, type='de', bounds=(-np.infty, np.infty), params=None):
super().__init__()
self.type = type
self.bounds = bounds
assert (0. <= params['F'] <= 2.), 'F must be in [0, 2]'
assert (0. < params['CR'] <= 1.), 'CR must be in (0, 1]'
assert type in ['de', 'ade', 'revde', 'dex3'], 'type must be one in {de, dex3, ade, revde}'
self.F = params['F']
self.CR = params['CR']
def recombination(self, x):
indices_1 = np.arange(x.shape[0])
# take first parent
x_1 = x[indices_1]
# assign second parent (ensure)
indices_2 = np.random.permutation(x.shape[0])
x_2 = x_1[indices_2]
# assign third parent
indices_3 = np.random.permutation(x.shape[0])
x_3 = x_2[indices_3]
if self.type == 'de':
y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_1 = bernoulli(self.CR, y_1.shape)
y_1 = p_1 * y_1 + (1. - p_1) * x_1
return (y_1), (indices_1, indices_2, indices_3)
elif self.type == 'revde':
y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
y_2 = np.clip(x_2 + self.F * (x_3 - y_1), self.bounds[0], self.bounds[1])
y_3 = np.clip(x_3 + self.F * (y_1 - y_2), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_1 = bernoulli(self.CR, y_1.shape)
p_2 = bernoulli(self.CR, y_2.shape)
p_3 = bernoulli(self.CR, y_3.shape)
y_1 = p_1 * y_1 + (1. - p_1) * x_1
y_2 = p_2 * y_2 + (1. - p_2) * x_2
y_3 = p_3 * y_3 + (1. - p_3) * x_3
return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)
elif self.type == 'ade':
y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
y_2 = np.clip(x_2 + self.F * (x_3 - x_1), self.bounds[0], self.bounds[1])
y_3 = np.clip(x_3 + self.F * (x_1 - x_2), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_1 = bernoulli(self.CR, y_1.shape)
p_2 = bernoulli(self.CR, y_2.shape)
p_3 = bernoulli(self.CR, y_3.shape)
y_1 = p_1 * y_1 + (1. - p_1) * x_1
y_2 = p_2 * y_2 + (1. - p_2) * x_2
y_3 = p_3 * y_3 + (1. - p_3) * x_3
return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)
if self.type == 'dex3':
# y1
y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_1 = bernoulli(self.CR, y_1.shape)
y_1 = p_1 * y_1 + (1. - p_1) * x_1
# y2
indices_1p = np.arange(x.shape[0])
# take first parent
x_1 = x[indices_1p]
# assign second parent (ensure)
indices_2p = np.random.permutation(x.shape[0])
x_2 = x_1[indices_2p]
# assign third parent
indices_3p = np.random.permutation(x.shape[0])
x_3 = x_2[indices_3p]
y_2 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_2 = bernoulli(self.CR, y_2.shape)
y_2 = p_2 * y_2 + (1. - p_2) * x_1
# y3
indices_1p = np.arange(x.shape[0])
# take first parent
x_1 = x[indices_1p]
# assign second parent (ensure)
indices_2p = np.random.permutation(x.shape[0])
x_2 = x_1[indices_2p]
# assign third parent
indices_3p = np.random.permutation(x.shape[0])
x_3 = x_2[indices_3p]
y_3 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])
# uniform crossover
if self.CR < 1.:
p_3 = bernoulli(self.CR, y_3.shape)
y_3 = p_3 * y_3 + (1. - p_3) * x_1
return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)
else:
raise ValueError('Wrong name of the differential mutation!')
| 37.84375
| 120
| 0.475021
| 726
| 4,844
| 2.92562
| 0.100551
| 0.020716
| 0.016949
| 0.070621
| 0.756591
| 0.736817
| 0.72693
| 0.72693
| 0.716102
| 0.682674
| 0
| 0.070462
| 0.343724
| 4,844
| 128
| 121
| 37.84375
| 0.597672
| 0.115194
| 0
| 0.573171
| 0
| 0
| 0.036803
| 0
| 0
| 0
| 0
| 0
| 0.036585
| 1
| 0.04878
| false
| 0.02439
| 0.02439
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9425e4893a1ee8b4d7af4658d6763cb03bbae4f8
| 134
|
py
|
Python
|
examples/infty.py
|
LeGmask/MrPython
|
e4712590c52653d97f512e05133459870c12d7fa
|
[
"PSF-2.0"
] | 26
|
2018-09-09T17:09:56.000Z
|
2021-10-01T12:51:15.000Z
|
examples/infty.py
|
LeGmask/MrPython
|
e4712590c52653d97f512e05133459870c12d7fa
|
[
"PSF-2.0"
] | 85
|
2018-02-14T10:28:19.000Z
|
2021-12-16T17:38:47.000Z
|
examples/infty.py
|
LeGmask/MrPython
|
e4712590c52653d97f512e05133459870c12d7fa
|
[
"PSF-2.0"
] | 26
|
2018-02-08T11:17:51.000Z
|
2021-12-16T17:43:19.000Z
|
def infini(y : int) -> int:
x : int = y
while x >= 0:
x = x + 1
return x
def f(x : int) -> int:
return x + 1
| 14.888889
| 27
| 0.425373
| 24
| 134
| 2.375
| 0.416667
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038961
| 0.425373
| 134
| 8
| 28
| 16.75
| 0.701299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
9431e305b3e10976cb6d90f1a8c94c5ab2048a8e
| 321
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Missions/Segments/Single_Point/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Missions/Segments/Single_Point/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Missions/Segments/Single_Point/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @defgroup Methods-Missions-Segments-Single_Point Single_Point
# Single Point mission methods containing the functions for setting up and solving a mission.
# @ingroup Methods-Missions-Segments
from . import Set_Speed_Set_Altitude
from . import Set_Speed_Set_Throttle
from . import Set_Speed_Set_Altitude_No_Propulsion
| 45.857143
| 93
| 0.847352
| 46
| 321
| 5.630435
| 0.543478
| 0.127413
| 0.150579
| 0.208494
| 0.305019
| 0.223938
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105919
| 321
| 7
| 94
| 45.857143
| 0.902439
| 0.58567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
945b75a906c0d66e48e7df158db8ad872fb21650
| 54
|
py
|
Python
|
spym/process/__init__.py
|
ns-rse/spym
|
5356d97d6baf774a3bdd8c03b436052b8d74dbd0
|
[
"MIT"
] | 4
|
2021-02-08T08:47:52.000Z
|
2021-12-17T19:51:17.000Z
|
spym/process/__init__.py
|
ns-rse/spym
|
5356d97d6baf774a3bdd8c03b436052b8d74dbd0
|
[
"MIT"
] | 4
|
2020-09-29T08:47:37.000Z
|
2021-07-15T13:56:43.000Z
|
spym/process/__init__.py
|
ns-rse/spym
|
5356d97d6baf774a3bdd8c03b436052b8d74dbd0
|
[
"MIT"
] | 3
|
2021-07-10T18:42:06.000Z
|
2022-03-31T08:15:42.000Z
|
from .filters import Filters
from .level import Level
| 18
| 28
| 0.814815
| 8
| 54
| 5.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 29
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
94609a309a6e2785b2d27655f4ad3797b2428ba7
| 65
|
py
|
Python
|
qcportal/outputstore/__init__.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | null | null | null |
qcportal/outputstore/__init__.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | null | null | null |
qcportal/outputstore/__init__.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T16:37:54.000Z
|
2022-03-18T16:37:54.000Z
|
from .models import CompressionEnum, OutputStore, OutputTypeEnum
| 32.5
| 64
| 0.861538
| 6
| 65
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 65
| 1
| 65
| 65
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
84844b281abe840b79208336ff388b60a25ed8a3
| 106
|
py
|
Python
|
wsgi.py
|
Youth-Fellowship/yf-operation
|
7797dc893575c3a1da313f2b520c7e6c7e4fe678
|
[
"Apache-2.0"
] | 1
|
2020-01-19T07:55:59.000Z
|
2020-01-19T07:55:59.000Z
|
wsgi.py
|
Youth-Fellowship/yf-operation
|
7797dc893575c3a1da313f2b520c7e6c7e4fe678
|
[
"Apache-2.0"
] | null | null | null |
wsgi.py
|
Youth-Fellowship/yf-operation
|
7797dc893575c3a1da313f2b520c7e6c7e4fe678
|
[
"Apache-2.0"
] | null | null | null |
from app import create_app
# TODO: Implement Caching of the requests for data
application = create_app()
| 21.2
| 50
| 0.792453
| 16
| 106
| 5.125
| 0.8125
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160377
| 106
| 4
| 51
| 26.5
| 0.921348
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
849850c0e3268498859c09d66ffe6886a5403ec2
| 2,636
|
py
|
Python
|
cohesity_management_sdk/models/change_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | 1
|
2021-01-07T20:36:22.000Z
|
2021-01-07T20:36:22.000Z
|
cohesity_management_sdk/models/change_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
cohesity_management_sdk/models/change_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class ChangeEnum(object):
"""Implementation of the 'Change' enum.
TODO: type enum description here.
Attributes:
KPROTECTIONJOBNAME: TODO: type description here.
KPROTECTIONJOBDESCRIPTION: TODO: type description here.
KPROTECTIONJOBSOURCES: TODO: type description here.
KPROTECTIONJOBSCHEDULE: TODO: type description here.
KPROTECTIONJOBFULLSCHEDULE: TODO: type description here.
KPROTECTIONJOBRETRYSETTINGS: TODO: type description here.
KPROTECTIONJOBRETENTIONPOLICY: TODO: type description here.
KPROTECTIONJOBINDEXINGPOLICY: TODO: type description here.
KPROTECTIONJOBALERTINGPOLICY: TODO: type description here.
KPROTECTIONJOBPRIORITY: TODO: type description here.
KPROTECTIONJOBQUIESCE: TODO: type description here.
KPROTECTIONJOBSLA: TODO: type description here.
KPROTECTIONJOBPOLICYID: TODO: type description here.
KPROTECTIONJOBTIMEZONE: TODO: type description here.
KPROTECTIONJOBFUTURERUNSPAUSED: TODO: type description here.
KPROTECTIONJOBFUTURERUNSRESUMED: TODO: type description here.
KSNAPSHOTTARGETPOLICY: TODO: type description here.
KPROTECTIONJOBBLACKOUTWINDOW: TODO: type description here.
KPROTECTIONJOBQOS: TODO: type description here.
KPROTECTIONJOBINVALIDFIELD: TODO: type description here.
"""
KPROTECTIONJOBNAME = 'kProtectionJobName'
KPROTECTIONJOBDESCRIPTION = 'kProtectionJobDescription'
KPROTECTIONJOBSOURCES = 'kProtectionJobSources'
KPROTECTIONJOBSCHEDULE = 'kProtectionJobSchedule'
KPROTECTIONJOBFULLSCHEDULE = 'kProtectionJobFullSchedule'
KPROTECTIONJOBRETRYSETTINGS = 'kProtectionJobRetrySettings'
KPROTECTIONJOBRETENTIONPOLICY = 'kProtectionJobRetentionPolicy'
KPROTECTIONJOBINDEXINGPOLICY = 'kProtectionJobIndexingPolicy'
KPROTECTIONJOBALERTINGPOLICY = 'kProtectionJobAlertingPolicy'
KPROTECTIONJOBPRIORITY = 'kProtectionJobPriority'
KPROTECTIONJOBQUIESCE = 'kProtectionJobQuiesce'
KPROTECTIONJOBSLA = 'kProtectionJobSla'
KPROTECTIONJOBPOLICYID = 'kProtectionJobPolicyId'
KPROTECTIONJOBTIMEZONE = 'kProtectionJobTimezone'
KPROTECTIONJOBFUTURERUNSPAUSED = 'kProtectionJobFutureRunsPaused'
KPROTECTIONJOBFUTURERUNSRESUMED = 'kProtectionJobFutureRunsResumed'
KSNAPSHOTTARGETPOLICY = 'kSnapshotTargetPolicy'
KPROTECTIONJOBBLACKOUTWINDOW = 'kProtectionJobBlackoutWindow'
KPROTECTIONJOBQOS = 'kProtectionJobQOS'
KPROTECTIONJOBINVALIDFIELD = 'kProtectionJobInvalidField'
| 35.621622
| 71
| 0.76783
| 161
| 2,636
| 12.571429
| 0.248447
| 0.083004
| 0.187747
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002304
| 0.176783
| 2,636
| 73
| 72
| 36.109589
| 0.930415
| 0.499621
| 0
| 0
| 0
| 0
| 0.395234
| 0.352506
| 0
| 0
| 0
| 0.287671
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
84f2e04a2b2a89c9572fd870ca23c9fe02399ff9
| 3,170
|
py
|
Python
|
django_demo/migrations/0005_remove_orgs_and_mapping.py
|
zconnect-iot/zconnect-django-demo
|
f669bf107b013ab33d327387c870e1d150bde00c
|
[
"MIT"
] | 2
|
2018-08-19T16:17:23.000Z
|
2019-06-11T02:24:28.000Z
|
django_demo/migrations/0005_remove_orgs_and_mapping.py
|
zconnect-iot/zconnect-django-demo
|
f669bf107b013ab33d327387c870e1d150bde00c
|
[
"MIT"
] | null | null | null |
django_demo/migrations/0005_remove_orgs_and_mapping.py
|
zconnect-iot/zconnect-django-demo
|
f669bf107b013ab33d327387c870e1d150bde00c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-06-12 14:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zconnect', '0011_fix_url_length'),
('zc_billing', '0002_add_bill_foreign_keys'),
('zc_timeseries', '0001_initial'),
('organizations', '0003_field_fix_and_editable'),
('django_demo', '0004_org_device_related_name'),
]
operations = [
migrations.RemoveField(
model_name='companygroup',
name='distributor',
),
migrations.RemoveField(
model_name='companygroup',
name='location',
),
migrations.RemoveField(
model_name='companygroup',
name='organization_ptr',
),
migrations.RemoveField(
model_name='companygroup',
name='wiring_mapping',
),
migrations.RemoveField(
model_name='demoproduct',
name='product_ptr',
),
migrations.RemoveField(
model_name='demoproduct',
name='wiring_mapping',
),
migrations.RemoveField(
model_name='distributorgroup',
name='location',
),
migrations.RemoveField(
model_name='distributorgroup',
name='organization_ptr',
),
migrations.RemoveField(
model_name='distributorgroup',
name='wiring_mapping',
),
migrations.RemoveField(
model_name='mapping',
name='mapping',
),
migrations.RemoveField(
model_name='sitegroup',
name='company',
),
migrations.RemoveField(
model_name='sitegroup',
name='location',
),
migrations.RemoveField(
model_name='sitegroup',
name='organization_ptr',
),
migrations.RemoveField(
model_name='sitegroup',
name='wiring_mapping',
),
migrations.RemoveField(
model_name='tsrawdata',
name='device',
),
migrations.RemoveField(
model_name='demodevice',
name='email_company_emergency_close',
),
migrations.RemoveField(
model_name='demodevice',
name='email_distributor_emergency_close',
),
migrations.RemoveField(
model_name='demodevice',
name='email_site_emergency_close',
),
migrations.RemoveField(
model_name='demodevice',
name='wiring_mapping',
),
migrations.DeleteModel(
name='CompanyGroup',
),
migrations.DeleteModel(
name='DemoProduct',
),
migrations.DeleteModel(
name='DistributorGroup',
),
migrations.DeleteModel(
name='Mapping',
),
migrations.DeleteModel(
name='SiteGroup',
),
migrations.DeleteModel(
name='TSRawData',
),
migrations.DeleteModel(
name='WiringMapping',
),
]
| 27.565217
| 57
| 0.530915
| 231
| 3,170
| 7.04329
| 0.294372
| 0.245237
| 0.303626
| 0.350338
| 0.626921
| 0.620774
| 0.349109
| 0.113092
| 0.077443
| 0
| 0
| 0.017292
| 0.361514
| 3,170
| 114
| 58
| 27.807018
| 0.786561
| 0.014196
| 0
| 0.740741
| 1
| 0
| 0.232469
| 0.054115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009259
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ca473a985d77706236035ca5f7de2d2e01ae206b
| 217
|
py
|
Python
|
python/macromax/utils/display/__init__.py
|
tttom/MacroMax
|
e5f66252befb11e9fd906eb6e1a8a8c5eacf1451
|
[
"MIT"
] | 11
|
2019-04-15T19:04:33.000Z
|
2021-10-17T16:14:57.000Z
|
python/macromax/utils/display/__init__.py
|
tttom/MacroMax
|
e5f66252befb11e9fd906eb6e1a8a8c5eacf1451
|
[
"MIT"
] | null | null | null |
python/macromax/utils/display/__init__.py
|
tttom/MacroMax
|
e5f66252befb11e9fd906eb6e1a8a8c5eacf1451
|
[
"MIT"
] | 2
|
2019-05-10T10:51:09.000Z
|
2020-06-09T13:31:03.000Z
|
"""
This package contains functionality to simplify the display of complex matrices.
"""
from .. import log
from .grid2extent import grid2extent
from .complex2rgb import complex2rgb
from .hsv import hsv2rgb, rgb2hsv
| 24.111111
| 80
| 0.797235
| 27
| 217
| 6.407407
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.142857
| 217
| 8
| 81
| 27.125
| 0.897849
| 0.368664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ca5bf5db56a381f49fbaa3b1b52eae4ac9f82f34
| 57
|
py
|
Python
|
enthought/util/ring_buffer.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/util/ring_buffer.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/util/ring_buffer.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from apptools.logger.ring_buffer import *
| 19
| 41
| 0.807018
| 8
| 57
| 5.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 57
| 2
| 42
| 28.5
| 0.9
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ca5ec1dd7c5bb539121d5ecd869d3661c667d29a
| 135
|
py
|
Python
|
apps/message/blueprints/__init__.py
|
wangyuhuiever/sanic-tailor
|
8be2c855a737803a431e87068bada8489930c425
|
[
"MIT"
] | null | null | null |
apps/message/blueprints/__init__.py
|
wangyuhuiever/sanic-tailor
|
8be2c855a737803a431e87068bada8489930c425
|
[
"MIT"
] | null | null | null |
apps/message/blueprints/__init__.py
|
wangyuhuiever/sanic-tailor
|
8be2c855a737803a431e87068bada8489930c425
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sanic import Blueprint
from .models import api
message_api = Blueprint.group(api, url_prefix='/message')
| 22.5
| 57
| 0.725926
| 19
| 135
| 5.052632
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.133333
| 135
| 6
| 57
| 22.5
| 0.811966
| 0.155556
| 0
| 0
| 0
| 0
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
046c577d129cf61a2aa6387892e474f6f7fdfedd
| 116
|
py
|
Python
|
src/app/news/admin.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 9
|
2020-04-05T07:35:55.000Z
|
2021-08-03T05:50:05.000Z
|
src/app/news/admin.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 89
|
2020-01-26T11:50:06.000Z
|
2022-03-31T07:14:18.000Z
|
src/app/news/admin.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 13
|
2020-03-10T14:45:07.000Z
|
2021-07-31T02:43:40.000Z
|
# -*- coding:utf-8 -*-
from django.contrib import admin
from app.news.models import News
admin.site.register(News)
| 19.333333
| 32
| 0.741379
| 18
| 116
| 4.777778
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.12069
| 116
| 5
| 33
| 23.2
| 0.833333
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
047459332e6a9bd7d4e3e760ca2ea7829fb5f27b
| 41,900
|
py
|
Python
|
mc-core/mc/data_gen/sgnb_modification_required_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mc-core/mc/data_gen/sgnb_modification_required_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mc-core/mc/data_gen/sgnb_modification_required_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-07-07T06:43:16.000Z
|
2021-07-07T06:43:16.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sgnb_modification_required.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
import common_types_pb2 as common__types__pb2
import x2ap_common_types_pb2 as x2ap__common__types__pb2
import rrc_cg_config_pb2 as rrc__cg__config__pb2
import error_cause_pb2 as error__cause__pb2
# File descriptor for sgnb_modification_required.proto. serialized_pb is the
# protoc-compiled FileDescriptorProto; its bytes are generated output and must
# not be edited by hand. dependencies lists the imported .proto descriptors.
DESCRIPTOR = _descriptor.FileDescriptor(
name='sgnb_modification_required.proto',
package='streaming_protobufs',
syntax='proto3',
serialized_options=_b('Z1gerrit.o-ran-sc.org/r/ric-plt/streaming-protobufs'),
serialized_pb=_b('\n sgnb_modification_required.proto\x12\x13streaming_protobufs\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x12\x63ommon_types.proto\x1a\x17x2ap_common_types.proto\x1a\x13rrc_cg_config.proto\x1a\x11\x65rror_cause.proto\"b\n\x18SgNBModificationRequired\x12\x46\n\x0bprotocolIEs\x18\x01 \x01(\x0b\x32\x31.streaming_protobufs.SgNBModificationRequired_IEs\"\xd7\x05\n\x1cSgNBModificationRequired_IEs\x12\x1a\n\x12id_MeNB_UE_X2AP_ID\x18\x01 \x01(\r\x12\x1a\n\x12id_SgNB_UE_X2AP_ID\x18\x02 \x01(\r\x12,\n\x08id_Cause\x18\x03 \x01(\x0b\x32\x1a.streaming_protobufs.Cause\x12J\n\x17id_PDCPChangeIndication\x18\x04 \x01(\x0b\x32).streaming_protobufs.PDCPChangeIndication\x12h\n&id_E_RABs_ToBeReleased_SgNBModReqdList\x18\x05 \x01(\x0b\x32\x38.streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqdList\x12>\n\x16id_SgNBtoMeNBContainer\x18\x06 \x01(\x0b\x32\x1e.streaming_protobufs.CG_Config\x12\x42\n\x1cid_MeNB_UE_X2AP_ID_Extension\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12h\n&id_E_RABs_ToBeModified_SgNBModReqdList\x18\x08 \x01(\x0b\x32\x38.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqdList\x12h\n&id_SgNBResourceCoordinationInformation\x18\t \x01(\x0b\x32\x38.streaming_protobufs.SgNBResourceCoordinationInformation\x12\x43\n\x16id_RRCConfigIndication\x18\n \x01(\x0b\x32#.streaming_protobufs.RRC_Config_Ind\"r\n#E_RABs_ToBeReleased_SgNBModReqdList\x12K\n\x05items\x18\x01 \x03(\x0b\x32<.streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemIEs\"\x95\x01\n\'E_RABs_ToBeReleased_SgNBModReqd_ItemIEs\x12j\n\'id_E_RABs_ToBeReleased_SgNBModReqd_Item\x18\x01 \x01(\x0b\x32\x39.streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item\"\xbb\x01\n$E_RABs_ToBeReleased_SgNBModReqd_Item\x12\x10\n\x08\x65_RAB_ID\x18\x01 \x01(\r\x12)\n\x05\x63\x61use\x18\x02 \x01(\x0b\x32\x1a.streaming_protobufs.Cause\x12V\n\riE_Extensions\x18\x03 
\x03(\x0b\x32?.streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs\"j\n*E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs\x12<\n\x16id_RLCMode_transferred\x18\x01 \x01(\x0b\x32\x1c.streaming_protobufs.RLCMode\"r\n#E_RABs_ToBeModified_SgNBModReqdList\x12K\n\x05items\x18\x01 \x03(\x0b\x32<.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemIEs\"\x95\x01\n\'E_RABs_ToBeModified_SgNBModReqd_ItemIEs\x12j\n\'id_E_RABs_ToBeModified_SgNBModReqd_Item\x18\x01 \x01(\x0b\x32\x39.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item\"\xc9\x03\n$E_RABs_ToBeModified_SgNBModReqd_Item\x12\x10\n\x08\x65_RAB_ID\x18\x01 \x01(\r\x12U\n\x1b\x65n_DC_ResourceConfiguration\x18\x02 \x01(\x0b\x32\x30.streaming_protobufs.EN_DC_ResourceConfiguration\x12_\n\x0fsgNBPDCPpresent\x18\x03 \x01(\x0b\x32\x44.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentH\x00\x12\x65\n\x12sgNBPDCPnotpresent\x18\x04 \x01(\x0b\x32G.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentH\x00\x12V\n\riE_Extensions\x18\x05 \x03(\x0b\x32?.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemExtIEsB\x18\n\x16resource_configuration\",\n*E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs\"\xc5\x03\n/E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent\x12\x61\n(requested_MCG_E_RAB_Level_QoS_Parameters\x18\x01 \x01(\x0b\x32/.streaming_protobufs.E_RAB_Level_QoS_Parameters\x12>\n\x10uL_Configuration\x18\x02 \x01(\x0b\x32$.streaming_protobufs.ULConfiguration\x12\x46\n\x16sgNB_UL_GTP_TEIDatPDCP\x18\x03 \x01(\x0b\x32&.streaming_protobufs.GTPtunnelEndpoint\x12\x44\n\x14s1_DL_GTP_TEIDatSgNB\x18\x04 \x01(\x0b\x32&.streaming_protobufs.GTPtunnelEndpoint\x12\x61\n\riE_Extensions\x18\x05 \x03(\x0b\x32J.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs\"\xec\x01\n5E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs\x12<\n\x11id_uLpDCPSnLength\x18\x01 \x01(\x0b\x32!.streaming_protobufs.PDCPSnLength\x12<\n\x11id_dLPDCPSnLength\x18\x02 
\x01(\x0b\x32!.streaming_protobufs.PDCPSnLength\x12\x37\n\x11id_new_drb_ID_req\x18\x03 \x01(\x0b\x32\x1c.streaming_protobufs.TrueOpt\"\xb2\x02\n2E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent\x12\x45\n\x15sgNB_DL_GTP_TEIDatSCG\x18\x01 \x01(\x0b\x32&.streaming_protobufs.GTPtunnelEndpoint\x12O\n\x1fsecondary_sgNB_DL_GTP_TEIDatSCG\x18\x02 \x01(\x0b\x32&.streaming_protobufs.GTPtunnelEndpoint\x12\x64\n\riE_Extensions\x18\x03 \x03(\x0b\x32M.streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs\"\xa1\x01\n8E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs\x12\x36\n\rid_RLC_Status\x18\x01 \x01(\x0b\x32\x1f.streaming_protobufs.RLC_Status\x12-\n\x07id_lCID\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB3Z1gerrit.o-ran-sc.org/r/ric-plt/streaming-protobufsb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,common__types__pb2.DESCRIPTOR,x2ap__common__types__pb2.DESCRIPTOR,rrc__cg__config__pb2.DESCRIPTOR,error__cause__pb2.DESCRIPTOR,])
# Descriptor for the top-level SgNBModificationRequired message: a single
# message-typed field 'protocolIEs' (message_type is wired up further below,
# after all descriptors in this file have been defined).
_SGNBMODIFICATIONREQUIRED = _descriptor.Descriptor(
name='SgNBModificationRequired',
full_name='streaming_protobufs.SgNBModificationRequired',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='protocolIEs', full_name='streaming_protobufs.SgNBModificationRequired.protocolIEs', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=174,
serialized_end=272,
)
# Descriptor for SgNBModificationRequired_IEs: the information elements of the
# procedure. Fields 1-2 are uint32 X2AP IDs (type=13); the rest are
# message-typed (type=11) and get their message_type resolved below.
_SGNBMODIFICATIONREQUIRED_IES = _descriptor.Descriptor(
name='SgNBModificationRequired_IEs',
full_name='streaming_protobufs.SgNBModificationRequired_IEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_MeNB_UE_X2AP_ID', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_MeNB_UE_X2AP_ID', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_SgNB_UE_X2AP_ID', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_SgNB_UE_X2AP_ID', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_Cause', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_Cause', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_PDCPChangeIndication', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_PDCPChangeIndication', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_E_RABs_ToBeReleased_SgNBModReqdList', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_E_RABs_ToBeReleased_SgNBModReqdList', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_SgNBtoMeNBContainer', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_SgNBtoMeNBContainer', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_MeNB_UE_X2AP_ID_Extension', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_MeNB_UE_X2AP_ID_Extension', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_E_RABs_ToBeModified_SgNBModReqdList', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_E_RABs_ToBeModified_SgNBModReqdList', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_SgNBResourceCoordinationInformation', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_SgNBResourceCoordinationInformation', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_RRCConfigIndication', full_name='streaming_protobufs.SgNBModificationRequired_IEs.id_RRCConfigIndication', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=1002,
)
# Descriptor for E_RABs_ToBeReleased_SgNBModReqdList: a repeated (label=3)
# message field 'items' wrapping the per-E-RAB release entries.
_E_RABS_TOBERELEASED_SGNBMODREQDLIST = _descriptor.Descriptor(
name='E_RABs_ToBeReleased_SgNBModReqdList',
full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqdList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqdList.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1004,
serialized_end=1118,
)
# Descriptor for E_RABs_ToBeReleased_SgNBModReqd_ItemIEs: wrapper holding one
# E_RABs_ToBeReleased_SgNBModReqd_Item message.
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEMIES = _descriptor.Descriptor(
name='E_RABs_ToBeReleased_SgNBModReqd_ItemIEs',
full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_E_RABs_ToBeReleased_SgNBModReqd_Item', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemIEs.id_E_RABs_ToBeReleased_SgNBModReqd_Item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1121,
serialized_end=1270,
)
# Descriptor for E_RABs_ToBeReleased_SgNBModReqd_Item: uint32 e_RAB_ID, a
# 'cause' message, and repeated 'iE_Extensions'.
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEM = _descriptor.Descriptor(
name='E_RABs_ToBeReleased_SgNBModReqd_Item',
full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='e_RAB_ID', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item.e_RAB_ID', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cause', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item.cause', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iE_Extensions', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item.iE_Extensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1273,
serialized_end=1460,
)
# Descriptor for E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs: single extension
# field 'id_RLCMode_transferred'.
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEMEXTIES = _descriptor.Descriptor(
name='E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs',
full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_RLCMode_transferred', full_name='streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs.id_RLCMode_transferred', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1462,
serialized_end=1568,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqdList: a repeated (label=3)
# message field 'items' wrapping the per-E-RAB modification entries.
_E_RABS_TOBEMODIFIED_SGNBMODREQDLIST = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqdList',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqdList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqdList.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1570,
serialized_end=1684,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_ItemIEs: wrapper holding one
# E_RABs_ToBeModified_SgNBModReqd_Item message.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMIES = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_ItemIEs',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_E_RABs_ToBeModified_SgNBModReqd_Item', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemIEs.id_E_RABs_ToBeModified_SgNBModReqd_Item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1687,
serialized_end=1836,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_Item. Declares the
# 'resource_configuration' oneof; its member fields (sgNBPDCPpresent /
# sgNBPDCPnotpresent) are attached to the oneof further below.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_Item',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='e_RAB_ID', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.e_RAB_ID', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='en_DC_ResourceConfiguration', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.en_DC_ResourceConfiguration', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgNBPDCPpresent', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.sgNBPDCPpresent', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgNBPDCPnotpresent', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.sgNBPDCPnotpresent', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iE_Extensions', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.iE_Extensions', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='resource_configuration', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item.resource_configuration',
index=0, containing_type=None, fields=[]),
],
serialized_start=1839,
serialized_end=2296,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs: an empty message
# (no fields) reserved as an extension container.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMEXTIES = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2298,
serialized_end=2342,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent: the
# sgNB-PDCP-present arm of the resource_configuration oneof.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requested_MCG_E_RAB_Level_QoS_Parameters', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent.requested_MCG_E_RAB_Level_QoS_Parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uL_Configuration', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent.uL_Configuration', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgNB_UL_GTP_TEIDatPDCP', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent.sgNB_UL_GTP_TEIDatPDCP', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s1_DL_GTP_TEIDatSgNB', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent.s1_DL_GTP_TEIDatSgNB', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iE_Extensions', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent.iE_Extensions', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2345,
serialized_end=2798,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs:
# extensions for the PDCP-present arm (UL/DL PDCP SN lengths, new DRB ID req).
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_uLpDCPSnLength', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs.id_uLpDCPSnLength', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_dLPDCPSnLength', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs.id_dLPDCPSnLength', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_new_drb_ID_req', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs.id_new_drb_ID_req', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2801,
serialized_end=3037,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent: the
# sgNB-PDCP-not-present arm of the resource_configuration oneof.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sgNB_DL_GTP_TEIDatSCG', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent.sgNB_DL_GTP_TEIDatSCG', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secondary_sgNB_DL_GTP_TEIDatSCG', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent.secondary_sgNB_DL_GTP_TEIDatSCG', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iE_Extensions', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent.iE_Extensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3040,
serialized_end=3346,
)
# Descriptor for E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs:
# extensions for the PDCP-not-present arm (RLC status, LCID wrapper).
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES = _descriptor.Descriptor(
name='E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs',
full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_RLC_Status', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs.id_RLC_Status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_lCID', full_name='streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs.id_lCID', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3349,
serialized_end=3510,
)
# Resolve message-typed field cross-references: each message field's
# message_type is pointed at the corresponding descriptor, either defined in
# this file or imported from a dependency module (error_cause, x2ap common
# types, rrc_cg_config, google wrappers).
_SGNBMODIFICATIONREQUIRED.fields_by_name['protocolIEs'].message_type = _SGNBMODIFICATIONREQUIRED_IES
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_Cause'].message_type = error__cause__pb2._CAUSE
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_PDCPChangeIndication'].message_type = x2ap__common__types__pb2._PDCPCHANGEINDICATION
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_E_RABs_ToBeReleased_SgNBModReqdList'].message_type = _E_RABS_TOBERELEASED_SGNBMODREQDLIST
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_SgNBtoMeNBContainer'].message_type = rrc__cg__config__pb2._CG_CONFIG
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_MeNB_UE_X2AP_ID_Extension'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT32VALUE
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_E_RABs_ToBeModified_SgNBModReqdList'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQDLIST
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_SgNBResourceCoordinationInformation'].message_type = x2ap__common__types__pb2._SGNBRESOURCECOORDINATIONINFORMATION
_SGNBMODIFICATIONREQUIRED_IES.fields_by_name['id_RRCConfigIndication'].message_type = x2ap__common__types__pb2._RRC_CONFIG_IND
_E_RABS_TOBERELEASED_SGNBMODREQDLIST.fields_by_name['items'].message_type = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMIES
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEMIES.fields_by_name['id_E_RABs_ToBeReleased_SgNBModReqd_Item'].message_type = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEM
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEM.fields_by_name['cause'].message_type = error__cause__pb2._CAUSE
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEM.fields_by_name['iE_Extensions'].message_type = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMEXTIES
_E_RABS_TOBERELEASED_SGNBMODREQD_ITEMEXTIES.fields_by_name['id_RLCMode_transferred'].message_type = x2ap__common__types__pb2._RLCMODE
_E_RABS_TOBEMODIFIED_SGNBMODREQDLIST.fields_by_name['items'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMIES
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMIES.fields_by_name['id_E_RABs_ToBeModified_SgNBModReqd_Item'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['en_DC_ResourceConfiguration'].message_type = x2ap__common__types__pb2._EN_DC_RESOURCECONFIGURATION
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPpresent'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPnotpresent'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['iE_Extensions'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMEXTIES
# Attach both arms of the 'resource_configuration' oneof to the oneof
# descriptor and back-link each field's containing_oneof.
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.oneofs_by_name['resource_configuration'].fields.append(
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPpresent'])
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPpresent'].containing_oneof = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.oneofs_by_name['resource_configuration']
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.oneofs_by_name['resource_configuration'].fields.append(
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPnotpresent'])
_E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.fields_by_name['sgNBPDCPnotpresent'].containing_oneof = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM.oneofs_by_name['resource_configuration']
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT.fields_by_name['requested_MCG_E_RAB_Level_QoS_Parameters'].message_type = x2ap__common__types__pb2._E_RAB_LEVEL_QOS_PARAMETERS
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT.fields_by_name['uL_Configuration'].message_type = x2ap__common__types__pb2._ULCONFIGURATION
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT.fields_by_name['sgNB_UL_GTP_TEIDatPDCP'].message_type = x2ap__common__types__pb2._GTPTUNNELENDPOINT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT.fields_by_name['s1_DL_GTP_TEIDatSgNB'].message_type = x2ap__common__types__pb2._GTPTUNNELENDPOINT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT.fields_by_name['iE_Extensions'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES.fields_by_name['id_uLpDCPSnLength'].message_type = x2ap__common__types__pb2._PDCPSNLENGTH
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES.fields_by_name['id_dLPDCPSnLength'].message_type = x2ap__common__types__pb2._PDCPSNLENGTH
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES.fields_by_name['id_new_drb_ID_req'].message_type = common__types__pb2._TRUEOPT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT.fields_by_name['sgNB_DL_GTP_TEIDatSCG'].message_type = x2ap__common__types__pb2._GTPTUNNELENDPOINT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT.fields_by_name['secondary_sgNB_DL_GTP_TEIDatSCG'].message_type = x2ap__common__types__pb2._GTPTUNNELENDPOINT
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT.fields_by_name['iE_Extensions'].message_type = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES.fields_by_name['id_RLC_Status'].message_type = x2ap__common__types__pb2._RLC_STATUS
_E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES.fields_by_name['id_lCID'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT32VALUE
# Register every top-level message descriptor on the file descriptor by name,
# then register the file itself with the symbol database.
DESCRIPTOR.message_types_by_name['SgNBModificationRequired'] = _SGNBMODIFICATIONREQUIRED
DESCRIPTOR.message_types_by_name['SgNBModificationRequired_IEs'] = _SGNBMODIFICATIONREQUIRED_IES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeReleased_SgNBModReqdList'] = _E_RABS_TOBERELEASED_SGNBMODREQDLIST
DESCRIPTOR.message_types_by_name['E_RABs_ToBeReleased_SgNBModReqd_ItemIEs'] = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMIES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeReleased_SgNBModReqd_Item'] = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEM
DESCRIPTOR.message_types_by_name['E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs'] = _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMEXTIES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqdList'] = _E_RABS_TOBEMODIFIED_SGNBMODREQDLIST
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_ItemIEs'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMIES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_Item'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMEXTIES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT
DESCRIPTOR.message_types_by_name['E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs'] = _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SgNBModificationRequired = _reflection.GeneratedProtocolMessageType('SgNBModificationRequired', (_message.Message,), {
'DESCRIPTOR' : _SGNBMODIFICATIONREQUIRED,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.SgNBModificationRequired)
})
_sym_db.RegisterMessage(SgNBModificationRequired)
SgNBModificationRequired_IEs = _reflection.GeneratedProtocolMessageType('SgNBModificationRequired_IEs', (_message.Message,), {
'DESCRIPTOR' : _SGNBMODIFICATIONREQUIRED_IES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.SgNBModificationRequired_IEs)
})
_sym_db.RegisterMessage(SgNBModificationRequired_IEs)
E_RABs_ToBeReleased_SgNBModReqdList = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeReleased_SgNBModReqdList', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBERELEASED_SGNBMODREQDLIST,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqdList)
})
_sym_db.RegisterMessage(E_RABs_ToBeReleased_SgNBModReqdList)
E_RABs_ToBeReleased_SgNBModReqd_ItemIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeReleased_SgNBModReqd_ItemIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeReleased_SgNBModReqd_ItemIEs)
E_RABs_ToBeReleased_SgNBModReqd_Item = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeReleased_SgNBModReqd_Item', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBERELEASED_SGNBMODREQD_ITEM,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_Item)
})
_sym_db.RegisterMessage(E_RABs_ToBeReleased_SgNBModReqd_Item)
E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBERELEASED_SGNBMODREQD_ITEMEXTIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeReleased_SgNBModReqd_ItemExtIEs)
E_RABs_ToBeModified_SgNBModReqdList = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqdList', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQDLIST,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqdList)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqdList)
E_RABs_ToBeModified_SgNBModReqd_ItemIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_ItemIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_ItemIEs)
E_RABs_ToBeModified_SgNBModReqd_Item = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_Item', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEM,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_Item)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_Item)
E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_ITEMEXTIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_ItemExtIEs)
E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENT,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresent)
E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPPRESENTEXTIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPpresentExtIEs)
E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENT,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresent)
E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs = _reflection.GeneratedProtocolMessageType('E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs', (_message.Message,), {
'DESCRIPTOR' : _E_RABS_TOBEMODIFIED_SGNBMODREQD_SGNBPDCPNOTPRESENTEXTIES,
'__module__' : 'sgnb_modification_required_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs)
})
_sym_db.RegisterMessage(E_RABs_ToBeModified_SgNBModReqd_SgNBPDCPnotpresentExtIEs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 53.172589
| 4,617
| 0.819833
| 5,084
| 41,900
| 6.253934
| 0.059205
| 0.035855
| 0.083944
| 0.121529
| 0.85916
| 0.822173
| 0.742758
| 0.64441
| 0.61409
| 0.518195
| 0
| 0.027464
| 0.088401
| 41,900
| 787
| 4,618
| 53.240152
| 0.804953
| 0.037709
| 0
| 0.657343
| 1
| 0.004196
| 0.310794
| 0.285856
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013986
| 0
| 0.013986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
04cd5c27371d0f2825642b8aa3957340f62f266c
| 1,037
|
py
|
Python
|
music_assistant/models/errors.py
|
music-assistant/music-assistant
|
7b4fd73b1281f74d61e29c23093d048a9acf541f
|
[
"Apache-2.0"
] | 8
|
2019-05-11T22:54:11.000Z
|
2020-09-08T08:06:13.000Z
|
music_assistant/models/errors.py
|
marcelveldt/musicassistant
|
0b70a7ae8db49722e563be54425ab2fbd7c7c916
|
[
"Apache-2.0"
] | null | null | null |
music_assistant/models/errors.py
|
marcelveldt/musicassistant
|
0b70a7ae8db49722e563be54425ab2fbd7c7c916
|
[
"Apache-2.0"
] | null | null | null |
"""Custom errors and exceptions."""
class MusicAssistantError(Exception):
"""Custom Exception for all errors."""
class ProviderUnavailableError(MusicAssistantError):
"""Error raised when trying to access mediaitem of unavailable provider."""
class MediaNotFoundError(MusicAssistantError):
"""Error raised when trying to access non existing media item."""
class InvalidDataError(MusicAssistantError):
"""Error raised when an object has invalid data."""
class AlreadyRegisteredError(MusicAssistantError):
"""Error raised when a duplicate music provider or player is registered."""
class SetupFailedError(MusicAssistantError):
"""Error raised when setup of a provider or player failed."""
class LoginFailed(MusicAssistantError):
"""Error raised when a login failed."""
class AudioError(MusicAssistantError):
"""Error raised when an issue arrised when processing audio."""
class QueueEmpty(MusicAssistantError):
"""Error raised when trying to start queue stream while queue is empty."""
| 27.289474
| 79
| 0.755063
| 112
| 1,037
| 6.991071
| 0.491071
| 0.245211
| 0.306513
| 0.347382
| 0.357599
| 0.176245
| 0.122605
| 0
| 0
| 0
| 0
| 0
| 0.151398
| 1,037
| 37
| 80
| 28.027027
| 0.889773
| 0.506268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
04d164d31fd4e38b8d8c467210ac770958114589
| 162
|
py
|
Python
|
medsoft/doctor/admin.py
|
mustaphee/TeamNova
|
68af373b1db604b29be2ce6a6292db9cbe8e1212
|
[
"MIT"
] | null | null | null |
medsoft/doctor/admin.py
|
mustaphee/TeamNova
|
68af373b1db604b29be2ce6a6292db9cbe8e1212
|
[
"MIT"
] | null | null | null |
medsoft/doctor/admin.py
|
mustaphee/TeamNova
|
68af373b1db604b29be2ce6a6292db9cbe8e1212
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Speciality, Doctor
# Register your models here.
# Make the doctor app's models editable in the Django admin site.
for model in (Speciality, Doctor):
    admin.site.register(model)
| 23.142857
| 38
| 0.814815
| 22
| 162
| 6
| 0.545455
| 0.136364
| 0.257576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104938
| 162
| 6
| 39
| 27
| 0.910345
| 0.160494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
04e0c1ec352d7c6000c71ba8a700183785a72cc6
| 134
|
py
|
Python
|
model/seq2seq_base.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | 2
|
2021-06-23T08:52:20.000Z
|
2021-06-23T08:52:31.000Z
|
model/seq2seq_base.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | null | null | null |
model/seq2seq_base.py
|
FadedCosine/POS-Guided-Neural-Text-Generation
|
2b5c72d8f2e08cbf4fe0babc4a4f1db09b348505
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from .embeddings import *
from .softmax import *
from .initializer import *
from .layers import *
| 16.75
| 26
| 0.761194
| 19
| 134
| 5.368421
| 0.473684
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171642
| 134
| 7
| 27
| 19.142857
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b6ceb85cd2a26bd2a147868421f93545a31c2317
| 128
|
py
|
Python
|
qitensor/tests/bench.py
|
dstahlke/qitensor
|
2b430e01e3f0d3c8488e35f417faaca27f930af3
|
[
"BSD-2-Clause"
] | 6
|
2015-04-28T00:45:51.000Z
|
2019-02-08T17:28:43.000Z
|
qitensor/tests/bench.py
|
dstahlke/qitensor
|
2b430e01e3f0d3c8488e35f417faaca27f930af3
|
[
"BSD-2-Clause"
] | null | null | null |
qitensor/tests/bench.py
|
dstahlke/qitensor
|
2b430e01e3f0d3c8488e35f417faaca27f930af3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# Benchmark driver for qitensor's Cython benchmark helpers.
import qitensor.benchmark_cy as bc
#print "random_channels"
#bc.random_channels()
# Run the "orbit" benchmark.
print("orbit")
bc.orbit()
| 14.222222
| 34
| 0.75
| 19
| 128
| 4.894737
| 0.684211
| 0.301075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 128
| 8
| 35
| 16
| 0.801724
| 0.460938
| 0
| 0
| 0
| 0
| 0.075758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b6dabdfbed2b414f324e435c8dd4b17e402e609e
| 19
|
py
|
Python
|
airiam/version.py
|
bbarhight/AirIAM
|
6b672553403fbfc536f1571d21c02fc12cacdcc3
|
[
"Apache-2.0"
] | null | null | null |
airiam/version.py
|
bbarhight/AirIAM
|
6b672553403fbfc536f1571d21c02fc12cacdcc3
|
[
"Apache-2.0"
] | null | null | null |
airiam/version.py
|
bbarhight/AirIAM
|
6b672553403fbfc536f1571d21c02fc12cacdcc3
|
[
"Apache-2.0"
] | null | null | null |
# Package version string; bump on release.
version = '0.1.45'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6dd6904a0f672b592b7757724813f1256696dc0
| 86
|
py
|
Python
|
lottery.py
|
Icebluewolf/Wolfy-Discord-Bot
|
e8bc2fa84f1c688a51fad40f364cabc4a6a288c1
|
[
"MIT"
] | 2
|
2020-06-10T07:54:09.000Z
|
2021-01-06T15:47:47.000Z
|
lottery.py
|
Icebluewolf/Wolfy-Discord-Bot
|
e8bc2fa84f1c688a51fad40f364cabc4a6a288c1
|
[
"MIT"
] | 2
|
2021-02-05T17:50:21.000Z
|
2022-01-16T14:22:21.000Z
|
lottery.py
|
Icebluewolf/Wolfy-Discord-Bot
|
e8bc2fa84f1c688a51fad40f364cabc4a6a288c1
|
[
"MIT"
] | null | null | null |
import random
async def lotteryPick(guild):
    """Return one member of *guild*, chosen uniformly at random."""
    candidates = guild.members
    return random.choice(candidates)
| 14.333333
| 39
| 0.767442
| 11
| 86
| 6
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151163
| 86
| 5
| 40
| 17.2
| 0.90411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f3d95ed1dcf5f6b86cfebcb6bb15be0b55b105a1
| 95,747
|
py
|
Python
|
test34_bif.py
|
jpra2/Presto2
|
71525a8dece2bcc4f16ff4a2120d7627e9ecd776
|
[
"CNRI-Python"
] | 1
|
2018-12-09T05:31:38.000Z
|
2018-12-09T05:31:38.000Z
|
test34_bif.py
|
jpra2/Presto2
|
71525a8dece2bcc4f16ff4a2120d7627e9ecd776
|
[
"CNRI-Python"
] | null | null | null |
test34_bif.py
|
jpra2/Presto2
|
71525a8dece2bcc4f16ff4a2120d7627e9ecd776
|
[
"CNRI-Python"
] | null | null | null |
import numpy as np
from pymoab import core
from pymoab import types
from pymoab import topo_util
from PyTrilinos import Epetra, AztecOO, EpetraExt # , Amesos
import time
import sys
class Msclassic_bif:
    def __init__(self):
        """Load the fine mesh, build the primal (coarse) metadata and set
        fluid/rock properties and well information for the two-phase
        multiscale solver.

        NOTE(review): reads 'out.h5m' from the current working directory and
        relies on tags created by ``create_tags``/``read_structured``.
        """
        self.comm = Epetra.PyComm()
        self.mb = core.Core()
        self.mb.load_file('out.h5m')
        self.root_set = self.mb.get_root_set()
        self.mesh_topo_util = topo_util.MeshTopoUtil(self.mb)
        # All 3-D volumes of the loaded fine mesh.
        self.all_fine_vols = self.mb.get_entities_by_dimension(self.root_set, 3)
        self.nf = len(self.all_fine_vols)
        self.create_tags(self.mb)
        self.read_structured()
        # Meshsets carrying a PRIMAL_ID tag are the coarse (primal) cells.
        self.primals = self.mb.get_entities_by_type_and_tag(
            self.root_set, types.MBENTITYSET, np.array([self.primal_id_tag]),
            np.array([None]))
        self.nc = len(self.primals)
        self.ident_primal = []
        for primal in self.primals:
            primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
            self.ident_primal.append(primal_id)
        self.ident_primal = dict(zip(self.ident_primal, range(len(self.ident_primal))))
        # self.ident_primal = remapping of global primal ids to contiguous local indices
        self.loops = 200  # total number of time-step loops
        self.t = 1000  # total simulation time
        self.mi_w = 1.0  # water viscosity
        self.mi_o = 1.3  # oil viscosity
        self.ro_w = 1.0  # water density
        self.ro_o = 0.98  # oil density
        self.gama_w = 1.0  # specific weight of water
        self.gama_o = 0.98  # specific weight of oil
        self.gama_ = self.gama_w + self.gama_o
        self.Swi = 0.2  # initial water saturation
        self.Swc = 0.2  # connate water saturation
        self.Sor = 0.2  # residual oil saturation
        self.nw = 2  # water exponent for the relative-permeability model
        self.no = 2  # oil exponent for the relative-permeability model
        self.set_k()  # set the permeability of every volume
        self.set_fi()  # set the porosity of every volume
        self.get_wells()  # collect the gids of the well volumes
        self.read_perm_rel()  # read the perm_rel.txt file
        gids = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols , flat = True)
        self.map_gids_in_all_fine_vols = dict(zip(gids, self.all_fine_vols))  # map gid -> mesh element
        self.neigh_wells_d = []  # fine volumes adjacent to prescribed-pressure wells
        self.elems_wells_d = []  # elements with prescribed (Dirichlet) pressure
        for volume in self.wells:
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            if global_volume in self.wells_d:
                self.elems_wells_d.append(volume)
                adjs_volume = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
                for adj in adjs_volume:
                    global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    if global_adj not in self.wells_d:
                        self.neigh_wells_d.append(adj)
        self.all_fine_vols_ic = set(self.all_fine_vols) - set(self.elems_wells_d)
        # self.all_fine_vols_ic = fine-mesh volumes that are unknowns
        gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols_ic, flat=True)
        self.map_vols_ic = dict(zip(gids_vols_ic, range(len(gids_vols_ic))))  # map unknown gids -> row indices
        self.map_vols_ic_2 = dict(zip(range(len(gids_vols_ic)), gids_vols_ic))  # inverse mapping
        self.nf_ic = len(self.all_fine_vols_ic)  # number of unknowns
    def calculate_local_problem_het(self, elems, lesser_dim_meshsets, support_vals_tag):
        """Solve a local heterogeneous flow problem on ``elems``.

        Boundary values are read from ``support_vals_tag`` on the elements of
        the lower-dimensional meshsets; interior rows are assembled with a
        two-point flux stencil scaled by the total mobility, and the solution
        is written back into ``support_vals_tag``.
        """
        std_map = Epetra.Map(len(elems), 0, self.comm)
        linear_vals = np.arange(0, len(elems))
        id_map = dict(zip(elems, linear_vals))
        boundary_elms = set()
        b = Epetra.Vector(std_map)
        x = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        # Dirichlet rows: boundary elements keep their current support value.
        for ms in lesser_dim_meshsets:
            lesser_dim_elems = self.mb.get_entities_by_handle(ms)
            for elem in lesser_dim_elems:
                if elem in boundary_elms:
                    continue
                boundary_elms.add(elem)
                idx = id_map[elem]
                A.InsertGlobalValues(idx, [1], [idx])
                b[idx] = self.mb.tag_get_data(support_vals_tag, elem, flat=True)[0]
        # Interior rows (elements not on the boundary of the local problem).
        for elem in (set(elems) ^ boundary_elms):
            k_elem = self.mb.tag_get_data(self.perm_tag, elem).reshape([3, 3])
            lamb_w_elem = self.mb.tag_get_data(self.lamb_w_tag, elem)[0][0]
            lamb_o_elem = self.mb.tag_get_data(self.lamb_o_tag, elem)[0][0]
            centroid_elem = self.mesh_topo_util.get_average_position([elem])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(
                np.asarray([elem]), 2, 3, 0)
            values = []
            ids = []
            for adj in adj_volumes:
                if adj in id_map:
                    k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    centroid_adj = self.mesh_topo_util.get_average_position([adj])
                    direction = centroid_adj - centroid_elem
                    uni = self.unitary(direction)
                    # Directional permeability times total mobility on each side.
                    k_elem = np.dot(np.dot(k_elem,uni),uni)
                    k_elem = k_elem*(lamb_w_elem + lamb_o_elem)
                    k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    k_adj = np.dot(np.dot(k_adj,uni),uni)
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    k_adj = k_adj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(k_elem, k_adj)
                    #keq = keq/(np.dot(self.h2, uni))
                    keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                    values.append(keq)
                    ids.append(id_map[adj])
                # Restore the full permeability tensor before the next neighbour
                # (it was reduced to a scalar above).
                k_elem = self.mb.tag_get_data(self.perm_tag, elem).reshape([3, 3])
            # Diagonal closes the row so that each row sums to zero.
            values.append(-sum(values))
            idx = id_map[elem]
            ids.append(idx)
            A.InsertGlobalValues(idx, values, ids)
        A.FillComplete()
        linearProblem = Epetra.LinearProblem(A, x, b)
        solver = AztecOO.AztecOO(linearProblem)
        # AZ_last, AZ_summary, AZ_warnings
        solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)
        solver.Iterate(1000, 1e-9)
        self.mb.tag_set_data(support_vals_tag, elems, np.asarray(x))
def calculate_p_end(self):
for volume in self.wells:
global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
if global_volume in self.wells_d:
index = self.wells_d.index(global_volume)
pms = self.set_p[index]
mb.tag_set_data(self.pms_tag, volume, pms)
    def calculate_prolongation_op_het(self):
        """Assemble the multiscale prolongation operator ``self.trilOP``.

        For every collocation-point meshset, solves a hierarchy of local
        problems (edges -> faces -> volumes) to obtain basis-function values,
        which are inserted column-wise into the operator.
        """
        zeros = np.zeros(self.nf)
        std_map = Epetra.Map(self.nf, 0, self.comm)
        self.trilOP = Epetra.CrsMatrix(Epetra.Copy, std_map, std_map, 0)
        sets = self.mb.get_entities_by_type_and_tag(
            0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
        i = 0
        my_pairs = set()
        for collocation_point_set in sets:
            i += 1
            childs = self.mb.get_child_meshsets(collocation_point_set)
            collocation_point = self.mb.get_entities_by_handle(collocation_point_set)[0]
            primal_elem = self.mb.tag_get_data(self.fine_to_primal_tag, collocation_point,
                flat=True)[0]
            primal_id = self.mb.tag_get_data(self.primal_id_tag, int(primal_elem), flat=True)[0]
            primal_id = self.ident_primal[primal_id]
            # Per-basis scratch tag holding the support values of this column.
            support_vals_tag = self.mb.tag_get_handle(
                "TMP_SUPPORT_VALS {0}".format(primal_id), 1, types.MB_TYPE_DOUBLE, True,
                types.MB_TAG_SPARSE, default_value=0.0)
            # Basis function: 1 at the collocation point, 0 everywhere else.
            self.mb.tag_set_data(support_vals_tag, self.all_fine_vols, zeros)
            self.mb.tag_set_data(support_vals_tag, collocation_point, 1.0)
            for vol in childs:
                elems_vol = self.mb.get_entities_by_handle(vol)
                c_faces = self.mb.get_child_meshsets(vol)
                for face in c_faces:
                    elems_fac = self.mb.get_entities_by_handle(face)
                    c_edges = self.mb.get_child_meshsets(face)
                    for edge in c_edges:
                        elems_edg = self.mb.get_entities_by_handle(edge)
                        c_vertices = self.mb.get_child_meshsets(edge)
                        # from this point on the prolongation operator is filled
                        self.calculate_local_problem_het(
                            elems_edg, c_vertices, support_vals_tag)
                    self.calculate_local_problem_het(
                        elems_fac, c_edges, support_vals_tag)
                self.calculate_local_problem_het(
                    elems_vol, c_faces, support_vals_tag)
                vals = self.mb.tag_get_data(support_vals_tag, elems_vol, flat=True)
                gids = self.mb.tag_get_data(self.global_id_tag, elems_vol, flat=True)
                primal_elems = self.mb.tag_get_data(self.fine_to_primal_tag, elems_vol,
                    flat=True)
                # Insert each non-zero support value once per (gid, column) pair.
                for val, gid in zip(vals, gids):
                    if (gid, primal_id) not in my_pairs:
                        if val == 0.0:
                            pass
                        else:
                            self.trilOP.InsertGlobalValues([gid], [primal_id], val)
                        my_pairs.add((gid, primal_id))
    def calculate_restriction_op(self):
        """Assemble the restriction operator ``self.trilOR``.

        Row ``primal_id`` receives a 1 in every column corresponding to a
        fine element contained in that primal (coarse) cell.
        """
        std_map = Epetra.Map(self.nf, 0, self.comm)
        self.trilOR = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        for primal in self.primals:
            primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
            primal_id = self.ident_primal[primal_id]
            restriction_tag = self.mb.tag_get_handle(
                "RESTRICTION_PRIMAL {0}".format(primal_id), 1, types.MB_TYPE_INTEGER,
                True, types.MB_TAG_SPARSE)
            fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
            # Record the primal each fine element belongs to.
            self.mb.tag_set_data(
                self.elem_primal_id_tag,
                fine_elems_in_primal,
                np.repeat(primal_id, len(fine_elems_in_primal)))
            gids = self.mb.tag_get_data(self.global_id_tag, fine_elems_in_primal, flat=True)
            self.trilOR.InsertGlobalValues(primal_id, np.repeat(1, len(gids)), gids)
            self.mb.tag_set_data(restriction_tag, fine_elems_in_primal, np.repeat(1, len(fine_elems_in_primal)))
        self.trilOR.FillComplete()
        # Disabled debug dump kept from the original implementation.
        """for i in range(len(primals)):
            p = trilOR.ExtractGlobalRowCopy(i)
            print(p[0])
            print(p[1])
            print('\n')"""
    def calculate_restriction_op_2(self):
        """Assemble the restriction operator excluding the columns of the
        volumes with prescribed (Dirichlet) pressure.
        """
        #0
        std_map = Epetra.Map(len(self.all_fine_vols_ic), 0, self.comm)
        self.trilOR = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
        gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols_ic, flat=True)
        for primal in self.primals:
            #1
            primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
            primal_id = self.ident_primal[primal_id]
            restriction_tag = self.mb.tag_get_handle(
                "RESTRICTION_PRIMAL {0}".format(primal_id), 1, types.MB_TYPE_INTEGER,
                True, types.MB_TAG_SPARSE)
            fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
            self.mb.tag_set_data(
                self.elem_primal_id_tag,
                fine_elems_in_primal,
                np.repeat(primal_id, len(fine_elems_in_primal)))
            # Keep only the fine elements that are unknowns (not Dirichlet wells).
            elems_ic = self.all_fine_vols_ic & set(fine_elems_in_primal)
            gids_elems_ic = self.mb.tag_get_data(self.global_id_tag, elems_ic, flat=True)
            local_map = []
            for gid in gids_elems_ic:
                #2
                local_map.append(self.map_vols_ic[gid])
            #1
            self.trilOR.InsertGlobalValues(primal_id, np.repeat(1, len(local_map)), local_map)
            #gids = self.mb.tag_get_data(self.global_id_tag, fine_elems_in_primal, flat=True)
            #self.trilOR.InsertGlobalValues(primal_id, np.repeat(1, len(gids)), gids)
            self.mb.tag_set_data(restriction_tag, fine_elems_in_primal, np.repeat(1, len(fine_elems_in_primal)))
        #0
        self.trilOR.FillComplete()
        # Disabled debug dump kept from the original implementation.
        """for i in range(len(self.primals)):
            p = self.trilOR.ExtractGlobalRowCopy(i)
            print(p[0])
            print(p[1])
            print('\n')"""
    def calculate_sat(self):
        """Compute the water saturation of the current time step with an
        explicit update from the upwind divergence, clamping at 0.8 and
        aborting the run on an invalid value.
        """
        lim = 10**(-10)
        for volume in self.all_fine_vols:
            gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            if gid in self.wells_d:
                tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
                # Wells of type 1 are skipped — presumably injectors; confirm.
                if tipo_de_poco == 1:
                    continue
                else:
                    pass
            div = self.div_upwind_3(volume, self.pf_tag)
            fi = 0.3 #self.mb.tag_get_data(self.fi_tag, volume)[0][0]
            sat1 = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
            # Explicit saturation update: S = S_prev + div * dt / (phi * V).
            sat = sat1 + div*(self.delta_t/(fi*self.V))
            if sat > 1.0:
                print('saturacao maior que 1 na funcao calculate_sat')
                import pdb; pdb.set_trace()
            #if abs(div) < lim or sat1 == (1 - self.Sor) or sat < sat1:
            #if abs(div) < lim or sat1 == (1 - self.Sor):
            if abs(div) < lim or sat1 == 0.8:
                continue
            #elif sat > (1 - self.Sor):
            elif sat > 0.8:
                #sat = 1 - self.Sor
                print("Sat > 0.8")
                print(sat)
                print('gid')
                print(gid)
                print('\n')
                sat = 0.8
            #elif sat < 0 or sat > (1 - self.Sor):
            elif sat < 0 or sat > 0.8:
                print('Erro: saturacao invalida')
                print('Saturacao: {0}'.format(sat))
                print('Saturacao anterior: {0}'.format(sat1))
                print('div: {0}'.format(div))
                print('gid: {0}'.format(gid))
                print('fi: {0}'.format(fi))
                print('V: {0}'.format(self.V))
                print('delta_t: {0}'.format(self.delta_t))
                print('loop: {0}'.format(self.loop))
                sys.exit(0)
            self.mb.tag_set_data(self.sat_tag, volume, sat)
def cfl(self, fi, qmax):
"""
cfl usando fluxo maximo
"""
cfl = 0.9
self.delta_t = cfl*(fi*self.V)/float(qmax)
def cfl_2(self, vmax, h, dfds):
"""
cfl usando velocidade maxima
"""
cfl = 1.0
self.delta_t = (cfl*h)/float(vmax*dfds)
def create_tags(self, mb):
self.prod_tag = mb.tag_get_handle(
"PROD", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.lbt_tag = mb.tag_get_handle(
"LBT", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.fw_tag = mb.tag_get_handle(
"FW", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.vel_tag = mb.tag_get_handle(
"VEL", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.Pc2_tag = mb.tag_get_handle(
"PC2", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pf2_tag = mb.tag_get_handle(
"PF2", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.err_tag = mb.tag_get_handle(
"ERRO", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.err2_tag = mb.tag_get_handle(
"ERRO_2", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pf_tag = mb.tag_get_handle(
"PF", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.k_tag = mb.tag_get_handle(
"K", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.contorno_tag = mb.tag_get_handle(
"CONTORNO", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pc_tag = mb.tag_get_handle(
"PC", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pms_tag = mb.tag_get_handle(
"PMS", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pms2_tag = mb.tag_get_handle(
"PMS2", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.p_tag = mb.tag_get_handle(
"P", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.pcorr_tag = mb.tag_get_handle(
"P_CORR", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.perm_tag = mb.tag_get_handle(
"PERM", 9, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.global_id_tag = mb.tag_get_handle("GLOBAL_ID")
self.collocation_point_tag = mb.tag_get_handle("COLLOCATION_POINT")
self.elem_primal_id_tag = mb.tag_get_handle(
"FINE_PRIMAL_ID", 1, types.MB_TYPE_INTEGER, True,
types.MB_TAG_SPARSE)
self.sat_tag = mb.tag_get_handle(
"SAT", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.fi_tag = mb.tag_get_handle(
"FI", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.lamb_w_tag = mb.tag_get_handle(
"LAMB_W", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.lamb_o_tag = mb.tag_get_handle(
"LAMB_O", 1, types.MB_TYPE_DOUBLE,
types.MB_TAG_SPARSE, True)
self.primal_id_tag = mb.tag_get_handle("PRIMAL_ID")
self.fine_to_primal_tag = mb.tag_get_handle("FINE_TO_PRIMAL")
self.valor_da_prescricao_tag = mb.tag_get_handle("VALOR_DA_PRESCRICAO")
self.tipo_de_prescricao_tag = mb.tag_get_handle("TIPO_DE_PRESCRICAO")
self.wells_tag = mb.tag_get_handle("WELLS")
self.tipo_de_poco_tag = mb.tag_get_handle("TIPO_DE_POCO")
    def Dirichlet_problem(self):
        """Recompute the pressure inside each primal (coarse) cell using, as
        boundary condition, the multiscale pressure prescribed on the
        collocation points and interface volumes of that primal.

        The corrected pressure is written into ``self.pcorr_tag``.
        """
        #0
        colocation_points = self.mb.get_entities_by_type_and_tag(
            0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
        sets = []
        for col in colocation_points:
            #1
            #col = mb.get_entities_by_handle(col)[0]
            sets.append(self.mb.get_entities_by_handle(col)[0])
        #0
        sets = set(sets)
        for primal in self.primals:
            #1
            primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
            fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
            volumes_in_primal = self.get_volumes_in_interfaces(
                fine_elems_in_primal, primal_id, flag = 2)
            all_volumes = list(fine_elems_in_primal)
            all_volumes_ic = self.all_fine_vols_ic & set(all_volumes)
            gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, all_volumes_ic, flat=True)
            # gids_vols_ic = volumes of this primal that are unknowns,
            # i.e. the primal's volumes excluding those with prescribed pressure
            map_volumes = dict(zip(gids_vols_ic, range(len(gids_vols_ic))))
            # map_volumes = local (per-primal) index mapping
            std_map = Epetra.Map(len(all_volumes_ic), 0, self.comm)
            b = Epetra.Vector(std_map)
            A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
            dim = len(all_volumes_ic)
            # b_np = np.zeros(dim)
            # A_np = np.zeros((dim, dim))
            for volume in all_volumes_ic:
                #2
                soma = 0
                temp_id = []
                temp_k = []
                volume_centroid = self.mesh_topo_util.get_average_position([volume])
                adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
                lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
                lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
                if (volume in sets) or (volume in volumes_in_primal):
                    #3
                    # Boundary row: pin this volume to its multiscale pressure.
                    temp_k.append(1.0)
                    temp_id.append(map_volumes[global_volume])
                    b[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                    # b_np[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                #2
                else:
                    #3
                    # Interior row: two-point fluxes to every neighbour.
                    for adj in adj_volumes:
                        #4
                        global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                        adj_centroid = self.mesh_topo_util.get_average_position([adj])
                        direction = adj_centroid - volume_centroid
                        uni = self.unitary(direction)
                        kvol = np.dot(np.dot(kvol,uni),uni)
                        kvol = kvol*(lamb_w_vol + lamb_o_vol)
                        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                        kadj = np.dot(np.dot(kadj,uni),uni)
                        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                        lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                        kadj = kadj*(lamb_w_adj + lamb_o_adj)
                        keq = self.kequiv(kvol, kadj)
                        keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
                        soma = soma + keq
                        if global_adj in self.wells_d:
                            #5
                            # Dirichlet-well neighbour: move its known pressure to the RHS.
                            index = self.wells_d.index(global_adj)
                            b[map_volumes[global_volume]] += self.set_p[index]*(keq)
                            # b_np[map_volumes[global_volume]] += self.set_p[index]*(keq)
                        #4
                        else:
                            #5
                            temp_id.append(map_volumes[global_adj])
                            temp_k.append(-keq)
                        #4
                        # Restore the full tensor before the next neighbour.
                        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                    #3
                    temp_k.append(soma)
                    temp_id.append(map_volumes[global_volume])
                    if global_volume in self.wells_n:
                        #4
                        # Neumann well: add the prescribed rate to the RHS with a
                        # sign that depends on the well type.
                        index = self.wells_n.index(global_volume)
                        tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0]
                        if tipo_de_poco == 1:
                            #5
                            b[map_volumes[global_volume]] += self.set_q[index]
                            # b_np[map_volumes[global_volume]] += self.set_q[index]
                        #4
                        else:
                            #5
                            b[map_volumes[global_volume]] += -self.set_q[index]
                            # b_np[map_volumes[global_volume]] += -self.set_q[index]
                #2
                A.InsertGlobalValues(map_volumes[global_volume], temp_k, temp_id)
                # A_np[map_volumes[global_volume], temp_id] = temp_k
            #1
            A.FillComplete()
            x = self.solve_linear_problem(A, b, dim)
            # x_np = np.linalg.solve(A_np, b_np)
            for volume in all_volumes_ic:
                #2
                global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
                self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[global_volume]])
                # self.mb.tag_set_data(self.pms2_tag, volume, x_np[map_volumes[global_volume]])
            #1
            # Dirichlet volumes of the primal simply receive their prescribed pressure.
            for volume in set(all_volumes) - all_volumes_ic:
                #2
                global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
                index = self.wells_d.index(global_volume)
                p = self.set_p[index]
                self.mb.tag_set_data(self.pcorr_tag, volume, p)
                # self.mb.tag_set_data(self.pms2_tag, volume, p)
def div_max(self, p_tag):
    """Return the largest-magnitude flux balance over the fine mesh.

    For every fine volume assembles the two-point flux residual
    q = soma1 + soma2 with soma1 = -(sum keq)*pvol and soma2 = sum keq*padj,
    using total mobility (lamb_w + lamb_o) and a 1/h^2 finite-difference
    scaling, and keeps the volume where |q| is largest.

    Parameters:
        p_tag: tag holding the pressure read on every fine volume.

    Returns:
        (abs(q2), fi): maximum |flux balance| and the porosity of the
        volume where it occurs.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` while sibling
    methods use ``self.mb`` -- presumably the same objects; confirm.
    """
    q2 = 0.0
    fi = 0.0
    for volume in self.all_fine_vols:
        soma1 = 0.0  # accumulates -keq (diagonal part, applied to pvol below)
        soma2 = 0.0  # accumulates keq*padj (off-diagonal part)
        pvol = mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = mesh_topo_util.get_average_position([volume])
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        for adj in adjs_vol:
            padj = mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            # project the permeability tensor on the face direction,
            # then weight by total mobility
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq/(np.dot(self.h2, uni))  # finite-difference 1/h^2 scaling
            soma1 = soma1 - keq
            soma2 = soma2 + keq*padj
            # re-read the full tensor so the projection does not compound
            # across adjacencies
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        soma1 = soma1*pvol
        q = soma1 + soma2
        if abs(q) > abs(q2):
            q2 = q
            fi = mb.tag_get_data(self.fi_tag, volume)[0][0]
    return abs(q2), fi
def div_max_2(self, p_tag):
    """Return the maximum |flux balance| over the fine mesh (area form).

    Same scan as ``div_max`` but the transmissibility is scaled by
    face area over spacing (A/h) instead of 1/h^2, and the residual is
    accumulated directly as q += keq*(padj - pvol).

    Parameters:
        p_tag: tag holding the pressure read on every fine volume.

    Returns:
        (q2, fi): maximum |q| and the porosity of the volume where it occurs.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` -- presumably
    aliases of ``self.mb``/``self.mesh_topo_util``; confirm.
    """
    q2 = 0.0
    fi = 0.0
    for volume in self.all_fine_vols:
        q = 0.0
        pvol = mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = mesh_topo_util.get_average_position([volume])
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        for adj in adjs_vol:
            padj = mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            # directional projection + total-mobility weighting
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))  # A/h scaling
            q = q + keq*(padj - pvol)
            # restore the full tensor before the next adjacency
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        if abs(q) > q2:
            q2 = abs(q)
            fi = mb.tag_get_data(self.fi_tag, volume)[0][0]
    return q2, fi
def div_max_3(self, p_tag):
    """Maximum water outflow weighted by dfds over the fine mesh.

    dfds is the derivative of the fractional flow with respect to
    saturation, approximated per face by the finite difference
    (fw_adj - fw_vol)/(sat_adj - sat_vol); faces with a saturation jump
    below ``lim`` are skipped to avoid division by ~zero.

    Parameters:
        p_tag: tag holding the pressure read on every fine volume.

    Returns:
        (q2, fi2): q2 is max over all faces of |dfds*keq*(padj - pvol)|;
        fi2 is the maximum porosity over ALL volumes (as written, it is not
        tied to the volume where q2 occurs).
    """
    lim = 10**(-12)
    q2 = 0.0
    fi = 0.0
    fi2 = 0.0
    dfds2 = 0  # |dfds| at the current maximum (kept for inspection)
    for volume in self.all_fine_vols:
        q = 0.0
        pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        sat_vol = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        fi = self.mb.tag_get_data(self.fi_tag, volume)[0][0]
        if fi > fi2:
            fi2 = fi
        for adj in adjs_vol:
            padj = self.mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
            sat_adj = self.mb.tag_get_data(self.sat_tag, adj)[0][0]
            # skip faces with (numerically) equal saturation: dfds undefined.
            # NOTE(review): the tensor re-read below is also skipped on this
            # continue, so the next face reuses the projected kvol -- confirm.
            if abs(sat_adj - sat_vol) < lim:
                continue
            # finite-difference derivative of the fractional flow fw = lw/(lw+lo)
            dfds = ((lamb_w_adj/(lamb_w_adj+lamb_o_adj)) - (lamb_w_vol/(lamb_w_vol+lamb_o_vol)))/float((sat_adj - sat_vol))
            q = abs(dfds*keq*(padj - pvol))
            if q > q2:
                q2 = q
                dfds2 = abs(dfds)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    return q2, fi2
def div_upwind_1(self, volume, p_tag):
    """Upwind water-flux balance at ``volume`` (finite-difference form).

    The interface mobility is taken from the cell with the higher pressure
    (upwind): when padj > pvol the neighbour's water mobility is used,
    otherwise the volume's own.  The transmissibility is scaled by 1/h^2.

    Parameters:
        volume: fine-volume handle.
        p_tag: tag holding the pressure field.

    Returns:
        q = -(sum keq)*pvol + sum keq*padj.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` -- presumably
    aliases of ``self.mb``/``self.mesh_topo_util``; confirm.
    """
    soma1 = 0.0  # sum of keq (negated and applied to pvol after the loop)
    soma2 = 0.0  # sum of keq*padj
    pvol = mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = mesh_topo_util.get_average_position([volume])
    global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
    kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    for adj in adjs_vol:
        padj = mb.tag_get_data(p_tag, adj)[0][0]
        adj_centroid = mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # project both permeability tensors on the face direction
        kvol = np.dot(np.dot(kvol,uni),uni)
        kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj,uni),uni)
        lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        grad_p = padj - pvol
        if grad_p > 0:
            # flow comes from the neighbour: use its water mobility
            keq = (lamb_w_adj*kadj)/(np.dot(self.h2, uni))
        else:
            keq = (lamb_w_vol*kvol)/(np.dot(self.h2, uni))
        soma1 = soma1 + keq
        soma2 = soma2 + keq*padj
        # restore the full tensor before the next adjacency
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    soma1 = -soma1*pvol
    q = soma1 + soma2
    return q
def div_upwind_2(self, volume, p_tag):
    """Total water flux entering ``volume``, used to update the saturation.

    The interface mobility is taken from the cell with the higher pressure
    (upwind on the sign of grad_p).  Each face contributes
    keq*grad_p*A(face), with grad_p = (padj - pvol)/h(face).

    Parameters:
        volume: fine-volume handle.
        p_tag: tag holding the pressure field.

    Returns:
        q: signed total water flux into the volume.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` -- presumably
    aliases of ``self.mb``/``self.mesh_topo_util``; confirm.
    """
    q = 0.0
    pvol = mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = mesh_topo_util.get_average_position([volume])
    global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
    kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    for adj in adjs_vol:
        padj = mb.tag_get_data(p_tag, adj)[0][0]
        adj_centroid = mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        kvol = np.dot(np.dot(kvol,uni),uni)
        kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj,uni),uni)
        lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        grad_p = (padj - pvol)/float((np.dot(self.h, uni)))
        if grad_p > 0:
            # inflow from the neighbour: upwind with the neighbour's mobility
            # keq = (lamb_w_adj*kadj*(np.dot(self.A, uni)))/(np.dot(self.h, uni))
            keq = lamb_w_adj*kadj
        else:
            # keq = (lamb_w_vol*kvol*(np.dot(self.A, uni)))/(np.dot(self.h, uni))
            keq = lamb_w_vol*kvol
        q = q + keq*grad_p*(np.dot(self.A, uni))
        # restore the full tensor before the next adjacency
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    return q
def div_upwind_3(self, volume, p_tag):
    """Total water flux entering ``volume`` with interface mobility taken as
    the arithmetic mean of the two cells' water mobilities.

    Each face contributes keq*lamb_eq*grad_p*A(face) with
    grad_p = (padj - pvol)/h(face).  When the volume is a producer well
    (its gid is in ``self.wells_prod``) the total-mobility inflow ``qt`` is
    additionally accumulated and split by the fractional flow ``fw`` into
    oil (stored under ``prod_tag``) and water (subtracted from ``q``).

    Changes versus the previous revision: the write-only debug lists and the
    leftover ``import pdb; pdb.set_trace()`` breakpoint were removed -- a
    negative water flux is now only reported via ``print``.

    Parameters:
        volume: fine-volume handle.
        p_tag: tag holding the pressure field.

    Returns:
        q: signed water flux into the volume (well water outflow deducted).
    """
    qt = 0.0  # total-mobility inflow, used only for producer wells
    q = 0.0   # accumulated water flux
    pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = self.mesh_topo_util.get_average_position([volume])
    global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    lbt_vol = self.mb.tag_get_data(self.lbt_tag, volume)[0][0]
    fw_vol = self.mb.tag_get_data(self.fw_tag, volume)[0][0]
    for adj in adjs_vol:
        padj = self.mb.tag_get_data(p_tag, adj)[0][0]
        lbt_adj = self.mb.tag_get_data(self.lbt_tag, adj)[0][0]
        adj_centroid = self.mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # project the permeability tensors on the face direction
        kvol = np.dot(np.dot(kvol,uni),uni)
        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj,uni),uni)
        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        keq = self.kequiv(kvol, kadj)
        grad_p = (padj - pvol)/float(np.dot(self.h, uni))
        lamb_eq = (lamb_w_vol + lamb_w_adj)/2.0  # mean water mobility
        keq = keq*lamb_eq
        q = q + keq*(grad_p)*(np.dot(self.A, uni))
        if global_volume in self.wells_prod:
            # total-mobility transmissibility for the producer-well split
            kvol2 = kvol*(lbt_vol)
            kadj2 = kadj*(lbt_adj)
            keq2 = self.kequiv(kvol2, kadj2)
            qt += grad_p*(keq2)*(np.dot(self.A, uni))
        # restore the full tensor before the next adjacency
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    if global_volume in self.wells_prod:
        qp = (1 - fw_vol)*qt  # oil flux leaving through the well
        qw = (fw_vol)*qt      # water flux leaving through the well
        q = q - qw
        self.mb.tag_set_data(self.prod_tag, volume, qp)
    if q < 0:
        print('divergente upwind de agua menor que zero na funcao div_upwind_3')
    return q
def erro(self):
    """Store the absolute difference |Pf - Pms| on every fine volume
    under ``err_tag`` (fine-scale vs. multiscale pressure)."""
    for vol in self.all_fine_vols:
        fine_p = self.mb.tag_get_data(self.pf_tag, vol, flat=True)[0]
        ms_p = self.mb.tag_get_data(self.pms_tag, vol, flat=True)[0]
        self.mb.tag_set_data(self.err_tag, vol, abs(fine_p - ms_p))
def erro_2(self):
    """Store the absolute difference |Pf - Pms2| on every fine volume
    under ``err2_tag`` (fine-scale vs. corrected multiscale pressure)."""
    for vol in self.all_fine_vols:
        fine_p = self.mb.tag_get_data(self.pf_tag, vol, flat=True)[0]
        ms2_p = self.mb.tag_get_data(self.pms2_tag, vol, flat=True)[0]
        self.mb.tag_set_data(self.err2_tag, vol, abs(fine_p - ms2_p))
def get_volumes_in_interfaces(self, fine_elems_in_primal, primal_id, **options):
    """Collect the interface volumes of the primal ``primal_id``.

    Walks every fine element of the primal and inspects its bridge
    neighbours: a neighbour whose primal id differs from ``primal_id`` is an
    interface volume of an adjacent primal, and the element itself is an
    interface volume of the current primal.  An element is appended once per
    external neighbour, so duplicates are possible (preserved behaviour).

    Parameters:
        fine_elems_in_primal: iterable of fine-element handles of the primal.
        primal_id: id of the current primal.
        options: ``flag=1`` -> return (volumes_in_interface, volumes_in_primal);
                 ``flag=2`` -> return volumes_in_primal only;
                 otherwise  -> return volumes_in_interface only.

    Returns:
        list (or pair of lists) of element handles, per ``flag`` above.

    Changes: removed the dead ``all_volumes`` list and the unused
    ``global_volume`` read.
    """
    volumes_in_primal = []      # elements of this primal touching another primal
    volumes_in_interface = []   # neighbouring elements from other primals
    for volume in fine_elems_in_primal:
        adjs_volume = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        for adj in adjs_volume:
            fin_prim = self.mb.tag_get_data(self.fine_to_primal_tag, adj, flat=True)
            primal_adj = self.mb.tag_get_data(
                self.primal_id_tag, int(fin_prim), flat=True)[0]
            if primal_adj != primal_id:
                volumes_in_interface.append(adj)
                volumes_in_primal.append(volume)
    flag = options.get("flag")
    if flag == 1:
        return volumes_in_interface, volumes_in_primal
    elif flag == 2:
        return volumes_in_primal
    return volumes_in_interface
def get_wells(self):
    """Classify the well volumes by prescription and well type.

    Populates (as attributes):
        wells_d    -- gids of wells with prescribed pressure (Dirichlet)
        wells_n    -- gids of wells with prescribed flow rate (Neumann)
        set_p      -- prescribed pressure values (aligned with wells_d)
        set_q      -- prescribed flow-rate values (aligned with wells_n)
        wells_inj  -- gids of injector wells (tipo_de_poco == 1)
        wells_prod -- gids of producer wells
        wells      -- the well entity handles themselves
    """
    dirichlet_gids = []
    neumann_gids = []
    pressures = []
    flow_rates = []
    injectors = []
    producers = []
    wells_set = self.mb.tag_get_data(self.wells_tag, 0, flat=True)[0]
    self.wells = self.mb.get_entities_by_handle(wells_set)
    for well in self.wells:
        gid = self.mb.tag_get_data(self.global_id_tag, well, flat=True)[0]
        prescribed_value = self.mb.tag_get_data(self.valor_da_prescricao_tag, well, flat=True)[0]
        prescription_kind = self.mb.tag_get_data(self.tipo_de_prescricao_tag, well, flat=True)[0]
        well_kind = self.mb.tag_get_data(self.tipo_de_poco_tag, well, flat=True)[0]
        # kind 0 = prescribed pressure, anything else = prescribed rate
        if prescription_kind == 0:
            dirichlet_gids.append(gid)
            pressures.append(prescribed_value)
        else:
            neumann_gids.append(gid)
            flow_rates.append(prescribed_value)
        # kind 1 = injector, anything else = producer
        if well_kind == 1:
            injectors.append(gid)
        else:
            producers.append(gid)
    self.wells_d = dirichlet_gids
    self.wells_n = neumann_gids
    self.set_p = pressures
    self.set_q = flow_rates
    self.wells_inj = injectors
    self.wells_prod = producers
def kequiv(self, k1, k2):
    """Equivalent (harmonic-mean) permeability of two cells with equal
    spacing: 2*k1*k2/(k1 + k2)."""
    numerator = 2*k1*k2
    return numerator/(k1 + k2)
def modificar_matriz(self, A, rows, columns):
    """Copy matrix ``A`` into a new Epetra.CrsMatrix sized
    ``rows`` x ``columns`` (row-by-row global copy), returning the
    filled copy."""
    row_map = Epetra.Map(rows, 0, self.comm)
    col_map = Epetra.Map(columns, 0, self.comm)
    resized = Epetra.CrsMatrix(Epetra.Copy, row_map, col_map, 3)
    for row in range(rows):
        # ExtractGlobalRowCopy returns (values, column indices)
        values, col_indices = A.ExtractGlobalRowCopy(row)
        resized.InsertGlobalValues(row, values, col_indices)
    resized.FillComplete()
    return resized
def modificar_vetor(self, v, nc):
    """Copy the first ``nc`` entries of vector ``v`` into a new
    Epetra.Vector of length ``nc`` and return it."""
    resized_map = Epetra.Map(nc, 0, self.comm)
    resized = Epetra.Vector(resized_map)
    for pos in range(nc):
        resized[pos] = v[pos]
    return resized
def multimat_vector(self, A, row, b):
    """Return c = A @ b, where ``row`` is the number of rows of ``A``
    (equivalently the length of ``b``)."""
    result_map = Epetra.Map(row, 0, self.comm)
    result = Epetra.Vector(result_map)
    # False -> use A (not its transpose)
    A.Multiply(False, b, result)
    return result
def Neuman_problem_4(self):
    """Re-solve the pressure inside each primal with a local problem.

    For every primal, assembles a local linear system over its fine elements
    plus the neighbouring interface elements: wells keep their prescription,
    collocation points pin the multiscale pressure, interface volumes impose
    the multiscale pressure jump, interior volumes get a standard TPFA row.
    The system is solved both with Trilinos (stored under ``p_tag``) and
    with numpy (stored under ``pms2_tag``).

    Fixes versus the previous revision:
      * ``kadj = kadj*(...)`` referenced an undefined name (the locals are
        ``k_adj``) -- now ``k_adj`` is mobility-weighted as intended;
      * the permeability tensor is re-read after each adjacency so the
        directional projection and mobility weighting do not compound
        across neighbours (same pattern as the sibling methods);
      * ``tipo_de_poco`` is unpacked with ``[0][0]`` like everywhere else.

    NOTE(review): module-level ``mb``/``mesh_topo_util``/``comm`` are used
    alongside ``self.mb`` etc. -- presumably the same objects; confirm.
    """
    colocation_points = mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    sets = set()
    for col in colocation_points:
        sets.add(self.mb.get_entities_by_handle(col)[0])
    for primal in self.primals:
        volumes_in_interface = []
        volumes_in_primal = []
        primal_id = mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = mb.get_entities_by_handle(primal)
        for fine_elem in fine_elems_in_primal:
            volumes_in_primal.append(fine_elem)
            adj_fine_elems = mesh_topo_util.get_bridge_adjacencies(fine_elem, 2, 3)
            for adj in adj_fine_elems:
                fin_prim = mb.tag_get_data(self.fine_to_primal_tag, adj, flat=True)
                primal_adj = mb.tag_get_data(
                    self.primal_id_tag, int(fin_prim), flat=True)[0]
                if primal_adj != primal_id:
                    volumes_in_interface.append(adj)
        # interface neighbours are appended after the primal's own elements,
        # so the first len(primal) rows of the solution belong to the primal
        volumes_in_primal.extend(volumes_in_interface)
        id_map = dict(zip(volumes_in_primal, range(len(volumes_in_primal))))
        std_map = Epetra.Map(len(volumes_in_primal), 0, comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        dim = len(volumes_in_primal)
        b_np = np.zeros(dim)
        A_np = np.zeros((dim, dim))
        for volume in volumes_in_primal:
            temp_id = []
            temp_k = []
            centroid_volume = mesh_topo_util.get_average_position([volume])
            k_vol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            adj_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            if volume in self.wells:
                tipo_de_prescricao = mb.tag_get_data(self.tipo_de_prescricao_tag, volume)[0][0]
                if tipo_de_prescricao == 0:
                    # Dirichlet well: identity row with the prescribed pressure
                    valor_da_prescricao = mb.tag_get_data(self.valor_da_prescricao_tag, volume)[0][0]
                    temp_k.append(1.0)
                    temp_id.append(id_map[volume])
                    b[id_map[volume]] = valor_da_prescricao
                    b_np[id_map[volume]] = valor_da_prescricao
                else:
                    # Neumann well: TPFA row, prescribed rate on the RHS
                    soma = 0.0
                    for adj in adj_vol:
                        centroid_adj = self.mesh_topo_util.get_average_position([adj])
                        direction = centroid_adj - centroid_volume
                        uni = self.unitary(direction)
                        k_vol = np.dot(np.dot(k_vol, uni), uni)
                        k_vol = k_vol*(lamb_w_vol + lamb_o_vol)
                        k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                        k_adj = np.dot(np.dot(k_adj, uni), uni)
                        lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                        lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                        # BUG FIX: was `kadj = kadj*(...)` (undefined name)
                        k_adj = k_adj*(lamb_w_adj + lamb_o_adj)
                        keq = self.kequiv(k_vol, k_adj)
                        keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                        soma = soma + keq
                        temp_k.append(-keq)
                        temp_id.append(id_map[adj])
                        # restore the full tensor before the next adjacency
                        k_vol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                    temp_k.append(soma)
                    temp_id.append(id_map[volume])
                    tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
                    valor_da_prescricao = self.mb.tag_get_data(self.valor_da_prescricao_tag, volume)[0][0]
                    if tipo_de_poco == 1:
                        b[id_map[volume]] = valor_da_prescricao
                        b_np[id_map[volume]] = valor_da_prescricao
                    else:
                        b[id_map[volume]] = -valor_da_prescricao
                        b_np[id_map[volume]] = -valor_da_prescricao
            elif volume in sets:
                # collocation point: pin the multiscale pressure
                temp_k.append(1.0)
                temp_id.append(id_map[volume])
                b[id_map[volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                b_np[id_map[volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
            elif volume in volumes_in_interface:
                # interface volume: impose the multiscale pressure jump
                # towards its neighbour(s) inside the primal
                for adj in adj_vol:
                    fin_prim = self.mb.tag_get_data(self.fine_to_primal_tag, adj, flat=True)
                    primal_adj = self.mb.tag_get_data(
                        self.primal_id_tag, int(fin_prim), flat=True)[0]
                    if primal_adj == primal_id:
                        pms_adj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                        pms_volume = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                        b[id_map[volume]] = pms_volume - pms_adj
                        b_np[id_map[volume]] = pms_volume - pms_adj
                        temp_k.append(1.0)
                        temp_id.append(id_map[volume])
                        temp_k.append(-1.0)
                        temp_id.append(id_map[adj])
            else:
                # interior volume: plain TPFA row (1/h^2 scaling,
                # no mobility weighting here, as in the original)
                soma = 0.0
                for adj in adj_vol:
                    centroid_adj = self.mesh_topo_util.get_average_position([adj])
                    direction = centroid_adj - centroid_volume
                    uni = self.unitary(direction)
                    k_vol = np.dot(np.dot(k_vol, uni), uni)
                    k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    k_adj = np.dot(np.dot(k_adj, uni), uni)
                    keq = self.kequiv(k_vol, k_adj)
                    keq = keq/(np.dot(self.h2, uni))
                    soma = soma + keq
                    temp_k.append(-keq)
                    temp_id.append(id_map[adj])
                    # restore the full tensor before the next adjacency
                    k_vol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                temp_k.append(soma)
                temp_id.append(id_map[volume])
            A.InsertGlobalValues(id_map[volume], temp_k, temp_id)
            A_np[id_map[volume], temp_id] = temp_k[:]
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        x_np = np.linalg.solve(A_np, b_np)
        # only the primal's own elements (the leading rows) receive the
        # local solution; interface rows were auxiliary
        for i in range(len(volumes_in_primal) - len(volumes_in_interface)):
            volume = volumes_in_primal[i]
            self.mb.tag_set_data(self.p_tag, volume, x[i])
            self.mb.tag_set_data(self.pms2_tag, volume, x_np[i])
def Neuman_problem_4_3(self):
    """Recompute the pressures inside each primal using the prescribed
    flux (multiscale pressure-jump) condition on the primal interfaces.

    For every primal, a local system is built over the primal's unknown
    volumes (``all_fine_vols_ic`` restricted to the primal plus its
    interface neighbours):
      * collocation points pin the multiscale pressure (``pms_tag``);
      * interface volumes get a pressure-jump row against their in-primal
        neighbour;
      * remaining volumes get a TPFA row (A/h scaling, total mobility),
        with Dirichlet-well neighbours moved to the RHS and Neumann wells
        added to the RHS.
    The solution is stored under ``pcorr_tag`` (Trilinos) and ``pms2_tag``
    (numpy); prescribed-pressure volumes receive the prescribed value
    directly.
    """
    #0
    colocation_points = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    sets = []
    for col in colocation_points:
        #1
        #col = mb.get_entities_by_handle(col)[0]
        sets.append(self.mb.get_entities_by_handle(col)[0])
    #0
    sets = set(sets)
    for primal in self.primals:
        #1
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_interface = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id)
        all_volumes = list(fine_elems_in_primal) + volumes_in_interface
        # unknowns of the local problem: volumes of this primal (plus
        # interface) that do not have prescribed pressure
        all_volumes_ic = self.all_fine_vols_ic & set(all_volumes)
        gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, all_volumes_ic, flat=True)
        # local numbering: global id -> local row index
        map_volumes = dict(zip(gids_vols_ic, range(len(gids_vols_ic))))
        std_map = Epetra.Map(len(all_volumes_ic), 0, self.comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        dim = len(all_volumes_ic)
        b_np = np.zeros(dim)
        A_np = np.zeros((dim, dim))
        for volume in all_volumes_ic:
            #2
            soma = 0
            temp_id = []
            temp_k = []
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            if volume in sets:
                #3
                # collocation point: identity row pinning the pms pressure
                temp_k.append(1.0)
                temp_id.append(map_volumes[global_volume])
                b[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                b_np[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
            #2
            elif volume in volumes_in_interface:
                #3
                # interface volume: pressure-jump row against each in-primal
                # neighbour.
                # NOTE(review): with several in-primal neighbours, b is
                # overwritten while the row entries accumulate -- confirm
                # this is the intended behaviour.
                for adj in adj_volumes:
                    #4
                    if adj in fine_elems_in_primal:
                        #5
                        global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                        pms_adj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                        pms_volume = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                        b[map_volumes[global_volume]] = pms_volume - pms_adj
                        b_np[map_volumes[global_volume]] = pms_volume - pms_adj
                        temp_k.append(1.0)
                        temp_id.append(map_volumes[global_volume])
                        temp_k.append(-1.0)
                        temp_id.append(map_volumes[global_adj])
                    #4
                    else:
                        #5
                        pass
            #2
            else:
                #3
                # ordinary volume: TPFA row with total-mobility weighting
                for adj in adj_volumes:
                    #4
                    global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = self.mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    uni = self.unitary(direction)
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
                    soma = soma + keq
                    if global_adj in self.wells_d:
                        #5
                        # Dirichlet neighbour: move its known pressure to RHS
                        index = self.wells_d.index(global_adj)
                        b[map_volumes[global_volume]] += self.set_p[index]*(keq)
                        b_np[map_volumes[global_volume]] += self.set_p[index]*(keq)
                    #4
                    else:
                        #5
                        temp_id.append(map_volumes[global_adj])
                        temp_k.append(-keq)
                    #4
                    # restore the full tensor before the next adjacency
                    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                #3
                temp_k.append(soma)
                temp_id.append(map_volumes[global_volume])
                if global_volume in self.wells_n:
                    #4
                    # Neumann well: add the prescribed rate to the RHS
                    index = self.wells_n.index(global_volume)
                    tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0]
                    if tipo_de_poco == 1:
                        #5
                        b[map_volumes[global_volume]] += self.set_q[index]
                        b_np[map_volumes[global_volume]] += self.set_q[index]
                    #4
                    else:
                        #5
                        b[map_volumes[global_volume]] += -self.set_q[index]
                        b_np[map_volumes[global_volume]] += -self.set_q[index]
            #2
            A.InsertGlobalValues(map_volumes[global_volume], temp_k, temp_id)
            A_np[map_volumes[global_volume], temp_id] = temp_k
        #1
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        x_np = np.linalg.solve(A_np, b_np)
        for volume in all_volumes_ic:
            #2
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[global_volume]])
            self.mb.tag_set_data(self.pms2_tag, volume, x_np[map_volumes[global_volume]])
        #1
        # prescribed-pressure volumes receive the prescribed value directly
        for volume in set(all_volumes) - all_volumes_ic:
            #2
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            index = self.wells_d.index(global_volume)
            p = self.set_p[index]
            self.mb.tag_set_data(self.pcorr_tag, volume, p)
            self.mb.tag_set_data(self.pms2_tag, volume, p)
def organize_op(self):
    """Drop from the prolongation operator the rows of volumes with
    prescribed pressure (non-unknowns).

    The surviving rows (gids of ``self.all_fine_vols_ic``) are compacted to
    the local numbering given by ``self.map_vols_ic`` and the result
    replaces ``self.trilOP``.

    Changes: removed the dead counter ``cont``.
    """
    std_map = Epetra.Map(len(self.all_fine_vols_ic), 0, self.comm)
    trilOP2 = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
    gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols_ic, flat=True)
    for gid in gids_vols_ic:
        # ExtractGlobalRowCopy returns (values, column indices)
        values, index = self.trilOP.ExtractGlobalRowCopy(gid)
        trilOP2.InsertGlobalValues(self.map_vols_ic[gid], list(values), list(index))
    self.trilOP = trilOP2
    self.trilOP.FillComplete()
def organize_Pf(self):
    """Scatter the fine-scale solution into a full-size vector for output.

    Unknowns are placed through ``map_vols_ic_2`` (local -> global id);
    prescribed-pressure volumes receive their prescribed values directly.
    The result is stored in ``self.Pf_all``.
    """
    full_map = Epetra.Map(len(self.all_fine_vols), 0, self.comm)
    full_solution = Epetra.Vector(full_map)
    # unknowns: local index -> global position
    for local_index in range(len(self.Pf)):
        full_solution[self.map_vols_ic_2[local_index]] = self.Pf[local_index]
    # Dirichlet wells: write the prescribed pressures
    for well_index in range(len(self.wells_d)):
        full_solution[self.wells_d[well_index]] = self.set_p[well_index]
    self.Pf_all = full_solution
def organize_Pms(self):
    """Scatter the multiscale solution into a full-size vector for output.

    Mirror of ``organize_Pf`` for ``self.Pms``; the result is stored in
    ``self.Pms_all``.
    """
    full_map = Epetra.Map(len(self.all_fine_vols), 0, self.comm)
    full_solution = Epetra.Vector(full_map)
    # unknowns: local index -> global position
    for local_index in range(len(self.Pms)):
        full_solution[self.map_vols_ic_2[local_index]] = self.Pms[local_index]
    # Dirichlet wells: write the prescribed pressures
    for well_index in range(len(self.wells_d)):
        full_solution[self.wells_d[well_index]] = self.set_p[well_index]
    self.Pms_all = full_solution
def pol_interp(self, S, x, y):
    """Evaluate the Newton divided-difference interpolant of y = f(x) at S.

    x is the saturation abscissa vector and y the table being interpolated.
    When y is the krw table the result is clamped to 0 for S <= 0.2; when
    y is the kro table it is clamped to 1 for S <= 0 and to 0 for S >= 0.9.
    Returns abs(p(S)).
    """
    n = len(x)
    # build the divided-difference table, one list per order
    levels = []
    if n > 1:
        levels.append([(y[i + 1] - y[i])/float(x[i + 1] - x[i])
                       for i in range(n - 1)])
    for order in range(2, n):
        prev = levels[-1]
        levels.append([(prev[i + 1] - prev[i])/float(x[i + order] - x[i])
                       for i in range(n - order)])
    # Newton coefficients are the heads of each level
    coeffs = [level[0] for level in levels]
    pol = y[0]
    mult = 1
    for i in range(n - 1):
        mult = (S - x[i])*mult
        pol = pol + mult*coeffs[i]
    # physical clamps when interpolating the relative-permeability tables
    if y == self.krw_r:
        if S <= 0.2:
            pol = 0.0
    elif y == self.kro_r:
        if S <= 0:
            pol = 1.0
        elif S >= 0.9:
            pol = 0.0
    return abs(pol)
def pol_interp_2(self, S):
    """Corey-type relative permeabilities for water saturation ``S``.

    Inside the mobile range [Swc, 1 - Sor]:
        krw = ((S - Swc)/(1 - Swc - Sor))**nw
        kro = ((1 - S - Swc)/(1 - Swc - Sor))**no
    Above 1 - Sor returns (1.0, 0.0); below Swc returns (0.0, 1.0).

    The clamps are now checked BEFORE evaluating the power laws so that
    out-of-range saturations cannot raise on fractional exponents
    (negative base ** non-integer power).

    NOTE(review): the kro numerator uses Swc ((1 - S - Swc)) where Corey's
    form usually uses Sor -- preserved as-is; confirm intended.

    Returns:
        (krw, kro)
    """
    if S > (1 - self.Sor):
        return 1.0, 0.0
    if S < self.Swc:
        return 0.0, 1.0
    mobile_range = float(1 - self.Swc - self.Sor)
    krw = ((S - self.Swc)/mobile_range)**(self.nw)
    kro = ((1 - S - self.Swc)/mobile_range)**(self.no)
    return krw, kro
def pymultimat(self, A, B, nf):
    """Return the matrix product C = A*B as a new nf x nf
    Epetra.CrsMatrix (neither operand transposed)."""
    product_map = Epetra.Map(nf, 0, self.comm)
    product = Epetra.CrsMatrix(Epetra.Copy, product_map, 3)
    EpetraExt.Multiply(A, False, B, False, product)
    product.FillComplete()
    return product
def read_perm_rel(self):
    """Load the saturation/relative-permeability table from 'perm_rel.py'.

    The file has a header line followed by rows 'Sw kro krw pc'; the
    columns are stored in self.Sw_r, self.kro_r, self.krw_r, self.pc_r
    (note the column order in the file: kro before krw).
    """
    self.Sw_r = []
    self.krw_r = []
    self.kro_r = []
    self.pc_r = []
    with open("perm_rel.py", "r") as arq:
        rows = arq.readlines()
    for row in rows[1:]:  # skip the header line
        cols = row.split()
        self.Sw_r.append(float(cols[0]))
        self.kro_r.append(float(cols[1]))
        self.krw_r.append(float(cols[2]))
        self.pc_r.append(float(cols[3]))
def read_structured(self):
    """Read the structured-mesh geometry from 'structured.cfg'.

    Lines 12 and 13 (0-based) hold 'key = nx, ny, nz' (fine volumes per
    direction) and 'key = tx, ty, tz' (domain size per direction); line 11
    holds the coarsening ratios, currently unused here.

    Sets: nx/ny/nz, h (cell sizes), h2 (squared cell sizes), V (fine-cell
    volume), A (face areas per direction), tz (total size in z) and the
    structured-neighbour gid offsets viz_x/viz_y/viz_z.

    Changes: the triplicated parse sequence was factored into a local
    helper, and the dead locals (crx/cry/crz, hmin) were removed.
    """
    def _int_triplet(line):
        # "key = a, b, c" -> [a, b, c]
        rhs = line.strip().split("=")[1]
        return [int(part.strip()) for part in rhs.split(",")]

    with open('structured.cfg', 'r') as arq:
        text = arq.readlines()
    nx, ny, nz = _int_triplet(text[12])  # fine volumes per direction
    tx, ty, tz = _int_triplet(text[13])  # domain size per direction
    hx = tx/float(nx)
    hy = ty/float(ny)
    hz = tz/float(nz)
    self.nx = nx  # number of volumes in x
    self.ny = ny  # number of volumes in y
    self.nz = nz  # number of volumes in z
    self.h2 = np.array([hx**2, hy**2, hz**2])  # squared cell sizes
    self.h = np.array([hx, hy, hz])            # cell sizes
    self.V = hx*hy*hz                          # fine-cell volume
    self.A = np.array([hy*hz, hx*hz, hx*hy])   # face areas per direction
    self.tz = tz                               # total size in z
    # global-id offsets of the structured neighbours in each direction
    self.viz_x = [1, -1]
    self.viz_y = [nx, -nx]
    self.viz_z = [nx*ny, -nx*ny]
def set_erro(self):
    """Store the relative error |Pf - Pms|/|Pf| on every fine volume
    under ``err_tag``.

    NOTE(review): uses module-level ``mb`` -- presumably the same object
    as ``self.mb``; confirm.
    """
    for vol in self.all_fine_vols:
        fine_p = mb.tag_get_data(self.pf_tag, vol, flat=True)[0]
        ms_p = mb.tag_get_data(self.pms_tag, vol, flat=True)[0]
        mb.tag_set_data(self.err_tag, vol, abs(fine_p - ms_p)/float(abs(fine_p)))
def set_fi(self):
    """Assign a uniform porosity of 0.3 to every fine volume (fi_tag)."""
    porosity = 0.3
    for vol in self.all_fine_vols:
        self.mb.tag_set_data(self.fi_tag, vol, porosity)
def set_global_problem(self):
    """Assemble the fine-scale transmissibility matrix and RHS.

    Builds ``self.trans_fine`` (Epetra.CrsMatrix, up to 7 nonzeros per row:
    6 neighbours + diagonal) and ``self.b`` for the global pressure problem
    using TPFA with total mobility and 1/h^2 scaling.  Dirichlet wells get
    identity rows with the prescribed pressure in b; Neumann wells put the
    prescribed rate (signed by well type) in b.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util``/``comm``
    alongside ``self.*`` elsewhere -- presumably aliases; confirm.
    NOTE(review): ``tipo_de_poco`` is compared to 1 without the ``[0][0]``
    unpacking used by sibling methods, and the injector sign here
    (b = -set_q) is opposite to the Neuman_problem_* methods -- confirm
    both are intended.
    """
    std_map = Epetra.Map(len(self.all_fine_vols), 0, comm)
    self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
    self.b = Epetra.Vector(std_map)
    for volume in self.all_fine_vols:
        volume_centroid = mesh_topo_util.get_average_position([volume])
        adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if global_volume not in self.wells_d:
            soma = 0.0
            temp_glob_adj = []
            temp_k = []
            for adj in adj_volumes:
                global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                adj_centroid = mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                uni = self.unitary(direction)
                # directional projection + total-mobility weighting
                kvol = np.dot(np.dot(kvol,uni),uni)
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                keq = keq/(np.dot(self.h2, uni))
                temp_glob_adj.append(global_adj)
                temp_k.append(keq)
                soma = soma + keq
                # restore the full tensor before the next adjacency
                kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            soma = -1*soma  # diagonal = -(sum of off-diagonals)
            temp_k.append(soma)
            temp_glob_adj.append(global_volume)
            #print(temp_k)
            #print(temp_glob_adj)
            self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
            if global_volume in self.wells_n:
                index = self.wells_n.index(global_volume)
                tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)
                if tipo_de_poco == 1:
                    self.b[global_volume] = -self.set_q[index]
                else:
                    self.b[global_volume] = self.set_q[index]
        else:
            # Dirichlet well: identity row, prescribed pressure on the RHS
            index = self.wells_d.index(global_volume)
            self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
            self.b[global_volume] = self.set_p[index]
    self.trans_fine.FillComplete()
def set_global_problem_gr_vf(self):
    """
    Assemble the fine-grid transmissibility matrix (self.trans_fine) and the
    source vector (self.b) including gravity, finite-volume variant.
    (Original: "transmissibilidade da malha fina com gravidade _vf".)

    Side effects: rebuilds self.trans_fine and self.b, calls FillComplete().
    Uses the module-level ``mb``, ``mesh_topo_util`` and ``comm`` objects.
    """
    self.gama = 1.0
    std_map = Epetra.Map(len(self.all_fine_vols),0,comm)
    # Up to 7 nonzeros per row: 6 structured neighbours + diagonal.
    self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
    self.b = Epetra.Vector(std_map)
    for volume in self.all_fine_vols:
        volume_centroid = mesh_topo_util.get_average_position([volume])
        adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if global_volume not in self.wells_d:
            # Interior volume (no prescribed pressure): build one matrix row.
            soma = 0.0   # sum of equivalent transmissibilities (diagonal term)
            soma2 = 0.0  # gravity coefficient accumulator (vertical faces only)
            soma3 = 0.0  # gravity head contribution from the adjacent columns
            temp_glob_adj = []
            temp_k = []
            for adj in adj_volumes:
                global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                adj_centroid = mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                altura = adj_centroid[2]
                uni = self.unitary(direction)  # componentwise square of the unit vector
                z = uni[2]
                # Project the permeability tensor onto the face direction and
                # scale by total mobility.
                # NOTE(review): unlike set_global_problem_vf/_vf_2, kvol is NOT
                # re-read from the tag at the end of this loop, so from the
                # second neighbour on `kvol` is already a projected scalar —
                # confirm this is intended.
                kvol = np.dot(np.dot(kvol,uni),uni)
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))  # area/distance scaling
                if z == 1.0:
                    # Vertical face: accumulate gravity terms.
                    # NOTE(review): uses self.gama_ although self.gama is set
                    # above — confirm both attributes exist and are intended.
                    keq2 = keq*self.gama_
                    soma2 = soma2 + keq2
                    soma3 = soma3 + (-keq2*(self.tz-altura))
                temp_glob_adj.append(global_adj)
                temp_k.append(keq)
                soma = soma + keq
            soma2 = soma2*(self.tz-volume_centroid[2])
            soma2 = -(soma2 + soma3)  # net gravity source for this volume
            soma = -1*soma            # diagonal = -(sum of off-diagonals)
            temp_k.append(soma)
            temp_glob_adj.append(global_volume)
            self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
            if global_volume in self.wells_n:
                # Neumann (rate-controlled) well: add the prescribed rate.
                index = self.wells_n.index(global_volume)
                tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
                if tipo_de_poco == 1:
                    self.b[global_volume] = -self.set_q[index] + soma2
                else:
                    self.b[global_volume] = self.set_q[index] + soma2
            else:
                self.b[global_volume] = soma2
            # Dead store: kvol is re-read at the top of the volume loop.
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        else:
            # Dirichlet (pressure-controlled) well: identity row, prescribed pressure.
            index = self.wells_d.index(global_volume)
            self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
            self.b[global_volume] = self.set_p[index]
    self.trans_fine.FillComplete()
def set_global_problem_vf(self):
    """
    Assemble the fine-grid transmissibility matrix (self.trans_fine) and the
    source vector (self.b), finite-volume variant without gravity.

    Side effects: rebuilds self.trans_fine and self.b, calls FillComplete().
    Uses the module-level ``mb``, ``mesh_topo_util`` and ``comm`` objects.
    """
    std_map = Epetra.Map(len(self.all_fine_vols),0, comm)
    # Up to 7 nonzeros per row: 6 structured neighbours + diagonal.
    self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
    self.b = Epetra.Vector(std_map)
    for volume in self.all_fine_vols:
        volume_centroid = mesh_topo_util.get_average_position([volume])
        adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if global_volume not in self.wells_d:
            soma = 0.0  # sum of equivalent transmissibilities (diagonal term)
            temp_glob_adj = []
            temp_k = []
            for adj in adj_volumes:
                global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                adj_centroid = mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                uni = self.unitary(direction)  # componentwise square of the unit vector
                # Project permeability onto the face direction; scale by mobility.
                kvol = np.dot(np.dot(kvol,uni),uni)
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))  # area/distance scaling
                temp_glob_adj.append(global_adj)
                temp_k.append(keq)
                soma = soma + keq
                # Restore the full tensor before the next neighbour (it was
                # collapsed to a scalar by the projection above).
                kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            soma = -1*soma  # diagonal = -(sum of off-diagonals)
            temp_k.append(soma)
            temp_glob_adj.append(global_volume)
            self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
            if global_volume in self.wells_n:
                index = self.wells_n.index(global_volume)
                # NOTE(review): tipo_de_poco is the raw tag array here (no
                # [0][0]); the `== 1` comparison relies on single-element
                # ndarray truthiness — confirm intended.
                tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)
                if tipo_de_poco == 1:
                    self.b[global_volume] = -self.set_q[index]
                else:
                    self.b[global_volume] = self.set_q[index]
        else:
            # Dirichlet well: identity row with prescribed pressure.
            index = self.wells_d.index(global_volume)
            self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
            self.b[global_volume] = self.set_p[index]
    self.trans_fine.FillComplete()
    # Dead debugging code kept as an inert string literal (never executed).
    """for i in range(self.nf):
        p = self.trans_fine.ExtractGlobalRowCopy(i)
        print(p[0])
        print(p[1])
        print('soma')
        print(sum(p[0]))
        if abs(sum(p[0])) > 0.000001 and abs(sum(p[0])) != 1.0:
            print('Erroooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo')
        print('\n')"""
def set_global_problem_vf_2(self):
    """
    Assemble the fine-grid transmissibility matrix (self.trans_fine) and the
    source vector (self.b), excluding the volumes with prescribed pressure
    (Dirichlet wells) from the unknowns.
    (Original: "transmissibilidade da malha fina excluindo os volumes com
    pressao prescrita".)

    Rows/columns are indexed through ``self.map_vols_ic`` (global id ->
    reduced index).  Volumes adjacent to a Dirichlet well fold the prescribed
    pressure into the RHS instead of creating a column for the well.

    Side effects: rebuilds self.trans_fine and self.b, calls FillComplete().
    """
    std_map = Epetra.Map(len(self.all_fine_vols_ic), 0, self.comm)
    # Up to 7 nonzeros per row: 6 structured neighbours + diagonal.
    self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
    self.b = Epetra.Vector(std_map)
    # --- volumes with no Dirichlet-well neighbour --------------------------
    for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        soma = 0.0  # sum of equivalent transmissibilities (diagonal term)
        temp_glob_adj = []
        temp_k = []
        for adj in adj_volumes:
            global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            uni = self.unitary(direction)  # componentwise square of the unit vector
            # Project permeability onto the face direction; scale by mobility.
            kvol = np.dot(np.dot(kvol, uni), uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj, uni), uni)
            lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))  # area/distance scaling
            temp_glob_adj.append(self.map_vols_ic[global_adj])
            temp_k.append(-keq)
            soma = soma + keq
            # Restore the full tensor before the next neighbour.
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        temp_k.append(soma)  # positive diagonal, negative off-diagonals
        temp_glob_adj.append(self.map_vols_ic[global_volume])
        self.trans_fine.InsertGlobalValues(self.map_vols_ic[global_volume], temp_k, temp_glob_adj)
        if global_volume in self.wells_n:
            # Neumann (rate-controlled) well: prescribed rate scaled by volume.
            index = self.wells_n.index(global_volume)
            tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)
            if tipo_de_poco == 1:
                self.b[self.map_vols_ic[global_volume]] = self.set_q[index]*self.V
            else:
                self.b[self.map_vols_ic[global_volume]] = -self.set_q[index]*self.V
    # --- volumes adjacent to at least one Dirichlet well -------------------
    for volume in self.neigh_wells_d:
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        soma = 0.0
        temp_glob_adj = []
        temp_k = []
        for adj in adj_volumes:
            global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            uni = self.unitary(direction)
            kvol = np.dot(np.dot(kvol, uni), uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj, uni), uni)
            lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
            if global_adj in self.wells_d:
                # Neighbour has prescribed pressure: it is not an unknown, so
                # move its contribution to the right-hand side.
                soma = soma + keq
                index = self.wells_d.index(global_adj)
                self.b[self.map_vols_ic[global_volume]] += self.set_p[index]*(keq)
            else:
                temp_glob_adj.append(self.map_vols_ic[global_adj])
                temp_k.append(-keq)
                soma = soma + keq
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        temp_k.append(soma)
        temp_glob_adj.append(self.map_vols_ic[global_volume])
        self.trans_fine.InsertGlobalValues(self.map_vols_ic[global_volume], temp_k, temp_glob_adj)
        if global_volume in self.wells_n:
            index = self.wells_n.index(global_volume)
            tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)
            if tipo_de_poco == 1:
                # BUGFIX: was `self.set_q[index]*V` — `V` is undefined here
                # (NameError whenever a Neumann well neighbours a Dirichlet
                # well); the loop above correctly uses self.V.
                self.b[self.map_vols_ic[global_volume]] += self.set_q[index]*self.V
            else:
                self.b[self.map_vols_ic[global_volume]] += -self.set_q[index]*self.V
    self.trans_fine.FillComplete()
def set_k(self):
    """
    Assign a homogeneous identity permeability tensor to every fine volume.

    The tensor is stored row-major as a flat 9-entry list on self.perm_tag.
    """
    identity_tensor = [1 if row == col else 0.0
                       for row in range(3)
                       for col in range(3)]
    for cell in self.all_fine_vols:
        self.mb.tag_set_data(self.perm_tag, cell, identity_tensor)
def set_lamb(self):
    """
    Compute and store phase mobilities for every fine volume using
    self.pol_interp.

    For each volume: reads the water saturation S, interpolates the relative
    permeabilities krw/kro from the (self.Sw_r, self.krw_r / self.kro_r)
    tables, and stores lamb_w = krw/mi_w and lamb_o = kro/mi_o on the
    lamb_w_tag / lamb_o_tag tags.
    """
    for volume in self.all_fine_vols:
        # (Removed an unused read of self.global_id_tag that was here.)
        S = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        krw = self.pol_interp(S, self.Sw_r, self.krw_r)
        kro = self.pol_interp(S, self.Sw_r, self.kro_r)
        lamb_w = krw/self.mi_w
        lamb_o = kro/self.mi_o
        self.mb.tag_set_data(self.lamb_w_tag, volume, lamb_w)
        self.mb.tag_set_data(self.lamb_o_tag, volume, lamb_o)
def set_lamb_2(self):
    """
    Compute per-volume phase mobilities, total mobility and water fractional
    flow using self.pol_interp_2, and store them on the corresponding tags
    (lamb_w_tag, lamb_o_tag, fw_tag, lbt_tag).
    """
    for cell in self.all_fine_vols:
        saturation = self.mb.tag_get_data(self.sat_tag, cell)[0][0]
        krw, kro = self.pol_interp_2(saturation)
        water_mobility = krw/self.mi_w
        oil_mobility = kro/self.mi_o
        total_mobility = water_mobility + oil_mobility
        fractional_flow = water_mobility/float(total_mobility)
        self.mb.tag_set_data(self.lamb_w_tag, cell, water_mobility)
        self.mb.tag_set_data(self.lamb_o_tag, cell, oil_mobility)
        self.mb.tag_set_data(self.fw_tag, cell, fractional_flow)
        self.mb.tag_set_data(self.lbt_tag, cell, total_mobility)
def set_Pc(self):
    """
    Broadcast the coarse (primal) pressures in self.Pc down to the fine mesh.

    For each primal meshset: resolves its id through self.ident_primal and
    writes the corresponding coarse pressure into self.pc_tag for every fine
    element contained in the primal.
    """
    for primal in self.primals:
        # Consistency fix: access MOAB through self.mb like every sibling
        # method in this class (this method used the module-level `mb`).
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        primal_id = self.ident_primal[primal_id]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        value = self.Pc[primal_id]
        # One identical pressure value per fine element of the primal.
        self.mb.tag_set_data(
            self.pc_tag,
            fine_elems_in_primal,
            np.repeat(value, len(fine_elems_in_primal)))
def set_sat_in(self):
    """
    Set the initial water-saturation field: 0.8 on injection wells
    (tipo_de_poco == 1) and 0.2 everywhere else.
    """
    # Collect the global ids of the injection wells in a set (was a list
    # named `l`): O(1) membership tests in the loop below.
    injection_gids = set()
    for volume in self.wells:
        tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
        if tipo_de_poco == 1:
            gid = self.mb.tag_get_data(self.global_id_tag, volume)[0][0]
            injection_gids.add(gid)
    for volume in self.all_fine_vols:
        gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        sat = 0.8 if gid in injection_gids else 0.2
        self.mb.tag_set_data(self.sat_tag, volume, sat)
def set_vel(self, p_tag):
    """
    Compute a per-volume velocity vector from pressure differences and store
    it on self.vel_tag.  Only the "forward" face of each axis (neighbour with
    the larger global id) is recorded.

    NOTE(review): the parameter `p_tag` is never used — the body reads
    pressures from self.p_tag instead (the sibling `vel_max` does use its
    p_tag argument).  Confirm which tag is intended.
    """
    for volume in self.all_fine_vols_ic:
        v1 = np.zeros(3)
        # v2 = np.zeros(3)
        adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        # Candidate neighbour ids per axis — assumes a structured grid where
        # neighbour ids differ by the self.viz_x/_y/_z offsets (TODO confirm).
        front = np.array([global_volume + self.viz_x[0], global_volume + self.viz_y[0], global_volume + self.viz_z[0]])
        back = np.array([global_volume - self.viz_x[0], global_volume - self.viz_y[0], global_volume - self.viz_z[0]])
        viz_x = np.array([global_volume + self.viz_x[0], global_volume - self.viz_x[0]])
        viz_y = np.array([global_volume + self.viz_y[0], global_volume - self.viz_y[0]])
        viz_z = np.array([global_volume + self.viz_z[0], global_volume - self.viz_z[0]])
        lbt_vol = self.mb.tag_get_data(self.lbt_tag, volume)[0][0]
        pvol = self.mb.tag_get_data(self.p_tag, volume)[0][0]
        for adj in adj_volumes:
            global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
            padj = self.mb.tag_get_data(self.p_tag, adj)[0][0]
            lbt_adj = self.mb.tag_get_data(self.lbt_tag, adj)[0][0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            uni = self.unitary(direction)  # componentwise square of the unit vector
            # NOTE(review): kvol is not re-read from the tag between
            # neighbours (unlike set_global_problem_vf), so from the second
            # neighbour on it is already a projected scalar — confirm intended.
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lbt_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lbt_adj)
            keq = self.kequiv(kvol, kadj)
            # keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
            grad_p = (padj - pvol)/float(np.dot(self.h, uni))
            vel = -(grad_p)*keq  # Darcy-style velocity along this face
            # if global_adj in front:
            if global_adj > global_volume:
                # Forward face: store its velocity in the matching component.
                if global_adj in viz_x:
                    v1[0] = vel
                elif global_adj in viz_y:
                    v1[1] = vel
                else:
                    v1[2] = vel
            else:
                # Backward faces currently discarded (v2 accumulation disabled).
                # if global_adj in viz_x:
                #     v2[0] = vel
                # elif global_adj in viz_y:
                #     v2[1] = vel
                # else:
                #     v2[2] = vel
                pass
        self.mb.tag_set_data(self.vel_tag, volume, v1)
def solve_linear_problem(self, A, b, n):
    """
    Solve the linear system A x = b with AztecOO and return the solution.

    A is an Epetra matrix, b an Epetra vector and n the problem size; the
    iterative solver runs at most 1000 iterations to a 1e-9 tolerance,
    printing warnings only.
    """
    row_map = Epetra.Map(n, 0, self.comm)
    solution = Epetra.Vector(row_map)
    problem = Epetra.LinearProblem(A, solution, b)
    solver = AztecOO.AztecOO(problem)
    solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)
    solver.Iterate(1000, 1e-9)
    return solution
def solve_linear_problem_numpy(self):
    """
    Dense NumPy solve of the fine-grid system (reference/debug path).

    Copies self.trans_fine row by row into a dense array, solves against
    self.b, stores the result in self.Pf2 and writes it to pf2_tag via the
    module-level ``mb``.
    """
    dense_matrix = np.zeros((self.nf, self.nf))
    rhs = np.zeros(self.nf)
    for row in range(self.nf):
        row_data = self.trans_fine.ExtractGlobalRowCopy(row)
        # row_data[0] holds the values, row_data[1] the column indices.
        dense_matrix[row, row_data[1]] = row_data[0]
        rhs[row] = self.b[row]
    self.Pf2 = np.linalg.solve(dense_matrix, rhs)
    mb.tag_set_data(self.pf2_tag, self.all_fine_vols, np.asarray(self.Pf2))
def unitary(self, l):
    """
    Return the componentwise square of the unit vector along `l`.

    Note: despite the name, the result is not a unit vector — each entry is
    a squared direction cosine, so for an axis-aligned `l` the result is a
    one-hot selector (used elsewhere with np.dot to pick one component).
    """
    unit_vector = l/np.linalg.norm(l)
    return unit_vector*unit_vector
def vel_max(self, p_tag):
    """
    Return (v2, h2, dfds2): the maximum water-velocity magnitude over all
    faces, the face spacing where it occurs, and the maximum |d(fw)/dS|.
    (Original: "Calcula a velocidade maxima tambem a variacao do fluxo
    fracionario com a saturacao".)
    """
    lim = 10**(-10)  # tolerance: "no saturation contrast" / zero-velocity guard
    v2 = 0.0
    h2 = 0
    dfds2 = 0
    for volume in self.all_fine_vols:
        v = 0.0
        pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        sat_vol = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        for adj in adjs_vol:
            padj = self.mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)  # componentwise square of the unit vector
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            h = (np.dot(self.h, uni))  # grid spacing along this face direction
            keq = keq/h
            sat_adj = self.mb.tag_get_data(self.sat_tag, adj)[0][0]
            if abs(sat_adj - sat_vol) < lim:
                # No saturation contrast: dfds is undefined, skip this face.
                # NOTE(review): `continue` also skips the kvol reset below,
                # leaving kvol projected for the next neighbour — confirm.
                continue
            # Finite-difference derivative of the fractional flow wrt saturation.
            dfds = ((lamb_w_adj/(lamb_w_adj+lamb_o_adj)) - (lamb_w_vol/(lamb_w_vol+lamb_o_vol)))/float((sat_adj - sat_vol))
            v = abs(keq*(padj - pvol)/float(h))
            if v > v2:
                v2 = v
                h2 = h
            if abs(dfds) > dfds2:
                dfds2 = abs(dfds)
            # Restore the full tensor before the next neighbour.
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    if v2 < lim:
        print('velocidade maxima de agua menor que lim')
        # NOTE(review): leftover interactive-debugging breakpoint — remove
        # before production runs.
        import pdb; pdb.set_trace()
    return v2, h2, dfds2
def run(self):
    """
    Driver for the bi-phase simulation loop.  Almost the entire body is
    currently disabled (kept below as inert string literals); only the
    initial VTK dump is executed.
    """
    print('loop')
    t_ = 0.0
    loop = 0
    # Disabled initial step (dead string literal, never executed).
    """
    self.set_sat_in()
    #self.set_lamb()
    self.set_lamb_2()
    #self.set_global_problem()
    self.set_global_problem_vf()
    #self.set_global_problem_gr_vf()
    self.calculate_prolongation_op_het()
    self.Pf = self.solve_linear_problem(self.trans_fine, self.b, self.nf)
    mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf))
    #self.solve_linear_problem_numpy()
    qmax, fi = self.div_max_3(self.pf_tag)
    self.cfl(fi, qmax)
    #calculo da pressao multiescala
    Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf), self.trilOP, self.nf), self.nc, self.nc)
    Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf, self.b), self.nc)
    self.Pc = self.solve_linear_problem(Tc, Qc, self.nc)
    self.set_Pc()
    self.Pms = self.multimat_vector(self.trilOP, self.nf, self.Pc)
    mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms))
    self.calculate_p_end()
    self.set_erro()"""
    self.mb.write_file('new_out_bif{0}.vtk'.format(loop))
    # Disabled time-march loop (dead string literal, never executed).
    """
    loop = 1
    t_ = t_ + self.delta_t
    while t_ <= self.t and loop <= self.loops:
        self.calculate_sat()
        #self.set_lamb()
        self.set_lamb_2()
        #self.set_global_problem()
        self.set_global_problem_vf()
        self.calculate_prolongation_op_het()
        self.Pf = self.solve_linear_problem(self.trans_fine, self.b, self.nf)
        mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf))
        #self.solve_linear_problem_numpy()
        qmax, fi = self.div_max_2(self.pf_tag)
        self.cfl(fi, qmax)
        Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf), self.trilOP, self.nf), self.nc, self.nc)
        Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf, self.b), self.nc)
        self.Pc = self.solve_linear_problem(Tc, Qc, self.nc)
        self.set_Pc()
        self.Pms = self.multimat_vector(self.trilOP, self.nf, self.Pc)
        mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms))
        self.calculate_p_end()
        self.set_erro()
        mb.write_file('new_out_bif{0}.vtk'.format(loop))
        loop = loop+1
        t_ = t_ + self.delta_t"""
def run_2(self):
    """
    Simulation driver using the reduced fine system (set_global_problem_vf_2):
    initial pressure solve plus an explicit saturation time march, writing one
    VTK file per loop.  The multiscale branch is present but disabled
    (commented out).

    NOTE(review): the initial step sizes the time step with vel_max/cfl_2
    while the loop uses div_max_3/cfl — confirm the mixed criteria are
    intended.
    """
    t_ = 0.0
    self.loop = 0
    self.set_sat_in()
    #self.set_lamb()
    self.set_lamb_2()
    #self.calculate_restriction_op_2()
    self.set_global_problem_vf_2()
    # Fine-scale pressure solve on the reduced system (Dirichlet wells excluded).
    self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))
    self.organize_Pf()
    self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))
    # Disabled multiscale path:
    #self.calculate_prolongation_op_het()
    #self.organize_op()
    #self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(
    #self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)
    #self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)
    #self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)
    #self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)
    #self.organize_Pms()
    #self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))
    #self.Neuman_problem_4_3()
    #self.erro()
    #self.erro_2()
    #qmax, fi = self.div_max_3(self.pf_tag)
    #self.cfl(fi, qmax)
    #print('qmax')
    #print(qmax)
    #print('delta_t')
    #print(self.delta_t)
    # CFL-limited time step from the maximum velocity.
    vmax, h, dfds = self.vel_max(self.pf_tag)
    self.cfl_2(vmax, h, dfds)
    print('delta_t: {0}'.format(self.delta_t))
    print('loop: {0}'.format(self.loop))
    print('\n')
    self.mb.write_file('new_out_bif{0}.vtk'.format(self.loop))
    self.loop = 1
    t_ = t_ + self.delta_t
    print(t_)
    print('t')
    while t_ <= self.t and self.loop < self.loops:
        # Explicit saturation update, then re-assemble and re-solve pressure.
        self.calculate_sat()
        self.set_lamb_2()
        #self.set_lamb()
        self.set_global_problem_vf_2()
        self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))
        self.organize_Pf()
        self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))
        # Disabled multiscale path:
        #self.calculate_prolongation_op_het()
        #self.organize_op()
        #self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(
        #self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)
        #self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)
        #self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)
        #self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)
        #self.organize_Pms()
        #self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))
        #self.Neuman_problem_4_3()
        #self.erro()
        #self.erro_2()
        qmax, fi = self.div_max_3(self.pf_tag)
        self.cfl(fi, qmax)
        #vmax, h, dfds = self.vel_max(self.pf_tag)
        #self.cfl_2(vmax, h, dfds)
        print('delta_t: {0}'.format(self.delta_t))
        print('loop: {0}'.format(self.loop))
        print('\n')
        self.mb.write_file('new_out_bif{0}.vtk'.format(self.loop))
        self.loop += 1
        t_ = t_ + self.delta_t
| 42.820662
| 151
| 0.544769
| 13,303
| 95,747
| 3.643088
| 0.040367
| 0.036109
| 0.046385
| 0.061406
| 0.800924
| 0.755097
| 0.72493
| 0.70541
| 0.684364
| 0.670271
| 0
| 0.018516
| 0.347363
| 95,747
| 2,235
| 152
| 42.839821
| 0.757057
| 0.0816
| 0
| 0.591142
| 0
| 0
| 0.008145
| 0
| 0
| 0
| 0
| 0.000447
| 0
| 1
| 0.034018
| false
| 0.005135
| 0.006418
| 0
| 0.053273
| 0.016688
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3e36fec379c10e317ed3e40af2fa39c0f10a98b
| 10,936
|
py
|
Python
|
oops_fhir/r4/code_system/v3_hl7_approval_status.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_hl7_approval_status.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_hl7_approval_status.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3hl7ApprovalStatus"]

# Parse the FHIR CodeSystem resource from the JSON file that sits next to
# this module (same filename stem, ".json" suffix).
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3hl7ApprovalStatus:
    """
    v3 Code System hl7ApprovalStatus

    Description: Codes for concepts describing the approval level of HL7
    artifacts. This code system reflects the concepts expressed in HL7's
    Governance & Operations Manual (GOM) past and present.

    Status: active - Version: 2018-08-12

    Copyright None

    http://terminology.hl7.org/CodeSystem/v3-hl7ApprovalStatus
    """

    # One CodeSystemConcept class attribute per code in the code system; the
    # bare string after each attribute is its (pre-existing) docstring.
    affd = CodeSystemConcept(
        {
            "code": "affd",
            "definition": "Description: Content that is being presented to an international affiliate for consideration as a realm-specific draft standard for trial use.",
            "display": "affiliate ballot - DSTU",
        }
    )
    """
    affiliate ballot - DSTU

    Description: Content that is being presented to an international affiliate for consideration as a realm-specific draft standard for trial use.
    """

    affi = CodeSystemConcept(
        {
            "code": "affi",
            "definition": "Description: Content that is being presented to an international affiliate for consideration as a realm-specific informative standard.",
            "display": "affiliate ballot - informative",
        }
    )
    """
    affiliate ballot - informative

    Description: Content that is being presented to an international affiliate for consideration as a realm-specific informative standard.
    """

    affn = CodeSystemConcept(
        {
            "code": "affn",
            "definition": "Description: Content that is being presented to an international affiliate for consideration as a realm-specific normative standard.",
            "display": "affiliate ballot - normative",
        }
    )
    """
    affiliate ballot - normative

    Description: Content that is being presented to an international affiliate for consideration as a realm-specific normative standard.
    """

    appad = CodeSystemConcept(
        {
            "code": "appad",
            "definition": "Description: Content that has passed ballot as a realm-specific draft standard for trial use.",
            "display": "approved affiliate DSTU",
        }
    )
    """
    approved affiliate DSTU

    Description: Content that has passed ballot as a realm-specific draft standard for trial use.
    """

    appai = CodeSystemConcept(
        {
            "code": "appai",
            "definition": "Description: Content that has passed ballot as a realm-specific informative standard.",
            "display": "approved affiliate informative",
        }
    )
    """
    approved affiliate informative

    Description: Content that has passed ballot as a realm-specific informative standard.
    """

    appan = CodeSystemConcept(
        {
            "code": "appan",
            "definition": "Description: Content that has passed ballot as a realm-specific normative standard",
            "display": "approved affiliate normative",
        }
    )
    """
    approved affiliate normative

    Description: Content that has passed ballot as a realm-specific normative standard
    """

    appd = CodeSystemConcept(
        {
            "code": "appd",
            "definition": "Description: Content that has passed ballot as a draft standard for trial use.",
            "display": "approved DSTU",
        }
    )
    """
    approved DSTU

    Description: Content that has passed ballot as a draft standard for trial use.
    """

    appi = CodeSystemConcept(
        {
            "code": "appi",
            "definition": "Description: Content that has passed ballot as a normative standard.",
            "display": "approved informative",
        }
    )
    """
    approved informative

    Description: Content that has passed ballot as a normative standard.
    """

    appn = CodeSystemConcept(
        {
            "code": "appn",
            "definition": "Description: Content that has passed ballot as a normative standard.",
            "display": "approved normative",
        }
    )
    """
    approved normative

    Description: Content that has passed ballot as a normative standard.
    """

    # The two "com*" concepts are deprecated in the source code system and
    # carry status/deprecationDate properties.
    comi = CodeSystemConcept(
        {
            "code": "comi",
            "definition": "Description: Content prepared by a committee and submitted for internal consideration as an informative standard.\r\n\n \n \n Deprecation Comment\n No longer supported as ballot statuses within the HL7 Governance and Operations Manual. Use normative or informative variants instead.",
            "display": "committee ballot - informative",
            "property": [
                {"code": "status", "valueCode": "deprecated"},
                {"code": "deprecationDate", "valueDateTime": "2010-11-23"},
            ],
        }
    )
    """
    committee ballot - informative

    Description: Content prepared by a committee and submitted for internal consideration as an informative standard.

    Deprecation Comment
    No longer supported as ballot statuses within the HL7 Governance and Operations Manual. Use normative or informative variants instead.
    """

    comn = CodeSystemConcept(
        {
            "code": "comn",
            "definition": "Description: Content prepared by a committee and submitted for internal consideration as an informative standard.\r\n\n \n \n Deprecation Comment\n No longer supported as ballot statuses within the HL7 Governance and Operations Manual. Use normative or informative variants instead.",
            "display": "committee ballot - normative",
            "property": [
                {"code": "status", "valueCode": "deprecated"},
                {"code": "deprecationDate", "valueDateTime": "2010-11-23"},
            ],
        }
    )
    """
    committee ballot - normative

    Description: Content prepared by a committee and submitted for internal consideration as an informative standard.

    Deprecation Comment
    No longer supported as ballot statuses within the HL7 Governance and Operations Manual. Use normative or informative variants instead.
    """

    draft = CodeSystemConcept(
        {
            "code": "draft",
            "definition": "Description: Content that is under development and is not intended to be used.",
            "display": "draft",
        }
    )
    """
    draft

    Description: Content that is under development and is not intended to be used.
    """

    loc = CodeSystemConcept(
        {
            "code": "loc",
            "definition": "Description: Content that represents an adaption of a implementable balloted material to represent the needs or capabilities of a particular installation.",
            "display": "localized adaptation",
        }
    )
    """
    localized adaptation

    Description: Content that represents an adaption of a implementable balloted material to represent the needs or capabilities of a particular installation.
    """

    memd = CodeSystemConcept(
        {
            "code": "memd",
            "definition": "Description: Content prepared by a committee and submitted for membership consideration as a draft standard for trial use.",
            "display": "membership ballot - DSTU",
        }
    )
    """
    membership ballot - DSTU

    Description: Content prepared by a committee and submitted for membership consideration as a draft standard for trial use.
    """

    memi = CodeSystemConcept(
        {
            "code": "memi",
            "definition": "Description: Content prepared by a committee and submitted for membership consideration as an informative standard.",
            "display": "membership ballot - informative",
        }
    )
    """
    membership ballot - informative

    Description: Content prepared by a committee and submitted for membership consideration as an informative standard.
    """

    memn = CodeSystemConcept(
        {
            "code": "memn",
            "definition": "Description: Content prepared by a committee and submitted for membership consideration as a normative standard.",
            "display": "membership ballot - normative",
        }
    )
    """
    membership ballot - normative

    Description: Content prepared by a committee and submitted for membership consideration as a normative standard.
    """

    ns = CodeSystemConcept(
        {
            "code": "ns",
            "definition": "Description: Content developed independently by an organization or individual that is declared to be 'usable' but for which there is no present intention to submit through the standards submission and review process.",
            "display": "non-standard - available for use",
        }
    )
    """
    non-standard - available for use

    Description: Content developed independently by an organization or individual that is declared to be 'usable' but for which there is no present intention to submit through the standards submission and review process.
    """

    prop = CodeSystemConcept(
        {
            "code": "prop",
            "definition": "Description: Content submitted to a committee for consideration for future inclusion in the standard.",
            "display": "proposal",
        }
    )
    """
    proposal

    Description: Content submitted to a committee for consideration for future inclusion in the standard.
    """

    ref = CodeSystemConcept(
        {
            "code": "ref",
            "definition": "Description: Content intended to support other content that is subject to approval, but which is not itself subject to formal approval.",
            "display": "reference",
        }
    )
    """
    reference

    Description: Content intended to support other content that is subject to approval, but which is not itself subject to formal approval.
    """

    wd = CodeSystemConcept(
        {
            "code": "wd",
            "definition": "Description: Content that represents an item that was at one point a normative or informative standard, but was subsequently withdrawn.",
            "display": "withdrawn",
        }
    )
    """
    withdrawn

    Description: Content that represents an item that was at one point a normative or informative standard, but was subsequently withdrawn.
    """

    class Meta:
        # Exposes the parsed CodeSystem resource for framework introspection.
        resource = _resource
| 35.506494
| 417
| 0.617593
| 1,074
| 10,936
| 6.276536
| 0.16946
| 0.106809
| 0.078327
| 0.056965
| 0.748999
| 0.738763
| 0.735796
| 0.729269
| 0.727192
| 0.718588
| 0
| 0.005136
| 0.305688
| 10,936
| 307
| 418
| 35.62215
| 0.882655
| 0.032919
| 0
| 0.076923
| 0
| 0.057692
| 0.57409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.038462
| 0.019231
| 0
| 0.160256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d212f4ae74384110b8a06806d7b00928d2bbad7
| 59
|
py
|
Python
|
lib/airtable/__init__.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 206
|
2015-10-15T07:05:08.000Z
|
2021-02-19T11:48:36.000Z
|
lib/airtable/__init__.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 8
|
2017-10-16T10:18:31.000Z
|
2022-03-09T14:24:27.000Z
|
lib/airtable/__init__.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 61
|
2015-10-15T08:12:44.000Z
|
2022-03-10T12:25:06.000Z
|
# HTK Imports
from htk.lib.airtable.api import AirtableAPI
| 19.666667
| 44
| 0.813559
| 9
| 59
| 5.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 59
| 2
| 45
| 29.5
| 0.923077
| 0.186441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d60478d2a561d23e0b95803f6e36f77d7b87693
| 123
|
py
|
Python
|
pybloomfilter/hash_interface.py
|
mutalisk999/pybloomfilter
|
365532d2bbcee3ace56eee5c23fd789ce3fd7ef4
|
[
"MIT"
] | null | null | null |
pybloomfilter/hash_interface.py
|
mutalisk999/pybloomfilter
|
365532d2bbcee3ace56eee5c23fd789ce3fd7ef4
|
[
"MIT"
] | null | null | null |
pybloomfilter/hash_interface.py
|
mutalisk999/pybloomfilter
|
365532d2bbcee3ace56eee5c23fd789ce3fd7ef4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
class HashInterface(object):
    """Interface for hash providers; implementors override :meth:`hash`."""

    @staticmethod
    def hash(*args):
        """Compute a hash over ``args``.  Base implementation: no-op stub."""
        return None
| 12.3
| 28
| 0.626016
| 15
| 123
| 5.133333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.243902
| 123
| 9
| 29
| 13.666667
| 0.817204
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ed9e6e0af4c80fa67bf0f75e4a377fa91a3dc7e5
| 92
|
py
|
Python
|
LogIn/admin.py
|
code-xD/Django-Projects
|
41537bb21cc392c84e55bb029cfa09a3c7574fad
|
[
"MIT"
] | null | null | null |
LogIn/admin.py
|
code-xD/Django-Projects
|
41537bb21cc392c84e55bb029cfa09a3c7574fad
|
[
"MIT"
] | null | null | null |
LogIn/admin.py
|
code-xD/Django-Projects
|
41537bb21cc392c84e55bb029cfa09a3c7574fad
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
# modify
# modify2
# modify3
| 13.142857
| 32
| 0.76087
| 12
| 92
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.173913
| 92
| 6
| 33
| 15.333333
| 0.894737
| 0.532609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
edbe844d9b805e769fe7670df2427ce56ab1610a
| 115
|
py
|
Python
|
settings.py
|
mmanhertz/eloimage
|
438b9e48371e37bf09fe79448dd24594ead3c2a3
|
[
"BSD-2-Clause"
] | null | null | null |
settings.py
|
mmanhertz/eloimage
|
438b9e48371e37bf09fe79448dd24594ead3c2a3
|
[
"BSD-2-Clause"
] | null | null | null |
settings.py
|
mmanhertz/eloimage
|
438b9e48371e37bf09fe79448dd24594ead3c2a3
|
[
"BSD-2-Clause"
] | null | null | null |
from elopic.data.strategies import fully_random, one_random_rest_least_seen
STRATEGY = one_random_rest_least_seen
| 28.75
| 75
| 0.886957
| 18
| 115
| 5.166667
| 0.666667
| 0.193548
| 0.27957
| 0.387097
| 0.473118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078261
| 115
| 3
| 76
| 38.333333
| 0.877358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
edbf8c07aec94d30d72ffb30c963bf410beeef54
| 73
|
py
|
Python
|
notebooks/MutraffExperiments/__init__.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | 3
|
2019-11-20T15:22:27.000Z
|
2021-06-13T07:52:14.000Z
|
notebooks/MutraffExperiments/__init__.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
notebooks/MutraffExperiments/__init__.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
from . import ExperimentCatalog as xs
print("Experiments Library init")
| 18.25
| 37
| 0.794521
| 9
| 73
| 6.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136986
| 73
| 3
| 38
| 24.333333
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0.328767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
ede1389a597732f2c56b44257a410839f584c715
| 440
|
py
|
Python
|
broker/broker_settings.py
|
cassioeskelsen/rabbitmq_pause_continue
|
cbc984b8883e15edce2c44b91512ef714814d287
|
[
"CC0-1.0"
] | null | null | null |
broker/broker_settings.py
|
cassioeskelsen/rabbitmq_pause_continue
|
cbc984b8883e15edce2c44b91512ef714814d287
|
[
"CC0-1.0"
] | null | null | null |
broker/broker_settings.py
|
cassioeskelsen/rabbitmq_pause_continue
|
cbc984b8883e15edce2c44b91512ef714814d287
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
import os
class BrokerSettings(ABC):
@abstractmethod
def __init__(self, host=None, user=None, password=None, port=None):
pass
@abstractmethod
def get_host(self):
pass
@abstractmethod
def get_user(self):
pass
@abstractmethod
def get_password(self):
pass
@abstractmethod
def get_port(self):
pass
| 16.923077
| 71
| 0.625
| 50
| 440
| 5.34
| 0.4
| 0.318352
| 0.314607
| 0.359551
| 0.314607
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003165
| 0.281818
| 440
| 25
| 72
| 17.6
| 0.841772
| 0.047727
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0.388889
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
611277c20c549f5961c3af28f7ea82a9b0bbdf6e
| 138
|
py
|
Python
|
ldfparser/__init__.py
|
kayoub5/ldfparser
|
04c12ec12ca243ba46ce62140eeb1c3688584244
|
[
"MIT"
] | 1
|
2021-09-17T15:21:35.000Z
|
2021-09-17T15:21:35.000Z
|
ldfparser/__init__.py
|
kayoub5/ldfparser
|
04c12ec12ca243ba46ce62140eeb1c3688584244
|
[
"MIT"
] | null | null | null |
ldfparser/__init__.py
|
kayoub5/ldfparser
|
04c12ec12ca243ba46ce62140eeb1c3688584244
|
[
"MIT"
] | null | null | null |
from .parser import LDF, parseLDF, parseLDFtoDict
from .lin import LinFrame, LinSignal
from .node import LinMaster, LinSlave, LinProductId
| 46
| 51
| 0.826087
| 17
| 138
| 6.705882
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 138
| 3
| 51
| 46
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b64d1bee0ac9c46a6f4ff85b61baf0710ac5cfdb
| 174
|
py
|
Python
|
cinema_system/paymentSystem/admin.py
|
SJPark94/E-Cinema-Booking-System
|
dbb92f615a3c5f63def2cc7247183555176d79ef
|
[
"MIT"
] | null | null | null |
cinema_system/paymentSystem/admin.py
|
SJPark94/E-Cinema-Booking-System
|
dbb92f615a3c5f63def2cc7247183555176d79ef
|
[
"MIT"
] | null | null | null |
cinema_system/paymentSystem/admin.py
|
SJPark94/E-Cinema-Booking-System
|
dbb92f615a3c5f63def2cc7247183555176d79ef
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from paymentSystem.models import PromoCode, Tickets
# Register your models here.
admin.site.register(PromoCode)
admin.site.register(Tickets)
| 29
| 51
| 0.833333
| 23
| 174
| 6.304348
| 0.565217
| 0.124138
| 0.234483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 174
| 6
| 52
| 29
| 0.917722
| 0.149425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b68e54196115fc822984f01aefda2fddb5f6cb35
| 34
|
py
|
Python
|
cli.py
|
SenZhangAI/urgent-lang
|
5b74d6b4eb23c00b5e955aee8f0237cc81e5c039
|
[
"MIT"
] | 7
|
2020-01-25T04:29:30.000Z
|
2021-05-01T09:52:03.000Z
|
cli.py
|
SenZhangAI/urgent-lang
|
5b74d6b4eb23c00b5e955aee8f0237cc81e5c039
|
[
"MIT"
] | 9
|
2020-01-23T06:57:47.000Z
|
2020-02-03T14:16:49.000Z
|
cli.py
|
SenZhangAI/urgent-lang
|
5b74d6b4eb23c00b5e955aee8f0237cc81e5c039
|
[
"MIT"
] | 3
|
2020-02-01T05:17:32.000Z
|
2020-02-03T14:09:53.000Z
|
from urgent.cli import main
main()
| 17
| 27
| 0.794118
| 6
| 34
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 2
| 28
| 17
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fccc5cc6110cd95e6dd88f1651b966fad2533348
| 19,654
|
py
|
Python
|
train.py
|
MSiam/seeing-the-world-2.0
|
84101faba0bcebb5ef0274b7cfb4a32c585a944d
|
[
"MIT"
] | null | null | null |
train.py
|
MSiam/seeing-the-world-2.0
|
84101faba0bcebb5ef0274b7cfb4a32c585a944d
|
[
"MIT"
] | null | null | null |
train.py
|
MSiam/seeing-the-world-2.0
|
84101faba0bcebb5ef0274b7cfb4a32c585a944d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# ## Seeing the World: Model Training
# ### Specify train and validate input folders
# In[1]:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-data_dir', type=str)
args = parser.parse_args()
train_input_folder = args.data_dir + 'train/'
validate_input_folder = args.data_dir + 'validate/'
##train input folder
#train_input_folder = '/data/data4/farmer_market'
#
##validation input folder
#validate_input_folder = '/data/data4/validate/farmer_market'
#
#
## In[2]:
#
#
from imutils import paths
import os
import shutil
import random
def split_data(directory, validate_directory='validation', split=0.8):
directories = [os.path.join(directory, o) for o in os.listdir(directory)
if os.path.isdir(os.path.join(directory,o))]
for directory in directories:
image_paths = list(paths.list_images(directory))
random.seed(32)
random.shuffle(image_paths)
image_paths
# compute the training and testing split
i = int(len(image_paths) * split)
train_paths = image_paths[:i]
selected_for_validation_paths = image_paths[i:]
for path in selected_for_validation_paths:
category = os.path.basename(os.path.normpath(directory))
dest_path = os.path.join(validate_directory, category)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
os.chmod(dest_path, 0o777)
try:
shutil.move(path, dest_path)
except OSError as e:
if e.errno == errno.EEXIST:
print('Image already exists.')
else:
raise
# In[3]:
#split_data(directory=train_input_folder,
# validate_directory= validate_input_folder)
# ### Create train and validate data generators
# In[4]:
#from tensorflow.keras.preprocessing.image import ImageDataGenerator
#
##apply image augmentation
#train_image_generator = ImageDataGenerator(
# rescale=1./255,
# shear_range=0.2,
# zoom_range=0.2,
# brightness_range=[0.5, 1.5],
# horizontal_flip=True,
# vertical_flip=True,
# rotation_range=40,
# width_shift_range=0.2,
# height_shift_range=0.2)
#
#validate_image_generator = ImageDataGenerator(rescale=1./255)
#
#
#
## In[5]:
#
#
#batch_size = 5#30
#image_width = 224
#image_height = 224
#IMAGE_WIDTH_HEIGHT = (image_width, image_height)
#
#class_mode = 'categorical'
#
##create train data generator flowing from train_input_folder
#train_generator = train_image_generator.flow_from_directory(
# train_input_folder,
# target_size=IMAGE_WIDTH_HEIGHT,
# batch_size=batch_size,
# class_mode=class_mode)
#
##create validation data generator flowing from validate_input_folder
#validation_generator = validate_image_generator.flow_from_directory(
# validate_input_folder,
# target_size=IMAGE_WIDTH_HEIGHT,
# batch_size=batch_size,
# class_mode=class_mode)
#
#
## ### Create Custom Model
#
## In[6]:
#
#
#from tensorflow.keras import layers
#from tensorflow.keras import Model
#from tensorflow.keras.optimizers import Adam
#
#total_classes = 60
#activation_function = 'softmax'
#loss = 'categorical_crossentropy'
#
#img_input = layers.Input(shape=(image_width, image_height, 3))
#
#x = layers.Conv2D(32, 3, activation='relu')(img_input)
#x = layers.MaxPooling2D(2)(x)
#
#x = layers.Conv2D(64, 3, activation='relu')(x)
#x = layers.MaxPooling2D(2)(x)
#
#x = layers.Flatten()(x)
#
#x = layers.Dense(512, activation='relu')(x)
#
#x = layers.Dropout(0.5)(x)
#
#output = layers.Dense(total_classes, activation=activation_function)(x)
#
#model = Model(img_input, output)
#model.compile(loss=loss,
# optimizer=Adam(lr=0.001),
# metrics=['accuracy'])
#
#
## ### Train Custom Model
#
## In[8]:
#
#
#import os, datetime
#import tensorflow as tf
#
#epochs = 5
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
#
#logdir = os.path.join("tf_logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#
#print('Started Training')
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[tensorboard_callback],
# epochs=epochs)
#
#
## In[ ]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 1.0])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
# ### Using Transfer Learning
# In[6]:
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
image_width=224
image_height=224
IMAGE_SHAPE = (image_width, image_height, 3)
base_model = tf.keras.applications.VGG19(input_shape=IMAGE_SHAPE, include_top=False,weights='imagenet')
base_model.summary()
# In[7]:
keras = tf.keras
IMAGE_WIDTH_HEIGHT = (image_width, image_height)
batch_size=30
class_mode="categorical"
total_classes = 64
activation_function = 'softmax'
loss = 'categorical_crossentropy'
train_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=keras.applications.vgg19.preprocess_input,
rescale=1.0/255.0,
shear_range=0.2,
zoom_range=[0.9, 1.25],
brightness_range=[0.5, 1.5],
horizontal_flip=True,
vertical_flip=True)
validation_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=keras.applications.vgg19.preprocess_input,
rescale=1.0/255.0)
train_generator = train_image_generator.flow_from_directory(
train_input_folder,
target_size=IMAGE_WIDTH_HEIGHT,
batch_size=batch_size,
class_mode=class_mode)
validation_generator = validation_image_generator.flow_from_directory(
validate_input_folder,
target_size=IMAGE_WIDTH_HEIGHT,
batch_size=batch_size,
class_mode=class_mode)
# In[8]:
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
import os
reload_checkpoint=True
total_classes=64
img_input = layers.Input(shape=(image_width, image_height, 3))
global_average_layer = layers.GlobalAveragePooling2D()
prediction_layer = layers.Dense(total_classes, activation='softmax')
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer
])
checkpoint_path = args.data_dir+"train_model_fruit_veggie_9/chkpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
if (reload_checkpoint and os.path.isdir(checkpoint_path)):
try:
model.load_weights(checkpoint_path)
print('loaded weights from checkpoint')
except Exception:
print('no checkpointed weights')
pass
if not os.path.isdir(checkpoint_path):
os.makedirs(checkpoint_path)
print("Number of layers in the base model: ", len(base_model.layers))
base_model.trainable = False
model.compile(loss=loss,
optimizer=Adam(lr=0.001),
metrics=['accuracy'])
model.summary()
# In[9]:
import datetime, os
epochs = 80
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size
#steps_per_epoch = 5
#validation_steps = 5
logdir = os.path.join(args.data_dir+"tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=0)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True, save_best_only=True,
verbose=1)
history = model.fit_generator(
train_generator,
steps_per_epoch=steps_per_epoch,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=[checkpoint_callback, tensorboard_callback],
epochs=epochs)
# In[10]:
#get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pdb; pdb.set_trace()
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 3])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.savefig('data.png')
# ### Continue Training
# In[11]:
#import datetime, os
#
#epochs = 20
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
##steps_per_epoch = 50
##validation_steps = 50
#
#logdir = os.path.join("/data/tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
# save_weights_only=True, save_best_only=True,
# verbose=1)
#
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[checkpoint_callback, tensorboard_callback],
# epochs=epochs)
#
#
#
## In[12]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 3])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
#
#
## ### Continue Training
#
## In[13]:
#
#
#import datetime, os
#
#epochs = 20
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
##steps_per_epoch = 50
##validation_steps = 50
#
#logdir = os.path.join("/data/tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
# save_weights_only=True, save_best_only=True,
# verbose=1)
#
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[checkpoint_callback, tensorboard_callback],
# epochs=epochs)
#
#
#
## In[14]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 3])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
#
#
## ### Continue Training
#
## In[15]:
#
#
#import datetime, os
#
#epochs = 20
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
##steps_per_epoch = 50
##validation_steps = 50
#
#logdir = os.path.join("/data/tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
# save_weights_only=True, save_best_only=True,
# verbose=1)
#
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[checkpoint_callback, tensorboard_callback],
# epochs=epochs)
#
#
#
## In[16]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 3])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
#
#
## ### Continue Training
#
## In[17]:
#
#
#import datetime, os
#
#epochs = 20
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
##steps_per_epoch = 50
##validation_steps = 50
#
#logdir = os.path.join("/data/tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
# save_weights_only=True, save_best_only=True,
# verbose=1)
#
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[checkpoint_callback, tensorboard_callback],
# epochs=epochs)
#
#
#
## In[18]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 3])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
#
#
## ### Fine Tuning
#
## In[19]:
#
#
#import datetime, os
#
#loss = 'categorical_crossentropy'
#
#checkpoint_path = "/data/train_model_fruit_veggie_9/chkpt"
#checkpoint_dir = os.path.dirname(checkpoint_path)
#
#
#if (reload_checkpoint and os.path.isdir(checkpoint_path)):
# try:
# model.load_weights(checkpoint_path)
# except Exception:
# pass
#
#if not os.path.isdir(checkpoint_path):
# os.makedirs(checkpoint_path)
#
#base_model.trainable = True
#
## Fine tune start from layer 10
#fine_tune_at = 10
#
## Freeze all layers before the `fine_tune_at` layer
#for layer in base_model.layers[:fine_tune_at]:
# layer.trainable = False
#
#model.compile(loss=loss,
# optimizer=Adam(lr=0.001),
# metrics=['accuracy'])
#
#model.summary()
#
#
## In[20]:
#
#
#import datetime, os
#
#epochs = 10
#steps_per_epoch = train_generator.n // train_generator.batch_size
#validation_steps = validation_generator.n // validation_generator.batch_size
##steps_per_epoch = 50
##validation_steps = 50
#
#logdir = os.path.join("/data/tf_logs_9", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
# save_weights_only=True, save_best_only=True,
# verbose=1)
#history = model.fit_generator(
# train_generator,
# steps_per_epoch=steps_per_epoch,
# validation_data=validation_generator,
# validation_steps=validation_steps,
# callbacks=[checkpoint_callback, tensorboard_callback],
# epochs=epochs)
#
#
#
## In[21]:
#
#
#get_ipython().magic('matplotlib inline')
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#
#acc = history.history['accuracy']
#val_acc = history.history['val_accuracy']
#
#loss = history.history['loss']
#val_loss = history.history['val_loss']
#
#plt.figure(figsize=(8, 8))
#plt.subplot(2, 1, 1)
#plt.plot(acc, label='Training Accuracy')
#plt.plot(val_acc, label='Validation Accuracy')
#plt.legend(loc='lower right')
#plt.ylabel('Accuracy')
#plt.ylim([min(plt.ylim()), 1])
#plt.title('Training and Validation Accuracy')
#
#plt.subplot(2, 1, 2)
#plt.plot(loss, label='Training Loss')
#plt.plot(val_loss, label='Validation Loss')
#plt.legend(loc='upper right')
#plt.ylabel('Cross Entropy')
#plt.ylim([0, 1.0])
#plt.title('Training and Validation Loss')
#plt.xlabel('epoch')
#
#
## ### Save Model
#
## In[19]:
#
#
#def export(model, path):
# model.save(path, save_format='tf')
#
#
## In[20]:
#
#
#model.save('/data/saved_model_2/')
#
#
## ### Reload Model
#
## In[11]:
#
#
#import tensorflow as tf
#model = tf.keras.models.load_model('/data/saved_model_2/')
#
#
## In[ ]:
#
#
#
#
| 26.205333
| 103
| 0.694413
| 2,562
| 19,654
| 5.131148
| 0.110851
| 0.029819
| 0.0267
| 0.01278
| 0.77689
| 0.761448
| 0.741442
| 0.723718
| 0.723718
| 0.723718
| 0
| 0.01871
| 0.16241
| 19,654
| 749
| 104
| 26.24032
| 0.779857
| 0.660273
| 0
| 0.099291
| 0
| 0
| 0.076669
| 0.009233
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007092
| false
| 0.007092
| 0.113475
| 0
| 0.120567
| 0.028369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fcdf1a7879efc9d035c27f3ef067ebe19fb277d1
| 38
|
py
|
Python
|
skbonus/metrics/tests/__init__.py
|
Garve/scikit-bonus
|
46c985c6f2c0b371b031977592b23cf0e28c46e3
|
[
"BSD-3-Clause"
] | 8
|
2021-02-04T13:54:43.000Z
|
2021-12-26T16:50:31.000Z
|
skbonus/metrics/tests/__init__.py
|
JoshuaC3/scikit-bonus
|
3300427e7ada4c03937b4714dd1bc3033d1c1fff
|
[
"BSD-3-Clause"
] | 3
|
2021-03-01T15:27:21.000Z
|
2021-07-31T16:14:27.000Z
|
skbonus/metrics/tests/__init__.py
|
JoshuaC3/scikit-bonus
|
3300427e7ada4c03937b4714dd1bc3033d1c1fff
|
[
"BSD-3-Clause"
] | 2
|
2021-02-13T20:16:48.000Z
|
2021-04-07T07:29:06.000Z
|
"""Module for testing the metrics."""
| 19
| 37
| 0.684211
| 5
| 38
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.787879
| 0.815789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e1dff01e7391dff2795ee32b3a50f952226daf0
| 220
|
py
|
Python
|
examples/colab-py/md4.py
|
chunlin-pan/DYSTA
|
2f93ba044e35b0a465dd8200bd64f85a14d73fe4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/colab-py/md4.py
|
chunlin-pan/DYSTA
|
2f93ba044e35b0a465dd8200bd64f85a14d73fe4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/colab-py/md4.py
|
chunlin-pan/DYSTA
|
2f93ba044e35b0a465dd8200bd64f85a14d73fe4
|
[
"BSD-3-Clause"
] | 1
|
2020-08-26T11:46:54.000Z
|
2020-08-26T11:46:54.000Z
|
def module4():
print(123 == '123') #______
print(123 == int('123')) #______
print('ABC' == 'abc') #______
print(True == 1) #______
print(False == "") #______
print(False == bool("")) #______
| 31.428571
| 37
| 0.363636
| 20
| 220
| 4
| 0.5
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084848
| 0.25
| 220
| 7
| 38
| 31.428571
| 0.4
| 0.163636
| 0
| 0
| 0
| 0
| 0.067039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0
| 0
| 0.142857
| 0.857143
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1e355ee3475f8ffd304fe9cade4aad2c216044e1
| 78
|
py
|
Python
|
test/dictionary_translation.py
|
raphaelcharriez/youtube-to-anki
|
a0dcd84a8048a4cbe2231a77f634bf3daa9dcb54
|
[
"MIT"
] | null | null | null |
test/dictionary_translation.py
|
raphaelcharriez/youtube-to-anki
|
a0dcd84a8048a4cbe2231a77f634bf3daa9dcb54
|
[
"MIT"
] | null | null | null |
test/dictionary_translation.py
|
raphaelcharriez/youtube-to-anki
|
a0dcd84a8048a4cbe2231a77f634bf3daa9dcb54
|
[
"MIT"
] | null | null | null |
'''
g = generate_vocabulary(["monsieur", "gentil", "taciturne"], "fr")
'''
| 26
| 70
| 0.576923
| 7
| 78
| 6.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 78
| 3
| 71
| 26
| 0.656716
| 0.846154
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e81b7d5c3f6ad453b50db79f2272ee3abef7826
| 461
|
py
|
Python
|
tests/img_metadata_lib/test_common.py
|
Austin-Schmidli/Image-Metadata-API
|
73e7f9cbcd397d6aefe53a75dbb9ff4e6a924f7d
|
[
"MIT"
] | null | null | null |
tests/img_metadata_lib/test_common.py
|
Austin-Schmidli/Image-Metadata-API
|
73e7f9cbcd397d6aefe53a75dbb9ff4e6a924f7d
|
[
"MIT"
] | null | null | null |
tests/img_metadata_lib/test_common.py
|
Austin-Schmidli/Image-Metadata-API
|
73e7f9cbcd397d6aefe53a75dbb9ff4e6a924f7d
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from img_metadata_lib.common import setup_logger
from img_metadata_lib.common import get_event_body
def test_setup_logger_returns_logger():
assert isinstance(setup_logger(), logging.Logger)
def test_get_event_body_returns_dict():
assert isinstance(get_event_body({"body": '{"key": "value"}'}), dict)
def test_get_event_body_returns_body():
assert get_event_body({"body": '{"key": "value"}'}) == {"key": "value"}
| 24.263158
| 75
| 0.752711
| 65
| 461
| 4.938462
| 0.323077
| 0.124611
| 0.186916
| 0.11215
| 0.498442
| 0.498442
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114967
| 461
| 18
| 76
| 25.611111
| 0.786765
| 0
| 0
| 0
| 0
| 0
| 0.104121
| 0
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.3
| true
| 0
| 0.4
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1eadaca46dfe92f451b0d732f4e7cdfb68594de3
| 27
|
py
|
Python
|
btc_price/api/__init__.py
|
asahi417/BitcoinPriceAccumulator
|
46d45fc61fb71c128936b11dc4916e7a6a84283c
|
[
"MIT"
] | 1
|
2020-05-23T09:23:47.000Z
|
2020-05-23T09:23:47.000Z
|
btc_price/api/__init__.py
|
asahi417/BitcoinPriceAccumulator
|
46d45fc61fb71c128936b11dc4916e7a6a84283c
|
[
"MIT"
] | null | null | null |
btc_price/api/__init__.py
|
asahi417/BitcoinPriceAccumulator
|
46d45fc61fb71c128936b11dc4916e7a6a84283c
|
[
"MIT"
] | 1
|
2020-05-23T09:23:25.000Z
|
2020-05-23T09:23:25.000Z
|
from .public import Public
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1ecd4c6174d0e985a1cfcc53c78516a21c48271f
| 3,398
|
py
|
Python
|
modules/likelyhood_models.py
|
vb690/bayesian_ANN
|
c7b11469108c5519849e12e3f44c9b2c95de1bf6
|
[
"MIT"
] | null | null | null |
modules/likelyhood_models.py
|
vb690/bayesian_ANN
|
c7b11469108c5519849e12e3f44c9b2c95de1bf6
|
[
"MIT"
] | null | null | null |
modules/likelyhood_models.py
|
vb690/bayesian_ANN
|
c7b11469108c5519849e12e3f44c9b2c95de1bf6
|
[
"MIT"
] | null | null | null |
import pymc3 as pm
import theano.tensor as T
from .layers import Dense
class LikelyhoodModels:
    """Builders for PyMC3 likelihood models on top of a single Dense layer.

    Each ``*_lk`` method opens a fresh ``pm.Model`` context, wires a
    :class:`Dense` layer (from ``.layers``) into the chosen observation
    distribution, and returns the (unfitted) model object.  ``total_size``
    is forwarded to the observed variable to enable PyMC3's minibatch
    log-likelihood scaling.
    """
    def __init__(self):
        """No instance state is kept; the class only namespaces the builders."""
        pass
    def gaussian_lk(self, shape_in, input_tensor, out_shape, observed,
                    total_size, prior, beta=5, **priors_kwargs):
        """Build a Gaussian likelihood model.

        ``mu`` is the output of a linear Dense layer applied to
        ``input_tensor``; the observation noise ``sigma`` follows a
        HalfCauchy(beta) prior.  ``prior`` and ``**priors_kwargs`` are
        forwarded to the Dense layer's weight priors.  Returns the
        ``pm.Model`` containing all variables.
        """
        with pm.Model() as lk_model:
            mu = Dense(
                shape_in=shape_in,
                units=out_shape,
                layer_name='mu',
                prior=prior,
                activation='linear',
                **priors_kwargs
            )(input_tensor)
            sd = pm.HalfCauchy(
                name='sigma',
                beta=beta
            )
            # Registered in lk_model by the context manager; the local name
            # is intentionally unused.
            out = pm.Normal(
                'y',
                mu=mu,
                sd=sd,
                observed=observed,
                total_size=total_size,
            )
        return lk_model
    def student_lk(self, shape_in, input_tensor, out_shape, observed,
                   total_size, prior, beta_cauchy=5, alpha_gamma=2,
                   beta_gamma=0.1, **priors_kwargs):
        """Build a Student-T likelihood model (heavy-tailed regression).

        Same structure as :meth:`gaussian_lk`, plus a Gamma(alpha_gamma,
        beta_gamma) prior on the degrees of freedom ``nu``.
        """
        with pm.Model() as lk_model:
            mu = Dense(
                shape_in=shape_in,
                units=out_shape,
                layer_name='mu',
                prior=prior,
                activation='linear',
                **priors_kwargs
            )(input_tensor)
            sd = pm.HalfCauchy(
                name='sigma',
                beta=beta_cauchy
            )
            nu = pm.Gamma(
                'nu',
                alpha=alpha_gamma,
                beta=beta_gamma
            )
            out = pm.StudentT(
                'y',
                mu=mu,
                sd=sd,
                nu=nu,
                observed=observed,
                total_size=total_size,
            )
        return lk_model
    def categorical_lk(self, shape_in, input_tensor, out_shape, observed,
                       total_size, prior, **priors_kwargs):
        """Build a multi-class likelihood: softmax over Dense logits.

        ``p`` is exposed as a Deterministic so class probabilities can be
        inspected after sampling.
        """
        with pm.Model() as lk_model:
            theta = Dense(
                shape_in=shape_in,
                units=out_shape,
                layer_name='theta',
                prior=prior,
                activation='linear',
                **priors_kwargs
            )(input_tensor)
            p = pm.Deterministic(
                'p',
                T.nnet.softmax(theta)
            )
            out = pm.Categorical(
                'y',
                p=p,
                observed=observed,
                total_size=total_size,
            )
        return lk_model
    def bernoulli_lk(self, shape_in, input_tensor, out_shape, observed,
                     total_size, prior, **priors_kwargs):
        """Build a binary likelihood: sigmoid over Dense logits.

        Mirrors :meth:`categorical_lk` but with a sigmoid link and a
        Bernoulli observation distribution.
        """
        with pm.Model() as lk_model:
            theta = Dense(
                shape_in=shape_in,
                units=out_shape,
                layer_name='theta',
                prior=prior,
                activation='linear',
                **priors_kwargs
            )(input_tensor)
            p = pm.Deterministic(
                'p',
                T.nnet.sigmoid(theta)
            )
            out = pm.Bernoulli(
                'y',
                p=p,
                observed=observed,
                total_size=total_size,
            )
        return lk_model
| 25.17037
| 73
| 0.420541
| 306
| 3,398
| 4.428105
| 0.199346
| 0.061993
| 0.100369
| 0.038376
| 0.787454
| 0.77417
| 0.77417
| 0.77417
| 0.77417
| 0.77417
| 0
| 0.003468
| 0.490877
| 3,398
| 134
| 74
| 25.358209
| 0.779769
| 0
| 0
| 0.660194
| 0
| 0
| 0.016852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0.009709
| 0.029126
| 0
| 0.126214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1ede3d3377c7cb796d43d38ac27be06ad5194af4
| 54
|
py
|
Python
|
test.py
|
michalurbanski/PythonLearning
|
0a984cf100f890eca7d99c4d5faf5b7433791d9d
|
[
"MIT"
] | null | null | null |
test.py
|
michalurbanski/PythonLearning
|
0a984cf100f890eca7d99c4d5faf5b7433791d9d
|
[
"MIT"
] | null | null | null |
test.py
|
michalurbanski/PythonLearning
|
0a984cf100f890eca7d99c4d5faf5b7433791d9d
|
[
"MIT"
] | null | null | null |
# First sample python program
print("test test test")
| 18
| 29
| 0.759259
| 8
| 54
| 5.125
| 0.75
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 30
| 27
| 0.891304
| 0.5
| 0
| 0
| 0
| 0
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a20b0e1f95cc64c4c793ea2934a13a1f4a9115e8
| 301
|
py
|
Python
|
web-service/app/rest/veiculo_resource.py
|
macielandre/devnology-test
|
9e438c753007468ec46bb8d696e74fb93cc4f777
|
[
"MIT"
] | null | null | null |
web-service/app/rest/veiculo_resource.py
|
macielandre/devnology-test
|
9e438c753007468ec46bb8d696e74fb93cc4f777
|
[
"MIT"
] | null | null | null |
web-service/app/rest/veiculo_resource.py
|
macielandre/devnology-test
|
9e438c753007468ec46bb8d696e74fb93cc4f777
|
[
"MIT"
] | null | null | null |
import flask
import app.service.veiculo as cr
from . import bp
@bp.route('/api/post')
def inserir():
    """Create a vehicle record by delegating to the veiculo service layer."""
    return cr.post()
@bp.route('/api/get')
def listar():
    """List vehicle records by delegating to the veiculo service layer."""
    return cr.get()
@bp.route('/api/put')
def editar():
    """Update a vehicle record by delegating to the veiculo service layer."""
    return cr.put()
@bp.route('/api/delete')
def deletar():
    """Delete a vehicle record by delegating to the veiculo service layer."""
    return cr.delete()
| 15.05
| 32
| 0.637874
| 47
| 301
| 4.085106
| 0.446809
| 0.145833
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166113
| 301
| 20
| 33
| 15.05
| 0.76494
| 0
| 0
| 0
| 0
| 0
| 0.119205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| true
| 0
| 0.2
| 0.266667
| 0.733333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
bfb73d812978a27e7117c3eba5bde9eaac10d6bf
| 36
|
py
|
Python
|
sqs_workers/exceptions.py
|
sodre/sqs-workers
|
46e14694805c4c2185a29ce2e906143358d06d8c
|
[
"MIT"
] | 21
|
2018-10-06T21:51:51.000Z
|
2021-04-30T19:22:38.000Z
|
sqs_workers/exceptions.py
|
sodre/sqs-workers
|
46e14694805c4c2185a29ce2e906143358d06d8c
|
[
"MIT"
] | 15
|
2019-02-27T13:19:34.000Z
|
2022-03-16T17:40:05.000Z
|
sqs_workers/exceptions.py
|
sodre/sqs-workers
|
46e14694805c4c2185a29ce2e906143358d06d8c
|
[
"MIT"
] | 4
|
2019-02-27T12:21:26.000Z
|
2021-09-20T05:04:09.000Z
|
class SQSError(Exception):
    """Base error raised by the sqs_workers machinery."""
| 12
| 26
| 0.722222
| 4
| 36
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 2
| 27
| 18
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
44b2377bf947acbdbd99a56b437695d1333ef72f
| 130
|
py
|
Python
|
imgcurl/exceptions.py
|
fitoria/imgcurl
|
8294b03658a0e93edf46461163d1f6dccb48b1ac
|
[
"Beerware"
] | 1
|
2015-11-05T13:41:48.000Z
|
2015-11-05T13:41:48.000Z
|
imgcurl/exceptions.py
|
fitoria/imgcurl
|
8294b03658a0e93edf46461163d1f6dccb48b1ac
|
[
"Beerware"
] | null | null | null |
imgcurl/exceptions.py
|
fitoria/imgcurl
|
8294b03658a0e93edf46461163d1f6dccb48b1ac
|
[
"Beerware"
] | null | null | null |
"""Exceptions for my orm."""


class ObjectNotInitializedError(Exception):
    """Raised when an object is used before it has been initialized."""


class ObjectNotFoundError(Exception):
    """Raised when a lookup finds no matching object."""
| 16.25
| 43
| 0.753846
| 12
| 130
| 8.166667
| 0.75
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 130
| 7
| 44
| 18.571429
| 0.890909
| 0.161538
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
44cabd8adf7ac94347ad5a491c5310a22b51e89d
| 60
|
py
|
Python
|
latent_variable_models/ppca/__init__.py
|
lubok-dot/lv_models
|
732a82edf6ed44e70ba1240252e72758ea1be314
|
[
"MIT"
] | null | null | null |
latent_variable_models/ppca/__init__.py
|
lubok-dot/lv_models
|
732a82edf6ed44e70ba1240252e72758ea1be314
|
[
"MIT"
] | null | null | null |
latent_variable_models/ppca/__init__.py
|
lubok-dot/lv_models
|
732a82edf6ed44e70ba1240252e72758ea1be314
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('latent_variable_models/ppca')
| 20
| 47
| 0.8
| 9
| 60
| 5.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 60
| 2
| 48
| 30
| 0.836364
| 0
| 0
| 0
| 0
| 0
| 0.465517
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
44d916c7f02206e74f6035f691c1e427ada58115
| 146
|
py
|
Python
|
VictimClient/core/download.py
|
FrancescoLucarini/BackdoorPy
|
e2033414c3f1f0424865c38b7b902dfae7101a91
|
[
"MIT"
] | 3
|
2020-10-24T20:51:38.000Z
|
2020-11-20T11:23:41.000Z
|
VictimClient/core/download.py
|
FrancescoLucarini/BackdoorPy
|
e2033414c3f1f0424865c38b7b902dfae7101a91
|
[
"MIT"
] | null | null | null |
VictimClient/core/download.py
|
FrancescoLucarini/BackdoorPy
|
e2033414c3f1f0424865c38b7b902dfae7101a91
|
[
"MIT"
] | 1
|
2020-11-07T06:02:51.000Z
|
2020-11-07T06:02:51.000Z
|
def download_file(my_socket):
print("[+] Downloading file")
filename = my_socket.receive_data()
my_socket.receive_file(filename)
| 14.6
| 39
| 0.705479
| 18
| 146
| 5.388889
| 0.555556
| 0.247423
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 146
| 9
| 40
| 16.222222
| 0.808333
| 0
| 0
| 0
| 0
| 0
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
782c98f4612015a0285fd2449be670d97ef8ccb3
| 143
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/models.py
|
wpnzach/cookiecutter-cherrypy
|
a84bd8f4469aa42c6671b298fd63945b223ee7ae
|
[
"MIT"
] | 3
|
2019-09-17T19:19:42.000Z
|
2021-12-12T13:06:48.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/models.py
|
wpnzach/cookiecutter-cherrypy
|
a84bd8f4469aa42c6671b298fd63945b223ee7ae
|
[
"MIT"
] | 1
|
2019-09-17T19:21:51.000Z
|
2019-09-17T19:21:51.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/models.py
|
wpnzach/cookiecutter-cherrypy
|
a84bd8f4469aa42c6671b298fd63945b223ee7ae
|
[
"MIT"
] | 1
|
2019-10-02T23:35:32.000Z
|
2019-10-02T23:35:32.000Z
|
"""
Main models folder for holding database models made with PeeWee.
This is an opinionated but optional inclusion.
"""
import peewee as pw
| 15.888889
| 64
| 0.762238
| 21
| 143
| 5.190476
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 143
| 8
| 65
| 17.875
| 0.931624
| 0.783217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
782cc454851607bc54b9f19dbe06c67f427e7729
| 141
|
py
|
Python
|
digsby/src/jabber/objects/__init__.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35
|
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
digsby/src/jabber/objects/__init__.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4
|
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
digsby/src/jabber/objects/__init__.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15
|
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
import bytestreams
import si
import si_filetransfer
import iq_privacy
import vcard_avatar
import nick
import chatstates
import x_event
| 17.625
| 23
| 0.843972
| 20
| 141
| 5.75
| 0.6
| 0.13913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156028
| 141
| 8
| 24
| 17.625
| 0.966387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
786704ae368e28c1df7fb180f63b19ec8d0ea925
| 117
|
py
|
Python
|
2019/07/08/Solutions/AkhirAlibhai/solution.py
|
WillDaSilva/daily-questions
|
6e86b3f625df5c60d9a57f1694fafdd24c4ff2c4
|
[
"MIT"
] | 12
|
2019-07-02T22:17:49.000Z
|
2020-10-08T16:02:04.000Z
|
2019/07/08/Solutions/AkhirAlibhai/solution.py
|
WillDaSilva/daily-questions
|
6e86b3f625df5c60d9a57f1694fafdd24c4ff2c4
|
[
"MIT"
] | 2
|
2019-07-03T12:22:22.000Z
|
2019-09-04T23:31:38.000Z
|
2019/07/08/Solutions/AkhirAlibhai/solution.py
|
WillDaSilva/daily-questions
|
6e86b3f625df5c60d9a57f1694fafdd24c4ff2c4
|
[
"MIT"
] | 15
|
2019-07-02T23:29:07.000Z
|
2020-05-11T15:53:07.000Z
|
def rotate_array(array, k):
    """Return a new list equal to *array* rotated right by *k* positions.

    ``k`` may exceed ``len(array)`` (it is reduced modulo the length) and
    may be negative (which rotates left).  The input list is not modified.

    Fix over the original: an empty *array* now returns ``[]`` instead of
    raising ``ZeroDivisionError`` from ``k % len(array)``.
    """
    if not array:
        # Guard: k % 0 would raise ZeroDivisionError.
        return []
    k = k % len(array)
    split = len(array) - k
    return array[split:] + array[:split]
| 39
| 66
| 0.632479
| 21
| 117
| 3.47619
| 0.333333
| 0.438356
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.153846
| 117
| 3
| 66
| 39
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
788a00710234c6422e0d02a830960e2518d33d45
| 20
|
py
|
Python
|
nn/board/__init__.py
|
mbed92/dao-perception
|
62b6e8a84a6704a50855434933a147f507f94263
|
[
"MIT"
] | 1
|
2022-01-19T07:53:23.000Z
|
2022-01-19T07:53:23.000Z
|
nn/board/__init__.py
|
mbed92/dao-perception
|
62b6e8a84a6704a50855434933a147f507f94263
|
[
"MIT"
] | 3
|
2021-09-01T16:16:42.000Z
|
2021-09-10T11:18:59.000Z
|
nn/board/__init__.py
|
mbed92/dao-perception
|
62b6e8a84a6704a50855434933a147f507f94263
|
[
"MIT"
] | 1
|
2021-08-30T08:26:21.000Z
|
2021-08-30T08:26:21.000Z
|
from . import board
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
789ed84e258ad854436703353129326eafecbf5f
| 45
|
py
|
Python
|
elevadorJahNoTerreoException.py
|
IpRocha1/dsoo_exercicio_6
|
69ece39b2189b3a17a9185dca8a6d17acb6b5aa5
|
[
"MIT"
] | null | null | null |
elevadorJahNoTerreoException.py
|
IpRocha1/dsoo_exercicio_6
|
69ece39b2189b3a17a9185dca8a6d17acb6b5aa5
|
[
"MIT"
] | null | null | null |
elevadorJahNoTerreoException.py
|
IpRocha1/dsoo_exercicio_6
|
69ece39b2189b3a17a9185dca8a6d17acb6b5aa5
|
[
"MIT"
] | null | null | null |
class ElevadorJahNoTerreoException( ...
| 15
| 39
| 0.733333
| 2
| 45
| 16.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 40
| 22.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1523c042ebe4b595d482bfafeb00462c93b6c945
| 367
|
py
|
Python
|
vim/template/skeleton.py
|
Shin-C/dotfiles
|
6d40dc3f0bfdb2fd3c652e9c2db01f56ae6d2620
|
[
"MIT"
] | null | null | null |
vim/template/skeleton.py
|
Shin-C/dotfiles
|
6d40dc3f0bfdb2fd3c652e9c2db01f56ae6d2620
|
[
"MIT"
] | null | null | null |
vim/template/skeleton.py
|
Shin-C/dotfiles
|
6d40dc3f0bfdb2fd3c652e9c2db01f56ae6d2620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" <++>.py
<++>
@author: Shin Chen (jiayuanchen@outlook.com)
"""
__author__ = 'Shin Chen'
# ----------------------------------------------------------------------------
# test
# ----------------------------------------------------------------------------
if __name__ == "__main__":
<++>
| 15.291667
| 79
| 0.280654
| 21
| 367
| 4.333333
| 0.857143
| 0.21978
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006557
| 0.168937
| 367
| 23
| 80
| 15.956522
| 0.291803
| 0.558583
| 0
| 0
| 0
| 0
| 0.2125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
15284c9abf19494f69a4a471fe50b9854f8f096a
| 758
|
py
|
Python
|
cripts/relationships/urls.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 2
|
2017-04-06T12:26:11.000Z
|
2018-11-05T19:17:15.000Z
|
cripts/relationships/urls.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 9
|
2016-09-28T10:19:10.000Z
|
2017-02-24T17:58:43.000Z
|
cripts/relationships/urls.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
# Relationship-management endpoints.  Views are referenced as dotted
# strings resolved against the ``prefix`` module.
# NOTE(review): string view references with a ``prefix`` kwarg are an
# old-style Django pattern (removed in Django 1.10) -- confirm the
# project's Django pin before upgrading.
urlpatterns = [
    url(r'^forge/$', 'add_new_relationship', prefix='cripts.relationships.views'),
    url(r'^breakup/$', 'break_relationship', prefix='cripts.relationships.views'),
    url(r'^get_dropdown/$', 'get_relationship_type_dropdown', prefix='cripts.relationships.views'),
    url(r'^update_relationship_confidence/$', 'update_relationship_confidence', prefix='cripts.relationships.views'),
    url(r'^update_relationship_reason/$', 'update_relationship_reason', prefix='cripts.relationships.views'),
    url(r'^update_relationship_type/$', 'update_relationship_type', prefix='cripts.relationships.views'),
    url(r'^update_relationship_date/$', 'update_relationship_date', prefix='cripts.relationships.views'),
]
| 63.166667
| 117
| 0.759894
| 86
| 758
| 6.430233
| 0.290698
| 0.260398
| 0.316456
| 0.379747
| 0.542495
| 0.542495
| 0.542495
| 0.37613
| 0
| 0
| 0
| 0
| 0.076517
| 758
| 11
| 118
| 68.909091
| 0.79
| 0
| 0
| 0
| 0
| 0
| 0.663588
| 0.569921
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
152bf0c9117c9ed89e2b5795feb14a260b0ba5f6
| 1,959
|
py
|
Python
|
pokebattle/models.py
|
raymundosaraiva/pokemon
|
fc1b1c054ce896c395b706958192a8f0723d1b0c
|
[
"MIT"
] | null | null | null |
pokebattle/models.py
|
raymundosaraiva/pokemon
|
fc1b1c054ce896c395b706958192a8f0723d1b0c
|
[
"MIT"
] | 6
|
2020-06-06T00:49:56.000Z
|
2021-09-22T18:04:13.000Z
|
pokebattle/models.py
|
raymundosaraiva/pokemon
|
fc1b1c054ce896c395b706958192a8f0723d1b0c
|
[
"MIT"
] | null | null | null |
from django.db import models
from .constants import *
class Pokemon(models.Model):
    """A pokemon species with its base battle statistics."""
    # Primary key supplied by the caller (presumably the external pokemon
    # id) rather than auto-generated -- TODO confirm against data loader.
    pokemon_id = models.IntegerField(primary_key=True, unique=True)
    name = models.CharField(max_length=20)
    attack = models.IntegerField()
    defense = models.IntegerField()
    stamina = models.IntegerField()
    def __str__(self):
        """Human-readable label: '<pokemon_id> - <name>'."""
        return f'{self.pokemon_id} - {self.name}'
class Trainer(models.Model):
    """A player owning a collection of pokemon."""
    # ANONYMOUS comes from .constants (star-imported above).
    nickname = models.CharField(max_length=20, default=ANONYMOUS)
    img = models.ImageField(blank=True, null=True)
    # NOTE(review): auto_now=True refreshes on EVERY save; for a creation
    # timestamp auto_now_add=True is the usual choice -- confirm intent.
    date_created = models.DateTimeField(auto_now=True)
    last_login = models.DateTimeField(auto_now=True)
    pokemon_collection = models.ManyToManyField(Pokemon, blank=True)
    def __str__(self):
        """Human-readable label: '<db id> - <nickname>'."""
        return f'{self.id} - {self.nickname}'
class Game(models.Model):
    """A match owned by one trainer, consisting of several battles."""
    # NOTE(review): auto_now=True updates on every save; auto_now_add=True
    # is likely intended for a start timestamp -- confirm.
    started = models.DateTimeField(auto_now=True)
    # Choice tuples (GAME_MODE, GAME_STATUS, ...) come from .constants.
    mode = models.IntegerField(choices=GAME_MODE, default=0)
    status = models.IntegerField(choices=GAME_STATUS, default=0)
    current_battle = models.IntegerField(choices=BATTLE_NUM, default=0)
    final_result = models.IntegerField(choices=FINAL_RESULT, default=0)
    trainer = models.ForeignKey(Trainer, on_delete=models.CASCADE)
    pokemon_trainer = models.ManyToManyField(Pokemon, blank=True, related_name='game_pokemon_trainer')
    def __str__(self):
        """Human-readable label: 'G<game id> - T<trainer id>'."""
        return f'G{self.id} - T{self.trainer.id}'
class Battle(models.Model):
    """A single battle inside a Game, pitting the trainer's pokemon
    against the computer's."""
    num = models.IntegerField(choices=BATTLE_NUM)
    type = models.IntegerField(choices=BATTLE_TYPE)
    result = models.IntegerField(choices=FINAL_RESULT, blank=True, default=0)
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    # Both sides nullable: a battle row may exist before pokemon are picked.
    pokemon_trainer = models.ForeignKey(Pokemon, related_name='pokemon_trainer', on_delete=models.CASCADE, blank=True, null=True)
    pokemon_pc = models.ForeignKey(Pokemon, related_name='pokemon_pc', on_delete=models.CASCADE, blank=True, null=True)
    def __str__(self):
        """Human-readable label: 'G<game id> - B<battle id>'."""
        return f'G{self.game.id} - B{self.id}'
| 1,959
| 1,959
| 0.737111
| 251
| 1,959
| 5.553785
| 0.262948
| 0.142037
| 0.125538
| 0.045911
| 0.499283
| 0.292683
| 0.144907
| 0.054519
| 0
| 0
| 0
| 0.00538
| 0.145993
| 1,959
| 1
| 1,959
| 1,959
| 0.827854
| 0
| 0
| 0.108108
| 0
| 0
| 0.082695
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.054054
| 0.108108
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
1544ec75d60a761be5465d19ff0d89b80aa887de
| 53
|
py
|
Python
|
litex/soc/cores/cpu/femtorv/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,501
|
2016-04-19T18:16:21.000Z
|
2022-03-31T17:46:31.000Z
|
litex/soc/cores/cpu/femtorv/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,135
|
2016-04-19T05:49:14.000Z
|
2022-03-31T15:21:19.000Z
|
litex/soc/cores/cpu/femtorv/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 357
|
2016-04-19T05:00:24.000Z
|
2022-03-31T11:28:32.000Z
|
from litex.soc.cores.cpu.femtorv.core import FemtoRV
| 26.5
| 52
| 0.830189
| 9
| 53
| 4.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
158d58e6c649c62758d9a05f09bcaeff726c4dc8
| 32,294
|
py
|
Python
|
tests/test_pop_finder.py
|
terciopelo/pop_finder
|
4bcd77d774a5e3b8368b6276880042b1b701c8cf
|
[
"MIT"
] | 1
|
2022-01-13T20:18:34.000Z
|
2022-01-13T20:18:34.000Z
|
tests/test_pop_finder.py
|
terciopelo/pop_finder
|
4bcd77d774a5e3b8368b6276880042b1b701c8cf
|
[
"MIT"
] | 3
|
2021-03-23T17:09:20.000Z
|
2021-09-21T17:59:45.000Z
|
tests/test_pop_finder.py
|
terciopelo/pop_finder
|
4bcd77d774a5e3b8368b6276880042b1b701c8cf
|
[
"MIT"
] | 1
|
2022-02-04T20:05:25.000Z
|
2022-02-04T20:05:25.000Z
|
from pop_finder import __version__
from pop_finder import pop_finder
from pop_finder import contour_classifier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import os
import shutil
import pytest
# helper data
# Paths to pre-baked genotype inputs and sample sheets used by the tests
# below (relative to the repo root; pytest must be run from there).
infile_all = "tests/test_inputs/onlyAtl_500.recode.vcf.locator.hdf5"
infile_all_vcf = "tests/test_inputs/onlyAtl_500.recode.vcf"
infile_kfcv = "tests/test_inputs/onlyAtl_500_kfcv.recode.vcf"
sample_data1 = "tests/test_inputs/onlyAtl_truelocs.txt"
sample_data2 = "tests/test_inputs/onlyAtl_truelocs_NAs.txt"
sample_data3 = "tests/test_inputs/onlyAtl_truelocs_badsamps.txt"
sample_data4 = "tests/test_inputs/onlyAtl_truelocs_3col.txt"
pred_path = "tests/test_inputs/test_out/loc_boot0_predlocs.txt"
# Train/test fixtures loaded once at import time; the *_empty variants
# exercise the empty-input validation branches of pop_finder.
X_train = np.load("tests/test_inputs/X_train.npy")
X_train_empty = np.zeros(shape=0)
y_train = pd.read_csv("tests/test_inputs/y_train.csv")
y_train_empty = pd.DataFrame()
X_test = np.load("tests/test_inputs/X_test.npy")
X_test_empty = np.zeros(shape=0)
y_test = pd.read_csv("tests/test_inputs/y_test.csv")
y_test_empty = pd.DataFrame()
unknowns = pd.read_csv("tests/test_inputs/test_unknowns.csv")
unknowns_empty = pd.DataFrame()
ukgen = np.load("tests/test_inputs/ukgen.npy")
ukgen_empty = np.zeros(shape=0)
def test_version():
    """Package version string must match the pinned release."""
    expected = "1.0.9"
    assert __version__ == expected
def test_read_data():
    """read_data returns the expected tuple shapes and rejects bad inputs."""
    # Read data w/o kfcv
    x = pop_finder.read_data(infile_all, sample_data2)
    assert isinstance(x, tuple)
    assert isinstance(x[0], pd.core.frame.DataFrame)
    assert isinstance(x[1], np.ndarray)
    assert isinstance(x[2], pd.core.frame.DataFrame)
    assert len(x) == 3
    # Read data w/ kfcv
    y = pop_finder.read_data(infile_all, sample_data1, kfcv=True)
    assert isinstance(y, tuple)
    assert isinstance(y[0], pd.core.frame.DataFrame)
    assert isinstance(y[1], np.ndarray)
    assert len(y) == 2
    # Test inputs
    with pytest.raises(ValueError, match="Path to infile does not exist"):
        pop_finder.read_data(infile="hello", sample_data=sample_data2)
    with pytest.raises(
        ValueError, match="Infile must have extension 'zarr', 'vcf', or 'hdf5'"
    ):
        pop_finder.read_data(infile=sample_data1, sample_data=sample_data2)
    with pytest.raises(ValueError,
                       match="Path to sample_data does not exist"):
        pop_finder.read_data(infile_all, sample_data="hello")
    with pytest.raises(ValueError,
                       match="sample_data does not have correct columns"):
        pop_finder.read_data(infile_all, sample_data=sample_data4)
    with pytest.raises(
        ValueError,
        match="sample ordering failed! Check that sample IDs match VCF."
    ):
        pop_finder.read_data(infile_kfcv, sample_data3)
def test_hp_tuning():
    """classifierHyperModel must keep its constructor args as attributes."""
    model = pop_finder.classifierHyperModel(
        input_shape=2, num_classes=2)
    assert isinstance(model, pop_finder.classifierHyperModel)
    assert model.input_shape == 2
    assert model.num_classes == 2
def test_hyper_tune():
    """End-to-end hyper_tune run plus one case per input-validation error."""
    # General run
    tuner_test = pop_finder.hyper_tune(
        infile=infile_all,
        sample_data=sample_data2,
        max_epochs=10,
        save_dir="tests/hyper_tune_test_out",
        mod_name="hyper_tune",
    )
    # NOTE(review): this assert is vacuous -- ``type(x == y)`` is always a
    # truthy type object, so it can never fail.  The intended check was
    # presumably on ``type(tuner_test[0])`` itself; confirm and fix.
    assert type(
        tuner_test[0] == "tensorflow.python.keras.engine.sequential.Sequential"
    )
    # Make sure correct files are output
    assert os.path.exists("tests/hyper_tune_test_out")
    assert os.path.exists("tests/hyper_tune_test_out/best_mod")
    assert os.path.exists("tests/hyper_tune_test_out/X_train.npy")
    assert os.path.exists("tests/hyper_tune_test_out/X_test.npy")
    assert os.path.exists("tests/hyper_tune_test_out/y_train.csv")
    assert os.path.exists("tests/hyper_tune_test_out/y_test.csv")
    # Remove files for next run
    if os.path.exists("tests/hyper_tune_test_out/best_mod"):
        shutil.rmtree("tests/hyper_tune_test_out/best_mod")
    # Test if value error thrown if y_val != y_train
    with pytest.raises(ValueError, match="train_prop is too high"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
            train_prop=0.99,
        )
    # Check all inputs
    # infile does not exist
    with pytest.raises(ValueError, match="infile does not exist"):
        pop_finder.hyper_tune(
            infile="tests/test_inputs/onlyAtl_500.vcf",
            sample_data=sample_data2,
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
        )
    # sample_data does not exist
    with pytest.raises(ValueError, match="sample_data does not exist"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data="hello.txt",
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
        )
    # max_trials not right format
    with pytest.raises(ValueError, match="max_trials should be integer"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            max_trials=1.5,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
        )
    # runs_per_trial not right format
    with pytest.raises(ValueError, match="runs_per_trial should be integer"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            runs_per_trial=1.2,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
        )
    # max_epochs not right format
    with pytest.raises(ValueError, match="max_epochs should be integer"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs="10",
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
        )
    # train_prop not right format
    with pytest.raises(ValueError, match="train_prop should be float"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
            train_prop=1,
        )
    # seed wrong format
    with pytest.raises(ValueError, match="seed should be integer or None"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name="hyper_tune",
            train_prop=0.8,
            seed="2",
        )
    # save_dir wrong format
    with pytest.raises(ValueError, match="save_dir should be string"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            save_dir=2,
            mod_name="hyper_tune",
            train_prop=0.8,
        )
    # mod_name wrong format
    with pytest.raises(ValueError, match="mod_name should be string"):
        pop_finder.hyper_tune(
            infile=infile_all,
            sample_data=sample_data2,
            max_epochs=10,
            save_dir="tests/hyper_tune_test_out",
            mod_name=2,
            train_prop=0.8,
        )
def test_kfcv():
    """kfcv end-to-end (single and ensemble mode) plus input validation."""
    report = pop_finder.kfcv(
        infile=infile_all,
        sample_data=sample_data2,
        n_splits=3,
        n_reps=1,
        patience=10,
        max_epochs=10,
        save_dir="tests/kfcv_test_output",
        mod_path="hyper_tune_test_out",
    )
    # Check output in correct format
    assert isinstance(report, pd.DataFrame)
    # Check that two outputs are created with ensemble
    report, ensemble_report = pop_finder.kfcv(
        infile=infile_all,
        sample_data=sample_data2,
        n_splits=3,
        n_reps=1,
        ensemble=True,
        nbags=2,
        patience=10,
        max_epochs=10,
        save_dir="tests/kfcv_test_output",
        mod_path="hyper_tune_test_out",
    )
    assert isinstance(report, pd.DataFrame)
    assert isinstance(ensemble_report, pd.DataFrame)
    # Check input errors
    # infile does not exist
    with pytest.raises(ValueError, match="path to infile does not exist"):
        pop_finder.kfcv(
            infile="hello.txt",
            sample_data=sample_data2,
            n_splits=3,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # sample_data does not exist
    with pytest.raises(ValueError, match="path to sample_data incorrect"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data="hello.txt",
            n_splits=3,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # n_splits wrong format
    with pytest.raises(ValueError, match="n_splits should be an integer"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=1.5,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # n_reps wrong format
    with pytest.raises(ValueError, match="n_reps should be an integer"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=3,
            n_reps=1.5,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # ensemble wrong format
    with pytest.raises(ValueError, match="ensemble should be a boolean"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=3,
            n_reps=1,
            ensemble="True",
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # save_dir wrong format
    with pytest.raises(ValueError, match="save_dir should be a string"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=3,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir=2,
            mod_path="hyper_tune_test_out",
        )
    # n_splits > 1
    with pytest.raises(ValueError, match="n_splits must be greater than 1"):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=1,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
    # n_splits cannot be greater than smallest pop
    with pytest.raises(
        ValueError,
        match="n_splits cannot be greater than number of samples",
    ):
        pop_finder.kfcv(
            infile=infile_all,
            sample_data=sample_data2,
            n_splits=10,
            n_reps=1,
            patience=10,
            max_epochs=10,
            save_dir="tests/kfcv_test_output",
            mod_path="hyper_tune_test_out",
        )
def test_pop_finder():
    """pop_finder happy paths (single model and ensemble) plus input checks."""
    # Single-model run returns the metrics dictionary.
    test_dict = pop_finder.pop_finder(
        X_train=X_train,
        y_train=y_train,
        X_test=X_test,
        y_test=y_test,
        unknowns=unknowns,
        ukgen=ukgen,
        save_dir="tests/test_output",
        max_epochs=10,
    )
    assert isinstance(test_dict, dict)
    # Ensemble run also returns the bagged-assignment dataframe.
    test_dict, tot_bag_df = pop_finder.pop_finder(
        X_train=X_train,
        y_train=y_train,
        X_test=X_test,
        y_test=y_test,
        unknowns=unknowns,
        ukgen=ukgen,
        ensemble=True,
        nbags=2,
        save_dir="tests/test_output",
        max_epochs=10,
    )
    assert isinstance(test_dict, dict)
    assert isinstance(tot_bag_df, pd.DataFrame)
    # Input validation: each override of the valid baseline must raise a
    # ValueError with the given message.  (The original file repeated the
    # two `unknowns` checks twice verbatim; the duplicates were removed.)
    bad_inputs = [
        ({"y_train": 2}, "y_train is not a pandas dataframe"),
        ({"y_train": y_train_empty}, "y_train exists, but is empty"),
        ({"y_test": 2}, "y_test is not a pandas dataframe"),
        ({"y_test": y_test_empty}, "y_test exists, but is empty"),
        ({"X_train": 2}, "X_train is not a numpy array"),
        ({"X_train": X_train_empty}, "X_train exists, but is empty"),
        ({"X_test": 2}, "X_test is not a numpy array"),
        ({"X_test": X_test_empty}, "X_test exists, but is empty"),
        ({"ukgen": 2}, "ukgen is not a numpy array"),
        ({"ukgen": ukgen_empty}, "ukgen exists, but is empty"),
        ({"unknowns": "unknowns"}, "unknowns is not pandas dataframe"),
        ({"unknowns": unknowns_empty}, "unknowns exists, but is empty"),
        ({"ensemble": "True"}, "ensemble should be a boolean"),
        ({"try_stacking": "True"}, "try_stacking should be a boolean"),
        ({"ensemble": True, "nbags": 1.5}, "nbags should be an integer"),
        ({"train_prop": 1}, "train_prop should be a float"),
        ({"predict": "True"}, "predict should be a boolean"),
        ({"save_dir": 2}, "save_dir should be a string"),
        ({"save_weights": "True"}, "save_weights should be a boolean"),
        ({"patience": 5.6}, "patience should be an integer"),
        ({"batch_size": 5.6}, "batch_size should be an integer"),
        ({"max_epochs": 5.6}, "max_epochs should be an integer"),
        ({"plot_history": "True"}, "plot_history should be a boolean"),
        ({"mod_path": 2}, "mod_path should be a string or None"),
        ({"train_prop": 0.99, "seed": 1234}, "train_prop is too high"),
    ]
    for overrides, match in bad_inputs:
        kwargs = dict(
            X_train=X_train,
            y_train=y_train,
            X_test=X_test,
            y_test=y_test,
            unknowns=unknowns,
            ukgen=ukgen,
            save_dir="tests/test_output",
            max_epochs=10,
        )
        kwargs.update(overrides)
        with pytest.raises(ValueError, match=match):
            pop_finder.pop_finder(**kwargs)
def test_run_neural_net():
    """End-to-end run_neural_net: expected outputs exist, then input checks."""
    save_path = "tests/test_output"
    # Single-model run writes the metrics and assignment files.
    pop_finder.run_neural_net(
        infile_all,
        sample_data2,
        patience=10,
        max_epochs=2,
        save_dir=save_path,
    )
    assert os.path.isfile(os.path.join(save_path, "metrics.csv"))
    assert os.path.isfile(os.path.join(save_path, "pop_assign.csv"))
    shutil.rmtree(save_path)
    # Ensemble + stacking run writes the ensemble-specific outputs as well.
    pop_finder.run_neural_net(
        infile_all,
        sample_data2,
        patience=10,
        max_epochs=2,
        ensemble=True,
        nbags=2,
        try_stacking=True,
        save_dir=save_path,
    )
    for fname in (
        "ensemble_test_results.csv",
        "pop_assign_ensemble.csv",
        "metrics.csv",
        "pop_assign_freqs.csv",
    ):
        assert os.path.isfile(os.path.join(save_path, fname))
    shutil.rmtree(save_path)
    # Input validation: each bad override of the valid baseline must raise.
    bad_inputs = [
        ({"infile": "hello"}, "Path to infile does not exist"),
        ({"sample_data": "hello"}, "Path to sample_data does not exist"),
        (
            {"save_allele_counts": "True"},
            "save_allele_counts should be a boolean",
        ),
        ({"mod_path": 2}, "mod_path should either be a string or None"),
        ({"mod_path": "hello"}, "Path to mod_path does not exist"),
        ({"train_prop": 1}, "train_prop should be a float"),
        ({"train_prop": 0.99}, "train_prop is too high"),
    ]
    for overrides, match in bad_inputs:
        kwargs = dict(
            infile=infile_all,
            sample_data=sample_data2,
            patience=10,
            max_epochs=2,
            save_dir=save_path,
        )
        kwargs.update(overrides)
        with pytest.raises(ValueError, match=match):
            pop_finder.run_neural_net(**kwargs)
def test_assign_plot():
    """Check that assign_plot rejects each malformed argument combination."""
    failure_cases = [
        ({"save_dir": 2}, "save_dir should be string"),
        ({"save_dir": "hello", "ensemble": "True"},
         "ensemble should be boolean"),
        ({"save_dir": "hello", "ensemble": False, "col_scheme": 1},
         "col_scheme should be string"),
        ({"save_dir": "hello", "ensemble": True},
         "pop_assign_freqs.csv does not exist in save_dir"),
        ({"save_dir": "hello", "ensemble": False},
         "pop_assign.csv does not exist in save_dir"),
    ]
    for kwargs, message in failure_cases:
        with pytest.raises(ValueError, match=message):
            pop_finder.assign_plot(**kwargs)
def test_structure_plot():
    """structure_plot writes a PNG for single and ensemble output dirs,
    and rejects bad arguments."""
    # Single-model output directory.
    single_dir = "tests/test_inputs/kfcv_test_output"
    pop_finder.structure_plot(save_dir=single_dir)
    single_png = os.path.join(single_dir, "structure_plot.png")
    assert os.path.exists(single_png)
    # The assert above guarantees the file exists, so remove unconditionally
    # (the original re-checked existence redundantly before removing).
    os.remove(single_png)
    # Ensemble output directory.
    ens_dir = "tests/test_inputs/kfcv_ensemble_test_output"
    pop_finder.structure_plot(save_dir=ens_dir, ensemble=True)
    ens_png = os.path.join(ens_dir, "structure_plot.png")
    assert os.path.exists(ens_png)
    os.remove(ens_png)
    # Check inputs
    with pytest.raises(ValueError,
                       match="Path to ensemble_preds does not exist"):
        pop_finder.structure_plot(save_dir="incorrect", ensemble=True)
    with pytest.raises(ValueError,
                       match="Path to preds does not exist"):
        pop_finder.structure_plot(save_dir="incorrect", ensemble=False)
    with pytest.raises(ValueError,
                       match="col_scheme should be a string"):
        pop_finder.structure_plot(
            save_dir=single_dir, ensemble=False, col_scheme=2
        )
def test_contour_classifier():
    """Input validation and happy path for contour_classifier."""
    out_dir = "tests/test_inputs/test_out"
    with pytest.raises(ValueError, match="save_dir does not exist"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1, save_dir="incorrect"
        )
    with pytest.raises(ValueError, match="path to sample_data incorrect"):
        contour_classifier.contour_classifier(
            sample_data="incorrect", save_dir=out_dir
        )
    with pytest.raises(ValueError, match="path to genetic data incorrect"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            run_locator=True,
            gen_dat="incorrect",
            save_dir=out_dir,
        )
    with pytest.raises(ValueError, match="Cannot use hdf5 file"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            run_locator=True,
            gen_dat=infile_all,
            save_dir=out_dir,
        )
    # NOTE: the original repeated this bootstraps check twice verbatim;
    # the duplicate was removed.
    with pytest.raises(ValueError, match="bootstraps"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            nboots=25,
            save_dir=out_dir,
            multi_iter=1,
        )
    with pytest.raises(
        ValueError,
        match="Something went wrong with the prediction data"
    ):
        contour_classifier.contour_classifier(
            sample_data=sample_data3, save_dir=out_dir
        )
    with pytest.raises(
        ValueError,
        match="sample_data file should have columns x, y, pop, and sampleID"
    ):
        contour_classifier.contour_classifier(
            sample_data=sample_data4, save_dir=out_dir
        )
    with pytest.raises(Exception,
                       match="Too few points to generate contours"):
        contour_classifier.contour_classifier(
            sample_data=sample_data2,
            run_locator=True,
            gen_dat=infile_all_vcf,
            nboots=1,
            max_epochs=1,
            save_dir=out_dir,
        )
    # Happy path: returns a classification dataframe with bounded
    # kernel-density estimates.
    class_df = contour_classifier.contour_classifier(
        sample_data=sample_data2, save_dir=out_dir
    )
    assert isinstance(class_df, pd.DataFrame)
    assert list(class_df.columns) == [
        "sampleID", "classification", "kd_estimate"
    ]
    assert class_df["kd_estimate"].between(0, 1).all()
def test_cont_finder():
    """Rebuild the KDE contour set for one sample and check cont_finder."""
    preds = pd.read_csv(pred_path).rename(
        {"x": "pred_x", "y": "pred_y"}, axis=1
    )
    true_lab = pd.read_csv(sample_data1, sep="\t")
    samp = preds[preds["sampleID"] == "LESP_65"]
    xs = samp["pred_x"]
    ys = samp["pred_y"]
    # Pad the evaluation window by 10% of each axis range.
    pad_x = (max(xs) - min(xs)) / 10
    pad_y = (max(ys) - min(ys)) / 10
    xlim = (min(xs) - pad_x, max(xs) + pad_x)
    ylim = (min(ys) - pad_y, max(ys) + pad_y)
    X, Y = np.mgrid[
        xlim[0]:xlim[1]:200j, ylim[0]:ylim[1]:200j
    ]
    grid_pts = np.vstack([X.ravel(), Y.ravel()])
    kernel = stats.gaussian_kde(np.vstack([xs, ys]))
    Z = np.reshape(kernel(grid_pts).T, X.shape)
    norm_z = Z / np.max(Z)
    fig = plt.figure(figsize=(8, 8))
    ax = fig.gca()
    cset = ax.contour(X, Y, norm_z, 10, colors="black")
    # cont_finder expects contour levels ordered from high to low.
    cset.levels = -np.sort(-cset.levels)
    res = contour_classifier.cont_finder(true_lab, cset)
    assert len(res) == 2
    assert res[0] == "Baccalieu"
    assert res[1] == 0.4
    plt.close()
def test_kfcv_contour():
    """kfcv rejects a bad sample_data path and returns matching label sets."""
    with pytest.raises(ValueError, match="path to sample_data incorrect"):
        contour_classifier.kfcv(
            sample_data="incorrect",
            gen_dat=infile_all_vcf,
            save_dir="tests/test_inputs/kfcv",
        )
    predicted, actual, report = contour_classifier.kfcv(
        sample_data=sample_data1,
        gen_dat=infile_all_vcf,
        n_splits=2,
        n_runs=2,
        max_epochs=1,
        nboots=10,
        save_dir="tests/test_inputs/kfcv",
    )
    reference = pd.read_csv(sample_data1, sep="\t")
    assert len(predicted) == len(actual)
    # Two runs, so every sample appears twice in the predictions.
    assert len(predicted) == 2 * len(reference)
    assert isinstance(report, pd.core.frame.DataFrame)
| 32.358717
| 79
| 0.593175
| 4,083
| 32,294
| 4.388685
| 0.069802
| 0.056253
| 0.066968
| 0.107372
| 0.824767
| 0.784754
| 0.763324
| 0.703667
| 0.667448
| 0.622803
| 0
| 0.015773
| 0.312875
| 32,294
| 997
| 80
| 32.391174
| 0.791753
| 0.027343
| 0
| 0.641256
| 0
| 0
| 0.178147
| 0.068339
| 0
| 0
| 0
| 0
| 0.049327
| 1
| 0.013453
| false
| 0
| 0.011211
| 0
| 0.024664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
15ad7d99d2ab2b877e5f62be4ece4ba0853d18de
| 41
|
py
|
Python
|
src/ConnectToOracleDB.py
|
Paarzivall/Wzorce-Projektowe---Projekt
|
ad440e7563c3ebd943df87d177fe85f5c86d1251
|
[
"MIT"
] | null | null | null |
src/ConnectToOracleDB.py
|
Paarzivall/Wzorce-Projektowe---Projekt
|
ad440e7563c3ebd943df87d177fe85f5c86d1251
|
[
"MIT"
] | null | null | null |
src/ConnectToOracleDB.py
|
Paarzivall/Wzorce-Projektowe---Projekt
|
ad440e7563c3ebd943df87d177fe85f5c86d1251
|
[
"MIT"
] | null | null | null |
class ConnectToOracleDB:
    """Placeholder for the Oracle DB connection class (no behavior yet)."""
| 20.5
| 32
| 0.780488
| 4
| 41
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 2
| 33
| 20.5
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
15bfd1cf365e60139fe49efbab3ecacf9596152e
| 70
|
py
|
Python
|
examples/a_example9.py
|
berkeman/examples
|
985e907fee4120e9266544e4fc66cdbddf5d87b9
|
[
"Apache-2.0"
] | null | null | null |
examples/a_example9.py
|
berkeman/examples
|
985e907fee4120e9266544e4fc66cdbddf5d87b9
|
[
"Apache-2.0"
] | null | null | null |
examples/a_example9.py
|
berkeman/examples
|
985e907fee4120e9266544e4fc66cdbddf5d87b9
|
[
"Apache-2.0"
] | 1
|
2020-02-15T17:10:20.000Z
|
2020-02-15T17:10:20.000Z
|
#tests: depend_extra
depend_extra=('pelle',)
def synthesis():
    """No-op synthesis step; intentionally does nothing."""
| 10
| 23
| 0.714286
| 9
| 70
| 5.333333
| 0.777778
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 6
| 24
| 11.666667
| 0.786885
| 0.271429
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.