hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82375c0aea9e29d0883a083fde9e2658a9cd5fee | 203 | py | Python | Python/Numpy/002_Shape_and_Reshape.py | o-silva-1/HackerRank | be5241c1439e7ed54fe4a1847984a9ac79c6e2a9 | [
"MIT"
] | null | null | null | Python/Numpy/002_Shape_and_Reshape.py | o-silva-1/HackerRank | be5241c1439e7ed54fe4a1847984a9ac79c6e2a9 | [
"MIT"
] | null | null | null | Python/Numpy/002_Shape_and_Reshape.py | o-silva-1/HackerRank | be5241c1439e7ed54fe4a1847984a9ac79c6e2a9 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/np-shape-reshape/problem?h_r=next-challenge&h_v=zen
import numpy as np
arr = input().split(' ')
np_arr = np.array(arr, int)
np_arr.shape = (3, 3)
print(np_arr)
| 20.3 | 91 | 0.714286 | 37 | 203 | 3.783784 | 0.675676 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01105 | 0.108374 | 203 | 9 | 92 | 22.555556 | 0.762431 | 0.438424 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8246bef53f647ed24cdb9294f764c94f0316156b | 1,016 | py | Python | IRN/train.py | Barry-Menglong-Yao/conll2019-snopes-crawling | f803a99e27866036b92175c7d9786611ad1f389b | [
"Apache-2.0"
] | 1 | 2021-12-28T16:28:19.000Z | 2021-12-28T16:28:19.000Z | IRN/train.py | Barry-Menglong-Yao/conll2019-snopes-crawling | f803a99e27866036b92175c7d9786611ad1f389b | [
"Apache-2.0"
] | null | null | null | IRN/train.py | Barry-Menglong-Yao/conll2019-snopes-crawling | f803a99e27866036b92175c7d9786611ad1f389b | [
"Apache-2.0"
] | null | null | null | import os
import torch
import random
import numpy as np
from network.image_reconstrcution_network import ImageReconstrcutionNetwork
from trainer import ImageReconstrcutionTrainer
from util.config import Config
def init_seeds(seed=0):
#torch.manual_seed(seed)
#torch.cuda.manual_seed(seed)
#torch.cuda.manual_seed_all(seed)
#torch.autograd.set_detect_anomaly(True) # enable only for debugging
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
seed=0
init_seeds(seed=seed)
cuda_device = None
if torch.cuda.is_available():
cuda_device = 0
os.environ['CUDA_VISIBLE_DIVICES'] = "1"
config = Config("cfg")
config.load_config()
model = ImageReconstrcutionNetwork()
trainer = ImageReconstrcutionTrainer(model, config)
trainer.train()
print("completed")
#np. set_printoptions(precision=4, suppress=True, linewidth=250);torch.set_printoptions(sci_mode=False, precision=3, linewidth=250)
#torch.set_printoptions(sci_mode=False, precision=2, linewidth=250)
| 28.222222 | 132 | 0.787402 | 134 | 1,016 | 5.813433 | 0.470149 | 0.038511 | 0.033376 | 0.04878 | 0.207959 | 0.207959 | 0.207959 | 0.136072 | 0.136072 | 0 | 0 | 0.017738 | 0.112205 | 1,016 | 35 | 133 | 29.028571 | 0.845898 | 0.340551 | 0 | 0 | 0 | 0 | 0.049774 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.318182 | 0 | 0.363636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
413791542e1e259fecd9575d5697cc211ad492b3 | 450 | py | Python | Python_version/elips.py | RomaniukVadim/ellipsometric-calculator | 629ae5c962dacd4416a180e0904b8ddcf642371a | [
"WTFPL"
] | null | null | null | Python_version/elips.py | RomaniukVadim/ellipsometric-calculator | 629ae5c962dacd4416a180e0904b8ddcf642371a | [
"WTFPL"
] | null | null | null | Python_version/elips.py | RomaniukVadim/ellipsometric-calculator | 629ae5c962dacd4416a180e0904b8ddcf642371a | [
"WTFPL"
] | null | null | null | #!/usr/env python3
import argparse
import math
def main():
parser = argparse.ArgumentParser(description='Ellipsometry calculator (console version).')
parser.add_argument("--x2", dest='zname', type=argparse.FileType('r'), help='specify zipfile')
parser.add_argument("-d", dest='dname', type=argparse.FileType('r'), help='specify dictionary file')
args = parser.parse_args()
print("")
if __name__ == '__main__':
main()
| 26.470588 | 104 | 0.684444 | 53 | 450 | 5.603774 | 0.660377 | 0.060606 | 0.114478 | 0.141414 | 0.215488 | 0.215488 | 0 | 0 | 0 | 0 | 0 | 0.005222 | 0.148889 | 450 | 16 | 105 | 28.125 | 0.770235 | 0.037778 | 0 | 0 | 0 | 0 | 0.24537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.2 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
413809b3dc98458b4acb094a02a9c10328344308 | 685 | py | Python | tests/aliyun_iot_test.py | tenstone/xiaoyun-smart-speaker-server | 6afcdedd1a2485269afffb425803aff71b6cbd51 | [
"MIT"
] | 1 | 2020-05-18T06:58:25.000Z | 2020-05-18T06:58:25.000Z | tests/aliyun_iot_test.py | tenstone/xiaoyun-smart-speaker-server | 6afcdedd1a2485269afffb425803aff71b6cbd51 | [
"MIT"
] | 1 | 2022-02-10T12:45:45.000Z | 2022-02-10T12:45:45.000Z | tests/aliyun_iot_test.py | tenstone/xiaoyun-smart-speaker-server | 6afcdedd1a2485269afffb425803aff71b6cbd51 | [
"MIT"
] | null | null | null | # -*- coding: utf-8-*-
import unittest
import os
os.sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from app.components import logger
from app.components.aliyun_iot import IotServer
class TestComponentsAliyunIOT(unittest.TestCase):
"""
表格计算
"""
def setUp(self):
self.iot_server = IotServer.get_instance()
def test_send_device_message(self):
"""
批量读取行数据
:return:
"""
self.iot_server.send_device_message('马云')
def test_sync_iot_shadow(self):
r = self.iot_server.get_iot_shadow()
print(r)
pass
if __name__ == '__main__':
logger.init(info=True)
unittest.main()
| 20.147059 | 66 | 0.637956 | 84 | 685 | 4.880952 | 0.559524 | 0.05122 | 0.095122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001905 | 0.233577 | 685 | 33 | 67 | 20.757576 | 0.779048 | 0.062774 | 0 | 0 | 0 | 0 | 0.021886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0.058824 | 0.235294 | 0 | 0.470588 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
413decd6b7ac0b8b57b4131955d7ec7af4d84bba | 3,201 | py | Python | eyelab/views/ui/ui_layergroup_entry.py | MedVisBonn/eyelab | 658997be88b87087e81820e26986b0fc938e5c2d | [
"MIT"
] | null | null | null | eyelab/views/ui/ui_layergroup_entry.py | MedVisBonn/eyelab | 658997be88b87087e81820e26986b0fc938e5c2d | [
"MIT"
] | null | null | null | eyelab/views/ui/ui_layergroup_entry.py | MedVisBonn/eyelab | 658997be88b87087e81820e26986b0fc938e5c2d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'ui_layergroup_entry.ui'
##
## Created by: Qt User Interface Compiler version 6.1.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
from . import resources_rc
class Ui_LayerGroupEntry(object):
def setupUi(self, LayerGroupEntry):
if not LayerGroupEntry.objectName():
LayerGroupEntry.setObjectName(u"LayerGroupEntry")
LayerGroupEntry.resize(200, 30)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(LayerGroupEntry.sizePolicy().hasHeightForWidth())
LayerGroupEntry.setSizePolicy(sizePolicy)
LayerGroupEntry.setMinimumSize(QSize(200, 30))
LayerGroupEntry.setMaximumSize(QSize(350, 30))
font = QFont()
font.setPointSize(10)
LayerGroupEntry.setFont(font)
LayerGroupEntry.setContextMenuPolicy(Qt.PreventContextMenu)
LayerGroupEntry.setAutoFillBackground(True)
self.horizontalLayout_2 = QHBoxLayout(LayerGroupEntry)
self.horizontalLayout_2.setSpacing(2)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalLayout_2.setContentsMargins(2, 2, 2, 2)
self.hideButton = QToolButton(LayerGroupEntry)
self.hideButton.setObjectName(u"hideButton")
sizePolicy1 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.hideButton.sizePolicy().hasHeightForWidth())
self.hideButton.setSizePolicy(sizePolicy1)
self.hideButton.setMinimumSize(QSize(26, 26))
self.hideButton.setMaximumSize(QSize(26, 26))
self.hideButton.setContextMenuPolicy(Qt.NoContextMenu)
self.hideButton.setAutoFillBackground(False)
icon = QIcon()
icon.addFile(
u":/icons/icons/baseline-visibility-24px.svg",
QSize(),
QIcon.Normal,
QIcon.Off,
)
self.hideButton.setIcon(icon)
self.hideButton.setIconSize(QSize(24, 24))
self.horizontalLayout_2.addWidget(self.hideButton)
self.label = QLabel(LayerGroupEntry)
self.label.setObjectName(u"label")
self.horizontalLayout_2.addWidget(self.label)
self.retranslateUi(LayerGroupEntry)
QMetaObject.connectSlotsByName(LayerGroupEntry)
# setupUi
def retranslateUi(self, LayerGroupEntry):
LayerGroupEntry.setWindowTitle(
QCoreApplication.translate("LayerGroupEntry", u"Form", None)
)
self.hideButton.setText(
QCoreApplication.translate("LayerGroupEntry", u"...", None)
)
self.label.setText("")
# retranslateUi
| 38.566265 | 87 | 0.659794 | 279 | 3,201 | 7.530466 | 0.405018 | 0.079962 | 0.059971 | 0.028558 | 0.079962 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023011 | 0.199 | 3,201 | 82 | 88 | 39.036585 | 0.796412 | 0.085598 | 0 | 0 | 1 | 0 | 0.046182 | 0.015273 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033898 | false | 0 | 0.067797 | 0 | 0.118644 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4142a722bde1ddff3f4dd78cd7f726007fba6f52 | 11,145 | py | Python | IdeaProjects/matplotDev/matplotlibDev1.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | 1 | 2018-12-30T14:07:42.000Z | 2018-12-30T14:07:42.000Z | IdeaProjects/matplotDev/matplotlibDev1.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | null | null | null | IdeaProjects/matplotDev/matplotlibDev1.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#matplotlib inline
# 4.1. 一维数据集
#
# 在下面的所有例子中,我们将按照存储在 NumPy ndarray 对象中的数据绘制图表。当然,matplotlib 也能够按照以不同的 Python 格式存储的数据(如列表对象)绘制图表。首先,我们需要用于绘制图表的数据。为此,我们生成20个标准正态分布(伪)随机数,保存在一个NumPy ndarray中:
np.random.seed(1000)
y = np.random.standard_normal(20)
# pyplot 子库中的 plot 函数是最基础的绘图函数,但是也相当强大。原则上,它需要两组数值。
#
# x 值:包含 x 坐标(横坐标)的列表或者数组
# y 值:包含 y 坐标(纵坐标)的列表或者数组
# 当然,x 和 y 值的 数量 必须相等,考虑下面两行代码,其输出如图所示
x = range(len(y))
plt.plot(x, y)
plt.show()
plt.figure(1) # 创建图表1
plt.figure(2) # 创建图表2
ax1 = plt.subplot(211) # 在图表2中创建子图1
ax2 = plt.subplot(212) # 在图表2中创建子图2
x = np.linspace(0, 3, 100)
for i in range(5):
plt.figure(1) #❶ # 选择图表1
plt.plot(x, np.exp(i*x/3))
plt.sca(ax1) #❷ # 选择图表2的子图1
plt.plot(x, np.sin(i*x))
plt.sca(ax2) # 选择图表2的子图2
plt.plot(x, np.cos(i*x))
plt.show()
X1 = range(0, 50)
Y1 = [num**2 for num in X1] # y = x^2
X2 = [0, 1]
Y2 = [0, 1] # y = x
Fig = plt.figure(figsize=(8,4)) # Create a `figure' instance
Ax = Fig.add_subplot(111) # Create a `axes' instance in the figure
Ax.plot(X1, Y1, X2, Y2) # Create a Line2D instance in the axes
Fig.show()
Fig.savefig("test.pdf")
# plt 会注意到何时传递了 ndarray 对象。在这种情况下,没有必要提供 x 值的 “额外” 信息。如果你只提供 y 值,plot 以索引值作为对应的 x 值。因此,下面一行代码会生成完全一样的输出,如下图
# plt.plot(y)
#
#
# 可以简单地向 matplotlib 函数传递 Numpy ndarray 对象。函数能够解释数据结构以简化绘图工作。但是要注意,不要传递太大或者太复杂的数组。
#
# 由于大部分 ndarray 方法返回的仍然是一个 ndarray 对象,也可以附加一个方法(有些时候甚至可以附加多个方法)传递对象。我们用样板数据调用 ndarray 对象上的 cumsum 方法,可以获得这些数据的总和,并且和预想的一样得到不同的输出,如下图
y = np.random.standard_normal(20)
plt.plot(y.cumsum())
# 此处输入图片的描述
# 一般来说,默认绘图样式不能满足报表、出版等的典型要求。例如,你可能希望自定义所使用的字体(例如,为了 LaTeX 字体兼容)、在坐标轴上有标签或者为了更好的可辨认性而绘制网格。因此,matplotlib 提供了大量函数以自定义绘图样式。有些函数容易理解,其他的则需要更深入一步研究。例如,操纵坐标轴和增加网格及标签的函数很容易理解,如下图:
plt.plot(y.cumsum())
plt.grid(True) # 添加网格线
plt.axis('tight') # 紧凑坐标轴
# 下图列出了 plt.axis 的其它选项,大部分都以字符串对象的形式传递
#
#
# 此外,可以使用 plt.xlim 和 plt.ylim 设置每个坐标轴的最小值和最大值。下面的代码提供了一个示例,输出如图
plt.plot(y.cumsum())
plt.grid(True)
plt.xlim(-1,20)
plt.ylim(np.min(y.cumsum()) - 1,
np.max(y.cumsum()) + 1)
# 为了得到更好的易理解性,图表通常包含一些标签——例如描述x和y值性质的标题和标签。这些标签分别通过 plt.title, plt.xlabe 和 plt.ylabel 添加。默认情况下,即使提供的数据点是离散的,plot也绘制连续线条。离散点的绘制通过选择不同的样式选项实现。下图覆盖(红色)点和线宽为1.5个点的(蓝色)线条:
plt.figure(figsize=(7, 4))
# the figsize parameter defines the size of the figure in(width, height)
plt.plot(y.cumsum(), 'b', lw=1.5)
plt.plot(y.cumsum(), 'ro')
plt.grid(True)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
plt.title('A Simple Plot')
# 默认情况下,plt.plot 支持下表中的颜色缩写
#
#
# 对于线和点的样式,plt.plot 支持下表中列出的字符
#
# 任何颜色缩写都可以与任何样式字符组合,这样,你可以确保不同的数据集能够轻松区分。我们将会看到,绘图样式也会反映到图例中
# 4.2 二维数据集
#
# 按照一维数据绘图可以看做一种特例。一般来说,数据集包含多个单独的子集。这种数据的处理遵循matplotlib处理一维数据时的原则。但是,这种情况会出现其他一些问题,例如,两个数据集可能有不同的刻度,无法用相同的y或x轴刻度绘制。另一个问题是,你可能希望以不同的方式可视化两组不同数据,例如,一组数据使用线图,另一组使用柱状图。
#
# 首先,我们生成一个二维样本数据集。下面的代码生成包含标准正态分布(伪)随机数的20×2 NumPy ndarray。在这个数组上调用 cumsum 计算样本数据在0轴(即第一维)上的总和:
np.random.seed(2000)
y = np.random.standard_normal((20,2)).cumsum(axis=0)
# 一般来说,也可以将这样的二维数组传递给 plt.plot。它将自动把包含的数据解释为单独的数据集(沿着1轴,即第二维)。如下图:
plt.figure(figsize=(7, 4))
plt.plot(y, lw=1.5)
plt.plot(y, 'ro')
plt.grid(True)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
plt.title('A Simple Plot')
# 在这种情况下,进一步的注释有助于更好地理解图表,可以为每个数据集添加单独的标签并在图例中列出。plt.legend 接受不同的位置参数。0表示“最佳位置”,也就是图例尽可能少地遮盖数据。下图展示了包含两个数据集的图表,这一次带有图例。在生成代码中,我们没有传递整个 ndarray 对象,而是分别访问两个数据子集(y[:, 0]和y[:, 1]),可以为它们附加单独的标签
plt.figure(figsize=(7,4))
plt.plot(y[:, 0], lw=1.5, label='1st')
plt.plot(y[:, 1], lw=1.5, label='2nd')
plt.plot(y, 'ro')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
plt.title('A Simple Plot')
# plt.legend 的其它位置选项如下图所示
#
#
# 多个具有类似刻度的数据集(如同一金融风险因素的模拟路径)可以用单一的 y轴绘制。但是,数据集常常有不同的刻度,用单一 y轴刻度绘制这种数据的图表通常会导致可视化信息的显著丢失。为了说明这种效果,我们将两个数据子集中的第一个扩大 100倍,再次绘制该图
y[:, 0] = y[:, 0] * 100
plt.figure(figsize=(7,4))
plt.plot(y[:, 0], lw=1.5, label='1st')
plt.plot(y[:, 1], lw=1.5, label='2nd')
plt.plot(y, 'ro')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
plt.title('A Simple Plot')
# 观察上图我们可以知道,第一个数据集仍然是“在视觉上易于辨认的”,而第二个数据集在新的Y轴刻度上看起来像一条直线。在某种程度上,第二个数据集的有关信息现在“在视觉上已经丢失”。解决这个问题有两种基本方法:
#
# 使用 2 个 y 轴(左/右)
# 使用两个子图(上/下,左/右)
# 我们首先在图表中引入第二个 y 轴。下图中有两个不同的y轴,左侧的y轴用于第一个数据集,右侧的y轴用于第二个数据集,因此,有两个图例:
fig, ax1 = plt.subplots()
plt.plot(y[:, 0], 'b', lw=1.5, label='1st')
plt.plot(y[:, 0], 'ro')
plt.grid(True)
plt.legend(loc=8)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value 1st')
plt.title('A Simple Plot')
ax2 = ax1.twinx()
plt.plot(y[:, 1], 'g', lw=1.5, label='2nd')
plt.plot(y[:, 1], 'ro')
plt.legend(loc=0)
plt.ylabel('value 2nd')
# 在上图中,管理坐标轴的代码行是关键,通过使用 plt.subplots 函数,可以直接访问底层绘图对象(图、子图等)。例如,可以用它生成和第一个子图共享x轴的第二个子图。上图中有两个相互重叠的子图。
#
# 接下来,考虑两个单独子图的情况。这个选项提供了处理两个数据集的更大自由度,如下图所示:
plt.figure(figsize=(7,5))
plt.subplot(211)
plt.plot(y[:, 0], lw=1.5, label='1st')
plt.plot(y[:, 0], 'ro')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.ylabel('value')
plt.title('A Simple Plot')
plt.subplot(212)
plt.plot(y[:, 1], 'g', lw=1.5, label='2nd')
plt.plot(y[:, 1], 'ro')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
# matplotlib figure对象中子图的定位通过使用一种特殊的坐标系来实现。plt.subplot 有3个整数参数,即 numrows 、numcols 和 fignum (可能由逗号分隔,也可能没有)。numrows 指定行数,numcols 指定列数,fignum 指定子图编号(从1到numrows×numcols)。例如,有9个大小相同子图的图表有numrows=3,numcols=3,fignum=1,2,…,9。左下角的子图“坐标”如下:plt.subplot(3,3,9) 。
#
# 有时候,选择两个不同的图表类型来可视化数据可能是必要的或者是理想的。利用子图方法,就可以自由地组合matplotlib提供的任意图表类型,下图组合了线图/点图和柱状图:
plt.figure(figsize=(9, 4))
plt.subplot(121)
plt.plot(y[:, 0], lw=1.5, label='1st')
plt.plot(y[:, 0], 'ro')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.xlabel('index')
plt.ylabel('value')
plt.title('1st Data Set')
plt.subplot(122)
plt.bar(np.arange(len(y)), y[:, 1], width=0.5, color='g', label='2nd')
plt.grid(True)
plt.legend(loc=0)
plt.axis('tight')
plt.xlabel('index')
plt.title('2nd Data Set')
一、其它绘图样式
对于二维绘图,线图和点图可能是金融学中最重要的;这是因为许多数据集用于表示时间序列数据,此类数据通常可以由这些图表进行可视化。
1.1 散点图
我们要介绍的第一种图表是散点图,这种图表中一个数据集的值作为其他数据集的x值。下图展示了一个这种图表。例如,这种图表类型可用于绘制一个金融时间序列的收益和另一个时间序列收益的对比。在下面的例子中,我们将使用二维数据集和其他一些数据:
y = np.random.standard_normal((1000, 2))
plt.figure(figsize=(7, 5))
plt.plot(y[:, 0], y[:, 1], 'ro')
plt.grid(True)
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Scatter Plot')
此处输入图片的描述
matplotlib 还提供了生成散点图的一个特殊函数。它的工作方式本质上相同,但是提供了一些额外的功能。这次使用的是 scatter 函数:
plt.figure(figsize=(7, 5))
plt.scatter(y[:, 0], y[:, 1], marker='o')
plt.grid(True)
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Scatter Plot')
此处输入图片的描述
例如,scatter 绘图函数可以加入第三维,通过不同的颜色进行可视化,并使用彩条加以描述。为此,我们用随机数据生成第三个数据集,这次使用的是 0和10 之间的整数:
c = np.random.randint(0, 10, len(y))
下图展示的散点图有不同颜色小点表示的第三维,还有作为颜色图例的彩条
plt.figure(figsize=(7, 5))
plt.scatter(y[:, 0], y[:, 1], c=c, marker='o')
plt.colorbar()
plt.grid(True)
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Scatter Plot')
此处输入图片的描述
1.2 直方图
下图在同一个图表中放置两个数据集的频率值
plt.figure(figsize=(7, 4))
plt.hist(y, label=['1st', '2nd'], bins=25)
plt.grid(True)
plt.legend(loc=0)
plt.xlabel('value')
plt.ylabel('frequency')
plt.title('Histogram')
此处输入图片的描述
此处输入图片的描述
由于直方图是金融应用中的重要图表类型,我们要更认真地观察 plt.hist 的使用方法。下面的例子说明了所支持的参数:
plt.hist(x, bins=10, range=None, normed=False, weights=None, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, color=None, label=None, stacked=False, hold=None, **kwargs)
plt.hist 主要参数的描述如下
此处输入图片的描述
如下图,两个数据集的数据在直方图中堆叠
y = np.random.standard_normal((1000, 2))
plt.figure(figsize=(7, 4))
plt.hist(y, label=['1st', '2nd'], color=['b', 'g'], stacked=True, bins=20)
plt.grid(True)
plt.legend(loc=0)
plt.xlabel('value')
plt.ylabel('frequency')
plt.title('Histogram')
此处输入图片的描述
1.3 箱形图
另一种实用的图表类型是箱形图。和直方图类似,箱形图可以简洁地概述数据集的特性,很容易比较多个数据集。下图展示了按照我们的数据集绘制的这类图表:
fig, ax = plt.subplots(figsize=(7,4))
plt.boxplot(y)
plt.grid(True)
plt.setp(ax, xticklabels=['1st', '2nd'])
plt.xlabel('data set')
plt.ylabel('value')
plt.title('Boxplot')
此处输入图片的描述
1.4 数学示例
在本节的最后一个例证中,我们考虑一个受到数学启迪的图表,这个例子也可以在 matplotlib 的“展厅”中找到:http://www/matplotlib.org/gallery.html 。它绘制一个函数的图像,并以图形的方式说明了某个下限和上限之间函数图像下方区域的面积——换言之,从下限到上限之间的函数积分值。下图展示了结果图表,说明 matplotlib 能够无缝地处理 LaTeX 字体设置,在图表中加入数学公式:
from matplotlib.patches import Polygon
def func(x):
return 0.5 * np.exp(x) + 1
a, b = 0.5, 1.5
x = np.linspace(0, 2)
y = func(x)
fig, ax = plt.subplots(figsize=(7, 5))
plt.plot(x, y, 'b', linewidth=2)
plt.ylim(ymin=0)
Ix = np.linspace(a, b)
Iy = func(Ix)
verts = [(a, 0)] + list(zip(Ix, Iy)) + [(b, 0)]
poly = Polygon(verts, facecolor='0.7', edgecolor='0.5')
ax.add_patch(poly)
plt.text(0.5 * (a + b), 1, r"$\int_a^b fx\mathrm{d}x$", horizontalalignment='center', fontsize=20)
plt.figtext(0.9, 0.075, '$x$')
plt.figtext(0.075, 0.9, '$f(x)$')
ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([func(a), func(b)])
ax.set_yticklabels(('$f(a)$', '$f(b)$'))
plt.grid(True)
此处输入图片的描述
下面我们一步步来描述这个图表的生成,第一步是定义需要求取积分的函数:
def func(x):
return 0.5 * np.exp(x) + 1
第二步是定义积分区间,生成必需的数值
a ,b = 0.5, 1.5
x = np.linspace(0, 2)
y = func(x)
第三步,绘制函数图形
fig, ax = plt.subplots(figsize=(7, 5))
plt.plot(x, y, 'b', linewidth=2)
plt.ylim(ymin=0)
第四步是核心,我们使用 Polygon 函数生成阴影部分(“补丁”),表示积分面积:
Ix = np.linspace(a, b)
Iy = func(Ix)
verts = [(a, 0)] + list(zip(Ix, Iy)) + [(b, 0)]
poly = Polygon(verts, facecolor='0.7', edgecolor='0.5')
ax.add_patch(poly)
第五步是用 plt.text 和 plt.figtext 在图表上添加数学公式和一些坐标轴标签。LaTeX 代码在两个美元符号之间传递($ … $)。两个函数的前两个参数都是放置对应文本的坐标值:
plt.text(0.5 * (a + b), 1, r"$\int_a^b fx\mathrm{d}x$", horizontalalignment='center', fontsize=20)
plt.figtext(0.9, 0.075, '$x$')
plt.figtext(0.075, 0.9, '$f(x)$')
最后,我们分别设置x和y刻度标签的位置。注意,尽管我们以 LaTeX 渲染变量名称,但是用于定位的是正确的数字值。我们还添加了网格,在这个特殊例子中,只是为了强调选中的刻度:
ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([func(a), func(b)])
ax.set_yticklabels(('$f(a)$', '$f(b)$'))
plt.grid(True)
二、3D 绘图
金融中从3维可视化中获益的领域不是太多。但是,波动率平面是一个应用领域,它可以同时展示许多到期日和行权价的隐含波动率。在下面的例子中,我们人为生成一个类似波动率平面的图表。为此,我们考虑如下因素:
行权价格在50~150元之间;
到期日在0.5~2.5年之间。
这为我们提供了一个2维坐标系。我们可以使用 NumPy 的 meshgrid函数,根据两个 1 维 ndarray 对象生成这样的坐标系:
strike = np.linspace(50, 150, 24)
ttm = np.linspace(0.5, 2.5, 24)
strike, ttm = np.meshgrid(strike, ttm)
上述代码将两个 1 维数组转换为 2 维数组,在必要时重复原始坐标轴值:
此处输入图片的描述
现在,根据新的 ndarray 对象,我们通过简单的比例调整二次函数生成模拟的隐含波动率:
iv = (strike - 100) ** 2 / (100 * strike) / ttm
通过下面的代码得出一个三维图形
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(9,6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(strike, ttm, iv, rstride=2, cstride=2, cmap=plt.cm.coolwarm, linewidth=0.5, antialiased=True)
ax.set_xlabel('strike')
ax.set_ylabel('time-to-maturity')
ax.set_zlabel('implied volatility')
fig.colorbar(surf, shrink=0.5, aspect=5)
此处输入图片的描述
下图提供了 plot_surface 函数使用的不同参数的描述
此处输入图片的描述
和2维图表一样,线样式可以由单个点或者下例中的单个三角形表示。下图用相同的数据绘制3D散点图,但是现在用 view_init 函数设置不同的视角:
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111, projection='3d')
ax.view_init(30, 60)
ax.scatter(strike, ttm, iv, zdir='z', s=25, c='b', marker='^')
ax.set_xlabel('strike')
ax.set_ylabel('time-to-maturity')
ax.set_zlabel('implied volatility')
| 27.518519 | 251 | 0.713863 | 1,775 | 11,145 | 4.472113 | 0.308169 | 0.029982 | 0.025195 | 0.028219 | 0.349836 | 0.330688 | 0.313051 | 0.306248 | 0.301965 | 0.292391 | 0 | 0.039772 | 0.102109 | 11,145 | 404 | 252 | 27.586634 | 0.752273 | 0.254643 | 0 | 0.613718 | 0 | 0 | 0.082433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.018051 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
414470a20b36382982558f1e95478f07692c39d3 | 472 | py | Python | tests/config.py | mizzy/maglica | 8bc19e7fc1bf632c9027ac80f6c3f39d8bd05f85 | [
"BSD-2-Clause"
] | 15 | 2015-02-01T17:36:18.000Z | 2021-06-10T20:51:14.000Z | tests/config.py | mizzy/maglica | 8bc19e7fc1bf632c9027ac80f6c3f39d8bd05f85 | [
"BSD-2-Clause"
] | null | null | null | tests/config.py | mizzy/maglica | 8bc19e7fc1bf632c9027ac80f6c3f39d8bd05f85 | [
"BSD-2-Clause"
] | null | null | null | # $ nosetests -v --rednose --with-coverage tests/config.py
from nose.tools import *
import maglica.config
def test_config_load():
config = maglica.config.load('etc/maglica.conf.example')
eq_(config.hosts[0], "host0.example.com")
eq_(config.hosts[1], "host1.example.com")
eq_(config.client["host"], "client.example.com")
eq_(config.client["pub_port"], 5555)
eq_(config.client["rep_port"], 5556)
if __name__ == "__main__":
test_config_load()
| 27.764706 | 60 | 0.690678 | 66 | 472 | 4.651515 | 0.545455 | 0.130293 | 0.117264 | 0.175896 | 0.156352 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029484 | 0.137712 | 472 | 16 | 61 | 29.5 | 0.724816 | 0.118644 | 0 | 0 | 0 | 0 | 0.251208 | 0.057971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4150929e942157f1bce17e4545425c85a0a9dfef | 16,023 | py | Python | main.py | impressive8/Practice | 6fc30fce500d97cb2091387bf2742b8e4cc4495d | [
"MIT"
] | 197 | 2018-04-08T01:37:29.000Z | 2022-03-25T07:17:18.000Z | main.py | impressive8/Practice | 6fc30fce500d97cb2091387bf2742b8e4cc4495d | [
"MIT"
] | 2 | 2020-02-01T17:42:41.000Z | 2021-06-09T10:38:49.000Z | main.py | impressive8/Practice | 6fc30fce500d97cb2091387bf2742b8e4cc4495d | [
"MIT"
] | 80 | 2018-06-29T04:00:34.000Z | 2022-03-24T07:57:33.000Z | # ===================================
# Import the libraries
# ===================================
import numpy as np
from matplotlib import pylab as plt
import imaging
import utility
import os,sys
# ===================================
# Which stages to run
# ===================================
# Each flag switches one stage of the camera pipeline below on or off.
do_add_noise = False
do_black_level_correction = True
do_lens_shading_correction = True
do_bad_pixel_correction = True
do_channel_gain_white_balance = True
do_bayer_denoise = False
do_demosaic = True
do_demosaic_artifact_reduction = True
do_color_correction = True
do_gamma = True
do_chromatic_aberration_correction = True
do_tone_mapping = True
do_memory_color_enhancement = True
do_noise_reduction = True
do_sharpening = True
do_distortion_correction = False
# ===================================
# Remove all the .png files
# NOTE(review): shells out to `rm`, so this step is POSIX-only.
os.system("rm images/*.png")
# ===================================
# ===================================
# raw image and set up the metadata
# ===================================
# uncomment the image_name to run it via pipeline
image_name = "DSC_1339_768x512_rggb" # image content: Rose rggb
# image_name = "DSC_1339_768x512_gbrg" # image content: Rose gbrg
# image_name = "DSC_1339_768x512_grbg" # image content: Rose grbg
# image_name = "DSC_1339_768x512_bggr" # image content: Rose bggr
# image_name = "DSC_1320_2048x2048_rggb" # image content: Potrait
# image_name = "DSC_1372_6032x4032_rggb" # image content: Downtown San Jose
# image_name = "DSC_1372_12096x6032_rgb_out_demosaic" # image content: Downtown San Jose after demosaic
# read the raw image
temp = np.fromfile("images/" + image_name + ".raw", dtype="uint16", sep="")

# -----------------------------------------------------------------
# Metadata shared by every supported capture.  The original code
# repeated these values in each branch below (with drifting comments);
# they are identical for all images, so they live here once.
# -----------------------------------------------------------------
# the ColorMatrix2 found from the metadata
COLOR_MATRIX = [[.9020, -.2890, -.0715],
                [-.4535, 1.2436, .2348],
                [-.0934, .1919, .7086]]
BLACK_LEVEL = (600, 600, 600, 600)
WHITE_LEVEL = (15520, 15520, 15520, 15520)
BIT_DEPTH = 14
# Default white-balance gains in RGGB order; shuffled per bayer pattern below.
RGGB_GAIN = (1.94921875, 1.0, 1.0, 1.34375)

# Per-image settings for the plain bayer captures:
#   image_name -> ((rows, cols), ImageInfo name, bayer pattern, channel gains)
# Channel gains are ordered to match the bayer pattern of that image.
BAYER_IMAGES = {
    "DSC_1339_768x512_rggb":   ((512, 768),   "1339_768x512_rggb",       "rggb", RGGB_GAIN),
    "DSC_1339_768x512_gbrg":   ((512, 768),   "1339_768x512_gbrg",       "gbrg", (1.0, 1.34375, 1.94921875, 1.0)),
    "DSC_1339_768x512_grbg":   ((512, 768),   "1339_768x512_grbg",       "grbg", (1.0, 1.94921875, 1.34375, 1.0)),
    "DSC_1339_768x512_bggr":   ((512, 768),   "1339_768x512_bggr",       "bggr", (1.34375, 1.0, 1.0, 1.94921875)),
    "DSC_1320_2048x2048_rggb": ((2048, 2048), "1320_2048x2048_rggb",     "rggb", RGGB_GAIN),
    "DSC_1372_6032x4032_rggb": ((4032, 6032), "DSC_1372_6032x4032_rggb", "rggb", RGGB_GAIN),
}


def _configure_raw(raw, pattern, gains):
    # Apply the metadata common to every image to an ImageInfo instance.
    raw.set_color_space("raw")
    raw.set_bayer_pattern(pattern)
    raw.set_channel_gain(gains)
    raw.set_bit_depth(BIT_DEPTH)
    raw.set_black_level(BLACK_LEVEL)
    raw.set_white_level(WHITE_LEVEL)
    raw.set_color_matrix(COLOR_MATRIX)


if image_name in BAYER_IMAGES:
    shape, info_name, pattern, gains = BAYER_IMAGES[image_name]
    temp = temp.reshape(shape)
    raw = imaging.ImageInfo(info_name, temp)
    _configure_raw(raw, pattern, gains)
    data = raw.data
elif image_name == "DSC_1372_12096x6032_rgb_out_demosaic":
    # Already-demosaiced planar RGB: three 4032x6032 planes stacked
    # vertically (12096 = 3 * 4032); split them into an HxWx3 array.
    temp = temp.reshape([12096, 6032])
    temp = np.float32(temp)
    data = np.empty((4032, 6032, 3), dtype=np.float32)
    data[:, :, 0] = temp[0:4032, :]
    data[:, :, 1] = temp[4032 : 2*4032, :]
    data[:, :, 2] = temp[2*4032 : 3*4032, :]
    raw = imaging.ImageInfo("DSC_1372_12096x6032_rgb_out_demosaic", data)
    _configure_raw(raw, "rggb", RGGB_GAIN)
else:
    print("Warning! image_name not recognized.")
# ===================================
# Add noise
# ===================================
if do_add_noise:
    # Inject synthetic Gaussian noise (clipped to the sensor range)
    # so the denoising stages can be exercised on clean captures.
    noise_mean = 0
    noise_standard_deviation = 100
    seed = 100
    clip_range = [600, 65535]
    data = utility.synthetic_image_generate(\
        raw.get_width(), raw.get_height()).create_noisy_image(\
        data, noise_mean, noise_standard_deviation, seed, clip_range)
else:
    pass
# ===================================
# Black level correction
# ===================================
if do_black_level_correction:
    # Subtract the per-channel black level and rescale to the full
    # [0, 2^bit_depth - 1] range.
    data = imaging.black_level_correction(data, \
        raw.get_black_level(),\
        raw.get_white_level(),\
        [0, 2**raw.get_bit_depth() - 1])
    utility.imsave(data, "images/" + image_name + "_out_black_level_correction.png", "uint16")
else:
    pass
# ===================================
# Lens shading correction
# ===================================
if do_lens_shading_correction:
    # normally dark_current_image and flat_field_image are
    # captured in the image quality lab using flat field chart
    # here we are synthetically generating those two images
    dark_current_image, flat_field_image = utility.synthetic_image_generate(\
        raw.get_width(), raw.get_height()).create_lens_shading_correction_images(\
        0, 65535, 40000)
    # save the dark_current_image and flat_field_image for viewing
    utility.imsave(dark_current_image, "images/" + image_name + "_dark_current_image.png", "uint16")
    utility.imsave(flat_field_image, "images/" + image_name + "_flat_field_image.png", "uint16")
    data = imaging.lens_shading_correction(data).flat_field_compensation(\
        dark_current_image, flat_field_image)
    # data = lsc.approximate_mathematical_compensation([0.01759, -28.37, -13.36])
    utility.imsave(data, "images/" + image_name + "_out_lens_shading_correction.png", "uint16")
else:
    pass
# ===================================
# Bad pixel correction
# ===================================
if do_bad_pixel_correction:
    # Replace outlier pixels using their 3x3 neighborhood.
    neighborhood_size = 3
    data = imaging.bad_pixel_correction(data, neighborhood_size)
    utility.imsave(data, "images/" + image_name + "_out_bad_pixel_correction.png", "uint16")
else:
    pass
# ===================================
# Channel gain for white balance
# ===================================
if do_channel_gain_white_balance:
    # Multiply each bayer channel by the gains set on the ImageInfo.
    data = imaging.channel_gain_white_balance(data,\
        raw.get_channel_gain())
    utility.imsave(data, "images/" + image_name + "_out_channel_gain_white_balance.png", "uint16")
else:
    pass
# ===================================
# Bayer denoising
# ===================================
if do_bayer_denoise:
    # bayer denoising parameters
    neighborhood_size = 5
    initial_noise_level = 65535 * 10 / 100
    hvs_min = 1000
    hvs_max = 2000
    clip_range = [0, 65535]
    threshold_red_blue = 1300
    # data is the denoised output, ignoring the second output
    # (a texture-degree debug map; see the commented imsave below).
    data, _ = imaging.bayer_denoising(data).utilize_hvs_behavior(\
        raw.get_bayer_pattern(), initial_noise_level, hvs_min, hvs_max, threshold_red_blue, clip_range)
    utility.imsave(data, "images/" + image_name + "_out_bayer_denoising.png", "uint16")
    # utility.imsave(np.clip(texture_degree_debug*65535, 0, 65535), "images/" + image_name + "_out_texture_degree_debug.png", "uint16")
else:
    pass
# ===================================
# Demosaicing
# ===================================
if do_demosaic:
    #data = imaging.demosaic(data, raw.get_bayer_pattern()).mhc(False)
    data = imaging.demosaic(data, raw.get_bayer_pattern()).directionally_weighted_gradient_based_interpolation()
    utility.imsave(data, "images/" + image_name + "_out_demosaic.png", "uint16")
else:
    pass
# ===================================
# Demosaic artifact reduction
# ===================================
if do_demosaic_artifact_reduction:
    data = imaging.demosaic(data).post_process_local_color_ratio(0.80 * 65535)
    utility.imsave(data, "images/" + image_name + "_out_local_color_ratio.png", "uint16")
    edge_detection_kernel_size = 5
    edge_threshold = 0.05
    # first output is main output; second output (edge_location) is a debug output
    data, _ = imaging.demosaic(data).post_process_median_filter(edge_detection_kernel_size, edge_threshold)
    utility.imsave(data, "images/" + image_name + "_out_median_filter.png", "uint16")
    # utility.imsave(edge_location*65535, "images/" + image_name + "_edge_location.png", "uint16")
else:
    pass
# ===================================
# Color correction
# ===================================
if do_color_correction:
    # Apply the 3x3 color matrix set on the ImageInfo.
    data = imaging.color_correction(data, raw.get_color_matrix()).apply_cmatrix()
    utility.imsave(data, "images/" + image_name + "_out_color_correction.png", "uint16")
else:
    pass
# ===================================
# Gamma
# ===================================
if do_gamma:
    # brightening
    data = imaging.nonlinearity(data, "brightening").luma_adjustment(80.)
    # gamma by value
    #data = imaging.nonlinearity(data, "gamma").by_value(1/2.2, [0, 65535])
    # gamma by table
    # data = imaging.nonlinearity(data, "gamma").by_table("tables/gamma_2.4.txt", "gamma", [0, 65535])
    # gamma by value
    data = imaging.nonlinearity(data, "gamma").by_equation(-0.9, -8.0, [0, 65535])
    utility.imsave(data, "images/" + image_name + "_out_gamma.png", "uint16")
else:
    pass
# ===================================
# Chromatic aberration correction
# ===================================
if do_chromatic_aberration_correction:
    # Thresholds for detecting purple-fringe regions (noise-to-signal
    # ratio and color ratio).
    nsr_threshold = 90.
    cr_threshold = 6425./2
    data = imaging.chromatic_aberration_correction(data).purple_fringe_removal(nsr_threshold, cr_threshold)
    utility.imsave(data, "images/" + image_name + "_out_purple_fringe_removal.png", "uint16")
else:
    pass
# ===================================
# Tone mapping
# ===================================
if do_tone_mapping:
    data = imaging.tone_mapping(data).nonlinear_masking(1.0)
    utility.imsave(data, "images/" + image_name + "_out_tone_mapping_nl_masking.png", "uint16")
    # data = imaging.tone_mapping(data).dynamic_range_compression("normal", [-25., 260.], [0, 65535])
    # utility.imsave(data, "images/" + image_name + "_out_tone_mapping_drc.png", "uint16")
else:
    pass
# ===================================
# Memory color enhancement
# ===================================
if do_memory_color_enhancement:
    # Alternative tuning kept for reference:
    # target_hue = [30., -115., 100.]
    # hue_preference = [45., -90., 130.]
    # hue_sigma = [20., 10., 5.]
    # is_both_side = [True, False, False]
    # multiplier = [0.6, 0.6, 0.6]
    # chroma_preference = [25., 17., 30.]
    # chroma_sigma = [10., 10., 5.]

    # Squeeze three memory hues (lists are per-hue) toward preferred values.
    target_hue = [30., -125., 100.]
    hue_preference = [20., -118., 130.]
    hue_sigma = [20., 10., 5.]
    is_both_side = [True, False, False]
    multiplier = [0.6, 0.6, 0.6]
    chroma_preference = [25., 14., 30.]
    chroma_sigma = [10., 10., 5.]
    data = imaging.memory_color_enhancement(data).by_hue_squeeze(target_hue,\
                                                                 hue_preference,\
                                                                 hue_sigma,\
                                                                 is_both_side,\
                                                                 multiplier,\
                                                                 chroma_preference,\
                                                                 chroma_sigma)
    utility.imsave(data, "images/" + image_name + "_out_memory_color_enhancement.png", "uint16")
else:
    pass
# ===================================
# Noise reduction
# ===================================
if do_noise_reduction:
    # sigma filter parameters (one sigma per channel)
    neighborhood_size = 7
    sigma = [1000, 500, 500]
    data = imaging.noise_reduction(data).sigma_filter(neighborhood_size, sigma)
    utility.imsave(data, "images/" + image_name + "_out_noise_reduction.png", "uint16")
else:
    pass
# ===================================
# Sharpening
# ===================================
if do_sharpening:
    data = imaging.sharpening(data).unsharp_masking()
    utility.imsave(data, "images/" + image_name + "_out_sharpening.png", "uint16")
else:
    pass
# ===================================
# Distortion correction
# ===================================
if do_distortion_correction:
    # Empirical barrel-distortion correction, zoomed to fit the frame.
    correction_type="barrel-1"
    strength=0.5
    zoom_type="fit"
    clip_range=[0, 65535]
    data = imaging.distortion_correction(data).empirical_correction(correction_type, strength, zoom_type, clip_range)
    utility.imsave(data, "images/" + image_name + "_out_distortion_correction.png", "uint16")
else:
    pass
| 35.215385 | 135 | 0.576421 | 1,853 | 16,023 | 4.700486 | 0.160281 | 0.033754 | 0.037887 | 0.037199 | 0.518829 | 0.47256 | 0.419288 | 0.37899 | 0.338347 | 0.301722 | 0 | 0.100427 | 0.225676 | 16,023 | 454 | 136 | 35.292952 | 0.601596 | 0.279224 | 0 | 0.408 | 0 | 0 | 0.10302 | 0.058031 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.064 | 0.02 | 0 | 0.02 | 0.004 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4150a5e0f2085d293f7b12df820c38652a42f8aa | 12,579 | py | Python | coldsweat/fetcher.py | jeroenh/coldsweat | 339f413ea6393c50c7b98b85a8c6fd74a4586567 | [
"MIT"
] | 106 | 2015-02-01T22:24:32.000Z | 2021-11-08T08:50:55.000Z | coldsweat/fetcher.py | jeroenh/coldsweat | 339f413ea6393c50c7b98b85a8c6fd74a4586567 | [
"MIT"
] | 47 | 2015-02-19T13:50:45.000Z | 2019-09-02T19:58:20.000Z | coldsweat/fetcher.py | jeroenh/coldsweat | 339f413ea6393c50c7b98b85a8c6fd74a4586567 | [
"MIT"
] | 23 | 2015-02-20T21:21:54.000Z | 2019-11-30T05:13:17.000Z | # -*- coding: utf-8 -*-
'''
Description: the feed fetcher
Copyright (c) 2013—2016 Andrea Peltrin
Portions are copyright (c) 2013 Rui Carmo
License: MIT (see LICENSE for details)
'''
import sys, os, re, time, urlparse
from datetime import datetime
from peewee import IntegrityError
import feedparser
import requests
from requests.exceptions import *
from webob.exc import *
from coldsweat import *
from plugins import trigger_event
from models import *
from utilities import *
from translators import *
import markup
import filters
# Public API of this module.
__all__ = [
    'Fetcher',
    'fetch_url'
]

FETCH_ICONS_DELTA = 30 # Days between favicon refreshes for a feed
class Fetcher(object):
    '''
    Fetch a single given feed

    Drives one fetch cycle for a Feed model instance: conditional HTTP GET,
    per-status handling (handle_NNN methods looked up by status code),
    feed parsing, entry storage and favicon refresh.  (Python 2 code.)
    '''

    def __init__(self, feed):
        # Save timestamp for current fetch operation
        self.instant = datetime.utcnow()
        # Extract netloc (used only for log messages)
        _, self.netloc, _, _, _ = urlparse.urlsplit(feed.self_link)
        self.feed = feed

    # --- Per-status handlers; update_feed dispatches to handle_<status>. ---
    # Each records the status on the feed and raises the matching webob
    # exception to abort further processing.

    def handle_500(self, response):
        '''
        Internal server error
        '''
        self.feed.error_count += 1
        self.feed.last_status = response.status_code
        logger.warn(u"%s has caused an error on server, skipped" % self.netloc)
        raise HTTPInternalServerError

    def handle_403(self, response):
        '''
        Forbidden
        '''
        self.feed.error_count += 1
        self.feed.last_status = response.status_code
        logger.warn(u"%s access was denied, skipped" % self.netloc)
        raise HTTPForbidden

    def handle_404(self, response):
        '''
        Not Found
        '''
        self.feed.error_count += 1
        self.feed.last_status = response.status_code
        logger.warn(u"%s has been not found, skipped" % self.netloc)
        raise HTTPNotFound

    def handle_410(self, response):
        '''
        Gone — the feed is disabled permanently and a synthetic entry
        tells the user why.
        '''
        self.feed.is_enabled = False
        self.feed.error_count += 1
        self.feed.last_status = response.status_code
        logger.warn(u"%s is gone, disabled" % self.netloc)
        self._synthesize_entry('Feed has been removed from the origin server.')
        raise HTTPGone

    def handle_304(self, response):
        '''
        Not modified — conditional GET hit; nothing to parse.
        '''
        logger.debug(u"%s hasn't been modified, skipped" % self.netloc)
        self.feed.last_status = response.status_code
        raise HTTPNotModified

    def handle_301(self, response):
        '''
        Moved permanently — follow the new location unless another feed
        already uses it (then disable this one as a duplicate).
        '''
        self_link = response.url
        try:
            Feed.get(self_link=self_link)
        except Feed.DoesNotExist:
            self.feed.self_link = self_link
            self.feed.last_status = response.status_code
            logger.info(u"%s has changed its location, updated to %s" % (self.netloc, self_link))
        else:
            self.feed.is_enabled = False
            self.feed.last_status = DuplicatedFeedError.code
            self.feed.error_count += 1
            self._synthesize_entry('Feed has a duplicated web address.')
            logger.warn(u"new %s location %s is duplicated, disabled" % (self.netloc, self_link))
            raise DuplicatedFeedError

    def handle_200(self, response):
        '''
        OK plus redirects
        '''
        self.feed.etag = response.headers.get('ETag', None)
        # Save final status code discarding redirects
        self.feed.last_status = response.status_code

    handle_307 = handle_200 # Alias
    handle_302 = handle_200 # Alias

    def update_feed(self):
        '''
        Run one full fetch-and-parse cycle for this feed; always saves
        the feed record before returning.
        '''
        logger.debug(u"updating %s" % self.netloc)

        # Check freshness
        for value in [self.feed.last_checked_on, self.feed.last_updated_on]:
            if not value:
                continue
            # No datetime.timedelta since we need to
            # deal with large seconds values
            delta = datetime_as_epoch(self.instant) - datetime_as_epoch(value)
            if delta < config.fetcher.min_interval:
                logger.debug(u"%s is below minimun fetch interval, skipped" % self.netloc)
                return

        try:
            response = fetch_url(self.feed.self_link,
                                 timeout=config.fetcher.timeout,
                                 etag=self.feed.etag,
                                 modified_since=self.feed.last_updated_on)
        except RequestException:
            # Record any network error as 'Service Unavailable'
            self.feed.last_status = HTTPServiceUnavailable.code
            self.feed.error_count += 1
            logger.warn(u"a network error occured while fetching %s, skipped" % self.netloc)
            self.check_feed_health()
            self.feed.save()
            return

        self.feed.last_checked_on = self.instant

        # Check if we got a redirect first
        if response.history:
            status = response.history[0].status_code
        else:
            status = response.status_code

        try:
            # Dispatch to handle_<status>; unknown statuses just get recorded.
            handler = getattr(self, 'handle_%d' % status, None)
            if handler:
                logger.debug(u"got status %s from server" % status)
                handler(response)
            else:
                self.feed.last_status = status
                logger.warn(u"%s replied with unhandled status %d, aborted" % (self.netloc, status))
                return
            self._parse_feed(response.text)
            self._fetch_icon()
        except HTTPNotModified:
            pass # Nothing to do
        except (HTTPError, DuplicatedFeedError):
            self.check_feed_health()
        finally:
            self.feed.save()

    def check_feed_health(self):
        # Disable feeds that keep failing once the configured error budget
        # (config.fetcher.max_errors, 0 = unlimited) is exceeded.
        if config.fetcher.max_errors and self.feed.error_count > config.fetcher.max_errors:
            self._synthesize_entry('Feed has accumulated too many errors (last was %s).' % filters.status_title(self.feed.last_status))
            logger.warn(u"%s has accomulated too many errors, disabled" % self.netloc)
            self.feed.is_enabled = False
        return

    def update_feed_with_data(self, data):
        # Parse already-downloaded feed data (bypasses the HTTP fetch).
        self._parse_feed(data)
        self.feed.save()

    def _parse_feed(self, data):
        '''
        Parse feed XML with feedparser, update feed metadata and store
        any new entries.
        '''
        soup = feedparser.parse(data)
        # Got parsing error?
        if hasattr(soup, 'bozo') and soup.bozo:
            logger.debug(u"%s caused a parser error (%s), tried to parse it anyway" % (self.netloc, soup.bozo_exception))

        ft = FeedTranslator(soup.feed)

        self.feed.last_updated_on = ft.get_timestamp(self.instant)
        self.feed.alternate_link = ft.get_alternate_link()
        self.feed.title = self.feed.title or ft.get_title() # Do not set again if already set

        #entries = []
        feed_author = ft.get_author()

        for entry_dict in soup.entries:
            t = EntryTranslator(entry_dict)

            link = t.get_link()
            guid = t.get_guid(default=link)

            if not guid:
                logger.warn(u'could not find GUID for entry from %s, skipped' % self.netloc)
                continue

            timestamp = t.get_timestamp(self.instant)
            content_type, content = t.get_content(('text/plain', ''))

            # Skip ancient entries
            if config.fetcher.max_history and (self.instant - timestamp).days > config.fetcher.max_history:
                logger.debug(u"entry %s from %s is over maximum history, skipped" % (guid, self.netloc))
                continue

            try:
                # If entry is already in database with same hashed GUID, skip it
                Entry.get(guid_hash=make_sha1_hash(guid))
                logger.debug(u"duplicated entry %s, skipped" % guid)
                continue
            except Entry.DoesNotExist:
                pass

            entry = Entry(
                feed = self.feed,
                guid = guid,
                link = link,
                title = t.get_title(default='Untitled'),
                author = t.get_author() or feed_author,
                content = content,
                content_type = content_type,
                last_updated_on = timestamp
            )

            # At this point we are pretty sure we doesn't have the entry
            # already in the database so alert plugins and save data
            trigger_event('entry_parsed', entry, entry_dict)
            entry.save()
            #@@TODO: entries.append(entry)
            logger.debug(u"parsed entry %s from %s" % (guid, self.netloc))
        #return entries

    def _fetch_icon(self):
        # Refresh the favicon only every FETCH_ICONS_DELTA days.
        if not self.feed.icon or not self.feed.icon_last_updated_on or (self.instant - self.feed.icon_last_updated_on).days > FETCH_ICONS_DELTA:
            # Prefer alternate_link if available since self_link could
            # point to Feed Burner or similar services
            self.feed.icon = self._google_favicon_fetcher(self.feed.alternate_link or self.feed.self_link)
            self.feed.icon_last_updated_on = self.instant
            logger.debug(u"fetched favicon %s..." % (self.feed.icon[:70]))

    def _google_favicon_fetcher(self, url):
        '''
        Fetch a site favicon via Google service

        Returns a data: URI, or Feed.DEFAULT_ICON on any network error.
        '''
        endpoint = "http://www.google.com/s2/favicons?domain=%s" % urlparse.urlsplit(url).hostname
        try:
            response = fetch_url(endpoint)
        except RequestException, exc:
            logger.warn(u"could not fetch favicon for %s (%s)" % (url, exc))
            return Feed.DEFAULT_ICON

        return make_data_uri(response.headers['Content-Type'], response.content)

    def add_synthesized_entry(self, title, content_type, content):
        '''
        Create an HTML entry for this feed
        '''
        # Since we don't know the mechanism the feed used to build a GUID for its entries
        # synthesize a tag URI from the link and a random string. This makes
        # entries internally generated by Coldsweat reasonably globally unique
        guid = ENTRY_TAG_URI % make_sha1_hash(self.feed.self_link + make_nonce())

        entry = Entry(
            feed = self.feed,
            guid = guid,
            title = title,
            author = 'Coldsweat',
            content = content,
            content_type = content_type,
            last_updated_on = self.instant
        )
        entry.save()
        logger.debug(u"synthesized entry %s" % guid)
        return entry

    def _synthesize_entry(self, reason):
        # Convenience wrapper: render the "feed disabled" template and store it.
        title = u'This feed has been disabled'
        content = render_template(os.path.join(template_dir, '_entry_feed_disabled.html'), {'reason': reason})
        return self.add_synthesized_entry(title, 'text/html', content)
def fetch_url(url, timeout=10, etag=None, modified_since=None):
    '''
    Fetch a given URL optionally issuing a 'Conditional GET' request

    The conditional headers (If-None-Match / If-Modified-Since) are sent
    only when BOTH etag and modified_since are provided.  Re-raises any
    requests.RequestException after logging it.  (Python 2 code.)
    '''
    request_headers = {
        'User-Agent': USER_AGENT
    }

    # Conditional GET headers
    if etag and modified_since:
        logger.debug(u"fetching %s with a conditional GET (%s %s)" % (url, etag, format_http_datetime(modified_since)))
        request_headers['If-None-Match'] = etag
        request_headers['If-Modified-Since'] = format_http_datetime(modified_since)

    try:
        response = requests.get(url, timeout=timeout, headers=request_headers)
    except RequestException, exc:
        logger.debug(u"tried to fetch %s but got %s" % (url, exc.__class__.__name__))
        raise exc

    return response
# ------------------------------------------------------
# Custom error codes 9xx & exceptions
# ------------------------------------------------------

class DuplicatedFeedError(Exception):
    # Mimics a webob HTTP exception (code/title/explanation attributes)
    # so it can be registered in webob's status_map below.
    code = 900
    title = 'Duplicated feed'
    explanation = 'Feed address matches another already present in the database.'

# Update WebOb status codes map so our custom codes resolve like HTTP ones
for klass in (DuplicatedFeedError,):
    status_map[klass.code] = klass
| 35.634561 | 144 | 0.565864 | 1,410 | 12,579 | 4.897872 | 0.239716 | 0.056762 | 0.027802 | 0.028671 | 0.165798 | 0.111932 | 0.086591 | 0.059948 | 0.053866 | 0.039676 | 0 | 0.00798 | 0.342476 | 12,579 | 352 | 145 | 35.735795 | 0.826865 | 0.092456 | 0 | 0.253521 | 0 | 0 | 0.115618 | 0.002331 | 0 | 0 | 0 | 0.002841 | 0 | 0 | null | null | 0.00939 | 0.065728 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41569e5e017f94b8bc03473ce433df43099554cf | 286 | py | Python | phanterpwa/interface/Admin/production.py | PhanterJR/phanterpwa | 6daff40845b3a853cd08d319c4ce148f8deebed7 | [
"MIT"
] | 2 | 2019-06-06T10:37:01.000Z | 2021-10-16T03:36:28.000Z | phanterpwa/interface/Admin/production.py | PhanterJR/phanterpwa | 6daff40845b3a853cd08d319c4ce148f8deebed7 | [
"MIT"
] | null | null | null | phanterpwa/interface/Admin/production.py | PhanterJR/phanterpwa | 6daff40845b3a853cd08d319c4ce148f8deebed7 | [
"MIT"
] | null | null | null | from phanterpwa.server import PhanterPWATornado
if __name__ == "__main__":
    import os
    # Serve the Admin interface app rooted at this file's directory.
    projectPath = os.path.dirname(__file__)
    print(projectPath)
    AppRunv = PhanterPWATornado(projectPath)
    try:
        AppRunv.run()
    except KeyboardInterrupt:
        # Ctrl-C: shut the Tornado server down cleanly.
        AppRunv.stop()
| 23.833333 | 47 | 0.695804 | 27 | 286 | 6.925926 | 0.740741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.22028 | 286 | 11 | 48 | 26 | 0.838565 | 0 | 0 | 0 | 0 | 0 | 0.027972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41581dd5a29d0d2428a3d341c3227a0c254875b8 | 271 | py | Python | join.py | florisdenhengst/pydial-benchmark-stopos | 0c2c70f327bc6ef41cfa849852ffe6300cc4f27e | [
"MIT"
] | null | null | null | join.py | florisdenhengst/pydial-benchmark-stopos | 0c2c70f327bc6ef41cfa849852ffe6300cc4f27e | [
"MIT"
] | null | null | null | join.py | florisdenhengst/pydial-benchmark-stopos | 0c2c70f327bc6ef41cfa849852ffe6300cc4f27e | [
"MIT"
] | null | null | null | import argparse
# Parse one or more positional tokens from the command line.
cli = argparse.ArgumentParser(description="Generate path to config file given input")
cli.add_argument('vars', nargs='+')
options = cli.parse_args()

# The last item of vars contains the random seed; drop it and join the
# remaining tokens with dashes to form the config-file path.
result = '-'.join(options.vars[:-1])
print(result)
| 24.636364 | 88 | 0.745387 | 38 | 271 | 5.263158 | 0.763158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004202 | 0.121771 | 271 | 10 | 89 | 27.1 | 0.836134 | 0.169742 | 0 | 0 | 1 | 0 | 0.206278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41612b688431101d4228b6a577f3b0ee96a44628 | 618 | py | Python | mysite/personal/urls.py | salimab/library1 | 9746329837ba40fa2d9816adf19ff9d9f559bbaa | [
"MIT"
] | null | null | null | mysite/personal/urls.py | salimab/library1 | 9746329837ba40fa2d9816adf19ff9d9f559bbaa | [
"MIT"
] | null | null | null | mysite/personal/urls.py | salimab/library1 | 9746329837ba40fa2d9816adf19ff9d9f559bbaa | [
"MIT"
] | null | null | null | from django.conf.urls import url
from personal.models import Book
from . import views
from django.contrib.auth.views import login
from django.views.generic import ListView, DetailView
urlpatterns = [
    # Landing page.
    url(r'^$', views.index , name='index'),
    # Login form using Django's built-in auth view with a custom template.
    url(r'^login/$', login, {'template_name': 'personal/login.html'}),
    # Library listing: the 25 most recently dated books, newest first.
    url(r'^lib/$', ListView.as_view(queryset=Book.objects.all().order_by("-date")[:25],
        template_name = "personal/lib.html")),
    # Book detail page keyed by primary key.
    url(r'^(?P<pk>\d+)$', DetailView.as_view(model=Book,
        template_name='personal/book.html'))
]
| 34.333333 | 87 | 0.614887 | 78 | 618 | 4.794872 | 0.461538 | 0.042781 | 0.160428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004158 | 0.221683 | 618 | 17 | 88 | 36.352941 | 0.773389 | 0 | 0 | 0 | 0 | 0 | 0.171799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.384615 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
41619fc16893077852a588c0d3ebc29c67fdbfcc | 2,455 | py | Python | carbon/lib/carbon/log.py | ehazlett/graphite | b20573e92cb90de254505baa160210483f203be9 | [
"Apache-2.0"
] | 1 | 2015-05-21T10:23:03.000Z | 2015-05-21T10:23:03.000Z | carbon/lib/carbon/log.py | Cue/graphite | 450eeeb0eacc433bc5914c1dff2e05dbf420cf8d | [
"Apache-2.0"
] | null | null | null | carbon/lib/carbon/log.py | Cue/graphite | 450eeeb0eacc433bc5914c1dff2e05dbf420cf8d | [
"Apache-2.0"
] | null | null | null | import time
from sys import stdout, stderr
from zope.interface import implements
from twisted.python.log import startLoggingWithObserver, textFromEventDict, msg, err, ILogObserver
from twisted.python.logfile import DailyLogFile
class CarbonLogObserver(object):
    '''Twisted log observer that writes either to stdout or to per-type
    daily log files under a directory.  (Python 2 / zope.interface code.)'''
    implements(ILogObserver)

    def log_to_dir(self, logdir):
        # Switch from stdout to directory-based logging; events carrying a
        # 'type' get their own <type>.log, everything else goes to console.log.
        self.logdir = logdir
        self.console_logfile = DailyLogFile('console.log', logdir)
        self.custom_logs = {}
        self.observer = self.logdir_observer

    def __call__(self, event):
        # Delegate to whichever observer is currently active.
        return self.observer(event)

    def stdout_observer(self, event):
        stdout.write( formatEvent(event, includeType=True) + '\n' )
        stdout.flush()

    def logdir_observer(self, event):
        message = formatEvent(event)
        log_type = event.get('type')

        # Lazily open a dedicated daily logfile the first time a type is seen.
        if log_type is not None and log_type not in self.custom_logs:
            self.custom_logs[log_type] = DailyLogFile(log_type + '.log', self.logdir)

        logfile = self.custom_logs.get(log_type, self.console_logfile)
        logfile.write(message + '\n')
        logfile.flush()

    # Default to stdout
    observer = stdout_observer
carbonLogObserver = CarbonLogObserver()
def formatEvent(event, includeType=False):
    """Render a twisted log event as one timestamped line.

    Flags the event as an error when it carries a 'failure' (which
    textFromEventDict inspects), then formats it as
    "<timestamp> :: [<type>] <message>"; the type tag is emitted only
    when includeType is true.
    """
    event['isError'] = 'failure' in event
    tag = ('[%s] ' % event.get('type', 'console')) if includeType else ''
    rendered = textFromEventDict(event)
    stamp = time.strftime("%d/%m/%Y %H:%M:%S")
    return "%s :: %s%s" % (stamp, tag, rendered)
# Public entry points: redirect logging to a directory, or hook the
# shared observer into twisted's logging system (stdout by default).
logToDir = carbonLogObserver.log_to_dir

def logToStdout():
    startLoggingWithObserver(carbonLogObserver)
# Each helper below logs a message tagged with a 'type'; when directory
# logging is active, logdir_observer routes each type to its own daily
# logfile (cache.log, clients.log, ...).

def cache(message, **context):
    context['type'] = 'cache'
    msg(message, **context)

def clients(message, **context):
    context['type'] = 'clients'
    msg(message, **context)

def creates(message, **context):
    context['type'] = 'creates'
    msg(message, **context)

def updates(message, **context):
    context['type'] = 'updates'
    msg(message, **context)

def listener(message, **context):
    context['type'] = 'listener'
    msg(message, **context)

def relay(message, **context):
    context['type'] = 'relay'
    msg(message, **context)

def aggregator(message, **context):
    context['type'] = 'aggregator'
    msg(message, **context)

def query(message, **context):
    context['type'] = 'query'
    msg(message, **context)

def debug(message, **context):
    # Emitted only after setDebugEnabled(True) has been called.
    if debugEnabled:
        msg(message, **context)

debugEnabled = False

def setDebugEnabled(enabled):
    global debugEnabled
    debugEnabled = enabled
| 24.306931 | 98 | 0.701833 | 287 | 2,455 | 5.919861 | 0.271777 | 0.148323 | 0.090053 | 0.117716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159267 | 2,455 | 100 | 99 | 24.55 | 0.823159 | 0.006925 | 0 | 0.128571 | 0 | 0 | 0.068145 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.228571 | false | 0 | 0.071429 | 0.014286 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
416ea9fd8056f26889da4e772e2a3ac8b50231ab | 1,671 | py | Python | supports/pyload/src/pyload/plugins/accounts/EuroshareEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | 1 | 2020-04-02T17:03:39.000Z | 2020-04-02T17:03:39.000Z | supports/pyload/src/pyload/plugins/accounts/EuroshareEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | supports/pyload/src/pyload/plugins/accounts/EuroshareEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import re
import time
from ..base.account import BaseAccount
class EuroshareEu(BaseAccount):
    __name__ = "EuroshareEu"
    __type__ = "account"
    __version__ = "0.12"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __description__ = """Euroshare.eu account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "zoidberg@mujmail.cz"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    def grab_info(self, user, password, data):
        """Scrape the premium-expiry banner from the landing page.

        Returns a dict with validuntil (epoch seconds, -1 when free),
        trafficleft (always -1 here) and the premium flag.
        """
        page = self.load("http://euroshare.eu/", get={"lang": "en"})
        match = re.search(
            r'<span class="btn btn--nav green darken-3">Premium account until: (\d+/\d+/\d+ \d+:\d+:\d+)<',
            page,
        )

        premium = match is not None
        if premium:
            # Banner date is day/month/year, e.g. 31/12/2020 23:59:59
            expires = time.mktime(time.strptime(match.group(1), "%d/%m/%Y %H:%M:%S"))
        else:
            expires = -1

        return {"validuntil": expires, "trafficleft": -1, "premium": premium}

    def signin(self, user, password, data):
        """Log in through the euroshare.eu AJAX endpoint.

        Skips the login when the page already shows a logout link; fails the
        login when the JSON response does not report success.
        """
        page = self.load("http://euroshare.eu/login.html")
        if r'href="http://euroshare.eu/logout.html"' in page:
            self.skip_login()

        payload = {
            "username": user,
            "password": password,
            "remember": "false",
            "backlink": "",
        }
        response = self.load(
            "http://euroshare.eu/ajax/_account_login.ajax.php",
            post=payload,
        )
        json_data = json.loads(response)

        if json_data.get("login_status") != "success":
            self.fail_login()
| 27.393443 | 107 | 0.519449 | 177 | 1,671 | 4.672316 | 0.531073 | 0.066505 | 0.072551 | 0.076179 | 0.14873 | 0.113664 | 0.113664 | 0.113664 | 0.113664 | 0.113664 | 0 | 0.014172 | 0.324357 | 1,671 | 60 | 108 | 27.85 | 0.718335 | 0.012567 | 0 | 0 | 0 | 0.021739 | 0.279126 | 0.015777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.065217 | 0.086957 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
417221be3e316342a6fa9154928c6a83da99eeb9 | 499 | py | Python | tests/test_get_updates.py | concretesolutions/mysql-anonymous | a4246d657cb3749aa145c30320df78f9811b01d2 | [
"MIT"
] | null | null | null | tests/test_get_updates.py | concretesolutions/mysql-anonymous | a4246d657cb3749aa145c30320df78f9811b01d2 | [
"MIT"
] | null | null | null | tests/test_get_updates.py | concretesolutions/mysql-anonymous | a4246d657cb3749aa145c30320df78f9811b01d2 | [
"MIT"
] | null | null | null | from anonymize.anonymize import AnonymizeUpdate, AnonymizeScheme
def test_should_get_the_update_list():
    """Verify AnonymizeUpdate renders the expected list of UPDATE statements
    for a scheme that nullifies, randomizes an email and randomizes an IP."""
    scheme = AnonymizeScheme("default", {
        "tables": {
            "user": {
                "nullify": ["phone", ],
                "random_email": ["email", ],
                "random_ip": ['ip']
            }
        }
    })
    updates = AnonymizeUpdate(scheme)

    expected = ["UPDATE `user` SET `phone` = NULL, `ip` = INET_NTOA(RAND()*1000000000), `email` = CONCAT(id, '@example.com')"]
    assert updates == expected
| 29.352941 | 119 | 0.531062 | 46 | 499 | 5.586957 | 0.717391 | 0.233463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028818 | 0.304609 | 499 | 16 | 120 | 31.1875 | 0.711816 | 0 | 0 | 0 | 0 | 0.076923 | 0.328657 | 0.058116 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4176b3250f2179deaa30e43611510dfb698b6de1 | 2,143 | py | Python | python_project/DeleteElasticsearchIndices.py | wangyuqianily/bbotte.github.io | 7b7522c1db445179a5be029f1da4db70bcc154f4 | [
"Apache-2.0"
] | 2 | 2021-08-22T16:13:27.000Z | 2021-08-22T16:13:34.000Z | python_project/DeleteElasticsearchIndices.py | wangyuqianily/bbotte.github.io | 7b7522c1db445179a5be029f1da4db70bcc154f4 | [
"Apache-2.0"
] | null | null | null | python_project/DeleteElasticsearchIndices.py | wangyuqianily/bbotte.github.io | 7b7522c1db445179a5be029f1da4db70bcc154f4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#ecoding=utf8
#自动删除elasticsearch数据库保留较久的数据
#普通数据保留时间为daysToKeep,access日志保留时间为access_date
#pip install elasticsearch 安装elasticsearch包
import datetime
import re
from elasticsearch import Elasticsearch
daysToKeep = 30
es = Elasticsearch('localhost:9200', timeout=60.0)
# Oldest index date (formatted "YYYY.MM.DD") that we still keep.
keep_time = (datetime.datetime.now() - datetime.timedelta(days=daysToKeep)).strftime("%Y.%m.%d")

# Collect the names of all "service" indices from the cat/indices output,
# e.g. abc-system-service-2018.01.01.
indices_list = []
result = es.cat.indices()
data = result.splitlines()
# FIX: the compiled pattern was named app_service but referenced below as the
# undefined name `del_kibana`, which raised NameError on every run.
app_service = re.compile('service')
lines = [line for line in data if app_service.search(line) is not None]
for line in lines:
    # Third whitespace-separated column of cat/indices output is the index name.
    indices_list.append(re.split(r' ', line)[2])
print(indices_list)

# Index names are *-*-YYYY.MM.DD or *-*-*-YYYY.MM.DD, so the date is always
# the LAST dash-separated component.
# FIX: the old code read component [2] (which is the date only for the
# three-part form) and its try/except/else skipped the age check whenever the
# fallback fired. The old 50-per-batch while loop also processed nothing at
# all when fewer than 50 indices existed, and always dropped the tail of any
# list whose length was not a multiple of 50.
for index_name in indices_list:
    idx_date = re.split(r'\-', index_name)[-1]
    if idx_date < keep_time:
        print("Deleting index: %s" % index_name)
        es.indices.delete(index=index_name)

# Access-log indices (system-ngxaccess-2018.01.01, web-ngxaccess-2018.01.01)
# are kept for 7 days only.
access_date = 7
access_keep_time = (datetime.datetime.now() - datetime.timedelta(days=access_date)).strftime("%Y.%m.%d")
save_access = re.compile('ngxaccess')
access_lines = [line for line in data if save_access.search(line) is not None]
access_indices_list = []
for line in access_lines:
    access_indices_list.append(re.split(r' ', line)[2])
for index_name in access_indices_list:
    access_idx_date = re.split(r'\-', index_name)[-1]
    if access_idx_date < access_keep_time:
        print("Deleting index: %s" % index_name)
        # NOTE(review): the delete for access indices was commented out in the
        # original (dry-run?); preserved as-is — confirm before enabling.
        #es.indices.delete(index=index_name)
| 32.969231 | 104 | 0.698087 | 314 | 2,143 | 4.601911 | 0.33121 | 0.106574 | 0.070588 | 0.022837 | 0.281661 | 0.235294 | 0.137024 | 0.103806 | 0 | 0 | 0 | 0.028682 | 0.15399 | 2,143 | 64 | 105 | 33.484375 | 0.76834 | 0.251983 | 0 | 0 | 0 | 0 | 0.056747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.075 | null | null | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4180c010d94ce77b0775ae90c79802a1245aa772 | 1,999 | py | Python | src/tmdb/api_testing.py | kierrez/movie-website | 74f4ed018aba545dec190b70d62abe0ac6085462 | [
"MIT"
] | 1 | 2019-03-02T20:06:16.000Z | 2019-03-02T20:06:16.000Z | src/tmdb/api_testing.py | kierrez/movie-website | 74f4ed018aba545dec190b70d62abe0ac6085462 | [
"MIT"
] | 1 | 2022-01-07T22:57:41.000Z | 2022-01-07T22:57:41.000Z | src/tmdb/api_testing.py | kierrez/movie-website | 74f4ed018aba545dec190b70d62abe0ac6085462 | [
"MIT"
] | null | null | null | import django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
django.setup()
from django.contrib.auth import get_user_model
from importer.utils import import_ratings_from_csv
from titles.models import Title
from tmdb.api import TmdbWrapper, TitleDetailsGetter
User = get_user_model()
def test_csv():
user = User.objects.all().first()
import_ratings_from_csv(user, 'G:/code/PycharmProjects/movie website/media/test.csv')
# update_user_ratings_csv(user, 'G:/code/PycharmProjects/movie website/media/accounts/imdb.csv')
# test_csv()
# check popular
# check import/eexport
# for t in Title.objects.filter(tmdb_id='1414'):
# for t in Title.objects.filter(imdb_id='tt0454848'):
# # # print(t.similar.clear())
# # # print(t, t.imdb_id)
# tmdb_instance = t.get_tmdb_instance()
# tmdb_instance(title=t).update()
# Title.objects.filter(tmdb_id='1414').delete()
# for imdb_id in ['tt0286486', 'tt0133363']:
# for imdb_id in ['tt0454848']:
# TmdbWrapper().get(imdb_id=imdb_id)
imdb_id_movie = 'tt0120889'
# tmdb_id_movie = '12159'
# imdb_id_series = 'tt4574334'
tmdb_id_series = '66732'
t = Title.objects.get(imdb_id='tt1037705')
TitleDetailsGetter(t).run()
# deleted = Title.objects.filter(imdb_id=collection_id).delete()
# Collection.objects.all().delete()
# deleted = Title.objects.filter(imdb_id=imdb_id_movie).delete()
# deleted = Title.objects.filter(tmdb_id=tmdb_id_series).delete()
# print(deleted)
# title = TmdbWrapper().get(imdb_id_movie)
# title = client.get_title_or_create()
# print(title.collection.all())
# t = Title.objects.get(tmdb_id=tmdb_id_series)
# print(t.cast.all())
# print(t.crew.all())
# for x in t.casttitle_set.all():
# print(x)
# for x in t.castcrew_set.all():
# print(x)
# popular_movies = PopularMovies().get()
# print(popular_movies)
# for title in Title.objects.all():
# print(title.name)
# imdb_id = title.imdb_id
# print(title.delete())
# TmdbWrapper().get(imdb_id)
| 25.303797 | 100 | 0.72036 | 289 | 1,999 | 4.771626 | 0.269896 | 0.069616 | 0.078318 | 0.047861 | 0.248731 | 0.171139 | 0.063814 | 0.063814 | 0 | 0 | 0 | 0.038395 | 0.127064 | 1,999 | 78 | 101 | 25.628205 | 0.751862 | 0.654827 | 0 | 0 | 0 | 0 | 0.173375 | 0.113003 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.4375 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4188b579ad1181892d567d0767ea95ba676c48c9 | 6,732 | py | Python | axonius_api_client/api/system/users.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | null | null | null | axonius_api_client/api/system/users.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | 3 | 2021-05-18T14:28:30.000Z | 2021-09-06T20:01:56.000Z | axonius_api_client/api/system/users.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""API model for working with system configuration."""
from typing import List, Optional
from ...exceptions import ApiError, NotFoundError
from ..mixins import ChildMixins
from ..parsers import parse_unchanged
class Users(ChildMixins):  # pragma: no cover
    """API endpoints for managing users (create, fetch, update, delete,
    password-reset links) via the parent client's request machinery."""

    def add(
        self,
        name: str,
        role_name: str,
        password: Optional[str] = None,
        generate_password_link: bool = False,
        email_password_link: bool = False,
        first_name: Optional[str] = None,
        last_name: Optional[str] = None,
        email: Optional[str] = None,
    ) -> dict:
        """Create a user and assign it an existing role.

        Args:
            name: user name; must not already exist
            role_name: name of an existing role (resolved via parent.roles)
            password: explicit password; at least one of password /
                generate_password_link / email_password_link is required
            generate_password_link: attach a reset link to the returned user
                under "password_reset_link"
            email_password_link: email a reset link (requires ``email``);
                "email_password_link_error" on the returned user is None on
                success, otherwise the response text or the exception
            first_name: optional first name
            last_name: optional last name
            email: optional email address

        Returns:
            the created user object as returned by :meth:`get_by_name`

        Raises:
            ApiError: no password mechanism supplied, email_password_link
                without email, or the user name already exists
        """
        if not any([password, generate_password_link, email_password_link]):
            raise ApiError("Must supply password, generate_password_link, or email_password_link")

        if email_password_link and not email:
            raise ApiError("Must supply email if email_password_link is True")

        role = self.parent.roles.get_by_name(name=role_name)

        users = self.get()
        names = [x["user_name"] for x in users]

        if name in names:
            raise ApiError(f"User named {name!r} already exists")

        user = {}
        user["user_name"] = name
        user["first_name"] = first_name
        user["last_name"] = last_name
        user["password"] = password
        user["email"] = email
        user["role_id"] = role["uuid"]
        user["role_obj"] = role
        user["auto_generated_password"] = generate_password_link

        self._add(user=user)
        # Re-fetch so the returned object carries server-assigned fields (uuid).
        user_obj = self.get_by_name(name=name)

        if generate_password_link or email_password_link:
            password_reset_link = self._get_password_reset_link(uuid=user_obj["uuid"])

            if generate_password_link:
                user_obj["password_reset_link"] = password_reset_link

            if email_password_link:
                try:
                    self._email_password_reset_link(uuid=user_obj["uuid"], email=email, new_user=True)
                    user_obj["email_password_link_error"] = None
                except Exception as exc:
                    # Prefer the HTTP response body when the exception has one.
                    user_obj["email_password_link_error"] = (
                        getattr(getattr(exc, "response", None), "text", None) or exc
                    )

        return user_obj

    def get(self) -> List[dict]:
        """Fetch all users, attaching each user's full role under "role_obj"."""
        users = self._get()
        roles = self.parent.roles.get()
        for user in users:
            for role in roles:
                if role["uuid"] == user["role_id"]:
                    user["role_obj"] = role
                    break
        return users

    def get_by_name(self, name: str) -> dict:
        """Fetch a single user by user name.

        Raises:
            NotFoundError: when no user has that name (lists valid names)
        """
        users = self.get()
        valid = [x["user_name"] for x in users]
        if name not in valid:
            valid = "\n" + "\n".join(valid)
            raise NotFoundError(f"User name {name!r} not found, valid users:{valid}")
        return [x for x in users if x["user_name"] == name][0]

    def update(
        self,
        name: str,
        first_name: Optional[str] = None,
        last_name: Optional[str] = None,
        password: Optional[str] = None,
        email: Optional[str] = None,
        role_name: Optional[str] = None,
    ) -> dict:
        """Update the supplied attributes of an existing user.

        At least one keyword argument must be provided; unsupplied ones
        are left unchanged.

        Returns:
            the re-fetched, updated user object

        Raises:
            ApiError: when no updatable attribute is supplied
            NotFoundError: when the user does not exist
        """
        user = self.get_by_name(name=name)

        one_of = [first_name, last_name, password, email, role_name]
        if all([x is None for x in one_of]):
            req = ", ".join(["first_name", "last_name", "password", "email", "role_name"])
            raise ApiError(f"Must supply at least one of: {req!r}")

        uuid = user["uuid"]

        if first_name is not None:
            user["first_name"] = first_name

        if last_name is not None:
            user["last_name"] = last_name

        if password is not None:
            # Normalize sentinel "unchanged" password values before sending.
            _, password = parse_unchanged(value=password)
            user["password"] = password

        if email is not None:
            user["email"] = email

        if role_name is not None:
            role = self.parent.roles.get_by_name(name=role_name)
            user["role_id"] = role["uuid"]
            user["role_obj"] = role

        self._update(uuid=uuid, user=user)

        return self.get_by_name(name=name)

    def delete(self, name: str) -> str:
        """Delete a user by name; returns the raw API response."""
        user = self.get_by_name(name=name)
        return self._delete(uuid=user["uuid"])

    def get_password_reset_link(self, name: str) -> dict:
        """Fetch a password reset link for a user.

        Returns:
            the user object with the link attached under "password_reset_link"
            (annotation fixed: this returns the user dict, not a str)
        """
        user = self.get_by_name(name=name)
        user["password_reset_link"] = self._get_password_reset_link(uuid=user["uuid"])
        return user

    def email_password_reset_link(
        self,
        name: str,
        email: Optional[str] = None,
        new_user: bool = False,
        generate_first: bool = True,
    ) -> str:
        """Email a password reset link to a user.

        Args:
            name: user name to send the link for
            email: override address; defaults to the user's stored email
            new_user: accepted but NOT forwarded — see note below
            generate_first: generate a fresh link before emailing

        Returns:
            the email address the link was sent to

        Raises:
            ApiError: when no email is supplied and the user has none stored
        """
        user = self.get_by_name(name=name)
        user_email = user.get("email")
        email = email or user_email

        if not email:
            raise ApiError("User has no email address defined, must supply email")

        if generate_first:
            user["password_reset_link"] = self._get_password_reset_link(uuid=user["uuid"])

        # NOTE(review): the new_user parameter is accepted but the private
        # call hardcodes new_user=False — confirm whether it should forward.
        self._email_password_reset_link(uuid=user["uuid"], email=email, new_user=False)
        return email

    def _get(self, limit: Optional[int] = None, skip: Optional[int] = None) -> List[dict]:
        """Direct API: list users, with optional paging params."""
        data = {}

        if limit is not None:
            data["limit"] = limit

        if skip is not None:
            data["skip"] = skip

        path = self.router.users
        return self.request(method="get", path=path, params=data)

    def _add(self, user: dict) -> str:
        """Direct API: create a user from a raw user dict."""
        path = self.router.users
        return self.request(method="put", path=path, json=user)

    def _delete(self, uuid: str) -> str:
        """Direct API: delete a user by uuid."""
        path = self.router.user.format(uuid=uuid)
        return self.request(method="delete", path=path)

    def _update(self, uuid: str, user: dict) -> str:
        """Direct API: post the full user dict for the given uuid."""
        path = self.router.user.format(uuid=uuid)
        return self.request(method="post", path=path, json=user, error_json_invalid=False)

    def _get_password_reset_link(self, uuid: str) -> str:
        """Direct API: request a reset token/link for the given user uuid."""
        path = f"{self.router._base}/tokens/reset"
        data = {"user_id": uuid}
        return self.request(method="put", path=path, json=data, error_json_invalid=False)

    def _email_password_reset_link(self, uuid: str, email: str, new_user: bool = False) -> str:
        """Direct API: ask the server to email a reset link; ``new_user``
        controls the "invite" flag in the payload."""
        path = f"{self.router._base}/tokens/notify"
        data = {"user_id": uuid, "email": email, "invite": new_user}
        return self.request(method="POST", path=path, json=data)
| 33.162562 | 98 | 0.58066 | 853 | 6,732 | 4.385698 | 0.143025 | 0.036354 | 0.063619 | 0.0278 | 0.496124 | 0.412724 | 0.347501 | 0.283881 | 0.187383 | 0.155841 | 0 | 0.00042 | 0.292484 | 6,732 | 202 | 99 | 33.326733 | 0.785009 | 0.027629 | 0 | 0.307692 | 0 | 0 | 0.121812 | 0.024888 | 0.006993 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.20979 | 0.027972 | 0 | 0.216783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
41a1a65579b9b9fb4619e6879a5263fb65b7ed83 | 1,206 | py | Python | gameball/exceptions/gameball_exception.py | aaelfiky/gameball-python | bd7aead60ac53c6539f10a41d9c47e87569ce8c3 | [
"MIT"
] | 1 | 2022-03-15T14:08:06.000Z | 2022-03-15T14:08:06.000Z | gameball/exceptions/gameball_exception.py | aaelfiky/gameball-python | bd7aead60ac53c6539f10a41d9c47e87569ce8c3 | [
"MIT"
] | null | null | null | gameball/exceptions/gameball_exception.py | aaelfiky/gameball-python | bd7aead60ac53c6539f10a41d9c47e87569ce8c3 | [
"MIT"
] | 1 | 2022-01-17T00:38:18.000Z | 2022-01-17T00:38:18.000Z | from __future__ import absolute_import, division, print_function
class GameballException(Exception):
    """Base exception for Gameball API failures.

    Carries the raw HTTP response details (body, parsed JSON, headers and
    status code) alongside the human-readable message.
    """

    def __init__(self, message=None, http_body=None, json_body=None,
                 headers=None, code=None):
        super(GameballException, self).__init__(message)
        self._message = message
        self.http_body = http_body
        self.json_body = json_body
        self.headers = headers or {}
        self.code = code

    def __str__(self):
        # Fall back to a placeholder when no message was supplied.
        return self._message if self._message else "<empty message>"

    # Returns the underlying `Exception` (base class) message, which is usually
    # the raw message returned by Gameball's API. This was previously available
    # in python2 via `error.message`. Unlike `str(error)`, it omits "Request
    # req_..." from the beginning of the string.
    @property
    def user_message(self):
        """The raw message as returned by Gameball's API."""
        return self._message

    def __repr__(self):
        return "{0}(message={1!r})".format(type(self).__name__, self._message)
class AuthenticationError(GameballException):
    """Gameball API error for authentication failures.

    NOTE(review): raise sites are outside this module — confirm the exact
    conditions where callers use this over APIError.
    """
    pass


class APIError(GameballException):
    """General Gameball API request error (raised elsewhere in the package)."""
    pass
| 26.8 | 80 | 0.611111 | 130 | 1,206 | 5.346154 | 0.484615 | 0.079137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001193 | 0.305141 | 1,206 | 44 | 81 | 27.409091 | 0.828162 | 0.216418 | 0 | 0.064516 | 0 | 0 | 0.032402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0.064516 | 0.032258 | 0.064516 | 0.354839 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
41a7bf570de5af2196396f6db7d2f68a8abdfe45 | 5,984 | py | Python | ana/fresnel.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | 11 | 2020-07-05T02:39:32.000Z | 2022-03-20T18:52:44.000Z | ana/fresnel.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | null | null | null | ana/fresnel.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | 4 | 2020-09-03T20:36:32.000Z | 2022-01-19T07:42:21.000Z | #!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
fresnel.py : analytic reflection expectations
==================================================
"""
import os, logging
log = logging.getLogger(__name__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from opticks.ana.base import opticks_environment
from opticks.ana.nbase import count_unique
np.set_printoptions(suppress=True, precision=3)
def fresnel(x, n1, n2, spol=True):
    """
    Fresnel power reflectance at incidence angle(s) *x* (radians) for a
    boundary going from refractive index n1 into n2.

    https://en.wikipedia.org/wiki/Fresnel_equations

    :param x: incidence angle(s) in radians (scalar or ndarray)
    :param n1: refractive index of the incident medium
    :param n2: refractive index of the transmitting medium
    :param spol: True for S (perpendicular), False for P (parallel) polarization
    :return: reflectance R in [0,1], same shape as the broadcast inputs
    """
    cos_i = np.cos(x)
    # cos of the transmitted angle via Snell's law: n1 sin(i) = n2 sin(t)
    cos_t = np.sqrt(1. - np.square(n1 * np.sin(x) / n2))

    if spol:
        amp = (n1 * cos_i - n2 * cos_t) / (n1 * cos_i + n2 * cos_t)
    else:
        amp = (n1 * cos_t - n2 * cos_i) / (n1 * cos_t + n2 * cos_i)

    return np.square(amp)
def fresnel_factor(seqhis, i, n1, n2, spol=True):
    """
    Product of Fresnel reflect/transmit coefficients along a photon history.

    :param seqhis: history sequence string eg "TO BT BR BT SA"
    :param i: incidence angle(s) in radians (sequence/ndarray)
    :param n1: refractive index of initial material
    :param n2: refractive index of material that is transmitted into
    :param spol: polarization flag forwarded to fresnel()
    :return: per-angle factor, ndarray of len(i)
    :raises ValueError: on an unrecognised history step

    Not aiming for generality, only works for simple geometries like
    raindrops, prisms, lens.
    """
    rx = fresnel(i, n1, n2, spol=spol)
    tx = 1.0 - rx
    ff = np.ones(len(i))
    for step in seqhis.split(" "):
        if step in ("TO", "SA"):
            # start / surface-absorb steps carry no boundary factor
            continue
        elif step == "BT":
            ff *= tx
        elif step == "BR":
            ff *= rx
        else:
            # FIX: was `assert 0, step`, which is silently stripped under
            # `python -O`; raise explicitly so bad histories always fail.
            raise ValueError("unexpected history step %r in %r" % (step, seqhis))
    return ff
def fresnel_s(i, n, method=0):
    """
    S-polarized reflectance via the angle-difference form::

         sin(i-r)     si cr - ci sr
         --------  =  -------------
         sin(i+r)     si cr + ci sr

    This form whilst pretty, gives nan at normal incidence, 0/0

    :param i: incidence angle(s) in radians
    :param n: relative refractive index n2/n1
    :param method: 0 expands with products of sin/cos, otherwise uses
        the sin of angle sums/differences directly
    """
    sin_i = np.sin(i)
    sin_r = sin_i / n  # Snell's law

    if method == 0:
        cos_i = np.sqrt(1 - sin_i * sin_i)
        cos_r = np.sqrt(1 - sin_r * sin_r)
        num = sin_i * cos_r - cos_i * sin_r
        den = sin_i * cos_r + cos_i * sin_r
    else:
        ang_i = np.arcsin(sin_i)
        ang_r = np.arcsin(sin_r)
        num = np.sin(ang_i - ang_r)
        den = np.sin(ang_i + ang_r)

    return np.square(num / den)
def fresnel_p(i, n):
    """
    P-polarized reflectance via the angle-difference form::

         tan(i-r)
         --------
         tan(i+r)

    :param i: incidence angle(s) in radians
    :param n: relative refractive index n2/n1
    """
    ang_i = np.arcsin(np.sin(i))
    ang_r = np.arcsin(np.sin(i) / n)  # refracted angle from Snell's law
    ratio = np.tan(ang_i - ang_r) / np.tan(ang_i + ang_r)
    return np.square(ratio)
class Fresnel(object):
    """
    Precomputed S/P Fresnel reflectance curves over an angle domain in
    degrees for a boundary from index n1 into n2, with the Brewster and
    critical angles and matplotlib plotting helpers.
    """

    def __init__(self, n1, n2, dom=None):
        """
        :param n1: refractive index (array-like) of the incident medium
        :param n2: refractive index (array-like) of the transmitting medium
        :param dom: angle domain in degrees; defaults to 0..90 in 1-degree bins
        """
        if dom is None:
            dom = np.linspace(0,90,91)

        n1 = np.asarray(n1)
        n2 = np.asarray(n2)

        th = dom*np.pi/180.
        spol = fresnel(th, n1, n2, True)
        ppol = fresnel(th, n1, n2, False)
        pass

        self.n1 = n1
        self.n2 = n2
        self.dom = dom
        self.th = th
        # curves sampled at bin edges
        self.spol_0 = spol
        self.ppol_0 = ppol

        #self.alternative_check()

        self.cen = (dom[:-1] + dom[1:])/2.
        # avg of bin edge values
        self.spol = (spol[:-1] + spol[1:])/2.
        self.ppol = (ppol[:-1] + ppol[1:])/2.
        self.upol = (self.spol+self.ppol)/2.   # unpol?

        # characteristic angles, in degrees
        self.brewster = np.arctan(n2/n1)*180./np.pi
        self.critical = np.arcsin(n1/n2)*180./np.pi

    def alternative_check(self):
        """
        Alternative angle difference forms, misbehave at normal incidence
        Otherwise they match
        """
        th = self.th
        n1 = self.n1
        n2 = self.n2
        spol_0 = self.spol_0
        ppol_0 = self.ppol_0

        spol_2 = fresnel_s( th, n2/n1, method=1)
        spol_3 = fresnel_s( th, n2/n1, method=0)
        # skip the first bin (normal incidence), where the difference form is 0/0
        assert np.allclose( spol_0[1:], spol_2[1:] ), np.dstack([spol_0,spol_2, spol_3])
        assert np.allclose( spol_0[1:], spol_3[1:] ), np.dstack([spol_0,spol_2, spol_3])

        ppol_2 = fresnel_p( th, n2/n1)
        assert np.allclose( ppol_0[1:], ppol_2[1:] ), np.dstack([ppol_0, ppol_2])

    def __call__(self, xd, n):
        """
        Reflectance at angle *xd* in degrees.

        NOTE(review): `f` below is undefined in this scope, so calling this
        raises NameError; presumably f is the S-polarization fraction and n a
        normalization — confirm intent before relying on this method.
        """
        x = xd*np.pi/180.
        n1 = self.n1
        n2 = self.n2
        cx = np.cos(x)
        sx = np.sin(x)
        disc = 1. - np.square(n1*sx/n2)
        qdisc = np.sqrt(disc)
        pass
        spol = np.square((n1*cx - n2*qdisc)/(n1*cx + n2*qdisc))
        ppol = np.square((n1*qdisc - n2*cx)/(n1*qdisc + n2*cx))
        return n*(spol*f + (1.-f)*ppol)

    def pl(self):
        # bin-centre S and P reflectance curves
        plt.plot(self.cen, self.spol, label="S (perp)", c="r")
        plt.plot(self.cen, self.ppol, label="P (para)", c="b")

    def title(self):
        return "Fresnel %4.3f/%4.3f " % (self.n1, self.n2 )

    def plot(self, fig, ny=1, nx=1, n=1, log_=False):
        """Plot both curves plus the Brewster/critical angle markers."""
        plt.title(self.title())
        ax = fig.add_subplot(ny,nx,n)
        self.pl()
        self.angles()
        legend = ax.legend(loc='upper left', shadow=True)
        if log_:
            ax.set_yscale('log')

    def angles(self):
        # vertical markers at the Brewster and critical angles
        a = self.brewster
        plt.plot([a, a], [1e-6, 1], 'k-', c="b", lw=2, label="Brewster")
        a = self.critical
        plt.plot([a, a], [1e-6, 1], 'k-', c="r", lw=2, label="Critical")
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    opticks_environment()

    # boundary from vacuum (n=1) into a medium with n=1.458
    n1 = np.array([1.])
    n2 = np.array([1.458])

    fr = Fresnel(n1,n2)

    # log-scale plot shows the Brewster dip in the P curve clearly
    fig = plt.figure()
    fr.plot(fig, log_=True)
    fig.show()
| 24.42449 | 92 | 0.546123 | 906 | 5,984 | 3.538631 | 0.290287 | 0.013724 | 0.012477 | 0.013724 | 0.168434 | 0.150967 | 0.128509 | 0.09607 | 0.07486 | 0.05053 | 0 | 0.039496 | 0.297627 | 5,984 | 244 | 93 | 24.52459 | 0.723293 | 0.244151 | 0 | 0.246269 | 0 | 0 | 0.020656 | 0 | 0 | 0 | 0 | 0 | 0.029851 | 1 | 0.08209 | false | 0.052239 | 0.044776 | 0.007463 | 0.179104 | 0.007463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
41a80a7bb2ea9f3edd7fce0c895966513940b18c | 17,695 | py | Python | examples/api-samples/inc_samples/sample31.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/sample31.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/sample31.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | ### This sample will show how to Create and Upload Envelop to GroupDocs account using Python SDK
# Import of classes from libraries
import os, time, random
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.StorageApi import StorageApi
from groupdocs.SignatureApi import SignatureApi
from groupdocs.DocApi import DocApi
from groupdocs.AsyncApi import AsyncApi
from groupdocs.MergeApi import MergeApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
from groupdocs.models.Datasource import Datasource
from groupdocs.models.DatasourceField import DatasourceField
from groupdocs.models.SignatureFieldSettingsInfo import SignatureFieldSettingsInfo
from groupdocs.models.SignatureEnvelopeFieldSettingsInfo import SignatureEnvelopeFieldSettingsInfo
from groupdocs.FileStream import FileStream
from groupdocs.models.WebhookInfo import WebhookInfo
import pdb
# Helper: a value counts as present only when it is non-None and non-empty
def IsNotNull(value):
    """Return True when *value* is neither None nor empty (len > 0)."""
    if value is None:
        return False
    return len(value) > 0
# Set variables and get POST data
def sample31(request):
clientId = request.POST.get('client_id')
privateKey = request.POST.get('private_key')
email = request.POST.get('email')
name = request.POST.get('name')
country = request.POST.get('country')
street = request.POST.get('street')
city = request.POST.get('city')
basePath = request.POST.get('server_type')
fileId = request.POST.get('template_guid')
callbackUrl = request.POST.get('callbackUrl')
iframe = ''
message = ''
webHook = WebhookInfo
# Checking required parameters
if IsNotNull(clientId) == False or IsNotNull(privateKey) == False or IsNotNull(email) == False or IsNotNull(name) == False or IsNotNull(country) == False or IsNotNull(street) == False or IsNotNull(city) == False:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : 'You do not enter all parameters' })
#Get curent work directory
currentDir = os.path.dirname(os.path.realpath(__file__))
#Create text file
fp = open(currentDir + '/../user_info.txt', 'w')
#Write user info to text file
fp.write(clientId + "\r\n" + privateKey)
fp.close()
#Clear downloads folder
if os.path.isdir(currentDir + '/../downloads'):
#Get list of files
for the_file in os.listdir(currentDir + '/../downloads'):
file_path = os.path.join(currentDir + '/../downloads', the_file)
try:
#Delete file from folder
os.unlink(file_path)
except Exception, e:
print e
### Create Signer, ApiClient and Annotation Api objects
# Create signer object
signer = GroupDocsRequestSigner(privateKey)
# Create apiClient object
apiClient = ApiClient(signer)
# Create StorageApi object
storage = StorageApi(apiClient)
# Create SignatureApi object
signature = SignatureApi(apiClient)
docApi = DocApi(apiClient)
mergeApi = MergeApi(apiClient)
asyncApi = AsyncApi(apiClient)
if basePath == "":
basePath = 'https://api.groupdocs.com/v2.0'
#Set base path
storage.basePath = basePath
signature.basePath = basePath
docApi.basePath = basePath
mergeApi.basePath = basePath
asyncApi.basePath = basePath
guid = fileId
#Create list with entered data
enteredData = {"email": email, "country": country, "name": name, "street": street, "city": city}
#Create new Datasource object
dataSource = Datasource
array = []
#Create DataSourceField object and filing it with entered data
for key, data in enteredData.iteritems():
value = [data]
field = DatasourceField()
field.name = key
field.type = "text"
field.values = value
array.append(field)
#Set DataSourceField object to the fields parameter of the DataSource object
dataSource.fields = array
try:
#Add DataSource to GroupDocs
addDataSource = mergeApi.AddDataSource(clientId, dataSource)
#Check status
if addDataSource.status == "Ok":
try:
#Merge DataSource with documnet and convert it to PDF
job = mergeApi.MergeDatasource(clientId, guid, addDataSource.result.datasource_id, targetType = "pdf")
if job.status == "Ok":
#Time delay necessary for server side processing
time.sleep(5)
i = 0
for counter in range(5):
# Make request to api for get document info by job id
try:
jobInfo = asyncApi.GetJobDocuments(clientId, job.result.job_id)
if jobInfo.result.job_status == "Completed" or jobInfo.result.job_status == "Archived":
break;
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
i = i + 1
#If job status Postponed throw exception with error
if jobInfo.result.job_status == "Postponed":
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : 'Merge datasource is failed' })
fileGuid = jobInfo.result.inputs[0].outputs[0].guid
try:
# Create envelope using user id and entered by user name
envelop = signature.CreateSignatureEnvelope(clientId, name=jobInfo.result.inputs[0].outputs[0].name)
if envelop.status == "Ok":
time.sleep(3)
try:
# Add uploaded document to envelope
addDocument = signature.AddSignatureEnvelopeDocument(clientId, envelop.result.envelope.id, fileGuid)
if addDocument.status == "Ok":
# Get role list for curent user
try:
recipient = signature.GetRolesList(clientId)
if recipient.status == "Ok":
# Get id of role which can sign
roleId = None
for item in recipient.result.roles:
if item.name == "Signer":
roleId = item.id
#Generate random field name
rand = random.randint(0, 500)
fieldName = "singSample" + str(rand)
#Create SignatureFieldSettings object
fieldSettings = SignatureFieldSettingsInfo
fieldSettings.name = fieldName
try:
#Create signatureField
createField = signature.CreateSignatureField(clientId, body = fieldSettings)
if createField.status == "Ok":
# add recipient
try:
addRecipient = signature.AddSignatureEnvelopeRecipient(clientId, envelop.result.envelope.id, email, name, "lastName", roleId)
if addRecipient.status == "Ok":
# Get recipient id
getRecipient = signature.GetSignatureEnvelopeRecipients(clientId, envelop.result.envelope.id)
if getRecipient.status == "Ok":
#Get recipient id
recipientId = getRecipient.result.recipients[0].id
#Convert callback string to stream
if (IsNotNull(callbackUrl)):
webHook.callbackUrl = callbackUrl
else:
webHook.callbackUrl = ''
try:
#Get SignatureEnvelopDocuments
getDocuments = signature.GetSignatureEnvelopeDocuments(clientId, envelop.result.envelope.id)
if getDocuments.status == "Ok":
#Create signature field for sign (max LocationX,Y can bee 1.0)
signatureSettings = SignatureEnvelopeFieldSettingsInfo
signatureSettings.locationX = "0.15"
signatureSettings.locationY = "0.73"
signatureSettings.locationWidth = "150"
signatureSettings.locationHeight = "50"
signatureSettings.name = fieldName
signatureSettings.forceNewField = True
signatureSettings.page = "1"
try:
#Add created sign field to the envelop
addSignatureField = signature.AddSignatureEnvelopeField(clientId, envelop.result.envelope.id, getDocuments.result.documents[0].documentId, recipientId, "0545e589fb3e27c9bb7a1f59d0e3fcb9", body = signatureSettings)
if addSignatureField.status == "Ok":
#Send created envelop
send = signature.SignatureEnvelopeSend(clientId, envelop.result.envelope.id, body = webHook)
# make result messages
if send.status == "Ok":
message = '<p>File was uploaded to GroupDocs. Here you can see your <strong>' + name + '</strong> file in the GroupDocs Embedded Viewer.</p>';
# Generation of iframe URL using jobInfo.result.outputs[0].guid
if basePath == "https://api.groupdocs.com/v2.0":
iframe = 'https://apps.groupdocs.com/signature/signembed/' + envelop.result.envelope.id + '/' + recipientId
elif basePath == "https://dev-api.groupdocs.com/v2.0":
iframe = 'https://dev-apps.groupdocs.com/signature/signembed/' + envelop.result.envelope.id + '/' + recipientId
elif basePath == "https://stage-api.groupdocs.com/v2.0":
iframe = 'https://stage-apps.groupdocs.com/signature/signembed/' + envelop.result.envelope.id + '/' + recipientId
iframe = signer.signUrl(iframe)
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : send.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : getDocuments.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : addRecipient.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : createField.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : recipient.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : addDocument.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : envelop.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : job.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
else:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : addDataSource.error_message })
except Exception, e:
return render_to_response('__main__:templates/sample31.pt',
{ 'error' : str(e) })
# If request was successfull - set variables for template
return render_to_response('__main__:templates/sample31.pt',
{'userId' : clientId, 'privateKey' : privateKey, 'email':email, 'name':name, 'envId' : envelop.result.envelope.id, 'iframe': iframe, 'message': message, 'roleId' : roleId, 'callbackUrl' : callbackUrl},
request=request) | 64.345455 | 289 | 0.441989 | 1,267 | 17,695 | 6.041042 | 0.219416 | 0.025085 | 0.05017 | 0.066109 | 0.289522 | 0.261301 | 0.245362 | 0.228247 | 0.222367 | 0.209302 | 0 | 0.011637 | 0.490082 | 17,695 | 275 | 290 | 64.345455 | 0.83664 | 0.086635 | 0 | 0.322115 | 0 | 0 | 0.099981 | 0.044809 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.076923 | null | null | 0.004808 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41a8ad6c52784414d3afc665096b267a843b65be | 678 | py | Python | tests/test_data.py | hhsecond/stockroom | d1335773e33493df07cc95f9bb48fbc351313dd6 | [
"Apache-2.0"
] | null | null | null | tests/test_data.py | hhsecond/stockroom | d1335773e33493df07cc95f9bb48fbc351313dd6 | [
"Apache-2.0"
] | null | null | null | tests/test_data.py | hhsecond/stockroom | d1335773e33493df07cc95f9bb48fbc351313dd6 | [
"Apache-2.0"
] | null | null | null | import pytest
import numpy as np
def test_save_data(stock):
arr = np.arange(20).reshape(4, 5)
stock.data['aset', 1] = arr
stock.commit("added data")
assert np.allclose(stock.data['aset', 1], arr)
del stock
def test_save_to_non_existing_column(stock):
arr = np.arange(20).reshape(4, 5)
with pytest.raises(KeyError):
stock.data['wrongaset', 1] = arr
def test_save_to_different_typed_column(stock):
arr = np.arange(20).reshape(4, 5).astype(np.float)
with pytest.raises(ValueError):
stock.data['aset', 1] = arr
def test_fetch_non_existing_sample_key(stock):
with pytest.raises(KeyError):
stock.data['aset', 1]
| 24.214286 | 54 | 0.675516 | 104 | 678 | 4.240385 | 0.375 | 0.102041 | 0.117914 | 0.126984 | 0.47619 | 0.360544 | 0.210884 | 0.210884 | 0.14966 | 0 | 0 | 0.030797 | 0.185841 | 678 | 27 | 55 | 25.111111 | 0.768116 | 0 | 0 | 0.315789 | 0 | 0 | 0.051622 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.210526 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41a8d342baf300c3e7063c86b9c63dcb4a0417e4 | 515 | py | Python | pandas_market_calendars/holidays_jp.py | mgellman-radix/pandas_market_calendars | 2de80686267cdbb0b3b6878521fd2aebc6dca3bb | [
"MIT"
] | null | null | null | pandas_market_calendars/holidays_jp.py | mgellman-radix/pandas_market_calendars | 2de80686267cdbb0b3b6878521fd2aebc6dca3bb | [
"MIT"
] | null | null | null | pandas_market_calendars/holidays_jp.py | mgellman-radix/pandas_market_calendars | 2de80686267cdbb0b3b6878521fd2aebc6dca3bb | [
"MIT"
] | null | null | null | from pandas import (
Timestamp
)
# Apr. 30 (Tue.) Abdication Day
# May 1 (Wed.) Accession Day
# May 2 (Thu.) National Holiday
# May 3 (Fri.) Constitution Memorial Day
# May 4 (Sat.) Greenery Day
# May 6 (Mon.) Children's Day (May 5) observed
AscensionDays = [
Timestamp('2019-04-30', tz='Asia/Tokyo'),
Timestamp('2019-05-01', tz='Asia/Tokyo'),
Timestamp('2019-05-02', tz='Asia/Tokyo'),
Timestamp('2019-05-03', tz='Asia/Tokyo'),
Timestamp('2019-05-06', tz='Asia/Tokyo'),
]
| 28.611111 | 49 | 0.619417 | 75 | 515 | 4.253333 | 0.546667 | 0.094044 | 0.172414 | 0.250784 | 0.326019 | 0.326019 | 0 | 0 | 0 | 0 | 0 | 0.116505 | 0.2 | 515 | 17 | 50 | 30.294118 | 0.657767 | 0.415534 | 0 | 0 | 0 | 0 | 0.340136 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41b028aa3880d819cacddc876ce5774ff3b88727 | 1,240 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/insert-delete-getrandom-o1.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/insert-delete-getrandom-o1.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/insert-delete-getrandom-o1.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(1)
# Space: O(n)
from random import randint
class RandomizedSet(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__set = []
self.__used = {}
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
:type val: int
:rtype: bool
"""
if val in self.__used:
return False
self.__set += val,
self.__used[val] = len(self.__set)-1
return True
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.__used:
return False
self.__used[self.__set[-1]] = self.__used[val]
self.__set[self.__used[val]], self.__set[-1] = self.__set[-1], self.__set[self.__used[val]]
self.__used.pop(val)
self.__set.pop()
return True
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
"""
return self.__set[randint(0, len(self.__set)-1)]
| 21.754386 | 106 | 0.542742 | 157 | 1,240 | 4.006369 | 0.33758 | 0.122417 | 0.063593 | 0.071542 | 0.402226 | 0.36566 | 0.216216 | 0.136725 | 0.136725 | 0.136725 | 0 | 0.0086 | 0.343548 | 1,240 | 56 | 107 | 22.142857 | 0.764128 | 0.282258 | 0 | 0.190476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.047619 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
41b8fe61ea30cea12c5965b50e3135e870aa3f8f | 1,637 | py | Python | BuildSimHubAPI/measures/hvac_template.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 19 | 2018-02-27T22:58:04.000Z | 2022-02-21T15:03:59.000Z | BuildSimHubAPI/measures/hvac_template.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 11 | 2018-02-15T16:47:53.000Z | 2018-12-19T18:33:20.000Z | BuildSimHubAPI/measures/hvac_template.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 11 | 2018-01-26T02:12:38.000Z | 2019-09-29T12:05:31.000Z | from .model_action import ModelAction
class HVACTemplate(ModelAction):
# this shows the ip to si conversion rate
# if unit is 'ip', then multiply this rate.
# for window it is the U-value
# convert U-value IP to SI
# The conversion will change w/ft2 to w/m2 if ip shows
NUM_HVAC = 14
def __init__(self, unit="si"):
ModelAction.__init__(self, 'hvac_template', unit)
self._measure_name = 'HVAC'
self._default_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
# DOAS + VRF as default
self._data = 10
self._lower_limit = 0
self._upper_limit = HVACTemplate.NUM_HVAC - 1
self._measure_help = '''
measure name: HVAC
Unit: not required
Minimum: 0
Maximum: 13
Type: categorical
This measure will change the HVAC system in the idf file
The HVAC system types are:
0. sys1: PTAC
1. sys2: PTHP
2. sys3: PSZ-AC
3. sys4: PSZ-HP
4. sys5: Packaged VAV with Reheat
5. sys6: Packaged VAV with PFP Boxes
6. sys7: VAV with Reheat
7. sys8: VAV with PFP Boxes
8. sys9: Warm air furnace, gas fired
9. sys10: Warm air furnace, electric
10. doasvrf: DOAS with variable refrigerant flow
11. doasfancoil: DOAS with Fan coils
12. doaswshp: DOAS with water source heat pump (ground as condenser)
13. doascbcb: DOAS with active cool beam + convective baseboard
14. vavfourpipebeam: VAV system with four pipe beam
'''
def _unit_convert_ratio(self):
return 1.0
| 34.104167 | 79 | 0.60843 | 234 | 1,637 | 4.145299 | 0.547009 | 0.028866 | 0.012371 | 0.030928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057399 | 0.318876 | 1,637 | 47 | 80 | 34.829787 | 0.812556 | 0.128283 | 0 | 0 | 0 | 0 | 0.637324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.027778 | 0.027778 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41baba6d45efb33b6b1668aec3423b969a4f4ce4 | 1,633 | py | Python | test/mc_init_test.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | test/mc_init_test.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | test/mc_init_test.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
# pylint: disable=E0611
from pytest import xfail
from .. import ConfigRoot, ConfigItem
from ..envs import EnvFactory
ef = EnvFactory()
dev1 = ef.Env('dev1')
dev2 = ef.Env('dev2')
g_dev = ef.EnvGroup('g_dev', dev1, dev2)
tst = ef.Env('tst')
ef.EnvGroup('g_dev_tst', g_dev, tst)
pp = ef.Env('pp')
prod = ef.Env('prod')
ef.EnvGroup('g_prod', pp, prod)
class item1(ConfigItem):
def mc_init(self):
print "MC_INIT"
self.setattr('aa', g_dev=2, dev1=7)
class item2(ConfigItem):
def mc_init(self):
self.setattr('aa', g_dev_tst=2)
def test_direct_env_in_mc_init_overrides_default_and_group_in_with():
with ConfigRoot(dev1, ef):
it = item1(aa=13)
assert it.aa == 7
with ConfigRoot(dev1, ef):
with item1() as it:
it.setattr('aa', default=13)
assert it.aa == 7
with ConfigRoot(dev1, ef):
with item1() as it:
it.setattr('aa', default=1, g_dev=13)
assert it.aa == 7
with ConfigRoot(dev1, ef):
with item1() as it:
it.setattr('aa', default=1, g_dev_tst=13)
assert it.aa == 7
def test_direct_env_in_with_overrides_mc_init():
with ConfigRoot(dev1, ef):
with item1() as it:
it.setattr('aa', dev1=1, tst=111, g_dev=7, g_prod=17)
assert it.aa == 1
def test_more_specific_group_in_with_overrides_mc_init():
with ConfigRoot(dev1, ef):
with item2() as it:
it.setattr('aa', g_dev=1, tst=111, g_prod=17)
assert it.aa == 1
| 23.666667 | 73 | 0.63319 | 262 | 1,633 | 3.774809 | 0.259542 | 0.040445 | 0.109201 | 0.121335 | 0.49545 | 0.351871 | 0.351871 | 0.31547 | 0.31547 | 0.31547 | 0 | 0.049402 | 0.231476 | 1,633 | 68 | 74 | 24.014706 | 0.738645 | 0.090018 | 0 | 0.4 | 0 | 0 | 0.039136 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 0 | null | null | 0 | 0.066667 | null | null | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41be8ad9b87e428d6c9fb93063564080df1a6f56 | 1,650 | py | Python | plotly/run_funcs.py | NREL/MetMastVis | 0c3dd87540471c061eb491c871fdb32e6dabd31b | [
"Apache-2.0"
] | 1 | 2018-05-25T20:03:48.000Z | 2018-05-25T20:03:48.000Z | plotly/run_funcs.py | nhamilto/MetMast | 38475682adb21081c86c58e9008a278971306c23 | [
"Apache-2.0"
] | null | null | null | plotly/run_funcs.py | nhamilto/MetMast | 38475682adb21081c86c58e9008a278971306c23 | [
"Apache-2.0"
] | 2 | 2018-06-07T20:00:03.000Z | 2020-11-26T21:52:04.000Z | import vis
import utils
import met_funcs
import plotly_vis
import MetMastData
import pandas as pd
import matplotlib.pyplot as plt
from colour import Color
import plotly
from plotly import tools
#import plotly.tools as tls
import plotly.plotly as py
import plotly.graph_objs as go
# Place input files here
#inputfiles_here = ['2012_August.csv']
#inputfiles_here = ['2013_January.csv','2013_February.csv','2013_March.csv','2013_April.csv','2013_May.csv','2013_June.csv','2013_July.csv','2013_August.csv','2013_September.csv','2013_October.csv','2013_November.csv','2013_December.csv']
#inputfiles_here = ['2013_January.csv']
year = 2017
inputfiles_here = [str(year) + '_' + s + '.csv' for s in utils.monthnames()]
'''
inputfiles_here = MetMastData()
actual_data =
cate_info = actual_data.cate_info
'''
# Load and filter data
actual_data = met_funcs.load_met_data(inputfiles_here)
actual_data = met_funcs.drop_nan_cols(actual_data)
actual_data = met_funcs.qc_mask(actual_data)
# Extract categorical information
keep_cats = met_funcs.categories_to_keep()
ex_cats = met_funcs.categories_to_exclude()
var_cats,var_units,var_labels,var_save = met_funcs.categorize_fields(actual_data,keep_cats,ex_cats)
# Extract more information
met_funcs.groom_data(actual_data,var_cats)
stab_conds,stab_cats = met_funcs.flag_stability(actual_data)
cate_info = met_funcs.get_catinfo(actual_data)
# Plot the data with the desired category and function
category = 'speed'
#fig1 = plotly_vis.monthly_rose_fig(actual_data,cate_info,category)
fig1 = plotly_vis.monthlyhourlyplot(actual_data,cate_info,category)
py.iplot(fig1, filename = 'MetMast-Test_funcMonthlyHourly') | 34.375 | 238 | 0.807273 | 255 | 1,650 | 4.909804 | 0.396078 | 0.103834 | 0.055911 | 0.071885 | 0.164537 | 0.049521 | 0 | 0 | 0 | 0 | 0 | 0.041944 | 0.089697 | 1,650 | 48 | 239 | 34.375 | 0.791611 | 0.338182 | 0 | 0 | 0 | 0 | 0.040201 | 0.030151 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.461538 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
41c78c68d065b859c2c2e5291510d5c31db6e4ea | 3,166 | py | Python | pandaserver/test/testSimulReco14.py | rybkine/panda-server | 30fdeaa658a38fe2049849446c300c1e1f5b5231 | [
"Apache-2.0"
] | 1 | 2019-08-30T13:47:51.000Z | 2019-08-30T13:47:51.000Z | pandaserver/test/testSimulReco14.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | pandaserver/test/testSimulReco14.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | import sys
import time
import random
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
if len(sys.argv)>1:
site = sys.argv[1]
cloud = None
else:
site = None
cloud = 'US'
#cloud = 'TW'
#Recent changes (BNL migration to LFC?) forvce the cloud to be specified
cloud = 'US'
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
files = {
'EVNT.023986._00001.pool.root.1':None,
#'EVNT.023989._00001.pool.root.1':None,
}
jobList = []
index = 0
for lfn in files.keys():
index += 1
job = JobSpec()
job.jobDefinitionID = (time.time()) % 10000
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
job.AtlasRelease = 'Atlas-14.2.20'
job.homepackage = 'AtlasProduction/14.2.20.1'
job.transformation = 'csc_simul_reco_trf.py'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.computingSite = site
job.prodDBlock = 'mc08.105031.Jimmy_jetsJ2.evgen.EVNT.e347_tid023986'
#job.prodDBlock = 'mc08.105034.Jimmy_jetsJ5.evgen.EVNT.e347_tid023989'
job.prodSourceLabel = 'test'
job.processingType = 'test'
job.currentPriority = 10000
job.cloud = cloud
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)
fileD = FileSpec()
fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050601'
fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v050601'
fileD.lfn = 'DBRelease-5.6.1.tar.gz'
fileD.type = 'input'
job.addFile(fileD)
fileOA = FileSpec()
fileOA.lfn = "%s.AOD.pool.root" % job.jobName
fileOA.destinationDBlock = job.destinationDBlock
fileOA.destinationSE = job.destinationSE
fileOA.dataset = job.destinationDBlock
fileOA.destinationDBlockToken = 'ATLASDATADISK'
fileOA.type = 'output'
job.addFile(fileOA)
fileOE = FileSpec()
fileOE.lfn = "%s.ESD.pool.root" % job.jobName
fileOE.destinationDBlock = job.destinationDBlock
fileOE.destinationSE = job.destinationSE
fileOE.dataset = job.destinationDBlock
fileOE.destinationDBlockToken = 'ATLASDATADISK'
fileOE.type = 'output'
job.addFile(fileOE)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.destinationDBlockToken = 'ATLASDATADISK'
fileOL.type = 'log'
job.addFile(fileOL)
job.jobParameters="%s %s 30 500 3 ATLAS-GEO-02-01-00 3 3 QGSP_BERT jobConfig.VertexPosFastIDKiller.py FastSimulationJobTransforms/FastCaloSimAddCellsRecConfig.py,NoTrackSlimming.py %s OFF NONE NONE %s NONE" % (fileI.lfn, fileOA.lfn, fileD.lfn, fileOE.lfn)
jobList.append(job)
s,o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
print "PandaID=%s" % x[0]
| 31.039216 | 259 | 0.669299 | 371 | 3,166 | 5.679245 | 0.371968 | 0.066445 | 0.052682 | 0.013289 | 0.055055 | 0.037969 | 0.037969 | 0 | 0 | 0 | 0 | 0.0512 | 0.21036 | 3,166 | 101 | 260 | 31.346535 | 0.7912 | 0.062539 | 0 | 0.025 | 0 | 0.0125 | 0.210864 | 0.121795 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0875 | null | null | 0.0375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41c7bcd341ef6d9db7278e79f57fd4a8cff03453 | 8,012 | py | Python | TweakApi/models/billing_source_sofort.py | tweak-com-public/tweak-api-client-python | 019f86da11fdb12683d516f8f37db5d717380bcc | [
"Apache-2.0"
] | null | null | null | TweakApi/models/billing_source_sofort.py | tweak-com-public/tweak-api-client-python | 019f86da11fdb12683d516f8f37db5d717380bcc | [
"Apache-2.0"
] | null | null | null | TweakApi/models/billing_source_sofort.py | tweak-com-public/tweak-api-client-python | 019f86da11fdb12683d516f8f37db5d717380bcc | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class BillingSourceSofort(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, country=None, bank_code=None, bic=None, bank_name=None, iban_last4=None, preferred_language=None, statement_descriptor=None, id=None):
"""
BillingSourceSofort - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'country': 'str',
'bank_code': 'str',
'bic': 'str',
'bank_name': 'str',
'iban_last4': 'str',
'preferred_language': 'str',
'statement_descriptor': 'str',
'id': 'str'
}
self.attribute_map = {
'country': 'country',
'bank_code': 'bankCode',
'bic': 'bic',
'bank_name': 'bankName',
'iban_last4': 'ibanLast4',
'preferred_language': 'preferredLanguage',
'statement_descriptor': 'statementDescriptor',
'id': 'id'
}
self._country = country
self._bank_code = bank_code
self._bic = bic
self._bank_name = bank_name
self._iban_last4 = iban_last4
self._preferred_language = preferred_language
self._statement_descriptor = statement_descriptor
self._id = id
@property
def country(self):
"""
Gets the country of this BillingSourceSofort.
:return: The country of this BillingSourceSofort.
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""
Sets the country of this BillingSourceSofort.
:param country: The country of this BillingSourceSofort.
:type: str
"""
self._country = country
@property
def bank_code(self):
"""
Gets the bank_code of this BillingSourceSofort.
:return: The bank_code of this BillingSourceSofort.
:rtype: str
"""
return self._bank_code
@bank_code.setter
def bank_code(self, bank_code):
"""
Sets the bank_code of this BillingSourceSofort.
:param bank_code: The bank_code of this BillingSourceSofort.
:type: str
"""
self._bank_code = bank_code
@property
def bic(self):
"""
Gets the bic of this BillingSourceSofort.
:return: The bic of this BillingSourceSofort.
:rtype: str
"""
return self._bic
@bic.setter
def bic(self, bic):
"""
Sets the bic of this BillingSourceSofort.
:param bic: The bic of this BillingSourceSofort.
:type: str
"""
self._bic = bic
@property
def bank_name(self):
"""
Gets the bank_name of this BillingSourceSofort.
:return: The bank_name of this BillingSourceSofort.
:rtype: str
"""
return self._bank_name
@bank_name.setter
def bank_name(self, bank_name):
"""
Sets the bank_name of this BillingSourceSofort.
:param bank_name: The bank_name of this BillingSourceSofort.
:type: str
"""
self._bank_name = bank_name
@property
def iban_last4(self):
"""
Gets the iban_last4 of this BillingSourceSofort.
:return: The iban_last4 of this BillingSourceSofort.
:rtype: str
"""
return self._iban_last4
@iban_last4.setter
def iban_last4(self, iban_last4):
"""
Sets the iban_last4 of this BillingSourceSofort.
:param iban_last4: The iban_last4 of this BillingSourceSofort.
:type: str
"""
self._iban_last4 = iban_last4
@property
def preferred_language(self):
"""
Gets the preferred_language of this BillingSourceSofort.
:return: The preferred_language of this BillingSourceSofort.
:rtype: str
"""
return self._preferred_language
@preferred_language.setter
def preferred_language(self, preferred_language):
"""
Sets the preferred_language of this BillingSourceSofort.
:param preferred_language: The preferred_language of this BillingSourceSofort.
:type: str
"""
self._preferred_language = preferred_language
@property
def statement_descriptor(self):
"""
Gets the statement_descriptor of this BillingSourceSofort.
:return: The statement_descriptor of this BillingSourceSofort.
:rtype: str
"""
return self._statement_descriptor
@statement_descriptor.setter
def statement_descriptor(self, statement_descriptor):
"""
Sets the statement_descriptor of this BillingSourceSofort.
:param statement_descriptor: The statement_descriptor of this BillingSourceSofort.
:type: str
"""
self._statement_descriptor = statement_descriptor
@property
def id(self):
"""
Gets the id of this BillingSourceSofort.
:return: The id of this BillingSourceSofort.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BillingSourceSofort.
:param id: The id of this BillingSourceSofort.
:type: str
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 25.928803 | 165 | 0.585122 | 876 | 8,012 | 5.18379 | 0.192922 | 0.042281 | 0.176173 | 0.054614 | 0.443074 | 0.298172 | 0.125963 | 0.051531 | 0.014975 | 0 | 0 | 0.005593 | 0.330504 | 8,012 | 308 | 166 | 26.012987 | 0.840977 | 0.413255 | 0 | 0.238532 | 0 | 0 | 0.071653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.201835 | false | 0 | 0.027523 | 0 | 0.357798 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41cb6174b0eb3f32fce935cceb7087df91f7622f | 595 | py | Python | src/help/text/chsh.py | Hamzah-z/user-scripts | b345c812d45f1bd872c771809ecd3c0330ca236e | [
"Apache-2.0"
] | 1 | 2019-12-18T19:49:45.000Z | 2019-12-18T19:49:45.000Z | src/help/text/chsh.py | Hamzah-z/user-scripts | b345c812d45f1bd872c771809ecd3c0330ca236e | [
"Apache-2.0"
] | null | null | null | src/help/text/chsh.py | Hamzah-z/user-scripts | b345c812d45f1bd872c771809ecd3c0330ca236e | [
"Apache-2.0"
] | 1 | 2021-06-13T00:08:02.000Z | 2021-06-13T00:08:02.000Z | """chfn help text"""
CHSH = dict(
text="""chsh changes your "shell" on Redbrick.
** WARNING - Do not use this command if you are unsure
** of what you are doing! :-)
A "Shell" is the style of command line environment on RedBrick.
It is essentially, the 'prompt' and set of commands that you get
when you log in.
Changing this means that your screen prompt may look different,
& some commands may be different. Here are some possible shells:
/usr/local/shells/bash
/usr/local/shells/tcsh
/usr/local/shells/ksh
/usr/local/shells/zsh
However Redbrick only fully supports zsh or bash."""
)
| 27.045455 | 64 | 0.736134 | 98 | 595 | 4.469388 | 0.632653 | 0.073059 | 0.127854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169748 | 595 | 21 | 65 | 28.333333 | 0.88664 | 0.023529 | 0 | 0 | 0 | 0 | 0.942609 | 0.149565 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68bb9912ef13f9b892f27cca3983560845ebbab2 | 1,926 | py | Python | algorithms/2.1_add_two_numbers.py | ycpeng7/leetcode_challenges | 1a55e2af08f8b5c5d204c16cc161721ffe7d6647 | [
"MIT"
] | null | null | null | algorithms/2.1_add_two_numbers.py | ycpeng7/leetcode_challenges | 1a55e2af08f8b5c5d204c16cc161721ffe7d6647 | [
"MIT"
] | null | null | null | algorithms/2.1_add_two_numbers.py | ycpeng7/leetcode_challenges | 1a55e2af08f8b5c5d204c16cc161721ffe7d6647 | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Add Two Numbers
#-------------------------------------------------------------------------------
# By Ying Peng
# https://leetcode.com/problems/add-two-numbers/
# Completed 12/3/20
#-------------------------------------------------------------------------------
# Approach
#-------------------------------------------------------------------------------
"""
1. Start from head
2. While l1 or l2 is not null, keep advancing
3. Add l1 and l2 as value of l3, keep carry in mind
Time: O(n)
Space: O(n)
"""
#-------------------------------------------------------------------------------
# Soluton
#-------------------------------------------------------------------------------
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
l3 = ListNode()
l3_head = l3
carry = 0
while l1 is not None or l2 is not None:
l1_val = 0 if l1 is None else l1.val
l2_val = 0 if l2 is None else l2.val
_sum = l1_val + l2_val + carry
carry = 0
if _sum >= 10:
carry = _sum // 10
_sum = _sum % 10
l3.val = _sum
if l1:
l1 = l1.next
if l2:
l2 = l2.next
l3.next = ListNode()
prev = l3
l3 = l3.next
if carry > 0:
l3.val = carry
else:
del l3
prev.next = None
return l3_head
#-------------------------------------------------------------------------------
# Unit Test
#-------------------------------------------------------------------------------
import unittest
if __name__ == '__main__':
unittest.main() | 27.514286 | 80 | 0.330737 | 176 | 1,926 | 3.482955 | 0.375 | 0.019576 | 0.042414 | 0.029364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038961 | 0.280374 | 1,926 | 70 | 81 | 27.514286 | 0.403319 | 0.472482 | 0 | 0.058824 | 0 | 0 | 0.00804 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.029412 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68c3b0e3c32c4d0937107143623d703a7f6d4cba | 2,279 | py | Python | todo/bucketlist/tests/test_models.py | NdagiStanley/not-by-might | 5216c066c0869056e47259b33e42f375366f022c | [
"MIT"
] | 1 | 2019-01-15T20:18:50.000Z | 2019-01-15T20:18:50.000Z | todo/bucketlist/tests/test_models.py | NdagiStanley/not-by-might | 5216c066c0869056e47259b33e42f375366f022c | [
"MIT"
] | 17 | 2018-09-24T16:46:30.000Z | 2019-01-21T17:19:26.000Z | todo/bucketlist/tests/test_models.py | NdagiStanley/not-by-might | 5216c066c0869056e47259b33e42f375366f022c | [
"MIT"
] | 1 | 2016-04-13T11:16:27.000Z | 2016-04-13T11:16:27.000Z | import datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import Bucketlist, BucketlistItem, User
class UserModelTest(TestCase):
    """Exercise the User, Bucketlist and BucketlistItem models."""

    def setUp(self):
        """Create one user owning one bucketlist that holds one item."""
        self.user = User.objects.create(username='md', password='md')
        self.bucketlist = Bucketlist.objects.create(
            name="My entry Bucketlist", created_by=self.user)
        self.item = BucketlistItem.objects.create(
            title="My entry Bucketlist Item", bucketlist=self.bucketlist)

    def tearDown(self):
        """Remove all created rows so tests stay independent."""
        User.objects.all().delete()
        Bucketlist.objects.all().delete()
        BucketlistItem.objects.all().delete()

    def test_user_string_representation(self):
        """str(user) should equal the username."""
        self.assertEqual(str(self.user), self.user.username)

    def test_user_fields(self):
        """The username set in setUp is persisted."""
        self.assertEqual(self.user.username, 'md')

    def test_bl_fields(self):
        """Bucketlist keeps its name and its creator."""
        self.assertEqual(self.bucketlist.name, "My entry Bucketlist")
        self.assertEqual(self.bucketlist.created_by, self.user)

    def test_list_string_representation(self):
        """str(bucketlist) should be '<Bucketlist {name}>'."""
        self.assertEqual(str(self.bucketlist), '<Bucketlist {}>'.format(self.bucketlist.name))

    def test_items(self):
        """The bucketlist's `items` collection contains the single item."""
        self.assertEqual(len(self.bucketlist.items), 1)

    def test_item_string_representation(self):
        """str(item) should be '<Item {title}>'."""
        self.assertEqual(str(self.item), '<Item {}>'.format(self.item.title))

    def test_bli_fields(self):
        """Item fields match whether read directly or via the bl_items relation."""
        self.assertEqual(self.bucketlist.bl_items.first().id, 1)
        self.assertEqual(self.item.id, 1)
        self.assertEqual(self.bucketlist.bl_items.first().title, "My entry Bucketlist Item")
        self.assertEqual(self.item.title, "My entry Bucketlist Item")
        self.assertEqual(self.bucketlist.bl_items.first().done, False)
        self.assertEqual(self.item.done, False)
| 39.982456 | 94 | 0.678806 | 276 | 2,279 | 5.528986 | 0.202899 | 0.127785 | 0.112058 | 0.09502 | 0.440367 | 0.326343 | 0.277195 | 0.178244 | 0.119266 | 0.119266 | 0 | 0.001639 | 0.197016 | 2,279 | 56 | 95 | 40.696429 | 0.83224 | 0.153137 | 0 | 0 | 0 | 0 | 0.074508 | 0 | 0 | 0 | 0 | 0 | 0.371429 | 1 | 0.257143 | false | 0.028571 | 0.114286 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68c8c49a947bfb4e3cdee097d291390bb5c31758 | 1,726 | py | Python | tests/test_backup.py | RSabet/rolling-backup | 9dcc9ca350d1e5d4d1ee031c36487253c60eafa0 | [
"MIT"
] | null | null | null | tests/test_backup.py | RSabet/rolling-backup | 9dcc9ca350d1e5d4d1ee031c36487253c60eafa0 | [
"MIT"
] | null | null | null | tests/test_backup.py | RSabet/rolling-backup | 9dcc9ca350d1e5d4d1ee031c36487253c60eafa0 | [
"MIT"
] | null | null | null | import random
import pytest
from rolling_backup import backup
CONTENT = "Hello"
def create_backups(image_file, num: int):
    """Write *num* successive versions, back each one up, then verify that
    slot .00 holds the newest content and higher slots hold older ones."""
    base_dir = image_file.dirpath()

    def backup_path(index):
        return base_dir / f"{image_file.basename}.{index:02d}"

    for version in range(num):
        image_file.write(f"{CONTENT} - {version}")
        assert backup(str(image_file), num_to_keep=num)
        assert backup_path(version).exists()

    for slot in range(num):
        assert backup_path(slot).read() == f"{CONTENT} - {num - slot - 1}"
@pytest.fixture(scope="function")
def image_file(tmpdir_factory):
    """Provide a fresh temporary image file pre-populated with CONTENT."""
    target = tmpdir_factory.mktemp("data").join("img.png")
    target.write(CONTENT)
    return target
def test_dummy(image_file):
    """Smoke test: creating a dozen backups should succeed without errors."""
    create_backups(image_file, 12)
def test_rollover(image_file):
    """One backup beyond num_to_keep must not grow the directory."""
    NUM = 12
    create_backups(image_file, NUM)
    expected_entries = NUM + 1  # the original file plus NUM numbered backups
    assert len(image_file.dirpath().listdir()) == expected_entries
    assert backup(str(image_file), num_to_keep=NUM)
    assert len(image_file.dirpath().listdir()) == expected_entries
def test_missing(image_file):
    """A deleted backup slot must be recreated on the next rotation,
    with the surrounding slots keeping the expected (shifted) contents."""
    NUM = 12
    create_backups(image_file, NUM)
    slot = random.choice(range(NUM))
    base_dir = image_file.dirpath()
    missing = base_dir / f"{image_file.basename}.{slot:02d}"
    missing.remove()
    assert not missing.exists()
    image_file.write("xxx")
    assert backup(str(image_file), NUM)
    assert missing.exists()
    assert (base_dir / f"{image_file.basename}.00").read() == "xxx"
    assert (base_dir / f"{image_file.basename}.{slot:02d}").read() == f"{CONTENT} - {NUM - slot}"
    assert (base_dir / f"{image_file.basename}.{(slot + 1):02d}").read() == f"{CONTENT} - {NUM - slot - 2}"
def test_non_existing_dir():
    """Backing up a path that does not exist must report failure."""
    assert not backup("xxxyyyzzz")
68caf27fb5451631df049d1573b4235a1bdc14a4 | 7,705 | py | Python | post/views.py | agiledesign2/drf-blog-post | a0dc7457e9e85e6b1a1f3aa81036b81f7054734f | [
"MIT"
] | null | null | null | post/views.py | agiledesign2/drf-blog-post | a0dc7457e9e85e6b1a1f3aa81036b81f7054734f | [
"MIT"
] | 1 | 2020-05-16T09:44:57.000Z | 2020-05-16T09:44:57.000Z | posts/views.py | agiledesign2/django-blog-post | 318636682b9d4993b5ef7c3e8ead8bb5460aba01 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
from django.utils import timezone
from .models import Post, Category
from taggit.models import Tag
from .forms import AddPostForm
#from .validator import group_required
# complex lookups (for searching)
from django.db.models import Q
from django.urls import reverse_lazy
# class based views
from django.views.generic.edit import CreateView, DeleteView, UpdateView, FormView
from django.views import View
from django.utils.decorators import method_decorator
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.base import TemplateView
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin,
PermissionRequiredMixin,
)
from django.db import transaction
class CategoryDatesMixin:
    """Add sidebar data (categories, archive months, recent posts) to the context."""

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        published = Post.objects.published()
        context["categories"] = Category.objects.all()
        # Month buckets of every published post, newest first.
        context["dates"] = published.datetimes(
            field_name="published", kind="month", order="DESC"
        )
        # The three most recently published posts.
        context["recent_posts"] = published.order_by("-published")[:3]
        return context
class ListPosts(CategoryDatesMixin, ListView):
    """Paginated front-page listing of published posts, newest first."""

    model = Post
    template_name = "posts/index.html"
    context_object_name = "posts"
    ordering = ("-published",)
    paginate_by = 5

    def get_queryset(self):
        return Post.objects.published()
class ListByAuthor(CategoryDatesMixin, ListView):
    """Published posts filtered by the author name given in the URL."""

    model = Post
    context_object_name = "posts"
    template_name = "posts/post_by_author.html"
    paginate_by = 5
    ordering = ("-published",)

    def get_queryset(self):
        author = self.kwargs.get("author", None)
        if not author:
            return []
        return Post.objects.published().filter(author__username=author)

    def get_context_data(self, **kwargs):
        """Expose the author's name to the template."""
        context = super().get_context_data(**kwargs)
        context["author"] = self.kwargs.get("author", None)
        return context
class ListByTag(CategoryDatesMixin, ListView):
    """Published posts carrying the tag given in the URL."""

    model = Post
    context_object_name = "posts"
    template_name = "posts/post_by_tag.html"
    paginate_by = 5
    ordering = ("-published",)

    def get_queryset(self):
        tag = self.kwargs.get("tag", None)
        if not tag:
            return []
        return Post.objects.published().filter(tags__name=tag)

    def get_context_data(self, **kwargs):
        """Expose the tag name to the template."""
        context = super().get_context_data(**kwargs)
        context["tag"] = self.kwargs.get("tag", None)
        return context
class ListByCategory(CategoryDatesMixin, ListView):
    """Published posts belonging to the category named in the URL."""

    model = Post
    context_object_name = "posts"
    template_name = "posts/post_by_category.html"
    paginate_by = 5
    ordering = ("-published",)

    def get_queryset(self):
        category = self.kwargs.get("name", None)
        if not category:
            return []
        return Post.objects.published().filter(category__name=category)

    def get_context_data(self, **kwargs):
        """Expose the category's name to the template."""
        context = super().get_context_data(**kwargs)
        context["category"] = self.kwargs.get("name", None)
        return context
class DetailPost(CategoryDatesMixin, DetailView):
    """Single post page; records a view hit on every GET."""

    model = Post
    template_name = "posts/post_detail.html"

    def get(self, request, *args, **kwargs):
        response = super().get(request, *args, **kwargs)
        # Bump the counter only after the object was fetched successfully.
        self.object.viewed()
        return response
# Post archive views
class ArchiveMixin:
    """Shared configuration for the post date-archive views."""

    model = Post
    date_field = "published"
    allow_future = False
    context_object_name = "posts"


class PostYearArchive(CategoryDatesMixin, ArchiveMixin, YearArchiveView):
    """Archive of posts for a given year."""

    make_object_list = True


class PostYearMonthArchive(CategoryDatesMixin, ArchiveMixin, MonthArchiveView):
    """Archive of posts for a given month within a year."""
# Create, delete and update post views
# @group_required('Editors')
class AddPost(
    CategoryDatesMixin, PermissionRequiredMixin, LoginRequiredMixin, CreateView
):
    """Create a new post; requires login and the posts.add_post permission."""

    form_class = AddPostForm
    permission_required = "posts.add_post"
    template_name = "posts/post_form.html"

    def form_valid(self, form):
        """Stamp author and timestamp before the parent view saves the form."""
        # NOTE(review): commit=False result is discarded — presumably only
        # used to populate form.instance before the attributes below are set.
        form.save(commit=False)
        form.instance.author = self.request.user
        if form.instance.status in [Post.STATUS_PUBLISHED]:
            form.instance.published = timezone.now()
        else:
            form.instance.updated = timezone.now()
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        """Tell the shared template this is an 'Add', not an 'Update', form."""
        context = super().get_context_data(**kwargs)
        context["update"] = False
        return context
class PostDraftsList(
    CategoryDatesMixin, PermissionRequiredMixin, LoginRequiredMixin, ListView
):
    """The current user's unpublished drafts."""

    template_name = "posts/list_drafts.html"
    permission_required = "posts.add_post"
    context_object_name = "posts"

    def get_queryset(self):
        drafts = Post.objects.draft()
        return drafts.filter(author__username=self.request.user.username)
class DeletePost(
    CategoryDatesMixin, LoginRequiredMixin, UserPassesTestMixin, DeleteView
):
    """Delete a post; permitted only for its author."""

    model = Post
    success_url = reverse_lazy("posts:index")

    def test_func(self):
        """Allow deletion only when the requester authored the post."""
        post_author = self.get_object().author.username
        return post_author == self.request.user.username
class UpdatePost(
    CategoryDatesMixin, LoginRequiredMixin, UserPassesTestMixin, UpdateView
):
    """Edit a post; permitted only for its author."""

    model = Post
    form_class = AddPostForm

    def test_func(self):
        """Allow editing only when the requester authored the post."""
        post_author = self.get_object().author.username
        return post_author == self.request.user.username

    def get_context_data(self, **kwargs):
        """Tell the shared template this is an 'Update', not an 'Add', form."""
        context = super().get_context_data(**kwargs)
        context["update"] = True
        return context
class SearchPosts(CategoryDatesMixin, ListView):
    """Free-text search over category, author, title and body text."""

    context_object_name = "posts"
    template_name = "posts/post_search.html"
    paginate_by = 5
    ordering = ("-published",)

    def get_queryset(self):
        query = self.request.GET.get("q", None)
        if not query:
            return []
        lookups = (
            Q(category__name__icontains=query)
            | Q(author__username__icontains=query)
            | Q(title__icontains=query)
            | Q(content__icontains=query)
        )
        return Post.objects.filter(lookups).distinct()
| 29.185606 | 94 | 0.662297 | 850 | 7,705 | 5.849412 | 0.202353 | 0.027152 | 0.033789 | 0.030973 | 0.431215 | 0.348753 | 0.31074 | 0.266693 | 0.24638 | 0.219831 | 0 | 0.001697 | 0.235042 | 7,705 | 263 | 95 | 29.296578 | 0.841873 | 0.136924 | 0 | 0.421687 | 0 | 0 | 0.064925 | 0.021642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096386 | false | 0.024096 | 0.096386 | 0.006024 | 0.620482 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
68cbf12bc70184cee6d43fa3ffcdca46bedc9617 | 9,137 | py | Python | python/snippet_utils.py | emory-irlab/Mouse2Gaze | 3280c905d76fb85b8a7b9803ec56a719730392d7 | [
"MIT"
] | null | null | null | python/snippet_utils.py | emory-irlab/Mouse2Gaze | 3280c905d76fb85b8a7b9803ec56a719730392d7 | [
"MIT"
] | null | null | null | python/snippet_utils.py | emory-irlab/Mouse2Gaze | 3280c905d76fb85b8a7b9803ec56a719730392d7 | [
"MIT"
] | null | null | null | # -- Writtenb by dsavenk
import psycopg2
from os import path
import re
import urllib2
#import emu_load_snippets
from BeautifulSoup import BeautifulSoup
#from html_resource_save import resource_extractor
import urllib
import httplib
import socket
#prefix = emu_load_snippets.prefix
# Cache-path prefix for snippet landing pages (hard-coded replacement for
# the disabled emu_load_snippets.prefix import above).
prefix = 'dg21'

# Global socket timeout so page/snippet fetches cannot hang indefinitely.
socket.setdefaulttimeout(5)

# Identifiers for the supported search engines.
SE_GOOGLE = "google"
SE_BING = "bing"
SE_YAHOO = "yahoo"
class SearchResult(object):
    """A single organic result parsed from a search engine result page."""

    def __init__(self, url="", title="", snip_body="", snip_html="",
                 res_pos=-1, res_id="", title_html=""):
        self.url = url              # landing-page URL
        self.title = title          # plain-text title
        self.snip_body = snip_body  # snippet with markup stripped
        self.snip_html = snip_html  # raw snippet HTML
        self.res_pos = res_pos      # 1-based rank on the page, -1 if unknown
        self.res_id = res_id        # DOM id of the result element
        self.title_html = title_html

    def set_url(self, url):
        """Store *url* after percent-decoding it.

        BUG FIX: this method was previously named ``url``, which collided
        with the instance attribute assigned in __init__ and was therefore
        never callable on an instance.
        """
        self.url = urllib.unquote(url)
def get_search_engine_name(url):
    """Identify which search engine a SERP URL belongs to, or None."""
    markers = (
        ("google.com/search", SE_GOOGLE),
        ("bing.com/search", SE_BING),
        ("yahoo.com/search", SE_YAHOO),
    )
    for marker, engine in markers:
        if marker in url:
            return engine
    return None
# disabled right now
def util_load_serps(dbhost, dbport, dbname, table_name, documents_root, user, password):
    """Replay cached SERP pages recorded in the DB and parse their results.

    Reads 'contentCache' events from *table_name*, loads each cached HTML
    file under *documents_root*, and counts unique parsed result URLs.
    Returns a tuple ``(unique_result_count, missing_cache_files)``.
    """
    connection = psycopg2.connect(host=dbhost, port=dbport, user=user,
                                  password=password, database=dbname)
    cursor = connection.cursor()
    select_serps = """SELECT query, url, content_id, content_path FROM %s
    WHERE event_name = 'contentCache'
    """ % table_name
    cursor.execute(select_serps)
    rec = cursor.fetchone()
    not_found = []
    count = 0
    added = set([])
    while rec:
        search_engine = get_search_engine_name(rec[1])
        if search_engine:
            file_path = documents_root + "/".join([rec[3].strip(), rec[2].strip()]) + ".html"
            if path.exists(file_path):
                inp = open(file_path, "r")
                page_html = inp.read()
                inp.close()
                (results, results_cnt) = parse_serp(search_engine, page_html)
                for result in results:
                    if result.url not in added:
                        url = result.url
                        # BUG FIX: was `url.startwith` (AttributeError) and
                        # `url.strip("/http/")`, which strips a *character
                        # set* from both ends and mangled domains starting
                        # with h/t/p. Remove the literal prefix instead.
                        if url.startswith("/http/"):
                            url = "http://" + url[len("/http/"):]
                        #jsnipobj = JudgementObject(url = url, title = result.title, snippet_body = result.snip_body, snippet_html = result.snip_html)
                        #jdocobj = JudgementObject(url = url, title = None, snippet_body = None, snippet_html = None)
                        #jsnipobj.save()
                        #jdocobj.save()
                        added.add(result.url)
                        count += 1
            else:
                not_found.append(file_path)
        rec = cursor.fetchone()
    connection.close()
    return (count, not_found)
def parse_serp(search_engine, html):
    """Parse raw SERP HTML with the engine-specific parser.

    Returns ``(results, result_count)`` or None for an unknown engine.
    """
    html_soup = BeautifulSoup(html)
    parsers = {
        SE_GOOGLE: parse_google_serp,
        SE_YAHOO: parse_yahoo_serp,
        SE_BING: parse_bing_serp,
    }
    parser = parsers.get(search_engine)
    return parser(html_soup) if parser is not None else None
def parse_google_serp(html_soup):
    """Extract organic results and the total hit count from a Google SERP.

    Returns (list_of_SearchResult, result_count_or_None).
    """
    div_result_cnt = html_soup.find('div', id='resultStats')
    result_cnt = None
    if div_result_cnt:
        # "About 1,234,567 results" -> 1234567
        m = re.search(r'(\d+) result', div_result_cnt.text.replace(',', ''))
        if m: result_cnt = int(m.group(1))
    resdiv = html_soup.find('div', id='ires')
    if resdiv is None:
        return ([], 0)
    reslist = resdiv.findAll('li', { 'class': re.compile(r'\bg\b') })
    snippet_num = 0
    search_results = []
    for res in reslist:
        snippet_num += 1
        snippet_html = str(res)
        # Strip tags, then collapse whitespace to get plain snippet text.
        snippet_text = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', snippet_html)).strip()
        res_h3 = res.find('h3')
        if res_h3 is None:
            # Result without a title block: keep it with empty url/title.
            snippet_href = ''
            snippet_title = ''
            search_results.append(SearchResult(snippet_href, snippet_title, snippet_text, snippet_html, snippet_num, res['id']))
            continue
        res_l = res_h3.find('a')
        if res_l is not None:
            snippet_href = res_l.get('href')
            snippet_title = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', str(res_l))).strip()
        else:
            snippet_href = ''
            snippet_title = ''
        search_results.append(SearchResult(snippet_href, snippet_title, snippet_text, snippet_html, snippet_num, res['id'], res_h3))
        #try: snippet_cached_href = [a.get('href') for a in res.findAll('a') if a.text == 'Cached'][0]
        #except: snippet_cached_href = None
    return (search_results, result_cnt)
def parse_yahoo_serp(html_soup):
    """Extract organic results and the total hit count from a Yahoo SERP.

    Returns (list_of_SearchResult, result_count_or_None).
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; the unquote step is assumed to apply only to redirect URLs.
    """
    div_result_cnt = html_soup.find('strong', id='resultCount')
    result_cnt = None
    if div_result_cnt:
        result_cnt = int(div_result_cnt.text.replace(',', ''))
    resdiv = html_soup.find('div', id='web')
    reslist = resdiv.findAll('div', {'class': re.compile(r'\bres\b')} )
    snippet_num = 0
    search_results = []
    for res in reslist:
        snippet_num += 1
        snippet_html = str(res)
        # Strip tags, then collapse whitespace to get plain snippet text.
        snippet_text = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', snippet_html)).strip()
        res_l = res.find('a', { 'class': 'yschttl spt' } )
        snippet_href = res_l.get('href')
        if "**http" in snippet_href:
            # Yahoo redirect link: the real target follows the '**' marker.
            snippet_href = re.sub(r'.*\*\*', '', snippet_href)
            snippet_href = urllib.unquote(snippet_href)
        snippet_title = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', str(res_l))).strip()
        search_results.append(SearchResult(snippet_href, snippet_title, snippet_text, snippet_html, snippet_num, res['id']))
    return (search_results, result_cnt)
"""
def fetch_url(page_url, base_dir, filename):
success = False
try :
# set game cookie to pass the authentication
opener = urllib2.build_opener()
opener.addheaders =[('User-agent', 'Mozilla/5.0')]
opener.addheaders.append(('Cookie', 'hitid=' + emu_load_snippets.secret_cookie))
if page_url[0] != '/':
page_url = '/' + page_url
#f = opener.open('http://ir-ub.mathcs.emory.edu:8100'+page_url)
f = opener.open(page_url.replace('/http/','http://'))
html = f.read()
o = open(base_dir + "/" + filename, 'w+')
o.write(html)
o.close()
f.close();
success = True
except Exception, err:
print 'page downloader ERROR: %s \t %s \t %s \n' % (page_url.replace('/http/','http://'), filename, str(err))
try:
# fetch resources if any
extractor = resource_extractor(base_dir + "/")
extractor.extract_html_resources(page_url.replace('/http/','http://'), base_dir + "/" + filename)
except Exception, err:
print 'extractor : ERROR: %s\n' % str(err)
return success
def fetch_snippet_resources(base_dir, snip_id, snip_html):
soup = BeautifulSoup(snip_html)
res_cnt = 0
success = False
for tag in soup.find('h3'):
res_filename = '%s_%s.html' % (snip_id, res_cnt)
#if 'webcache.googleusercontent.com' in tag['href'] or len(tag['href']) < 2 or 'http://ir-ub.mathcs.emory.edu/' in tag['href']:
# continue
print tag['href']
s = fetch_url(tag['href'], base_dir, res_filename)
s = True
if res_cnt == 0:
success = s
tag['href'] = 'http://ir-ub.mathcs.emory.edu/snippets/%s/landing_pages/'% prefix + res_filename
res_cnt +=1
return (soup.prettify(), success)
"""
def parse_bing_serp(html_soup):
    """Extract organic results and the total hit count from a Bing SERP.

    Returns (list_of_SearchResult, result_count_or_None).
    """
    div_result_cnt = html_soup.find('span', id='count')
    result_cnt = None
    if div_result_cnt:
        # "1,234,567 results" -> 1234567
        m = re.search(r'(\d+) result', div_result_cnt.text.replace(',', ''))
        if m: result_cnt = int(m.group(1))
    resdiv = html_soup.find('div', id='results')
    reslist = resdiv.findAll('li', { 'class': 'sa_wr'} )
    snippet_num = 0
    search_results = []
    for res in reslist:
        snippet_num += 1
        snippet_html = str(res)
        # Strip tags, then collapse whitespace to get plain snippet text.
        snippet_text = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', snippet_html)).strip()
        res_h3 = res.find('h3')
        res_l = res_h3.find('a')
        snippet_href = res_l.get('href')
        snippet_title = re.sub(r'\s+', ' ', re.sub(r'<.*?>', ' ', str(res_l))).strip()
        search_results.append(SearchResult(snippet_href, snippet_title, snippet_text, snippet_html, snippet_num))
    return (search_results, result_cnt)
#to avoid incomplete reads
def patch_http_response_read(func):
    """Wrap HTTPResponse.read so an IncompleteRead returns the partial body
    instead of raising (the module monkey-patches httplib with this)."""
    def inner(*args):
        try:
            return func(*args)
        except httplib.IncompleteRead, e:  # Python 2 except syntax
            return e.partial
    return inner
httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read) | 39.214592 | 150 | 0.572398 | 1,138 | 9,137 | 4.372583 | 0.189807 | 0.032556 | 0.015675 | 0.032355 | 0.37701 | 0.309084 | 0.26869 | 0.251206 | 0.251206 | 0.229502 | 0 | 0.0071 | 0.290905 | 9,137 | 233 | 151 | 39.214592 | 0.76092 | 0.059757 | 0 | 0.331169 | 0 | 0 | 0.070255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.012987 | 0.051948 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68cd7fda8b73a02b4268932ba3d89d31da85143d | 3,749 | py | Python | utils/isa.py | skilkis/GENX | 049267ba7751013517d49939e4ce387484959500 | [
"MIT"
] | 2 | 2019-01-08T13:53:29.000Z | 2019-02-05T06:17:18.000Z | utils/isa.py | skilkis/GENX | 049267ba7751013517d49939e4ce387484959500 | [
"MIT"
] | null | null | null | utils/isa.py | skilkis/GENX | 049267ba7751013517d49939e4ce387484959500 | [
"MIT"
] | null | null | null | from constants import Constants
import numpy as np
# TODO finish implementing all regions of the atmosphere
class ISA(Constants):
    def __init__(self, altitude=0):
        """International Standard Atmosphere at a geo-potential altitude.

        :param altitude: Geo-potential altitude in SI meter [m], 0..84852
        :raises ValueError: if the altitude is outside the modelled range
        """
        # BUG FIX: the original isinstance(altitude, float) check rejected
        # integers — including the integer default of 0. Accept both.
        if isinstance(altitude, (int, float)) and 0. <= altitude <= 84852.:
            self.altitude = float(altitude)
        else:
            raise ValueError('Invalid altitude specified')

    @property
    def calculator(self):
        """Return (temperature, pressure, density) at ``self.altitude``.

        Only the troposphere (0-11 km) and the 11-25 km layer are
        implemented; higher layers yield (None, None, None).
        """
        h, R = self.altitude, self.gas_constant
        T0, P0, rho0 = self.temperature_sl, self.pressure_sl, self.rho_sl
        if h == 0:
            Talt, Palt, rhoalt = T0, P0, rho0
        elif 0 < h < 11000.:
            a = -6.5e-3  # tropospheric lapse rate [K/m]
            Talt = T0 + (a * h)
            # BUG FIX: the original used '^' (bitwise XOR, a TypeError on
            # floats) where exponentiation '**' was intended.
            Palt = P0 * (Talt / T0) ** (-(self.g / (a * R)))
            rhoalt = rho0 * ((Talt / T0) ** (-((self.g / (a * R)) + 1)))
        elif 11000 <= h < 25000:
            a = -6.5e-3
            Talt = 216.66  # isothermal layer temperature [K]
            Palt = P0 * (Talt / T0) ** (-(self.g / (a * R)))
            # NOTE(review): exponent divides by T0 as in the original code,
            # but the MATLAB reference in the comments below divides by the
            # layer temperature (216.66) — confirm which is intended.
            rhoalt = 0.36480 * (np.exp(-1 * ((self.g * (h - 11000.)) / (R * T0))))
        else:
            Talt = None
            Palt = None
            rhoalt = None
        return Talt, Palt, rhoalt
# function [T,Palt,rhoalt,a]=ISA(h)
# global Econst
# %Calculates the Temperature [K] using International Standard Atmosphere
# if(h>=0)&&(h<=11000);
# T=Econst.Temp0+(Econst.lambda*h);
# Palt=Econst.P0*(T/Econst.Temp0)^(-(Econst.g/(Econst.lambda*Econst.R)));
# rhoalt=Econst.rho0*((T/Econst.Temp0)^(-((Econst.g/(Econst.lambda*Econst.R))+1)));
# elseif(h>11000)&&(h<=25000);
# T=216.66;
# Palt=22700*((exp(1))^(-((Econst.g*(h-11000))/(Econst.R*T))));
# rhoalt=0.36480*((exp(1))^(-((Econst.g*(h-11000))/(Econst.R*T))));
# elseif(h>25000)&&(h<=47000);
# T=216.66+(1*((h-20000)/1000));
# Palt=5474.9*((216.65+(.001*(h-20000)))/216.65)^(-(Econst.g/(.001*Econst.R)));
# rhoalt=0.088035*((216.65+(.001*(h-20000)))/216.65)^(-((Econst.g/(.001*Econst.R))-1));
# elseif(h>32000)&&(h<=47000);
# T=228.65+(2.8*((h-32000)/1000));
# Palt=868.02*((228.65+(0.0028*(h-32000)))/228.65)^(-(Econst.g/(0.0028*Econst.R)));
# rhoalt=0.013225*((228.65+(0.0028*(h-32000)))/228.65)^(-((Econst.g/(0.0028*Econst.R))-1));
# elseif(h>47000)&&(h<=53000);
# T=270.65;
# Palt=110.91*((exp(1))^(-((Econst.g*(h-47000))/(Econst.R*270.65))));
# rhoalt=0.001428*((exp(1))^(-((Econst.g*(h-47000))/(Econst.R*270.65))));
# elseif(h>53000)&&(h<=79000);
# T=270.65+((-2.8)*((h-51000)/1000));
# Palt=66.939*((270.65+(-0.0028*(h-51000)))/270.65)^(-(Econst.g/(-0.0028*Econst.R)));
# rhoalt=0.000862*((270.65+(-0.0028*(h-51000)))/270.65)^(-((Econst.g/(-0.0028*Econst.R))-1));
# elseif(h>79000)&&(h<=90000);
# T=214.65+((-2.0)*((h-71000)/1000));
# Palt=3.9564*((214.65+(-0.002*(h-71000)))/214.65)^(-(Econst.g/(-0.002*Econst.R)));
# rhoalt=0.000064*((214.65+(-0.002*(h-71000)))/214.65)^(-((Econst.g/(-0.002*Econst.R))-1));
# end
# if(h<0)||(h>84852);
# disp('International Standard Atmosphere Calculations cannot be used for values above 84,852m')
# end
# if(h>=0)&&(h<=84852);
# a=sqrt(1.4*Econst.R*T);
# %FL=ceil(((h*1250)/381)/100);
# %disp(['Temperature at Flight Level ' num2str(FL) ' = ' num2str(T) 'K' ' = ' num2str(T-273.15) 'C'])
# %disp(['Pressure at Flight Level ' num2str(FL) ' = ' num2str(Palt/1000) 'kPa'])
# %disp(['Density at Flight Level ' num2str(FL) ' = ' num2str(rhoalt) ' [kg/m3]'])
# %disp(['Speed of Sound at Flight Level ' num2str(FL) ' = ' num2str(a) ' [m/s]'])
# end
# end
if __name__ == '__main__':
    obj = ISA(11000.)
    print(obj.altitude)
    # BUG FIX: ISA defines no `temperature` attribute; the property that
    # exposes (temperature, pressure, density) is `calculator`.
    print(obj.calculator)
| 42.123596 | 113 | 0.558816 | 572 | 3,749 | 3.634615 | 0.262238 | 0.050505 | 0.034632 | 0.02886 | 0.366041 | 0.349687 | 0.275132 | 0.275132 | 0.275132 | 0.183742 | 0 | 0.183043 | 0.188317 | 3,749 | 88 | 114 | 42.602273 | 0.500164 | 0.657509 | 0 | 0.125 | 0 | 0 | 0.027961 | 0 | 0 | 0 | 0 | 0.011364 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68d4aa65d3bfe628d460fc7812a7481a60635970 | 7,768 | py | Python | django_cbtools/sync_gateway.py | smarttradeapp/django_couchbase | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 6 | 2016-06-23T08:21:43.000Z | 2018-07-19T09:42:32.000Z | django_cbtools/sync_gateway.py | smarttradeapp/django_cbtools | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 26 | 2015-10-17T08:59:36.000Z | 2021-06-10T17:48:41.000Z | django_cbtools/sync_gateway.py | smarttradeapp/django_couchbase | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 8 | 2015-11-28T13:47:19.000Z | 2020-12-15T12:58:00.000Z | import json
import logging
import requests
from requests.auth import HTTPBasicAuth
from django.conf import settings
logger = logging.getLogger(__name__)
class SyncGatewayException(Exception):
    """Base error for Sync Gateway REST operations."""


class SyncGatewayConflict(SyncGatewayException):
    """Raised on an HTTP 409 document-revision conflict."""
class SyncGateway(object):
    """Thin stateless wrapper around the Couchbase Sync Gateway REST APIs.

    Admin operations use ``settings.SYNC_GATEWAY_ADMIN_URL``; document
    operations use ``settings.SYNC_GATEWAY_URL`` with HTTP basic auth.
    NOTE(review): every request passes ``verify=False``, disabling TLS
    certificate validation — confirm this is intentional for deployment.
    """

    @staticmethod
    def put_user(username, email=None, password=None, admin_channels=None, disabled=False):
        """Create or update a Sync Gateway user; returns True on 200/201.

        The public channel is always appended to ``admin_channels``.
        :raises SyncGatewayException: on any other status code.
        """
        from .models import CHANNEL_PUBLIC
        url = '%s/%s/_user/%s' % (settings.SYNC_GATEWAY_ADMIN_URL,
                                  settings.SYNC_GATEWAY_BUCKET,
                                  username)
        if admin_channels is None:
            admin_channels = []
        # Every user can always read the public channel.
        if CHANNEL_PUBLIC not in admin_channels:
            admin_channels.append(CHANNEL_PUBLIC)
        dict_payload = dict(admin_channels=admin_channels,
                            disabled=disabled)
        if email is not None:
            dict_payload['email'] = email
        if password is not None:
            dict_payload['password'] = password
        json_payload = json.dumps(dict_payload)
        response = requests.put(url, data=json_payload, verify=False)
        if response.status_code not in [200, 201]:
            raise SyncGatewayException("Can not create / update sg-user, response code: %d" % response.status_code)
        return True

    @staticmethod
    def get_user(username):
        """Fetch a user record as a dict from the admin API."""
        url = '%s/%s/_user/%s' % (settings.SYNC_GATEWAY_ADMIN_URL,
                                  settings.SYNC_GATEWAY_BUCKET,
                                  username)
        response = requests.get(url, verify=False)
        if response.status_code not in [200, 201]:
            raise SyncGatewayException("Can not get user (%s), response code: %d" % (username, response.status_code))
        return response.json()

    @staticmethod
    def get_users():
        """Fetch the bucket's user list from the admin API."""
        url = '%s/%s/_user/' % (settings.SYNC_GATEWAY_ADMIN_URL,
                                settings.SYNC_GATEWAY_BUCKET)
        response = requests.get(url, verify=False)
        if response.status_code not in [200, 201]:
            raise SyncGatewayException("Can not get users, response code: %d" % response.status_code)
        return response.json()

    @staticmethod
    def change_username(old_username, new_username, password):
        """Rename a user by re-creating the account under the new name.

        Copies the admin channels, then deletes the old account. Returns
        False when old and new names are equal, True otherwise.
        """
        if old_username == new_username:
            return False
        json_payload = SyncGateway.get_user(old_username)
        SyncGateway.put_user(username=new_username,
                             email=new_username,
                             password=password,
                             admin_channels=json_payload['admin_channels'],
                             disabled=False)
        SyncGateway.delete_user(old_username)
        return True

    @staticmethod
    def create_session(username, ttl=None):
        """Open a sync session for *username*; returns the raw response.

        :param ttl: optional session lifetime, passed through to the API.
        :raises SyncGatewayException: on any non-200 status code.
        """
        url = '%s/%s/_session' % (settings.SYNC_GATEWAY_ADMIN_URL,
                                  settings.SYNC_GATEWAY_BUCKET)
        dict_payload = dict(name=username)
        if ttl is not None:
            dict_payload['ttl'] = ttl
        json_payload = json.dumps(dict_payload)
        response = requests.post(url, data=json_payload, verify=False)
        if response.status_code != 200:
            message = "Can not create session for sg-user (%s), response code: %d" % (username, response.status_code)
            raise SyncGatewayException(message)
        return response

    @staticmethod
    def delete_user(username):
        """Delete a user via the admin API; returns True on success."""
        url = '%s/%s/_user/%s' % (settings.SYNC_GATEWAY_ADMIN_URL,
                                  settings.SYNC_GATEWAY_BUCKET,
                                  username)
        response = requests.delete(url, verify=False)
        if response.status_code not in [200, 201]:
            raise SyncGatewayException("Can not delete user, response code: %d" % response.status_code)
        return True

    @staticmethod
    def append_channels(username, channels):
        """Grant the user access to *channels* (union with existing ones)."""
        json_payload = SyncGateway.get_user(username)
        new_channels = set(json_payload['admin_channels'])
        new_channels.update(channels)
        return SyncGateway.put_user(username=username, admin_channels=list(new_channels))

    @staticmethod
    def remove_channels(username, channels):
        """Revoke the user's access to *channels*."""
        json_payload = SyncGateway.get_user(username)
        new_channels = set(json_payload['admin_channels'])
        new_channels.difference_update(channels)
        return SyncGateway.put_user(username=username, admin_channels=list(new_channels))

    # def get_user(self, username):
    #     url = '%s/%s/_user/%s' % (settings.SYNC_GATEWAY_ADMIN_URL,
    #                               settings.SYNC_GATEWAY_BUCKET,
    #                               username)
    #     return requests.put(url)

    @staticmethod
    def put_admin_user():
        """Provision the all-channels admin account from Django settings."""
        username = settings.SYNC_GATEWAY_USER
        password = settings.SYNC_GATEWAY_PASSWORD
        SyncGateway.put_user(username, "smadmin@mail.com", password, ["*"])

    @staticmethod
    def put_guest_user():
        """Provision the guest account with access to the public channel only."""
        from .models import CHANNEL_PUBLIC
        username = settings.SYNC_GATEWAY_GUEST_USER
        password = settings.SYNC_GATEWAY_GUEST_PASSWORD
        SyncGateway.put_user(username, "smguest@mail.com", password, [CHANNEL_PUBLIC])

    @staticmethod
    def save_json(uid, data_dict):
        """Save dictionary *data_dict* as document *uid*; returns the response."""
        json_payload = json.dumps(data_dict)
        url = '%s/%s/%s' % (settings.SYNC_GATEWAY_URL,
                            settings.SYNC_GATEWAY_BUCKET,
                            uid)
        return requests.put(url, data=json_payload, auth=SyncGateway.get_auth(), verify=False)

    @staticmethod
    def save_document(document):
        """Persist a model document and update ``document.rev`` in place.

        :raises SyncGatewayConflict: on a 409 revision conflict.
        :raises SyncGatewayException: on any other non-2xx response.
        """
        data_dict = document.to_dict()
        if hasattr(document, 'rev') and document.rev:
            data_dict['_rev'] = document.rev
        response = SyncGateway.save_json(document.get_uid(), data_dict)
        if response.status_code not in [200, 201]:
            rev = document.rev if hasattr(document, 'rev') else 'n/a'
            logger.error('error on doc saving, status {}, revision {}, uid {}'.format(
                response.status_code, rev, document.get_uid()))
            msg = "Can not save document %s, response code: %d" % (document, response.status_code)
            if response.status_code == 409:
                raise SyncGatewayConflict(msg)
            raise SyncGatewayException(msg)
        d = response.json()
        # Sync Gateway returns the new revision id; keep the document current.
        document.rev = d['rev']

    @staticmethod
    def delete_document(uid, rev):
        """Delete document *uid* at revision *rev*."""
        url = '%s/%s/%s?rev=%s' % (settings.SYNC_GATEWAY_URL,
                                   settings.SYNC_GATEWAY_BUCKET,
                                   uid, rev)
        response = requests.delete(url, auth=SyncGateway.get_auth(), verify=False)
        if response.status_code not in [200, 201]:
            raise SyncGatewayException("Can not delete document %s, response code: %d" % (uid, response.status_code))

    @staticmethod
    def all_docs(uids, really_all=False):
        """Bulk-fetch documents by uid; pass really_all=True to fetch everything.

        Returns the parsed _all_docs response; short-circuits to an empty
        result set when *uids* is empty and really_all is False.
        """
        if not uids and not really_all:
            return {"rows": []}
        url = '%s/%s/_all_docs?include_docs=true' % (settings.SYNC_GATEWAY_URL,
                                                     settings.SYNC_GATEWAY_BUCKET)
        json_data = json.dumps(dict(keys=uids)) if uids else None
        response = requests.post(url, data=json_data,
                                 auth=SyncGateway.get_auth(),
                                 verify=False)
        # print response.json()
        return response.json()

    @staticmethod
    def get_auth():
        """Basic-auth credentials for the non-admin Sync Gateway port."""
        return HTTPBasicAuth(settings.SYNC_GATEWAY_USER,
                             settings.SYNC_GATEWAY_PASSWORD)
| 35.962963 | 117 | 0.607364 | 842 | 7,768 | 5.387173 | 0.135392 | 0.063492 | 0.100529 | 0.043651 | 0.530644 | 0.459877 | 0.417108 | 0.414242 | 0.357804 | 0.315476 | 0 | 0.007742 | 0.301622 | 7,768 | 215 | 118 | 36.130233 | 0.828387 | 0.03862 | 0 | 0.361842 | 0 | 0 | 0.080118 | 0.004436 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098684 | false | 0.078947 | 0.046053 | 0.006579 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
68d73c38a4b75247e3508965edda75784278bb36 | 727 | py | Python | app/services/interface.py | izconcept/Turnt | 28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3 | [
"Apache-2.0"
] | 4 | 2018-01-29T05:51:32.000Z | 2018-02-08T05:18:47.000Z | app/services/interface.py | izconcept/Turnt | 28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3 | [
"Apache-2.0"
] | 3 | 2018-01-30T21:41:09.000Z | 2018-01-31T18:20:01.000Z | app/services/interface.py | izconcept/Turnt | 28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3 | [
"Apache-2.0"
] | 1 | 2019-03-29T19:32:28.000Z | 2019-03-29T19:32:28.000Z | import time
import pyautogui
def typer(command):
    """Type `command` via pyautogui, then a newline to submit it."""
    for keystrokes in (command, '\n'):
        pyautogui.typewrite(keystrokes)
def open_valve(axis, step):
    """Open the valve: relative rapid move (G91 G0) of `axis` by -`step`."""
    gcode = "G91G0" + axis + "-" + str(step)
    typer(gcode)
def close_valve(axis, step):
    """Close the valve: relative rapid move (G91 G0) of `axis` by +`step`."""
    gcode = "G91G0" + axis + str(step)
    typer(gcode)
def give_me_some_white_bottle(duration):
    """Pour from the white bottle (X axis) for `duration` seconds; 0 skips."""
    if duration != 0:
        open_valve("X", 3)
        time.sleep(duration)
        close_valve("X", 3)
def give_me_some_green_bottle(duration):
    """Pour from the green bottle (Y axis) for `duration` seconds; 0 skips."""
    if duration != 0:
        # NOTE(review): Y calls close before open — wiring appears inverted
        # relative to the X axis; confirm against the hardware.
        close_valve("Y", 3)
        time.sleep(duration)
        open_valve("Y", 3)
def give_me_some_rear_bottle(duration):
    """Pour from the rear bottle (Z axis) for `duration` seconds; 0 skips."""
    if duration != 0:
        # NOTE(review): Z, like Y, closes first then opens — confirm wiring.
        close_valve("Z", 3)
        time.sleep(duration)
        open_valve("Z", 3)
time.sleep(5)
| 17.309524 | 43 | 0.636864 | 102 | 727 | 4.343137 | 0.313725 | 0.081264 | 0.090293 | 0.088036 | 0.634312 | 0.544018 | 0.352144 | 0.352144 | 0.167043 | 0 | 0 | 0.028419 | 0.225585 | 727 | 41 | 44 | 17.731707 | 0.758437 | 0 | 0 | 0.321429 | 0 | 0 | 0.026171 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.071429 | 0 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68da264f2cfdcd5707af2f1031da22a94467e12b | 2,501 | py | Python | puzzler/puzzles/polyominoes45.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polyominoes45.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polyominoes45.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)
"""
Concrete pentomino & tetromino (polyominoes of order 4 & 5) puzzles.
"""
from puzzler.puzzles.polyominoes import Polyominoes45, OneSidedPolyominoes45
class Polyominoes45_8x10(Polyominoes45):
    """many solutions"""

    height = 8
    width = 10

    def customize_piece_data(self):
        # Restrict the P pentomino to break board symmetry (fewer duplicate solutions).
        self.piece_data['P'][-1]['rotations'] = (0, 1)
        self.piece_data['P'][-1]['flips'] = None
class Polyominoes45_5x16(Polyominoes45_8x10):
    """many solutions"""

    # Same piece set and symmetry restriction as 8x10, different rectangle.
    width = 16
    height = 5
class Polyominoes45_4x20(Polyominoes45_8x10):
    """many solutions"""

    # Same piece set and symmetry restriction as 8x10, different rectangle.
    width = 20
    height = 4
class Polyominoes45Square(Polyominoes45):
    """many solutions"""

    width = 9
    height = 9
    hole = {(4, 4)}  # single unit hole in the center

    def coordinates(self):
        return sorted(set(self.coordinates_rectangle(9, 9)) - self.hole)

    def customize_piece_data(self):
        # Fix the P pentomino entirely to break rotational/mirror symmetry.
        self.piece_data['P'][-1]['flips'] = None
        self.piece_data['P'][-1]['rotations'] = None
class Polyominoes45Diamond(Polyominoes45):
    """7,302 solutions"""

    width = 13
    height = 13
    # Plus-shaped 5-cell hole at the center of the diamond.
    holes = {(5, 6), (6, 5), (6, 6), (6, 7), (7, 6)}

    def coordinates(self):
        return sorted(set(self.coordinates_diamond(7)) - self.holes)

    def customize_piece_data(self):
        # Fix the P pentomino entirely to break rotational/mirror symmetry.
        self.piece_data['P'][-1]['flips'] = None
        self.piece_data['P'][-1]['rotations'] = None
class Polyominoes45AztecDiamond(Polyominoes45):
    """11,162 solutions"""

    width = 12
    height = 12

    def coordinates(self):
        # Aztec diamond of order 6 minus a central 2x2 hole.
        cells = set(self.coordinates_aztec_diamond(6))
        cells -= set(self.coordinates_rectangle(2, 2, offset=(5, 5)))
        return sorted(cells)

    def customize_piece_data(self):
        # Fix the P pentomino entirely to break rotational/mirror symmetry.
        self.piece_data['P'][-1]['flips'] = None
        self.piece_data['P'][-1]['rotations'] = None
class Polyominoes45X_x1(Polyominoes45):
    """0 solutions"""

    height = 14
    width = 14
    # Central 4x4 square removed from the X-shaped union of two bars.
    holes = set(Polyominoes45.coordinates_rectangle(4, 4, offset=(5, 5)))

    def coordinates(self):
        bars = set(
            list(self.coordinates_rectangle(14, 4, offset=(0, 5)))
            + list(self.coordinates_rectangle(4, 14, offset=(5, 0))))
        for coord in sorted(bars - self.holes):
            yield coord
| 21.938596 | 76 | 0.612555 | 305 | 2,501 | 4.911475 | 0.301639 | 0.072096 | 0.069426 | 0.074766 | 0.397864 | 0.333111 | 0.317089 | 0.226969 | 0.226969 | 0.226969 | 0 | 0.072813 | 0.236705 | 2,501 | 113 | 77 | 22.132743 | 0.711891 | 0.136745 | 0 | 0.327273 | 0 | 0 | 0.030317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145455 | false | 0 | 0.018182 | 0 | 0.654545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
68df548e4248d1c3e8da7e5ae61685570defbde2 | 576 | py | Python | space_manager/branches/migrations/0011_branch_minimap_img.py | yoojat/Space-Manager | dd482eb8a3ac8b5d4d06c63e5a5d9ccaeb3ce7b9 | [
"MIT"
] | null | null | null | space_manager/branches/migrations/0011_branch_minimap_img.py | yoojat/Space-Manager | dd482eb8a3ac8b5d4d06c63e5a5d9ccaeb3ce7b9 | [
"MIT"
] | 1 | 2018-02-27T15:21:53.000Z | 2018-02-27T15:21:53.000Z | space_manager/branches/migrations/0011_branch_minimap_img.py | yoojat/Space-Manager | dd482eb8a3ac8b5d4d06c63e5a5d9ccaeb3ce7b9 | [
"MIT"
] | 1 | 2018-02-16T08:31:30.000Z | 2018-02-16T08:31:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-03 14:34
from __future__ import unicode_literals
from django.db import migrations, models
import space_manager.branches.models
class Migration(migrations.Migration):
    """Auto-generated migration: add the `minimap_img` image field to `branch`."""

    dependencies = [
        ('branches', '0010_branch_lounge_img_cabinet'),
    ]

    operations = [
        migrations.AddField(
            model_name='branch',
            name='minimap_img',
            # Nullable so existing branch rows remain valid without an image.
            field=models.ImageField(null=True, upload_to='', validators=[space_manager.branches.models.Branch.validate_image]),
        ),
    ]
| 26.181818 | 127 | 0.668403 | 66 | 576 | 5.606061 | 0.727273 | 0.064865 | 0.108108 | 0.140541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04646 | 0.215278 | 576 | 21 | 128 | 27.428571 | 0.772124 | 0.118056 | 0 | 0 | 1 | 0 | 0.108911 | 0.059406 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68e02c589e8f1a8db10b0ff06b4d124889110e3b | 4,726 | py | Python | Source/sobrevivente.py | wesferr/Zombicide | a4ebec16565cd5aa0bd02dfeb53b108b1daca70a | [
"MIT"
] | 1 | 2020-02-27T16:09:00.000Z | 2020-02-27T16:09:00.000Z | Source/sobrevivente.py | wesferr/Zombicide | a4ebec16565cd5aa0bd02dfeb53b108b1daca70a | [
"MIT"
] | null | null | null | Source/sobrevivente.py | wesferr/Zombicide | a4ebec16565cd5aa0bd02dfeb53b108b1daca70a | [
"MIT"
] | null | null | null | # Copyright (c) 2018 by Wesley Ferreira. All Rights Reserved.
from pygame import *
from pygame.locals import *
from spritesGame import *
from math import *
class Sobrevivente(object):
def __init__(self, area, nome, imgPlayer = None):
self.area = area
self.grid = self.area.grid
self.selecionado = False
self.image = image.load(imgPlayer)
self.nome = nome
"""
Metodos de acao sobre o objeto
"""
def setGridGeometry(self, gridx, gridy, gridtamcel):
self.gridx = gridx
self.gridy = gridy
self.tamcel = gridtamcel
self.x = self.gridx
self.y = self.gridy
self.w = self.tamcel
self.h = self.tamcel
def movement(self, i, j):
self.x = self.gridx + i * self.tamcel
self.y = self.gridy + j * self.tamcel
def get_surface(self):
return self.image
def mostraRetangulo(self, screen, color):
self.size = self.area.tamcells
cell = Surface((self.size,self.size), SRCALPHA, 32)
cell.fill(color)
screen.blit(cell, (self.x,self.y))
def show(self, screen, w = None, h = None):
rect = self.image.get_rect()
size = self.image.get_size()
if(w):
h = w * size[0] / size[1]
else:
w = h * size[0] / size[1]
self.mostraRetangulo(screen, (255,255,255,128))
if(self.selecionado):
self.mostraRetangulo(screen, (0,0,255,128))
self.mostraRetangulo(screen, (255,0,0,200-2*self.vida))
self.surface = transform.scale(self.image, (h,w))
screen.blit(self.surface, (self.x+(self.tamcel//2)-(self.w//3),self.y))
self.rect = rect.move(self.x,self.y)
def pos(self, x,y):
if(self.positionGrid == (x,y)):
return True
else:
return False
def get_rect(self):
return self.surface.get_rect().move(self.x, self.y)
def collidepoint(self, pos):
return Rect((self.x, self.y), (self.size,self.size)).collidepoint(pos)
def meleeHit(self, i1, j1, i2, j2, dano, chain = ""):
if(chain == "chain"):
self.area.chainsaw_song.play()
self.grid[i2][j2].vida -= dano
self.grid[i1][j1].ataques -= 1
self.grid[i1][j1].selecionado = False
def move(self, i, j, pos):
if( self.grid[i][j].selecionado ):
self.grid[i][j].selecionado = False
try:
click = self.findclick(pos)
dis = self.distance(i,j,click)
except: pass
try:
if(dis == 1):
if( isinstance(self.grid[click[0]][click[1]], Rect) ): self.swap(i, j, click[0], click[1])
except: pass
def swap(self, i1, j1, i2 , j2):
aux = self.grid[i2][j2]
self.grid[i1][j1].movimentos -= 1
self.grid[i2][j2] = self.grid[i1][j1]
self.grid[i1][j1] = aux
self.grid[i1][j1].x = self.area.geometry[0]+(i1*self.area.tamcells)
self.grid[i1][j1].y = self.area.geometry[1]+(j1*self.area.tamcells)
self.grid[i2][j2].movement(i2, j2)
def findclick(self, pos):
for i in range(len(self.grid)):
for j in range(len(self.grid[i])):
try:
if(self.grid[i][j].collidepoint(pos)): return (i,j)
except: pass
def distance(self, i1, j1, (i2 , j2)):
ax, ay, dx, dy = i1, j1, i2, j2
dis = 0
while(True):
if(ax == dx and ay == dy):
break
else:
if(ax > dx and ay > dy):
ax-=1
ay-=1
dis+=1
elif(ax > dx and ay < dy):
ax-=1
ay+=1
dis+=1
elif(ax < dx and ay > dy):
ax+=1
ay-=1
dis+=1
elif(ax > dx and ay < dy):
ax+=1
ay+=1
dis+=1
elif(ax > dx):
ax-=1
dis+=1
elif(ax<dx):
ax+=1
dis+=1
elif(ay > dy):
ay -=1
dis+=1
elif(ay< dy):
ay+=1
dis+=1
return dis
def knockback(self, i1,j1,(i2,j2), par):
disx = i2 - i1
disy = j2 - j1
if(disx != 0):
ni2 = i2 + int(par/disx)
else:
ni2 = i2
if(disy != 0):
nj2 = j2 + int(par/disy)
else:
nj2 = j2
self.swap(i2,j2,ni2,nj2)
| 30.490323 | 114 | 0.470588 | 609 | 4,726 | 3.635468 | 0.200328 | 0.065041 | 0.018067 | 0.03794 | 0.208672 | 0.117886 | 0.111111 | 0.092141 | 0.070461 | 0.070461 | 0 | 0.046957 | 0.391663 | 4,726 | 154 | 115 | 30.688312 | 0.72313 | 0.012484 | 0 | 0.251908 | 0 | 0 | 0.001082 | 0 | 0 | 0 | 0 | 0.006494 | 0 | 0 | null | null | 0.022901 | 0.030534 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68e217a5de06b8634e03ceb0c929ab67a9c87db3 | 406 | py | Python | cv/migrations/0003_auto_20200826_1142.py | Shuhao99/2020Bridging-Coursework | e1e4c2aee7f6c626729023392fdd8854f3a76903 | [
"MIT"
] | null | null | null | cv/migrations/0003_auto_20200826_1142.py | Shuhao99/2020Bridging-Coursework | e1e4c2aee7f6c626729023392fdd8854f3a76903 | [
"MIT"
] | null | null | null | cv/migrations/0003_auto_20200826_1142.py | Shuhao99/2020Bridging-Coursework | e1e4c2aee7f6c626729023392fdd8854f3a76903 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.15 on 2020-08-26 03:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine `Experience.experience_name` as a
    TextField with default 'Intern'."""

    dependencies = [
        ('cv', '0002_experience_experience_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='experience',
            name='experience_name',
            field=models.TextField(default='Intern'),
        ),
    ]
| 21.368421 | 53 | 0.6133 | 42 | 406 | 5.809524 | 0.714286 | 0.172131 | 0.147541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067797 | 0.273399 | 406 | 18 | 54 | 22.555556 | 0.759322 | 0.1133 | 0 | 0 | 1 | 0 | 0.178771 | 0.086592 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68e8ab2bfd54518c739c30012b012e93c5a898d4 | 427 | py | Python | otter/json_schema/__init__.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 20 | 2015-02-11T16:32:07.000Z | 2019-11-12T03:27:54.000Z | otter/json_schema/__init__.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 1,145 | 2015-01-01T00:00:47.000Z | 2022-02-11T03:40:39.000Z | otter/json_schema/__init__.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 29 | 2015-01-08T15:00:11.000Z | 2021-02-16T16:33:53.000Z | """
Draft 3 JSON schemas (http://tools.ietf.org/html/draft-zyp-json-schema-03)
of data that will be transmitted to and from otter.
"""
import functools
from jsonschema import Draft3Validator, validate, FormatChecker
# This is there since later modules need to add specific format validators to this.
format_checker = FormatChecker()
validate = functools.partial(validate, cls=Draft3Validator, format_checker=format_checker)
| 32.846154 | 90 | 0.798595 | 59 | 427 | 5.728814 | 0.711864 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013298 | 0.119438 | 427 | 12 | 91 | 35.583333 | 0.885638 | 0.489461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
68eb17fe8d008791a0fef854b9af4d27846c10b0 | 438 | py | Python | hirearefugee/userclass/migrations/0003_auto_20200812_1803.py | maximilianharr/hirearefugee | 307afb512af86128d5ce011f1779964dd71976fd | [
"Apache-2.0"
] | 1 | 2020-09-07T07:20:04.000Z | 2020-09-07T07:20:04.000Z | hirearefugee/userclass/migrations/0003_auto_20200812_1803.py | maximilianharr/hirearefugee | 307afb512af86128d5ce011f1779964dd71976fd | [
"Apache-2.0"
] | 2 | 2020-09-14T18:13:55.000Z | 2020-09-14T21:21:28.000Z | hirearefugee/userclass/migrations/0003_auto_20200812_1803.py | maximilianharr/hirearefugee | 307afb512af86128d5ce011f1779964dd71976fd | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1 on 2020-08-12 18:03
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename model `UserClass` to `UserDetails`."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('userclass', '0002_auto_20200812_1731'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='UserClass',
            new_name='UserDetails',
        ),
    ]
| 21.9 | 66 | 0.652968 | 47 | 438 | 5.914894 | 0.765957 | 0.071942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091463 | 0.251142 | 438 | 19 | 67 | 23.052632 | 0.756098 | 0.098174 | 0 | 0 | 1 | 0 | 0.132316 | 0.058524 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68f9361114cb2bfb34f389991f3680138e33eed4 | 1,438 | py | Python | solutions/day2.py | alanahanson/adventofcode2017 | b73e545a6cef356682228266f5344cd40303ee0c | [
"MIT"
] | null | null | null | solutions/day2.py | alanahanson/adventofcode2017 | b73e545a6cef356682228266f5344cd40303ee0c | [
"MIT"
] | null | null | null | solutions/day2.py | alanahanson/adventofcode2017 | b73e545a6cef356682228266f5344cd40303ee0c | [
"MIT"
] | null | null | null | from unittest import TestCase
def checksum(data, operator):
    """Parse `data` into rows of ints and return the sum of `operator(rows)`.

    `data` is newline-separated rows of whitespace-separated integers;
    `operator` maps the list of rows to an iterable of numbers.
    """
    rows = []
    for line in data.strip().split("\n"):
        rows.append([int(token) for token in line.strip().split()])
    return sum(operator(rows))
def difference(rows):
    """Return, for each row, the spread between its largest and smallest value."""
    spreads = []
    for row in rows:
        spreads.append(max(row) - min(row))
    return spreads
def quotients(rows):
    """Return the evenly-divisible quotient for each row."""
    return [find_quotient(row) for row in rows]

def find_quotient(row):
    """Find the one pair in `row` where one value evenly divides the other.

    Returns the quotient as an int (was a float via `/`), or None when no
    such pair exists. Works on a sorted copy so the caller's row is no
    longer mutated (the original sorted and popped `row` in place).
    """
    ordered = sorted(row)
    while len(ordered) > 1:
        num = ordered.pop()
        for n in ordered:
            if num % n == 0:
                return num // n  # exact by the modulo check; keep it an int
    return None
def main():
    """Read the day-2 puzzle input and print both part checksums."""
    with open('solutions/inputs/day2.txt') as handle:
        data = handle.read().strip()
    for op in (difference, quotients):
        print(checksum(data, op))
if __name__ == '__main__':
    main()  # solve both parts only when run as a script, not on import
class TestChecksum(TestCase):
    """Unit tests for checksum() with the difference and quotients operators."""

    def test_checksum_computes_difference_for_one_row(self):
        # Renamed locals from `input` — shadowing the builtin hides it
        # inside the method and trips linters.
        data = '5 1 9 5'
        data2 = '7 5 3'
        self.assertEqual(checksum(data, difference), 8)
        self.assertEqual(checksum(data2, difference), 4)

    def test_checksum_computes_and_sums_diff_for_all_rows(self):
        data = "5 1 9 5\n7 5 3\n2 4 6 8\n"
        self.assertEqual(checksum(data, difference), 18)

    def test_checksum2_computes_correctly(self):
        data = '5 9 2 8'
        self.assertEqual(checksum(data, quotients), 4)

    def test_checksum2_computes_and_sums_quotients_for_all_rows(self):
        data = "5 9 2 8\n9 4 7 3\n3 8 6 5\n"
        self.assertEqual(checksum(data, quotients), 9)
| 28.196078 | 90 | 0.639082 | 212 | 1,438 | 4.169811 | 0.339623 | 0.084842 | 0.130091 | 0.126697 | 0.294118 | 0.122172 | 0 | 0 | 0 | 0 | 0 | 0.04304 | 0.240612 | 1,438 | 50 | 91 | 28.76 | 0.766484 | 0 | 0 | 0 | 0 | 0 | 0.073765 | 0.017397 | 0 | 0 | 0 | 0 | 0.135135 | 1 | 0.243243 | false | 0 | 0.027027 | 0.054054 | 0.405405 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
68fdedab8714215232ad67d3eb905ec4469d59d7 | 745 | py | Python | olea/models/mango.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/models/mango.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/models/mango.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | __all__ = ['Mango']
from sqlalchemy_ import BaseModel, Column, ForeignKey, UniqueConstraint, relationship
from sqlalchemy_.types import JSONB, DateTime, Integer, String
class Mango(BaseModel):
    """Versioned content record attached to a Pit (MIME type, hash, timestamps)."""

    __tablename__ = 'mango'

    id = Column(String, primary_key=True)
    # Owning pit; deleting the pit cascades to its mangos.
    pit_id = Column(String, ForeignKey('pit.id', ondelete='CASCADE'))
    # Version number within the pit; unique per pit (see __table_args__).
    ver = Column(Integer, default=1)
    # Full MIME type (e.g. "audio/mpeg"); `mtype` exposes the major part.
    mime = Column(String)
    # Content hash, globally unique across mangos.
    sha1 = Column(String, unique=True)
    modified_at = Column(DateTime)
    timestamp = Column(DateTime)
    metainfo = Column(JSONB)

    pit = relationship('Pit', back_populates='mangos')

    # Enforce one row per (pit, version).
    __table_args__ = (UniqueConstraint('pit_id', 'ver', name='_mango_uc'), )

    @property
    def mtype(self):
        """Major MIME type: the part before '/' in `mime`."""
        return self.mime.split('/')[0]
| 28.653846 | 85 | 0.691275 | 85 | 745 | 5.8 | 0.564706 | 0.097363 | 0.056795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00491 | 0.179866 | 745 | 25 | 86 | 29.8 | 0.801964 | 0 | 0 | 0 | 0 | 0 | 0.068456 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0.055556 | 0.888889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
ec055eebc44bbfef95ec6bc6d02a5ba6d8be2c7b | 5,384 | py | Python | tabular_ml_toolkit/preprocessor.py | psmathur/tabular_ml_toolkit | 46bd88abc398aeb5c7024c5b24c7a7ea9b3b5fb1 | [
"Apache-2.0"
] | 1 | 2021-11-07T04:50:26.000Z | 2021-11-07T04:50:26.000Z | tabular_ml_toolkit/preprocessor.py | psmathur/tabular_ml_toolkit | 46bd88abc398aeb5c7024c5b24c7a7ea9b3b5fb1 | [
"Apache-2.0"
] | null | null | null | tabular_ml_toolkit/preprocessor.py | psmathur/tabular_ml_toolkit | 46bd88abc398aeb5c7024c5b24c7a7ea9b3b5fb1 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_preprocessor.ipynb (unless otherwise specified).
__all__ = ['PreProcessor']
# Cell
from .dataframeloader import *
from .logger import *
# Cell
# hide
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
import numpy as np
import pandas as pd
# Cell
class PreProcessor:
"""
Represent PreProcessor class
Attributes:
numerical_transformer: Numerical Columns Tranformer
categorical_transformer: Categorical Columns Transformer
preprocessor: Preprocessor for Columns Tranformer
"""
def __init__(self):
self.columns_transfomer = None
self.target_cols__encoder = None
self.target_cols_pl = None
self.cat_cols_pl = None
self.num_cols_pl = None
def __str__(self):
"""Returns human readable string reprsentation"""
attr_str = "numerical_transformer, categorical_transformer,columns_transfomer"
return "PreProcessor object with attributes:" + attr_str
def __repr__(self):
return self.__str__()
# PreProcessor Pipeline core methods
# Create preprocessing pipeline for numerical data
def create_num_cols_pp_pl(self, num_cols__imputer, num_cols__scaler):
self.num_cols_pl = Pipeline(steps=[('imputer', num_cols__imputer), ('scaler', num_cols__scaler)],
#memory="pipeline_cache_dir"
)
# Create Preprocessing pipeline for categorical data
def create_cat_cols_pp_pl(self, cat_cols__imputer, cat_cols__encoder):
self.cat_cols_pl = Pipeline(steps=[('imputer', cat_cols__imputer), ('encoder', cat_cols__encoder)],
#memory="pipeline_cache_dir"
)
# # Create Preprocessing pipeline for target cols
# def create_target_cols_pp_pl(self, target_cols__encoder):
# self.target_cols_pl = Pipeline(steps=[('encoder', target_cols__encoder)],
# #memory="pipeline_cache_dir"
# )
# Bundle preprocessing pipelines based upon types of columns
def preprocess_all_cols(self, dataframeloader, problem_type="regression",
num_cols__imputer=SimpleImputer(strategy='constant'),
num_cols__scaler=StandardScaler(),
cat_cols__imputer=SimpleImputer(strategy='constant'),
cat_cols__encoder=OneHotEncoder(handle_unknown='ignore'),
target_cols__encoder=LabelEncoder()):
#cat_cols__encoder=OrdinalEncoder(handle_unknown='use_encoded_value',
#unknown_value=np.nan)):
#TODO: REALLY NOT HAPPY WITH THIS LENGTH BASED REPEATED FLOW CHECK!
tranformer_tuple_list = []
# change preprocessor according to type of column found
if len(dataframeloader.categorical_cols) < 1:
logger.info("categorical columns are None, Preprocessing will done accordingly!")
# create scikit-learn pipelines instance
self.create_num_cols_pp_pl(num_cols__imputer, num_cols__scaler)
#now setup columns tranformer
num_cols_tuple = ("num_cols_pl", self.num_cols_pl, dataframeloader.numerical_cols)
tranformer_tuple_list.append(num_cols_tuple)
elif len(dataframeloader.numerical_cols) < 1:
logger.info("numerical columns are None, Preprocessing will done accordingly!")
# create sklearn pipelines instance
self.create_cat_cols_pp_pl(cat_cols__imputer, cat_cols__encoder)
#now setup columns tranformer
cat_cols_tuple = ("cat_cols_pl", self.cat_cols_pl, dataframeloader.categorical_cols)
tranformer_tuple_list.append(cat_cols_tuple)
else:
# create scikit-learn pipelines instance
logger.info("Both Numerical & Categorical columns found, Preprocessing will done accordingly!")
self.create_num_cols_pp_pl(num_cols__imputer, num_cols__scaler)
self.create_cat_cols_pp_pl(cat_cols__imputer, cat_cols__encoder)
#now setup columns tranformer
num_cols_tuple = ("num_cols_pl", self.num_cols_pl, dataframeloader.numerical_cols)
tranformer_tuple_list.append(num_cols_tuple)
cat_cols_tuple = ("cat_cols_pl", self.cat_cols_pl, dataframeloader.categorical_cols)
tranformer_tuple_list.append(cat_cols_tuple)
# encode target based upon problem type
if "classification" in problem_type:
logger.info("PreProcessing will include target(s) encoding!")
self.target_cols__encoder = target_cols__encoder
#now make final column tranfomer object
self.columns_transfomer = ColumnTransformer(tranformer_tuple_list, remainder='passthrough', sparse_threshold=0)
#logger.info(f"self.transformer_type: {self.transformer_type}")
return self | 46.017094 | 119 | 0.676634 | 589 | 5,384 | 5.806452 | 0.252971 | 0.049123 | 0.016374 | 0.015205 | 0.353216 | 0.28655 | 0.265497 | 0.256433 | 0.195614 | 0.195614 | 0 | 0.001248 | 0.255572 | 5,384 | 117 | 120 | 46.017094 | 0.852046 | 0.266159 | 0 | 0.20339 | 1 | 0 | 0.127928 | 0.016474 | 0 | 0 | 0 | 0.008547 | 0 | 1 | 0.101695 | false | 0.016949 | 0.169492 | 0.016949 | 0.338983 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec0c809a1777f7f4d7cd38a0f0c627c6975ec5e6 | 2,105 | py | Python | C Project Files/py/postscriptNameMap.py | colinmford/font-production-project-template-glyphs | ee74ee217f32a6192cf27df122076d397aa1b743 | [
"MIT"
] | 1 | 2021-08-04T21:08:39.000Z | 2021-08-04T21:08:39.000Z | C Project Files/py/postscriptNameMap.py | colinmford/font-production-project-template-glyphs | ee74ee217f32a6192cf27df122076d397aa1b743 | [
"MIT"
] | null | null | null | C Project Files/py/postscriptNameMap.py | colinmford/font-production-project-template-glyphs | ee74ee217f32a6192cf27df122076d397aa1b743 | [
"MIT"
] | null | null | null | def generatePostscriptNameMap(glyphList):
"""
Generate a PostScript Name Map to be stored in the "public.postscriptNames" lib.
Used to rename glyphs during generation, like so:
{"indianrupee.tab": "uni20B9.tab"}
Args:
glyphList (list): A list of Glyph objects or a Font object (Defcon or FontParts)
AGL (bool): Keep the names that appear in the Adobe Glyph List the same
"""
from fontTools.agl import UV2AGL
import re
unicodeMap = {}
unicodeMap.update(UV2AGL)
# 1. Make a map from old glyph order to new glyph order
renameMap = dict(zip([glyph.name for glyph in glyphList], [glyph.name for glyph in glyphList]))
# 2. For every glyph that has a unicode, make a unicode name for it
# unless AGL is enabled, use the Adobe-given names for that
for g in glyphList:
u = g.unicode
if u:
if u in unicodeMap.keys():
renameMap[g.name] = unicodeMap[u]
else:
renameMap[g.name] = "uni%04X" % u
# 3. Now go through all the glyphs that have not been mapped yet
# and split them into parts. If they are more than 1 part, run through
# each part and use the existing map to rename that part to what it
# should be. i.e.
# "indianrupee.tab" -> ["indianrupee", ".", "tab"] -> ["uni20B9", ".", "tab"] -> "uni20B9.tab"
# resulting in the map:
# "indianrupee.tab": "uni20B9.tab"
for k,v in renameMap.items():
if k == v:
splitName = re.split(r"((?<!^)[\W\_\-]+)", k)
if len(splitName) > 1:
for i, n in enumerate(splitName):
if n in renameMap.keys():
splitName[i] = renameMap[n]
recomposed = "".join(splitName)
renameMap[k] = recomposed
# 4. Return only the items that are different
return {k:v for k,v in renameMap.items() if k != v}
# Script entry — CurrentFont() is supplied by the font-editor environment
# (RoboFont-style scripting API); a Font iterates its glyphs, so it can be
# passed directly as the glyph list.
f = CurrentFont()
f.lib["public.postscriptNames"] = generatePostscriptNameMap(f)
print(f.lib["public.postscriptNames"])
print("Done!")
ec0e4acb60e907436a56c51a02a6b7cd8feac0fe | 369 | py | Python | test/__init__.py | Cazoo-uk/py-logger | 97f5e4cbb800262615130a20c70ad6e2fd039b6f | [
"MIT"
] | 1 | 2021-09-30T10:01:36.000Z | 2021-09-30T10:01:36.000Z | test/__init__.py | Cazoo-uk/py-logger | 97f5e4cbb800262615130a20c70ad6e2fd039b6f | [
"MIT"
] | 1 | 2020-09-25T07:54:24.000Z | 2020-10-12T11:11:48.000Z | test/__init__.py | Cazoo-uk/py-logger | 97f5e4cbb800262615130a20c70ad6e2fd039b6f | [
"MIT"
] | null | null | null | class LambdaContext(object):
def __init__(
self,
request_id="request_id",
function_name="my-function",
function_version="v1.0",
):
self.aws_request_id = request_id
self.function_name = function_name
self.function_version = function_version
def get_remaining_time_in_millis(self):
return None
| 26.357143 | 48 | 0.650407 | 43 | 369 | 5.139535 | 0.511628 | 0.162896 | 0.144796 | 0.162896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.268293 | 369 | 13 | 49 | 28.384615 | 0.811111 | 0 | 0 | 0 | 0 | 0 | 0.067751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0.083333 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec0fa4f849b4d211abfec3d93bde5caf14d92597 | 1,743 | py | Python | raspberrypy/motor/L289N.py | slipstreamJumper/RaspberryPy | 40e91b73a1b21c84b0b66f193a955e53c5286a38 | [
"MIT"
] | 1 | 2022-01-23T19:21:08.000Z | 2022-01-23T19:21:08.000Z | raspberrypy/motor/L289N.py | slipstreamJumper/RaspberryPy | 40e91b73a1b21c84b0b66f193a955e53c5286a38 | [
"MIT"
] | 2 | 2020-07-22T17:20:33.000Z | 2020-07-23T00:30:55.000Z | raspberrypy/motor/L289N.py | slipstreamJumper/RaspberryPy | 40e91b73a1b21c84b0b66f193a955e53c5286a38 | [
"MIT"
] | 2 | 2019-07-02T00:26:25.000Z | 2020-07-22T17:26:56.000Z | from ..utils.GPIO_utils import setup_output, output, GPIO_Base
from time import sleep
import random
def keep_decorate(func):
    """Wrap a motor action: run it, hold `keep` seconds, then stop the motor.

    `keep=None` falls back to `self.keep`; a non-positive value means the
    action is left running (no sleep, no stop).
    """
    def wrapper(self, keep=None):
        func(self, keep)
        hold = self.keep if keep is None else keep
        if hold > 0:
            sleep(hold)
            self._stop()
    return wrapper
class L289N(GPIO_Base):
    """Differential-drive motor controller on four GPIO output pins.

    Each action writes 0/1 levels to the pin pairs; the `keep_decorate`
    wrapper holds the action for `keep` seconds and then stops, unless
    keep <= 0 (action keeps running).
    """

    def __init__(self, pins=(23,22, 19,21), keep=1.0, **kwargs):
        '''
        mode: the pin mode, 'BOARD' or 'BCM' (NOTE(review): consumed via
              **kwargs by GPIO_Base — confirm, it is not read here).
        pins: pins for left forward, left backward, right forward, right backward.
        keep: the duration an action is kept; if keep <= 0 the motor will not stop.
        '''
        super(L289N, self).__init__(**kwargs)
        self.pins = pins
        for pin in pins: setup_output(pin)
        self.keep = keep

    # ============== actions ================
    def _stop(self, keep=None):
        # Undecorated stop used by the wrapper itself (avoids recursion).
        output(self.pins, [0,0,0,0])

    @keep_decorate
    def stop(self, keep=None):
        output(self.pins, [0,0,0,0])

    @keep_decorate
    def left_backward(self, keep=None):
        # First pin pair = left motor: (forward, backward).
        output(self.pins[:2], [0, 1])

    @keep_decorate
    def left_forward(self, keep=None):
        output(self.pins[:2], [1, 0])

    @keep_decorate
    def right_backward(self, keep=None):
        # Last pin pair = right motor: (forward, backward).
        output(self.pins[-2:], [0, 1])

    @keep_decorate
    def right_forward(self, keep=None):
        output(self.pins[-2:], [1, 0])

    @keep_decorate
    def forward(self, keep=None):
        # keep=-1 on the sub-moves so only the outer wrapper stops the motors.
        self.right_forward(keep=-1)
        self.left_forward(keep=-1)

    @keep_decorate
    def backward(self, keep=None):
        self.right_backward(keep=-1)
        self.left_backward(keep=-1)

    @keep_decorate
    def spin_right(self, keep=None):
        # Opposite wheel directions rotate the chassis in place.
        self.right_backward(keep=-1)
        self.left_forward(keep=-1)

    @keep_decorate
    def spin_left(self, keep=None):
        self.right_forward(keep=-1)
        self.left_backward(keep=-1)
| 27.234375 | 85 | 0.652324 | 267 | 1,743 | 4.104869 | 0.209738 | 0.10219 | 0.120438 | 0.09854 | 0.525547 | 0.525547 | 0.504562 | 0.480839 | 0.480839 | 0.480839 | 0 | 0.032647 | 0.191624 | 1,743 | 63 | 86 | 27.666667 | 0.745209 | 0.133678 | 0 | 0.38 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.26 | false | 0 | 0.06 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec10ee62b4a33eb795748660e631547aad46c65a | 6,653 | py | Python | fsm.py | Tattos/TOC-Project-2017 | 5e7b5603772583499b1339c9c963a8783f01f767 | [
"MIT"
] | null | null | null | fsm.py | Tattos/TOC-Project-2017 | 5e7b5603772583499b1339c9c963a8783f01f767 | [
"MIT"
] | null | null | null | fsm.py | Tattos/TOC-Project-2017 | 5e7b5603772583499b1339c9c963a8783f01f767 | [
"MIT"
] | null | null | null | # -- coding: UTF-8 --
from transitions.extensions import GraphMachine
# NOTE: `global` at module scope is a no-op; these lines only document the
# module-level shared state (name, reserve) that TocMachine's guard methods
# below assign via their own `global` statements.
global name
global reserve
class TocMachine(GraphMachine):
# A Telegram chat-bot finite state machine built on pytransitions'
# GraphMachine. Guard methods (is_going_to_*) test the incoming message
# text; on_enter_* handlers reply and then call advance()/go_back()
# (transitions supplied via machine_configs); on_exit_* handlers log.
def __init__(self, **machine_configs):
# NOTE(review): this builds a second GraphMachine with model=self in
# addition to inheriting from GraphMachine -- presumably the config dict
# drives the model; confirm the base-class state is unused.
self.machine = GraphMachine(
model = self,
**machine_configs
)
#state 1(finish)
def is_going_to_state1(self, update):
text = update.message.text
return text.lower() == "who are you?" or text.lower() == "what can you do?"
def on_enter_state1(self, update):
tmp = "I'm TattosBot,a Chatbot,maybe can provide you some service.\n";
tmp2 = "If you need some service,please input \"I need some service\""
tmp += tmp2;
update.message.reply_text(tmp)
self.go_back(update)
def on_exit_state1(self, update):
print('Leaving state1')
#state 2
def is_going_to_state2(self, update):
text = update.message.text
return text.lower() == 'i need some service'
def on_enter_state2(self, update):
update.message.reply_text("What type of service you need?\n(food,book,video)")
self.advance(update)
def on_exit_state2(self, update):
print('Leaving state2')
#state 3
def is_going_to_state3(self, update):
text = update.message.text
return text.lower() == 'food'
def on_enter_state3(self, update):
update.message.reply_text("What do you need?\n(please input number)\n(1)reserve a table\n(2)order a meal\n(3)ask other information")
self.advance(update)
def on_exit_state3(self, update):
print('Leaving state3')
#state 3_1
def is_going_to_state3_1(self, update):
text = update.message.text
return text.lower() == '1' or text.lower() == '(1)' or text.lower() == 'reserve a table'
def is_going_back_to_state3_1(self, update):
text = update.message.text
return text.lower() == 'n' or text.lower() == 'no'
def on_enter_state3_1(self, update):
update.message.reply_text("Please input the total number of person and reservation time.\n (example: for five at 7:20 pm.)")
self.advance(update)
def on_exit_state3_1(self, update):
print('Leaving state3_1')
#state 4_1
def is_going_to_state4_1(self, update):
text = update.message.text
# Side effect in a guard: the raw message is stashed in the module-level
# `reserve` so on_enter_state4_1 can echo it back for confirmation.
global reserve
reserve = text
return (text.lower() != '1' and text.lower() != '(1)' and text.lower() != 'reserve a table' and text.lower() != 'n' and text.lower() != 'no')
def on_enter_state4_1(self, update):
update.message.reply_text("Check if the following information is correct(Y/N):\n" +
"You reserve a table " + reserve.lower())
# Clear the text -- presumably so the immediate advance() below does not
# re-match the same message; confirm against the transition config.
update.message.text = ""
self.advance(update)
def on_exit_state4_1(self, update):
print('Leaving state4_1')
#state 4_1_check
def is_going_to_state4_1_check(self, update):
text = update.message.text
return text.lower() == 'y' or text.lower() == 'yes'
def on_enter_state4_1_check(self, update):
update.message.reply_text("All right.You has been booked successfully.")
self.go_back(update)
def on_exit_state4_1_check(self, update):
print('Leaving state4_1_check')
#state 3_2
def is_going_to_state3_2(self, update):
text = update.message.text
return text.lower() == '2' or text.lower() == '(2)' or text.lower() == 'order a meal'
def on_enter_state3_2(self, update):
update.message.reply_text("What would you like to eat?\n(please input number)\n(1)Fried rice\n(2)Pasta\n")
update.message.text = ""
self.advance(update)
def on_exit_state3_2(self, update):
print('Leaving state3_2')
#state 4_2
def is_going_to_state4_2(self, update):
text = update.message.text
return (text.lower() == '1' or text.lower() == '(1)' or text.lower() == 'fried rice'
or text.lower() == '2' or text.lower() == '(2)' or text.lower() == 'pasta')
def on_enter_state4_2(self, update):
update.message.reply_text("OK,we will serve you shortly.")
self.go_back(update)
def on_exit_state4_2(self, update):
print('Leaving state4_2')
#state 3_3
def is_going_to_state3_3(self, update):
text = update.message.text
return text.lower() == '3' or text.lower() == '(3)' or text.lower() == 'ask other information'
def on_enter_state3_3(self, update):
update.message.reply_text("If you have any other questions, please call customer service:\n0800-XXX-XXX\n")
self.go_back(update)
def on_exit_state3_3(self, update):
print('Leaving state3_3')
#state_book
def is_going_to_state_book(self, update):
text = update.message.text
return text.lower() == 'book'
def on_enter_state_book(self, update):
update.message.reply_text("You may like this:\n[博客來]\n(http://www.books.com.tw/?gclid=CjwKCAjw07nJBRBGEiwAUXBPmfuWUBqlGHbSA08eP6nwThe814rd5aa-62PI6UsTWs5C8bp634oKXRoCnoYQAvD_BwE)")
self.go_back(update)
def on_exit_state_book(self, update):
print('Leaving state_book')
#state_video
def is_going_to_state_video(self, update):
text = update.message.text
return text.lower() == 'video'
def on_enter_state_video(self, update):
update.message.reply_text("You may like this:\n[Youtube]\n(https://www.youtube.com)")
self.go_back(update)
def on_exit_state_video(self, update):
# NOTE(review): log says 'state4' but this exits state_video -- looks like
# a copy-paste slip; runtime string left unchanged here.
print('Leaving state4')
#state 5 (5 , 6 finish)
def is_going_to_state5(self, update):
text = update.message.text
return (text.lower() == 'hello' or text.lower() == 'hi'
or text.lower() == 'good morning'
or text.lower() == 'good afternoon'
or text.lower() == 'good evening'
or text.lower() == 'good night')
def on_enter_state5(self, update):
update.message.reply_text("Hi,what's your name?")
self.advance(update)
def on_exit_state5(self, update):
print('Leaving state5')
#state 6
def is_going_to_state6(self, update):
text = update.message.text
# Guard side effect: remember the sender's answer as their name for state 6.
global name
name = text
return (text.lower() != 'hello' and text.lower() != 'hi'
and text.lower() != 'good morning'
and text.lower() != 'good afternoon'
and text.lower() != 'good evening'
and text.lower() != 'good night')
def on_enter_state6(self, update):
update.message.reply_text("Hi~"+name+",nice to meet you!")
self.go_back(update)
def on_exit_state6(self, update):
print('Leaving state6')
| 43.201299 | 188 | 0.632948 | 933 | 6,653 | 4.334405 | 0.163987 | 0.093472 | 0.051682 | 0.069238 | 0.579624 | 0.477498 | 0.37908 | 0.242087 | 0.211919 | 0.099654 | 0 | 0.026435 | 0.238088 | 6,653 | 153 | 189 | 43.48366 | 0.771355 | 0.023749 | 0 | 0.244444 | 0 | 0.02963 | 0.213183 | 0.00741 | 0 | 0 | 0 | 0 | 0 | 1 | 0.303704 | false | 0 | 0.007407 | 0 | 0.422222 | 0.096296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec12c5e43990e93dd5973a26cd375c935d1d422e | 29,947 | py | Python | header_common.py | invisiblebob395/awefawe | 42daf9d3ae06bcdb3b91973d94eed8bed1303e2b | [
"BSD-3-Clause"
] | null | null | null | header_common.py | invisiblebob395/awefawe | 42daf9d3ae06bcdb3b91973d94eed8bed1303e2b | [
"BSD-3-Clause"
] | null | null | null | header_common.py | invisiblebob395/awefawe | 42daf9d3ae06bcdb3b91973d94eed8bed1303e2b | [
"BSD-3-Clause"
] | null | null | null | ###################################################
# header_common.py
# This file contains common declarations.
# DO NOT EDIT THIS FILE!
###################################################
# Network event ids. Per the note below, ids must fit in 0..127; events sent
# by the server come first, client-originated events start at 92.
server_event_preset_message = 0
server_event_play_sound = 1
server_event_scene_prop_play_sound = 2
server_event_play_sound_at_position = 3
server_event_agent_equip_armor = 4
server_event_player_set_slot = 5
server_event_scene_prop_set_slot = 6
server_event_faction_set_slot = 7
server_event_troop_set_slot = 8
server_event_agent_set_slot = 9
server_event_show_inventory = 10
# NOTE: "recieved" is a historical misspelling -- kept, since other module
# files reference this name.
server_event_chat_message_recieved = 11
server_event_local_chat = 12
server_event_local_chat_shout = 13
server_event_faction_set_name = 14
server_event_return_game_rules = 15
server_event_return_server_name = 16
server_event_return_password = 17
server_event_set_player_score_kill_death = 18
server_event_show_poll = 19
server_event_set_overflow_gold = 20
server_event_faction_chat = 21
server_event_faction_chat_announce = 22
server_event_admin_chat = 23
server_event_admin_chat_announce = 24
server_event_admin_set_permissions = 25
server_event_set_attached_scene_prop = 26
server_event_local_animation = 27
server_event_update_scene_prop_hit_points = 28
#GGG:new server
server_event_agent_stop_sound = 29
server_event_agent_play_sound = 30
##Arthur begins
server_event_player_exit_return = 31
server_event_player_set_cam = 32
# Add new events here: above if sent from the server, below if from clients.
#GGG:new client
##Arthur begins
client_event_player_request_exit = 92
##Arthur ends
client_event_admin_faction_action = 93
client_event_player_set_slot = 94
#
client_event_request_animation = 95
# The two values below are not event ids -- presumably they are sub-type
# arguments for client_event_request_animation; confirm at the call sites.
agent_shield_bash = 0 #GGG:shield bash
agent_loot_weapon = 1 #GGG:loot weapon
client_event_reveal_money_pouch = 96
client_event_agent_loot_armor = 97
client_event_toggle_drop_armor = 98
client_event_admin_equip_item = 99
client_event_poll_vote = 100
client_event_request_poll = 101
client_event_request_spawn_point = 102
client_event_request_game_rules = 103
client_event_admin_set_server_name = 104
client_event_admin_set_password = 105
client_event_admin_set_welcome_message = 106
client_event_admin_set_game_rule = 107
client_event_admin_action = 108
client_event_faction_admin_action = 109
# Chat messages occupy the id range [begin, end); ids 110..119 encode the
# chat type inline. NOTE(review): _end and _type share value 120 -- looks
# intentional (first id past the range doubles as the "typed" marker), but
# confirm against the chat send/receive scripts.
client_event_chat_message_begin = 110
client_event_chat_message_end = 120
client_event_chat_message_type = 120
client_event_transfer_gold = 121
client_event_request_stock_count = 122
client_event_drop_money_bag = 123
client_event_change_faction_banner = 124
client_event_transfer_inventory = 125
client_event_control_scene_prop = 126
client_event_attach_scene_prop = 127
# Network events are limited to numbers between 0 and 127 by the game engine.
# preset_message flag word layout (derived from the masks below):
# bits 0-3 parameter conversion type, bits 4-7 colour, bits 8-11 display
# type, bits 12+ extra flags. Combined presets are defined at the end.
preset_message_default = 0x0
preset_message_item = 0x2 # converts value 1 from item id into name string
preset_message_agent = 0x3 # converts value 1 from agent id into name string
preset_message_player = 0x4 # converts value 1 from player id into username string
preset_message_faction = 0x5 # converts value 1 from faction id into name string
preset_message_faction_castle = 0x6 # converts value 1 from castle id into name string
preset_message_params_mask = 0xF
preset_message_white = 0x00
preset_message_red = 0x10
preset_message_green = 0x20
preset_message_blue = 0x30
preset_message_yellow = 0x40
preset_message_color_mask = 0xF0
preset_message_small = 0x000
preset_message_big = 0x100
preset_message_read_object = 0x200 # displays the presentation for reading a book
preset_message_chat_only = 0x300 # only displays in the chat log
preset_message_type_mask = 0xF00
preset_message_log = 0x1000 # add message to the chat log
preset_message_fail_sound = 0x2000 # play a failure sound
preset_message_error = preset_message_red|preset_message_fail_sound
preset_message_info = preset_message_yellow
preset_message_chat_log = preset_message_chat_only|preset_message_log
# Server configuration command ids, in three banks: module-system commands
# (1..44), Napoleonic Wars commands (50..81), and engine hard-coded commands
# (101..136). The commands_*_begin/_end pairs below give half-open ranges.
# Module system commands
command_get_bot_count = 1
command_set_bot_count = 2
command_get_round_max_seconds = 3
command_set_round_max_seconds = 4
command_get_respawn_period = 5
command_set_respawn_period = 6
command_get_num_bots_voteable = 7
command_set_num_bots_voteable = 8
command_get_maps_voteable = 9
command_set_maps_voteable = 10
command_get_factions_voteable = 11
command_set_factions_voteable = 12
command_get_player_respawn_as_bot = 13
command_set_player_respawn_as_bot = 14
command_get_kick_voteable = 15
command_set_kick_voteable = 16
command_get_ban_voteable = 17
command_set_ban_voteable = 18
command_get_valid_vote_ratio = 19
command_set_valid_vote_ratio = 20
command_get_auto_team_balance_limit = 21
command_set_auto_team_balance_limit = 22
command_get_starting_gold = 23
command_set_starting_gold = 24
command_get_combat_gold_bonus = 25
command_set_combat_gold_bonus = 26
command_get_round_gold_bonus = 27
command_set_round_gold_bonus = 28
command_get_player_banners_allowed = 29
command_set_player_banners_allowed = 30
command_get_force_default_armor = 31
command_set_force_default_armor = 32
command_get_team_points_gained_for_flags = 33
command_set_team_points_gained_for_flags = 34
command_get_points_gained_for_capturing_flags = 35
command_set_points_gained_for_capturing_flags = 36
command_get_map_time_limit = 37
command_set_map_time_limit = 38
command_get_team_point_limit = 39
command_set_team_point_limit = 40
command_get_defender_spawn_count = 41
command_set_defender_spawn_count = 42
command_get_disallow_ranged_weapons = 43
command_set_disallow_ranged_weapons = 44
# Napoleonic Wars commands
command_use_class_limits = 50
command_class_limit_player_count = 51
command_squad_size = 52
command_scale_squad = 53
command_build_points_team1 = 54
command_build_points_team2 = 55
command_allow_multiple_firearms = 56
command_enable_bonuses = 57
command_bonus_strength = 58
command_bonus_range = 59
command_fall_off_horse = 60
command_horse_dying = 61
command_auto_kick = 62
command_max_teamkills_before_kick = 63
command_auto_horse = 64
command_auto_swap = 65
command_limit_grenadier = 66
command_limit_skirmisher = 67
command_limit_rifle = 68
command_limit_cavalry = 69
command_limit_lancer = 70
command_limit_hussar = 71
command_limit_dragoon = 72
command_limit_cuirassier = 73
command_limit_heavycav = 74
command_limit_artillery = 75
command_limit_rocket = 76
command_limit_sapper = 77
command_limit_musician = 78
command_limit_sergeant = 79
command_limit_officer = 80
command_limit_general = 81
# Hard coded commands
command_get_max_players = 101
command_set_max_players = 102
command_get_friendly_fire = 103
command_set_friendly_fire = 104
command_get_melee_friendly_fire = 105
command_set_melee_friendly_fire = 106
command_get_friendly_fire_damage_self_ratio = 107
command_set_friendly_fire_damage_self_ratio = 108
command_get_friendly_fire_damage_friend_ratio = 109
command_set_friendly_fire_damage_friend_ratio = 110
command_get_ghost_mode = 111
command_set_ghost_mode = 112
command_get_control_block_direction = 113
command_set_control_block_direction = 114
command_get_combat_speed = 115
command_set_combat_speed = 116
command_get_add_to_game_servers_list = 117
command_set_add_to_game_servers_list = 118
command_get_anti_cheat = 119
command_set_anti_cheat = 120
command_get_renaming_server_allowed = 121
command_set_renaming_server_allowed = 122
command_get_changing_game_type_allowed = 123
command_set_changing_game_type_allowed = 124
command_start_scene = 130
command_open_admin_panel = 132
command_open_game_rules = 134
command_set_server_mission_timer = 136
commands_module_system_begin = command_get_bot_count
commands_module_system_end = command_set_disallow_ranged_weapons + 1
commands_napoleonic_wars_begin = command_use_class_limits
commands_napoleonic_wars_end = command_limit_general + 1
commands_hard_coded_begin = command_get_max_players
# NOTE(review): the hard-coded range ends at set_anti_cheat + 1 (= 121), which
# excludes commands 121..136 defined above -- confirm whether that is intended.
commands_hard_coded_end = command_set_anti_cheat + 1
# Player/respawn limits, hard-coded team ids, and bit-packing helpers for
# network values, chat events and the stats chart.
min_num_players = 2
max_num_players = 250 # limited by the game engine
min_respawn_period = 3
max_respawn_period = 31 # dead agents are removed after approximately this interval
team_default = 0 # default team, members can attack each other like deathmatch - since multiplayer is hard coded to handle only 2 teams
team_spawn_invulnerable = 1 # team set to be neutral to each other and the default team, so they can't attack or be attacked
team_spectators = 2 # hard coded spectators team
team_bots = 3 #GGG:foreign invasion
net_value_upper_bound = 1 << 31
net_sound_shift = 16
net_sound_mask = (1 << net_sound_shift) - 1
# Packing three 10-bit values into one 30-bit network integer.
net_pack_3_shift_2 = 10
net_pack_3_shift_3 = 20
net_pack_3_value_upper_bound = 1 << net_pack_3_shift_2
net_pack_3_mask_1 = net_pack_3_value_upper_bound - 1
net_pack_3_mask_2 = net_pack_3_mask_1 << net_pack_3_shift_2
net_pack_3_mask_3 = net_pack_3_mask_1 << net_pack_3_shift_3
net_chat_type_shift = 8
net_chat_param_1_shift = net_chat_type_shift * 2
net_chat_event_mask = (1 << net_chat_type_shift) - 1
stats_chart_score_shift = 8
stats_chart_ranking_shift = 24
stats_chart_score_max = 1 << (stats_chart_ranking_shift - stats_chart_score_shift)
stats_chart_player_mask = (1 << stats_chart_score_shift) - 1
# Admin panel action ids (payload of client_event_admin_action).
admin_action_kick_player = 0
admin_action_ban_player_temp = 1
admin_action_ban_player_perm = 2
admin_action_mute_player = 3
admin_action_kill_player = 4
admin_action_fade_player_out = 5
admin_action_freeze_player = 6
#GGG:admin tools
admin_action_add_outlaw_player = 7
admin_action_sub_outlaw_player = 8
admin_action_teleport_player_to_admin = 9
# (Removed a stray bare `admin_action_add_outlaw_player` expression that was
# here: it evaluated the name and discarded the result -- a leftover no-op.)
admin_action_teleport_to_player = 10
admin_action_teleport_behind_player = 11
admin_action_teleport_forwards = 12
admin_action_get_armor = 13
admin_action_get_invisible = 14
admin_action_refill_health = 15
admin_action_become_godlike = 16
admin_action_get_horse = 17
admin_action_remove_horses = 18
admin_action_remove_stray_horses = 19
#GGG:admin tools
admin_action_remove_wild_animals = 20
admin_action_remove_corpses = 21
# NOTE(review): remove_all_weapons shares id 21 with remove_corpses -- the
# dispatcher cannot distinguish them; confirm whether this is intended.
admin_action_remove_all_weapons = 21
admin_action_reset_carts = 22
#
admin_action_teleport_to_ships = 23
admin_action_reset_ships = 24
admin_action_lock_faction = 25
admin_action_lock_faction_capture = 26 #GGG
# Faction-admin action ids (payload of client_event_faction_admin_action).
faction_admin_action_change_banner = 0
faction_admin_action_kick_player = 1
faction_admin_action_toggle_player_door_key = 2
faction_admin_action_toggle_player_money_key = 3
faction_admin_action_toggle_player_item_key = 4
faction_admin_action_set_relation_hostile = 5
faction_admin_action_set_relation_peaceful = 6
faction_admin_action_outlaw_player = 7
faction_admin_action_mute_player = 8
faction_admin_action_toggle_player_announce = 9
# Gold / hit-point display limits and the profile banner id bit field.
max_possible_gold = 1000000000
max_correctly_displayed_gold = 131071 # player gold over this value will not be updated correctly by the game engine
max_correctly_displayed_hp = 15000 # scene prop hit points over approximately this value will not be displayed correctly in the engine hit points bar
min_scene_prop_hit_points = 1
# Banner id occupies option bits [9, 30); the mask extracts the low 9 bits.
profile_banner_id_option_bits_begin = 9
profile_banner_id_option_bits_end = 30
profile_banner_id_mask = (1 << profile_banner_id_option_bits_begin) - 1
# Operand encoding for compiled module-system operations: the low
# op_num_value_bits (56) bits of an operand hold the value, and the bits
# above them hold a tag identifying what kind of value it is.
bignum = 0x40000000000000000000000000000000
op_num_value_bits = 24 + 32
tag_register = 1
tag_variable = 2
tag_string = 3
tag_item = 4
tag_troop = 5
tag_faction = 6
tag_quest = 7
tag_party_tpl = 8
tag_party = 9
tag_scene = 10
tag_mission_tpl = 11
tag_menu = 12
tag_script = 13
tag_particle_sys = 14
tag_scene_prop = 15
tag_sound = 16
tag_local_variable = 17
tag_map_icon = 18
tag_skill = 19
tag_mesh = 20
tag_presentation = 21
tag_quick_string = 22
tag_track = 23
tag_tableau = 24
tag_animation = 25
tags_end = 26
# Pre-shifted tag masks, ORed with a raw value to form a tagged operand.
opmask_register = tag_register << op_num_value_bits
opmask_variable = tag_variable << op_num_value_bits
opmask_string = tag_string << op_num_value_bits
opmask_item_index = tag_item << op_num_value_bits
opmask_troop_index = tag_troop << op_num_value_bits
opmask_faction_index = tag_faction << op_num_value_bits
opmask_quest_index = tag_quest << op_num_value_bits
opmask_p_template_index = tag_party_tpl << op_num_value_bits
opmask_party_index = tag_party << op_num_value_bits
opmask_scene_index = tag_scene << op_num_value_bits
opmask_mission_tpl_index = tag_mission_tpl << op_num_value_bits
opmask_menu_index = tag_menu << op_num_value_bits
opmask_script = tag_script << op_num_value_bits
opmask_particle_sys = tag_particle_sys << op_num_value_bits
opmask_scene_prop = tag_scene_prop << op_num_value_bits
opmask_sound = tag_sound << op_num_value_bits
opmask_map_icon = tag_map_icon << op_num_value_bits
opmask_local_variable = tag_local_variable << op_num_value_bits
opmask_quick_string = tag_quick_string << op_num_value_bits

def reg(reg_no):
    """Return the tagged operand encoding for register number *reg_no*.

    Valid register numbers are 0..127 (matching the reg0..reg127 constants
    defined later in this file); anything else raises Exception.
    Fix: the previous check (0 < reg_no < 128) wrongly rejected register 0
    even though reg0 is a valid register.
    """
    if not 0 <= reg_no < 128:
        raise Exception("ERROR: invalid register number.")
    return opmask_register | reg_no
# String register constants s0..s127 (s<n> == n). Generated in a loop
# instead of 128 hand-written assignments; the resulting module namespace
# is identical to the enumerated original.
for _n in range(128):
    globals()["s%d" % _n] = _n
del _n
# Position register constants pos0..pos127 (pos<n> == n). Generated in a
# loop instead of 128 hand-written assignments; the module namespace is
# identical to the enumerated original.
for _n in range(128):
    globals()["pos%d" % _n] = _n
del _n
# Pre-tagged register operands reg0..reg127 (reg<n> == opmask_register | n).
# Generated in a loop instead of 128 hand-written assignments; the module
# namespace is identical to the enumerated original.
for _n in range(128):
    globals()["reg%d" % _n] = opmask_register | _n
del _n
# Spawn-point selection flags (bitmask, one bit each).
spf_all_teams_are_enemy = 0x00000001
spf_is_horseman = 0x00000002
spf_examine_all_spawn_points = 0x00000004
spf_team_0_spawn_far_from_entry_32 = 0x00000008
spf_team_1_spawn_far_from_entry_0 = 0x00000010
spf_team_1_spawn_far_from_entry_66 = 0x00000020
spf_team_0_spawn_near_entry_0 = 0x00000040
spf_team_0_spawn_near_entry_66 = 0x00000080
spf_team_1_spawn_near_entry_32 = 0x00000100
spf_team_0_walkers_spawn_at_high_points = 0x00000200
spf_team_1_walkers_spawn_at_high_points = 0x00000400
spf_try_to_spawn_close_to_at_least_one_enemy = 0x00000800
spf_care_agent_to_agent_distances_less = 0x00001000
# Human bones
# Skeleton bone indices for human agents (l/r suffixes = left/right).
hb_abdomen = 0
hb_thigh_l = 1
hb_calf_l = 2
hb_foot_l = 3
hb_thigh_r = 4
hb_calf_r = 5
hb_foot_r = 6
hb_spine = 7
hb_thorax = 8
hb_head = 9
hb_shoulder_l = 10
hb_upperarm_l = 11
hb_forearm_l = 12
hb_hand_l = 13
hb_item_l = 14
hb_shoulder_r = 15
hb_upperarm_r = 16
hb_forearm_r = 17
hb_hand_r = 18
hb_item_r = 19
# Horse bones
# Skeleton bone indices for horse agents.
hrsb_pelvis = 0
hrsb_spine_1 = 1
hrsb_spine_2 = 2
hrsb_spine_3 = 3
hrsb_neck_1 = 4
hrsb_neck_2 = 5
hrsb_neck_3 = 6
hrsb_head = 7
hrsb_l_clavicle = 8
hrsb_l_upper_arm = 9
hrsb_l_forearm = 10
hrsb_l_hand = 11
hrsb_l_front_hoof = 12
hrsb_r_clavicle = 13
hrsb_r_upper_arm = 14
hrsb_r_forearm = 15
hrsb_r_hand = 16
hrsb_r_front_hoof = 17
hrsb_l_thigh = 18
hrsb_l_calf = 19
hrsb_l_foot = 20
hrsb_l_back_hoof = 21
hrsb_r_thigh = 22
hrsb_r_calf = 23
hrsb_r_foot = 24
hrsb_r_back_hoof = 25
hrsb_tail_1 = 26
hrsb_tail_2 = 27
#Tooltip types
# On-screen tooltip category ids. Id 4 is unassigned here -- presumably a
# removed or engine-reserved category; confirm before reusing it.
tooltip_agent = 1
tooltip_horse = 2
tooltip_my_horse = 3
tooltip_container = 5
tooltip_door = 6
tooltip_item = 7
tooltip_leave_area = 8
tooltip_prop = 9
tooltip_destructible_prop = 10
# (Removed: an exact duplicate of the "Human bones" (hb_*) and "Horse bones"
# (hrsb_*) constant blocks defined earlier in this file. Every name was
# re-assigned to the same value it already had, so deleting the duplicate
# does not change the module namespace.)
#Attack directions
atk_thrust = 0
atk_right_swing = 1
atk_left_swing = 2
atk_overhead = 3
#Game windows
window_inventory = 7
window_party = 8
window_character = 11
#Agent body meta meshes
# Slot indices of the meta meshes composing an agent's visible body.
bmm_head = 0
bmm_beard = 1
bmm_hair = 2
bmm_helmet = 3
bmm_armor = 4
bmm_trousers = 5
bmm_left_foot = 6
bmm_right_foot = 7
bmm_armature = 8
bmm_item_1 = 9
bmm_item_2 = 10
bmm_item_3 = 11
bmm_item_4 = 12
bmm_missile_1 = 13
bmm_missile_2 = 14
bmm_missile_3 = 15
bmm_missile_4 = 16
bmm_carry_1 = 17
bmm_carry_2 = 18
bmm_carry_3 = 19
bmm_carry_4 = 20
bmm_unknown_2 = 21
bmm_left_hand = 22
bmm_right_hand = 23
bmm_left_bracer = 24
bmm_right_bracer = 25
bmm_banner = 26
bmm_name = 27
#Floating point registers
# Constants fp0..fp127 (fp<n> == n). Generated in a loop instead of 128
# hand-written assignments; the module namespace is identical to the
# enumerated original.
for _n in range(128):
    globals()["fp%d" % _n] = _n
del _n
# Sort flags: sort_f_* are combinable bits, sort_m_* are the ready-made
# modes (int / case-sensitive string / case-insensitive string, asc/desc).
sort_f_desc = 1
sort_f_ci = 2
sort_m_int_asc = 0
sort_m_int_desc = sort_f_desc
sort_m_str_cs_asc = 0
sort_m_str_cs_desc = sort_f_desc
sort_m_str_ci_asc = sort_f_ci
sort_m_str_ci_desc = sort_f_ci | sort_f_desc
# Lua value-type tags (mirroring the LUA_T* constants from lua.h).
LUA_TNONE = -1
LUA_TNIL = 0
LUA_TBOOLEAN = 1
LUA_TLIGHTUSERDATA = 2
LUA_TNUMBER = 3
LUA_TSTRING = 4
LUA_TTABLE = 5
LUA_TFUNCTION = 6
LUA_TUSERDATA = 7
LUA_TTHREAD = 8
| 27.883613 | 170 | 0.631182 | 4,099 | 29,947 | 4.204928 | 0.277629 | 0.105593 | 0.011604 | 0.016245 | 0.165526 | 0.101416 | 0.082444 | 0.073509 | 0.073509 | 0.071188 | 0 | 0.159939 | 0.322503 | 29,947 | 1,073 | 171 | 27.909599 | 0.689585 | 0.04892 | 0 | 0.096 | 1 | 0 | 0.001094 | 0 | 0 | 0 | 0.008684 | 0 | 0 | 1 | 0.001 | false | 0.002 | 0 | 0 | 0.002 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec156505e91c441a8ad50c79db3df921782b1221 | 440 | py | Python | tests/resources/my_dummy_handlers/dummy_handler_multiple_args_too_few.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 13 | 2020-05-15T08:30:13.000Z | 2022-01-24T01:10:29.000Z | tests/resources/my_dummy_handlers/dummy_handler_multiple_args_too_few.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 23 | 2020-05-18T09:00:16.000Z | 2022-01-20T06:32:38.000Z | tests/resources/my_dummy_handlers/dummy_handler_multiple_args_too_few.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 5 | 2020-08-21T07:08:18.000Z | 2021-11-29T18:04:46.000Z | from oxygen import BaseHandler
class MyDummyHandler(BaseHandler):
'''
A test handler that throws mismatch argument exception because
parse_results expects too many arguments
'''
def run_my_dummy_handler(self, result_file):
return result_file, 'foo'
def parse_results(self, result_file, foo, bar):
return {
'name': result_file,
'foo': foo,
'bar': bar
}
| 23.157895 | 66 | 0.625 | 50 | 440 | 5.32 | 0.64 | 0.150376 | 0.146617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.295455 | 440 | 18 | 67 | 24.444444 | 0.858065 | 0.234091 | 0 | 0 | 0 | 0 | 0.041009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0.2 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
ec187475e9c297665ae42158746fc88cffcf55f6 | 3,534 | py | Python | deep_qa-master/deep_qa/data/instances/sequence_tagging/tagging_instance.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | 1 | 2017-04-11T13:03:55.000Z | 2017-04-11T13:03:55.000Z | deep_qa-master/deep_qa/data/instances/sequence_tagging/tagging_instance.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | deep_qa-master/deep_qa/data/instances/sequence_tagging/tagging_instance.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List, Any
import numpy
from overrides import overrides
from ..instance import TextInstance, IndexedInstance
from ...data_indexer import DataIndexer
class TaggingInstance(TextInstance):
    """
    A passage of text together with a tag sequence over that text.

    Tokenization interacts awkwardly with sequence labels: a tag sequence
    implicitly assumes one particular tokenization.  This superclass holds
    the functionality common to simple sequence-tagging tasks; reading data
    from files and turning it into properly-indexed tag sequences is left
    to subclasses.
    """
    def __init__(self, text: str, label: Any, index: int=None):
        super(TaggingInstance, self).__init__(label, index)
        self.text = text

    def __str__(self):
        return "TaggedSequenceInstance(" + self.text + ", " + str(self.label) + ")"

    @overrides
    def words(self) -> Dict[str, List[str]]:
        word_dict = self._words_from_text(self.text)
        word_dict['tags'] = self.tags_in_label()
        return word_dict

    def tags_in_label(self):
        """
        All tag words in this instance, so they can be converted into
        indices (called from ``self.words()``).  Unnecessary when the
        labeling scheme is already pre-indexed.
        """
        raise NotImplementedError

    def _index_label(self, label: Any, data_indexer: DataIndexer) -> List[int]:
        """
        Index the labels.  The label's form is subclass-specific, so the
        conversion lives there; use the 'tags' namespace of the
        ``DataIndexer`` when mapping tag names to indices.
        """
        raise NotImplementedError

    def to_indexed_instance(self, data_indexer: DataIndexer):
        indexed_text = self._index_text(self.text, data_indexer)
        indexed_label = self._index_label(self.label, data_indexer)
        # One label per token is required; a mismatch means the label's
        # assumed tokenization disagrees with ours.
        assert len(indexed_text) == len(indexed_label), "Tokenization is off somehow"
        return IndexedTaggingInstance(indexed_text, indexed_label, self.index)
class IndexedTaggingInstance(IndexedInstance):
    """An indexed tagging instance: word indices paired with a per-word
    label sequence of the same length.
    """
    def __init__(self, text_indices: List[int], label: List[int], index: int=None):
        super(IndexedTaggingInstance, self).__init__(label, index)
        self.text_indices = text_indices

    @classmethod
    @overrides
    def empty_instance(cls):
        # Fix: this previously returned ``TaggingInstance([], ...)`` -- a
        # TextInstance built with a list where a str is expected, and the
        # wrong type for an IndexedInstance subclass.  Return an empty
        # instance of this class instead.
        return cls([], label=None, index=None)

    @overrides
    def get_lengths(self) -> Dict[str, int]:
        """Lengths relevant for padding, derived from the word indices."""
        return self._get_word_sequence_lengths(self.text_indices)

    @overrides
    def pad(self, max_lengths: Dict[str, int]):
        """Pad (or truncate, from the left) both the word indices and the
        label sequence to ``max_lengths['num_sentence_words']``.
        """
        self.text_indices = self.pad_word_sequence(self.text_indices, max_lengths,
                                                   truncate_from_right=False)
        # Label padding replicates the first label value to fill the sequence.
        self.label = self.pad_sequence_to_length(self.label,
                                                 desired_length=max_lengths['num_sentence_words'],
                                                 default_value=lambda: self.label[0],
                                                 truncate_from_right=False)

    @overrides
    def as_training_data(self):
        """Return ``(text_array, label_array)`` as int32 numpy arrays."""
        text_array = numpy.asarray(self.text_indices, dtype='int32')
        label_array = numpy.asarray(self.label, dtype='int32')
        return text_array, label_array
| 42.071429 | 99 | 0.665535 | 437 | 3,534 | 5.196796 | 0.340961 | 0.042272 | 0.03963 | 0.01321 | 0.022897 | 0.022897 | 0 | 0 | 0 | 0 | 0 | 0.001895 | 0.253254 | 3,534 | 83 | 100 | 42.578313 | 0.858659 | 0.270232 | 0 | 0.183673 | 0 | 0 | 0.034483 | 0.009331 | 0 | 0 | 0 | 0 | 0.020408 | 1 | 0.22449 | false | 0 | 0.102041 | 0.061224 | 0.489796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec18af381f04c60608f7445c92c54531b795a35a | 494 | py | Python | readFromWrite.py | openNuke/toolkit | 3ee4d13865f8327532906d42af53a929a55e354c | [
"BSD-3-Clause"
] | 36 | 2015-11-01T20:13:18.000Z | 2021-06-24T22:01:07.000Z | readFromWrite.py | ZBYVFX/toolset | aaf213ddbaa0f924306271544b04760ddea7eee2 | [
"BSD-3-Clause"
] | 2 | 2015-10-19T09:04:34.000Z | 2015-12-16T13:58:03.000Z | readFromWrite.py | ZBYVFX/toolset | aaf213ddbaa0f924306271544b04760ddea7eee2 | [
"BSD-3-Clause"
] | 9 | 2015-06-15T17:38:20.000Z | 2021-05-23T06:05:13.000Z | #rafal kaniewski
# todo mov not working
import os

import nuke
from PySide import QtGui
def run(node):
    """Paste a Read node matching *node*'s rendered output via the clipboard.

    Evaluates the node's ``file`` knob, rewrites the frame-number component
    as a printf-style pattern (e.g. ``%04d``), looks up the on-disk frame
    range with ``nuke.getFileNameList``, then pastes "<pattern> <range>"
    through the system clipboard so Nuke creates the Read node.

    Fix: the original used ``os.path.dirname`` without importing ``os``,
    raising NameError at runtime (now imported at the top of the file).
    """
    clipboard = QtGui.QApplication.clipboard()
    filename = node['file'].evaluate()
    # Assumes a "name.frame.ext" path; a negative maxsplit splits on every
    # dot.  NOTE(review): single-extension files (e.g. .mov, per the TODO
    # at the top of this file) have no frame component -- confirm handling.
    filesplit = filename.rsplit('.', -2)
    filesplit[1] = '%0' + str(len(filesplit[1])) + 'd'
    filep = '.'.join(filesplit)
    # getFileNameList entries look like "name first-last"; take the range.
    filenameFrame = nuke.getFileNameList(os.path.dirname(filep))[0].rsplit(' ', -1)[1]
    clipboard.setText(filep + " " + filenameFrame)
    nuke.nodePaste("%clipboard%")
#run(nuke.selectedNode())
| 23.52381 | 84 | 0.678138 | 59 | 494 | 5.677966 | 0.627119 | 0.059701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016432 | 0.137652 | 494 | 20 | 85 | 24.7 | 0.769953 | 0.121457 | 0 | 0 | 0 | 0 | 0.051044 | 0 | 0 | 0 | 0 | 0.05 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec1c799b1e47e5da04c6afaaf74aef8c07f2bf38 | 543 | py | Python | src/settings.py | psykzz/st3-gitblame | 42179a999d03b19d9f20b85d2f30fe5c288d0f59 | [
"MIT"
] | 28 | 2017-02-01T19:28:53.000Z | 2019-04-03T13:55:37.000Z | src/settings.py | frou/st3-gitblame | 154163dc5e08eb37a3f7181f500719d577b61cfa | [
"MIT"
] | 43 | 2019-04-07T14:07:55.000Z | 2022-01-29T22:20:09.000Z | src/settings.py | psykzz/st3-gitblame | 42179a999d03b19d9f20b85d2f30fe5c288d0f59 | [
"MIT"
] | 14 | 2017-02-10T22:00:35.000Z | 2019-04-01T17:45:43.000Z | import sublime
def pkg_settings():
    """Load and return this package's Sublime Text settings object.

    The ``sublime.load_settings(...)`` call is deliberately made per-call
    here, rather than once at import time into a module-level variable,
    because of:
    https://www.sublimetext.com/docs/3/api_reference.html#plugin_lifecycle
    """
    return sublime.load_settings("Git blame.sublime-settings")
# Setting names within "Git blame.sublime-settings" (presumably looked up
# via pkg_settings() elsewhere in the package).
PKG_SETTINGS_KEY_CUSTOMBLAMEFLAGS = "custom_blame_flags"
PKG_SETTINGS_KEY_INLINE_BLAME_ENABLED = "inline_blame_enabled"
PKG_SETTINGS_KEY_INLINE_BLAME_DELAY = "inline_blame_delay"
| 36.2 | 88 | 0.801105 | 76 | 543 | 5.407895 | 0.657895 | 0.107056 | 0.10219 | 0.097324 | 0.121655 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002092 | 0.119705 | 543 | 14 | 89 | 38.785714 | 0.857741 | 0.443831 | 0 | 0 | 0 | 0 | 0.276094 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0.166667 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
ec1f2ee5219b5c68ec78fd02b208a221d7e1c648 | 395 | py | Python | recruiting/migrations/0002_adminresume_bottom_msg.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | 1 | 2018-04-10T11:47:16.000Z | 2018-04-10T11:47:16.000Z | recruiting/migrations/0002_adminresume_bottom_msg.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | null | null | null | recruiting/migrations/0002_adminresume_bottom_msg.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.1 on 2018-02-15 22:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``bottom_msg`` TextField to the ``adminresume`` model."""

    dependencies = [
        ('recruiting', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='adminresume',
            name='bottom_msg',
            # Default is a Korean "thank you for applying" message.
            field=models.TextField(default='지원해주셔서 감사합니다.'),
        ),
    ]
| 20.789474 | 60 | 0.597468 | 41 | 395 | 5.682927 | 0.853659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067138 | 0.283544 | 395 | 18 | 61 | 21.944444 | 0.756184 | 0.113924 | 0 | 0 | 1 | 0 | 0.16092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec286e80af59f4eccb8146eeff506741d8b1caa5 | 1,162 | py | Python | sdk/core/azure-common/azure/common/cloud.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/core/azure-common/azure/common/cloud.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/core/azure-common/azure/common/cloud.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
def get_cli_active_cloud():
    """Return a CLI active cloud.

    *Disclaimer*: This method is not working for azure-cli-core>=2.21.0 (released in March 2021).

    .. versionadded:: 1.1.6
    .. deprecated:: 1.1.28

    :return: A CLI Cloud
    :rtype: azure.cli.core.cloud.Cloud
    :raises: ImportError if azure-cli-core package is not available
    """
    try:
        from azure.cli.core.cloud import get_active_cloud
    except ImportError:
        # azure-cli-core's public API went away in 2.21.0; explain instead
        # of surfacing the raw import failure.
        message = (
            "The public API of azure-cli-core has been deprecated starting 2.21.0, "
            "and this method no longer can return a cloud instance. "
            "If you want to use this method, you need to install 'azure-cli-core<2.21.0'. "
            "You may corrupt data if you use current CLI and old azure-cli-core."
        )
        raise ImportError(message)
    return get_active_cloud()
| 37.483871 | 97 | 0.575731 | 150 | 1,162 | 4.413333 | 0.52 | 0.084592 | 0.126888 | 0.039275 | 0.048338 | 0.048338 | 0 | 0 | 0 | 0 | 0 | 0.025219 | 0.215146 | 1,162 | 30 | 98 | 38.733333 | 0.700658 | 0.507745 | 0 | 0 | 0 | 0.090909 | 0.50469 | 0.045028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | true | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec291a6b6790452f3e97a82363e5973cfb65a86a | 3,473 | py | Python | vega/app.py | YEZHIAN1996/pythonstudy | a8774ef97266e8ab0289484ef190d2ee55f1d37c | [
"Apache-2.0"
] | 1 | 2022-02-22T01:54:56.000Z | 2022-02-22T01:54:56.000Z | vega/app.py | YEZHIAN1996/pythonstudy | a8774ef97266e8ab0289484ef190d2ee55f1d37c | [
"Apache-2.0"
] | null | null | null | vega/app.py | YEZHIAN1996/pythonstudy | a8774ef97266e8ab0289484ef190d2ee55f1d37c | [
"Apache-2.0"
] | null | null | null | from colorama import Fore, Style
from getpass import getpass
from service.user_service import UserService
from service.news_service import NewsService
import mysql
import os
import sys
import time
# Service singletons backing the console UI below.
__user_service = UserService()
__news_service = NewsService()

# Top-level console loop: login / exit menu.
while 1:
    os.system('clear')
    print(Fore.LIGHTBLUE_EX, "\n\t================")
    print(Fore.LIGHTBLUE_EX, "\n\t欢迎使用新闻管理系统")
    print(Fore.LIGHTBLUE_EX, "\n\t================")
    print(Fore.LIGHTBLUE_EX, "\n\t1.登陆系统")
    print(Fore.LIGHTBLUE_EX, "\n\t2.退出系统")
    print(Style.RESET_ALL)
    opt = input("\n\t输入操作编号:")
    if opt=="1":
        username=input("\n\t用户名:")
        password=input("\n\t密码:")
        result = __user_service.login(username, password)
        # Login succeeded
        if result:
            role = __user_service.search_user_role(username)
            os.system("clear")
            # Role-specific main menu loop (roles: news editor / admin).
            while 1:
                if role=="新闻编辑":
                    pass
                elif role=="管理员":
                    print(Fore.LIGHTGREEN_EX, "\n\t1.新闻管理")
                    print(Fore.LIGHTGREEN_EX, "\n\t2.用户管理")
                    print(Fore.LIGHTGREEN_EX, "\n\tback.退出登陆")
                    print(Fore.LIGHTGREEN_EX, "\n\texit.退出系统")
                    print(Style.RESET_ALL)
                    opt = input("\n\t输入操作编号:")
                    if opt=="1":
                        os.system("clear")
                        print(Fore.LIGHTGREEN_EX, "\n\t1.审批新闻")
                        print(Fore.LIGHTGREEN_EX, "\n\t2.删除新闻")
                        print(Fore.LIGHTGREEN_EX, "\n\tback.返回上一层")
                        print(Style.RESET_ALL)
                        opt = input("\n\t输入操作编号:")
                        if opt=="1":
                            # Paginated listing of news items pending review.
                            page = 1
                            while 1:
                                os.system("clear")
                                count_page = __news_service.search_unreview_count_page()
                                result = __news_service.search_unreview_list(page)
                                for index in range(len(result)):
                                    one = result[index]
                                    print(Fore.LIGHTBLUE_EX, "\n\t%d\t%s\t%s\t%s" % (index+1, one[1], one[2], one[3]))
                                print(Fore.LIGHTBLUE_EX, "\n\t------------------")
                                print(Fore.LIGHTBLUE_EX, "\n\t%d/%d" % (page, count_page))
                                print(Fore.LIGHTBLUE_EX, "\n\t------------------")
                                print(Fore.LIGHTRED_EX, "\n\tback.返回上一层")
                                print(Fore.LIGHTRED_EX, "\n\tprev.上一页")
                                print(Fore.LIGHTRED_EX, "\n\tnext.下一页")
                                print(Style.RESET_ALL)
                                opt = input("\n\t输入操作编号:")
                                if opt=="back":
                                    break
                                elif opt=="prev" and page>1:
                                    page-=1
                                elif opt=="next" and page<count_page:
                                    page+=1
                        elif opt=="2":
                            pass
                    elif opt=="back":
                        # Log out: leave the role menu and return to login.
                        break
                    elif opt=="exit":
                        sys.exit(0)
        else:
            # Login failed; pause 3s before redisplaying the login menu.
            print("\n\t登陆失败(3s自动返回)")
            time.sleep(3)
    elif opt=="2":
        sys.exit(0)
ec2a05caf70b6d5b3ac7425d6fe6c408d8020aaa | 2,362 | py | Python | xbox/webapi/api/provider/titlehub/models.py | OpenXbox/xbox-webapi-python | 1a5aeb1b1ce94f38b5dae7f6b59938bc9ec112b2 | [
"MIT"
] | 122 | 2018-03-17T05:20:35.000Z | 2022-03-30T23:30:14.000Z | xbox/webapi/api/provider/titlehub/models.py | OpenXbox/xbox-webapi-python | 1a5aeb1b1ce94f38b5dae7f6b59938bc9ec112b2 | [
"MIT"
] | 62 | 2018-03-27T14:17:11.000Z | 2022-03-30T16:36:03.000Z | xbox/webapi/api/provider/titlehub/models.py | OpenXbox/xbox-webapi-python | 1a5aeb1b1ce94f38b5dae7f6b59938bc9ec112b2 | [
"MIT"
] | 38 | 2018-05-09T19:17:48.000Z | 2022-02-03T06:55:04.000Z | from datetime import datetime
from enum import Enum
from typing import Any, List, Optional
from xbox.webapi.common.models import CamelCaseModel, PascalCaseModel
class TitleFields(str, Enum):
    """String constants naming optional field groups of a title record.

    NOTE(review): presumably the decorations requested from the titlehub
    service -- confirm against the provider client that builds the URLs.
    """

    SERVICE_CONFIG_ID = "scid"
    ACHIEVEMENT = "achievement"
    STATS = "stats"
    GAME_PASS = "gamepass"
    IMAGE = "image"
    DETAIL = "detail"
    FRIENDS_WHO_PLAYED = "friendswhoplayed"
    ALTERNATE_TITLE_ID = "alternateTitleId"
class Achievement(CamelCaseModel):
    """Achievement/gamerscore progress summary for a title."""

    current_achievements: int
    total_achievements: int
    current_gamerscore: int
    total_gamerscore: int
    progress_percentage: float
    source_version: int
class Stats(CamelCaseModel):
    """Stats decoration for a title (only carries the source version)."""

    source_version: int
class GamePass(CamelCaseModel):
    """Game Pass availability flag for a title."""

    is_game_pass: bool
class Image(CamelCaseModel):
    """A title image: its URL plus a type string."""

    url: str
    type: str
class TitleHistory(CamelCaseModel):
    """Per-title play history: last played time and visibility flags."""

    last_time_played: datetime
    visible: bool
    can_hide: bool
class Attribute(CamelCaseModel):
    """A named title attribute, optionally platform-scoped with min/max bounds."""

    applicable_platforms: Optional[List[str]]
    maximum: Optional[int]
    minimum: Optional[int]
    name: str
class Availability(PascalCaseModel):
    """Store availability entry.

    Note: the only model here extending PascalCaseModel -- this part of
    the payload uses PascalCase keys.
    """

    actions: List[str]
    availability_id: str
    platforms: List[str]
    sku_id: str
class Detail(CamelCaseModel):
    """Detailed catalog information for a title (description, publisher,
    availabilities, age rating, etc.)."""

    attributes: List[Attribute]
    availabilities: List[Availability]
    capabilities: List[str]
    description: str
    developer_name: str
    genres: Optional[List[str]]
    publisher_name: str
    min_age: int
    release_date: Optional[datetime]
    short_description: Optional[str]
    vui_display_name: Optional[str]
    xbox_live_gold_required: bool
class Title(CamelCaseModel):
    """A title entry from titlehub.

    The trailing optional members (achievement, stats, game_pass, images,
    title_history, detail, ...) are the decorations that are only present
    when the corresponding TitleFields value was requested.
    """

    title_id: str
    pfn: Optional[str]
    bing_id: Optional[str]
    service_config_id: Optional[str]
    windows_phone_product_id: Optional[str]
    name: str
    type: str
    devices: List[str]
    display_image: str
    media_item_type: str
    modern_title_id: Optional[str]
    is_bundle: bool
    achievement: Optional[Achievement]
    stats: Optional[Stats]
    game_pass: Optional[GamePass]
    images: Optional[List[Image]]
    title_history: Optional[TitleHistory]
    detail: Optional[Detail]
    # Untyped payloads -- shape not shown here; left as Any upstream.
    friends_who_played: Any
    alternate_title_ids: Any
    content_boards: Any
    xbox_live_tier: Optional[str]
    is_streamable: Optional[bool]
class TitleHubResponse(CamelCaseModel):
    """Top-level titlehub response: an optional xuid plus the title list."""

    xuid: Optional[str]
    titles: List[Title]
| 22.495238 | 69 | 0.723539 | 274 | 2,362 | 6.036496 | 0.368613 | 0.059855 | 0.031439 | 0.026602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.198561 | 2,362 | 104 | 70 | 22.711538 | 0.873745 | 0 | 0 | 0.074074 | 0 | 0 | 0.030059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.049383 | 0.049383 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
ec2bf9162b78137b2733f5b84b3dbb7d05de0458 | 429 | py | Python | other/_first.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
] | 1 | 2019-10-09T13:40:13.000Z | 2019-10-09T13:40:13.000Z | other/_first.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
] | null | null | null | other/_first.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
print("hello world")
# pthon 中的关键字
import keyword
print(keyword.kwlist)
# 注释
'''
一行注释
二行注释
'''
"""
三行注释
四行注释
"""
print("多行注释")
# 单行语句换行
print("you"
"are"
"my"
"friend")
# 这种运算则需要使用 \ 符
count = 23+ \
12+ \
34
print(count)
# 字符串 复用
print("祖国"*2)
# print() 不换行
print("祖国",end="")
print("70")
# 变量赋值
a = b = c = 20
b = 30
print(a,b,c)
a,b,c = 32,32,44
print(a,b,c) | 9.75 | 22 | 0.550117 | 70 | 429 | 3.371429 | 0.657143 | 0.033898 | 0.050847 | 0.067797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05988 | 0.221445 | 429 | 44 | 23 | 9.75 | 0.646707 | 0.226107 | 0 | 0.1 | 0 | 0 | 0.12069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
ec363ba66c10f615ccd1efaa4439368d1082bab4 | 1,257 | py | Python | backend/portfolios/factories/certification_factory.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | 1 | 2022-01-28T18:20:19.000Z | 2022-01-28T18:20:19.000Z | backend/portfolios/factories/certification_factory.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | null | null | null | backend/portfolios/factories/certification_factory.py | NumanIbnMazid/numanibnmazid.com | 905e3afab285316d88bafa30dc080dfbb0611731 | [
"MIT"
] | null | null | null | import factory
from portfolios.models import Certification
from users.factories.user_factory import UserFactory
from factory.django import DjangoModelFactory
from utils.helpers import create_factory_data
class CertificationFactory(DjangoModelFactory):
    """Factory producing Certification model instances with faked data."""

    class Meta:
        model = Certification

    user = factory.SubFactory(UserFactory)
    name = factory.Faker('word')
    organization = factory.Faker('word')
    address = factory.Faker('word')
    # Fix: Faker's date_time_between takes start_date/end_date; the original
    # passed unrecognized issue_date/expiration_date keyword arguments.
    issue_date = factory.Faker('date_time_between', start_date='-1y', end_date='now')
    expiration_date = factory.Faker('date_time_between', start_date='-1y', end_date='now')
    # Fix: must be evaluated lazily per instance.  At class-definition time
    # `expiration_date` is a Faker declaration object -- never None -- so the
    # original `True if expiration_date is None else False` was always False.
    does_not_expire = factory.LazyAttribute(lambda obj: obj.expiration_date is None)
    # Fix: there is no 'number' Faker provider; 'random_number' exists.
    credential_id = factory.Faker('random_number')
    credential_url = factory.Faker('url')
    description = factory.Faker('text')
def create_certifications_with_factory(
    num_of_data=7, display_name="certification",
    display_name_plural="certifications", delete_old_data=False
):
    """Seed the database with fake Certification rows via CertificationFactory.

    Thin wrapper around ``create_factory_data`` that fixes the factory and
    model; all other options pass straight through.
    """
    return create_factory_data(
        model=Certification,
        factory=CertificationFactory,
        num_of_data=num_of_data,
        display_name=display_name,
        display_name_plural=display_name_plural,
        delete_old_data=delete_old_data,
    )
| 33.972973 | 97 | 0.752586 | 150 | 1,257 | 6.013333 | 0.393333 | 0.10643 | 0.053215 | 0.044346 | 0.13082 | 0.13082 | 0.13082 | 0.13082 | 0.13082 | 0.13082 | 0 | 0.002846 | 0.161496 | 1,257 | 36 | 98 | 34.916667 | 0.852941 | 0 | 0 | 0.066667 | 0 | 0 | 0.077963 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.166667 | 0.033333 | 0.633333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
ec3aa0d3dbdedfe9a32ff3de2e04e3173e137571 | 1,862 | py | Python | src/esri/arcpy/SamplePythonToolbox.py | TrieuLe0801/Arcgis-python | 7b6c3d33bd7b9a6bea495f288a0d8015ffcc48b0 | [
"Apache-2.0"
] | null | null | null | src/esri/arcpy/SamplePythonToolbox.py | TrieuLe0801/Arcgis-python | 7b6c3d33bd7b9a6bea495f288a0d8015ffcc48b0 | [
"Apache-2.0"
] | null | null | null | src/esri/arcpy/SamplePythonToolbox.py | TrieuLe0801/Arcgis-python | 7b6c3d33bd7b9a6bea495f288a0d8015ffcc48b0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import arcpy
import os
import foo
class Toolbox(object):
    """Sample ArcGIS Python toolbox."""

    def __init__(self):
        """Define the toolbox (its name is taken from the .pyt file name)."""
        self.alias = "SamplePythonToolbox"
        self.label = "Sample Python Toolbox"
        # Tool classes exposed by this toolbox.
        self.tools = [SampleTool]
class SampleTool(object):
    """Sample geoprocessing tool (the tool name is the class name)."""

    def __init__(self):
        self.label = "Sample Tool"
        self.description = ""
        self.canRunInBackground = False

    def getParameterInfo(self):
        """Define parameter definitions."""
        msg_param = arcpy.Parameter(
            displayName='Msg',
            name='msg',
            datatype='GPString',
            parameterType='Derived',
            direction='Output',
        )
        return [msg_param]

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify parameter values and properties before internal
        validation; called whenever a parameter has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each
        tool parameter; called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        # NOTE(review): relies on `import foo` exposing `foo.bar` -- confirm
        # foo's package __init__ imports its `bar` submodule.
        result = foo.bar.hello()
        messages.AddMessage(f"{result}, welcome to the sample tool")
        parameters[0].value = result
        return
ec40bbd517e3c02e18567a2d5b21aca1aa15f393 | 896 | py | Python | labellab-flask/api/routes/images.py | AkMo3/LabelLab | 1f16905bba1a332035d082cfc6337b8551478e05 | [
"Apache-2.0"
] | null | null | null | labellab-flask/api/routes/images.py | AkMo3/LabelLab | 1f16905bba1a332035d082cfc6337b8551478e05 | [
"Apache-2.0"
] | null | null | null | labellab-flask/api/routes/images.py | AkMo3/LabelLab | 1f16905bba1a332035d082cfc6337b8551478e05 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
from api.controllers import imagescontroller
imagesprint = Blueprint("images", __name__)

# (URL rule, imageController key, HTTP methods) for every image endpoint.
_image_routes = [
    ("/image/create/<int:project_id>", "save_image", ["POST"]),
    ("/image/get/<int:project_id>", "get_all_images", ["GET"]),
    ("/image/get_image/<int:project_id>/<int:image_id>", "get_image", ["GET"]),
    ("/image/delete/<int:project_id>", "delete_images", ["POST"]),
    ("/image/update/<int:image_id>", "update_labels", ["PUT"]),
]

for _rule, _controller_key, _methods in _image_routes:
    imagesprint.add_url_rule(
        _rule,
        view_func=imagescontroller.imageController[_controller_key],
        methods=_methods,
    )
ec4378bb4ce358808d41359aa59acc20ceb4f275 | 278 | py | Python | PyLaGriT/setup.py | millerta/LaGriT-1 | 511ef22f3b7e839c7e0484604cd7f6a2278ae6b9 | [
"CNRI-Python"
] | 1 | 2019-11-01T18:12:10.000Z | 2019-11-01T18:12:10.000Z | PyLaGriT/setup.py | millerta/LaGriT-1 | 511ef22f3b7e839c7e0484604cd7f6a2278ae6b9 | [
"CNRI-Python"
] | null | null | null | PyLaGriT/setup.py | millerta/LaGriT-1 | 511ef22f3b7e839c7e0484604cd7f6a2278ae6b9 | [
"CNRI-Python"
] | 1 | 2019-09-29T08:35:55.000Z | 2019-09-29T08:35:55.000Z | from distutils.core import setup
import os
# Distribution metadata for the pylagrit package.
setup(
    name='pylagrit',
    version='1.0.0',
    description='Python interface for LaGriT',
    author='Dylan R. Harp',
    author_email='dharp@lanl.gov',
    url='lagrit.lanl.gov',
    license='LGPL',
    packages=['pylagrit', 'pylagrit.pexpect'],
)
| 18.533333 | 43 | 0.705036 | 38 | 278 | 5.131579 | 0.763158 | 0.071795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 0.125899 | 278 | 14 | 44 | 19.857143 | 0.790123 | 0 | 0 | 0 | 0 | 0 | 0.395683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec47c391472807dff9a5e36963375db6a8649611 | 4,177 | py | Python | scripts/bib_poli_poi_manager.py | sdcioc/bibpoli | fba86ff4e949c92aa6ef93a629b5e73fcf97a581 | [
"MIT"
] | null | null | null | scripts/bib_poli_poi_manager.py | sdcioc/bibpoli | fba86ff4e949c92aa6ef93a629b5e73fcf97a581 | [
"MIT"
] | null | null | null | scripts/bib_poli_poi_manager.py | sdcioc/bibpoli | fba86ff4e949c92aa6ef93a629b5e73fcf97a581 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# Author: Ciocirlan Stefan-Dan 19/09/2018
import json
import copy
import rospy
# Convert a point-of-interest name into its full ROS parameter path.
def convert_POIName_RosparamName(poi_name):
    """Return *poi_name* as a full ROS parameter path under the POI submap.

    Names already carrying the '/mmap/poi/submap_0/' prefix come back
    unchanged.
    """
    prefix = '/mmap/poi/submap_0/'
    if poi_name.startswith(prefix):
        return poi_name
    return prefix + poi_name
# Class that loads point-of-interest information from a file and publishes
# it as ROS parameters so the information can also be used by other
# modules (these files are in JSON format).
class POILoader:
    """Loads point-of-interest (POI) definitions from a JSON file and
    publishes them as ROS parameters."""

    def __init__(self, filename):
        # Read the JSON file holding the room's points of interest.
        self.filename = filename
        with open(self.filename) as fd:
            self.points = json.loads(fd.read())

    def set_POIs(self):
        """Publish every loaded POI as a ROS parameter."""
        for point in self.points:
            path = convert_POIName_RosparamName(point['poi_name'])
            # Parameter layout: submap, POI name, x, y, w (could be
            # extended to include z).
            poi_data = ['submap_0', point['poi_name'],
                        point['x'], point['y'], point['w']]
            rospy.set_param(path, poi_data)
# Class that saves points of interest from ROS parameters back to a file,
# or creates a file with additional information about the points of
# interest (these files are in JSON format).
class POISaver:
    """Persists points of interest, read back from ROS parameters, to a
    JSON file -- either with full pose data or names only.

    Refactor: the parameter-name discovery was duplicated in save_POIs and
    save_info, and the per-entry ``copy.deepcopy`` copied dicts that were
    already freshly built each iteration; both are cleaned up here.
    """

    def __init__(self, filename):
        # JSON file that the save methods will write.
        self.filename = filename
        self.points = []

    def _poi_names(self):
        """Return the POI names found under the ROS POI parameter prefix."""
        prefix = '/mmap/poi/submap_0/'
        params = rospy.get_param_names()
        return [p[len(prefix):] for p in params if p.startswith(prefix)]

    def _write_points(self):
        """Dump the accumulated self.points to self.filename as JSON."""
        with open(self.filename, 'w') as fd:
            fd.write(json.dumps(self.points))

    def save_POIs(self):
        """Save every POI (name and x/y/w pose) to the JSON file."""
        for poi_name in self._poi_names():
            path = convert_POIName_RosparamName(poi_name)
            rosparam_data = rospy.get_param(path)
            # rosparam layout: [submap, name, x, y, w] (see POILoader).
            self.points.append({
                'poi_name': poi_name,
                'x': rosparam_data[2],
                'y': rosparam_data[3],
                'w': rosparam_data[4],
            })
        self._write_points()

    def save_info(self):
        """Save only the POI names (additional info) to the JSON file."""
        for poi_name in self._poi_names():
            rospy.loginfo("poi_name {}".format(poi_name))
            self.points.append({'poi_name': poi_name})
        self._write_points()
# Quick functional test of the classes above.
def test_POI_classes():
    """Manual smoke test: dump the current POIs, then reload a set."""
    saver = POISaver("/home/pal/poiinfo_saver.json")
    saver.save_POIs()
    loader = POILoader("/home/pal/poiinfo_loader.json")
    loader.set_POIs()
if __name__ == '__main__':
    # Initialize the node, then read the operation mode and filename from
    # the given ROS parameters.  (Python 2 syntax: print statements below.)
    rospy.init_node('bib_poli_poi_manager', anonymous=True);
    operation_type = rospy.get_param('~operation_type', 'load')
    filename = rospy.get_param('~filename', '/home/pal/default_pois.json')
    print "[INFO][POI_MANAGER] operation type : {} ; filename : {}".format(operation_type, filename);
    # "load": read POIs from a file into ROS parameters.
    if (operation_type == "load"):
        poiLoader= POILoader(filename);
        poiLoader.set_POIs();
    # "save": write the POIs held in ROS parameters to a file.
    elif (operation_type == "save"):
        poiSaver = POISaver(filename);
        poiSaver.save_POIs();
    # "info": write additional POI information (names only) to a file.
    elif (operation_type == "info"):
        poiSaver = POISaver(filename);
        poiSaver.save_info();
    else:
        print "[ERROR][POI_MANAGER] wrong operation type please use load or save";
ec4888f3f9bb05215e75e4d095dd646b049f0424 | 3,976 | py | Python | pysnmp-with-texts/CISCO-IETF-SCTP-EXT-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-IETF-SCTP-EXT-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-IETF-SCTP-EXT-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-IETF-SCTP-EXT-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-IETF-SCTP-EXT-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:01:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: this file is auto-generated by pysmi from the ASN.1 MIB definition.
# Do not edit by hand; regenerate from the source MIB instead. `mibBuilder`
# is injected into the module's globals by the pysnmp MIB loader at exec
# time, which is why there are no explicit import statements for it.
#
# Pull in base ASN.1 types, constraint helpers, and SMI/TC symbols from the
# already-loaded foundation MIB modules.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, AgentCapabilities, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "AgentCapabilities", "ModuleCompliance")
iso, ObjectIdentity, TimeTicks, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, Counter32, Unsigned32, IpAddress, Integer32, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ObjectIdentity", "TimeTicks", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "Counter32", "Unsigned32", "IpAddress", "Integer32", "ModuleIdentity", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity: OID 1.3.6.1.4.1.9.7.220 under Cisco's enterprise arc.
ciscoSctpExtCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 220))
ciscoSctpExtCapability.setRevisions(('2002-01-21 00:00', '2001-11-30 00:00',))
# Descriptive text setters exist only on pysmi >= 4.4.x builders, hence the
# version guards; loadTexts controls whether description strings are kept.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoSctpExtCapability.setRevisionsDescriptions(('Updated capabilities to support additional objects and a new notification.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoSctpExtCapability.setLastUpdated('200201210000Z')
if mibBuilder.loadTexts: ciscoSctpExtCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoSctpExtCapability.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-sctp@cisco.com')
if mibBuilder.loadTexts: ciscoSctpExtCapability.setDescription('Agent capabilities for the CISCO-IETF-SCTP-EXT-MIB.')
# Agent-capability statement for IOS 12.2(4)MB1.
ciscoSctpExtCapabilityV12R024MB1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 220, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoSctpExtCapabilityV12R024MB1 = ciscoSctpExtCapabilityV12R024MB1.setProductRelease('Cisco IOS 12.2(4)MB1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoSctpExtCapabilityV12R024MB1 = ciscoSctpExtCapabilityV12R024MB1.setStatus('current')
if mibBuilder.loadTexts: ciscoSctpExtCapabilityV12R024MB1.setDescription('IOS 12.2(4)MB1 Cisco CISCO-IETF-SCTP-EXT-MIB.my User Agent MIB capabilities.')
# Agent-capability statement for IOS 12.2(4)MB3.
ciscoSctpExtCapabilityV12R0204MB3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 220, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoSctpExtCapabilityV12R0204MB3 = ciscoSctpExtCapabilityV12R0204MB3.setProductRelease('Cisco IOS 12.2(4)MB3')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoSctpExtCapabilityV12R0204MB3 = ciscoSctpExtCapabilityV12R0204MB3.setStatus('current')
if mibBuilder.loadTexts: ciscoSctpExtCapabilityV12R0204MB3.setDescription('IOS 12.2(4)MB3 Cisco CISCO-IETF-SCTP-EXT-MIB.my User Agent MIB capabilities.')
# Export the module's public symbols back into the MIB builder registry.
mibBuilder.exportSymbols("CISCO-IETF-SCTP-EXT-CAPABILITY", ciscoSctpExtCapability=ciscoSctpExtCapability, PYSNMP_MODULE_ID=ciscoSctpExtCapability, ciscoSctpExtCapabilityV12R024MB1=ciscoSctpExtCapabilityV12R024MB1, ciscoSctpExtCapabilityV12R0204MB3=ciscoSctpExtCapabilityV12R0204MB3)
| 107.459459 | 477 | 0.791499 | 421 | 3,976 | 7.470309 | 0.382423 | 0.006359 | 0.046741 | 0.030525 | 0.446423 | 0.311924 | 0.293482 | 0.293482 | 0.293482 | 0.289984 | 0 | 0.084476 | 0.082998 | 3,976 | 36 | 478 | 110.444444 | 0.778113 | 0.089034 | 0 | 0.178571 | 0 | 0.107143 | 0.307777 | 0.041517 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec4ac519fa41cce9811a254aee52f6208f542e4f | 4,699 | py | Python | src/project/video_labels.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | src/project/video_labels.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | src/project/video_labels.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | from .track_labels import TrackLabels
class VideoLabels:
    """Store the labels associated with a single video file.

    Labels are organised by "identity"; each identity may have several
    behaviors labeled. An (identity, behavior) pair uniquely identifies a
    TrackLabels object holding one label per video frame, each of which is
    TrackLabels.Label.NONE, TrackLabels.Label.BEHAVIOR, or
    TrackLabels.Label.NOT_BEHAVIOR.

    TODO stop using str for identities in method parameters, switch to int
    """

    def __init__(self, filename, num_frames):
        self._filename = filename
        self._num_frames = num_frames
        # nested mapping: {identity(str): {behavior(str): TrackLabels}}
        self._identity_labels = {}

    @property
    def filename(self):
        """Name of the video file this object represents."""
        return self._filename

    @property
    def num_frames(self):
        """Number of frames in the video."""
        return self._num_frames

    def get_track_labels(self, identity, behavior):
        """Return the TrackLabels for an identity & behavior, creating it on demand.

        :param identity: string representation of identity
        :param behavior: string behavior label
        :return: TrackLabels object for this identity and behavior
        :raises: ValueError if identity is not a valid string

        # TODO handle integer identity
        """
        # identities must be strings so the structure serializes cleanly
        if not isinstance(identity, str):
            raise ValueError("Identity must be a string")

        behaviors = self._identity_labels.setdefault(identity, {})
        if behavior not in behaviors:
            # first request for this (identity, behavior): start empty
            behaviors[behavior] = TrackLabels(self._num_frames)
        return behaviors[behavior]

    def counts(self, behavior):
        """Count labeled frames and bouts per identity for one behavior.

        :param behavior: behavior to get label counts for
        :return: list of tuples of the form
            (
                identity,
                (behavior frame count, not behavior frame count),
                (behavior bout count, not behavior bout count)
            )
        """
        results = []
        for identity, behaviors in self._identity_labels.items():
            if behavior in behaviors:
                c = behaviors[behavior].counts
                results.append((identity, c[0], c[1]))
        return results

    def as_dict(self):
        """Return a dict representation of self.

        Useful for JSON serialization and for saving to disk or caching in
        memory without keeping the full numpy label arrays around when the
        user switches to a different video.

        example return value:

        {
            "file": "filename.avi",
            "num_frames": 100,
            "labels": {
                "identity": {
                    "behavior": [
                        {
                            "start": 25,
                            "end": 50,
                            "present": True
                        }
                    ]
                }
            }
        }
        """
        labels = {}
        for identity, behaviors in self._identity_labels.items():
            per_behavior = {}
            for behavior, track in behaviors.items():
                blocks = track.get_blocks()
                # skip behaviors that have no labeled blocks at all
                if len(blocks):
                    per_behavior[behavior] = blocks
            labels[identity] = per_behavior

        return {
            'file': self._filename,
            'num_frames': self._num_frames,
            'labels': labels
        }

    @classmethod
    def load(cls, video_label_dict):
        """Build a VideoLabels from a dict produced by as_dict()."""
        num_frames = video_label_dict['num_frames']
        obj = cls(video_label_dict['file'], num_frames)
        for identity, behaviors in video_label_dict['labels'].items():
            obj._identity_labels[identity] = {
                behavior: TrackLabels.load(num_frames, blocks)
                for behavior, blocks in behaviors.items()
            }
        return obj
| 35.067164 | 80 | 0.593318 | 502 | 4,699 | 5.406375 | 0.272908 | 0.087693 | 0.079587 | 0.07664 | 0.245394 | 0.10538 | 0.028003 | 0 | 0 | 0 | 0 | 0.002877 | 0.334326 | 4,699 | 133 | 81 | 35.330827 | 0.86477 | 0.411364 | 0 | 0.075472 | 0 | 0 | 0.037179 | 0 | 0 | 0 | 0 | 0.015038 | 0 | 1 | 0.132075 | false | 0 | 0.018868 | 0.018868 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec4c8db2c51a568d0df46d4f28891c13344f4017 | 443 | py | Python | PersonalWebApp/ClientsManagement/migrations/0018_projectsrequest_project_request_slug.py | CiganOliviu/personal_website | abedf67efc2e7e212c32815f645d3b3709f9f177 | [
"MIT"
] | 1 | 2021-04-02T16:45:52.000Z | 2021-04-02T16:45:52.000Z | ClientsManagementSystem/ClientsManagement/migrations/0018_projectsrequest_project_request_slug.py | CiganOliviu/ClientsManagementSystem | 6271dd007e549fd0369c4df7c017980b915a91b5 | [
"MIT"
] | null | null | null | ClientsManagementSystem/ClientsManagement/migrations/0018_projectsrequest_project_request_slug.py | CiganOliviu/ClientsManagementSystem | 6271dd007e549fd0369c4df7c017980b915a91b5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-11-09 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a slug field to the
    # ProjectsRequest model so project requests can be referenced by a
    # URL-friendly identifier.

    dependencies = [
        ('ClientsManagement', '0017_auto_20201108_1950'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectsrequest',
            name='project_request_slug',
            # default='' back-fills existing rows; NOTE(review): unique=True
            # combined with an empty default assumes at most one existing row
            # at migration time -- confirm before running on a populated DB.
            field=models.SlugField(default='', max_length=200, unique=True),
        ),
    ]
| 23.315789 | 76 | 0.632054 | 47 | 443 | 5.808511 | 0.87234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10303 | 0.255079 | 443 | 18 | 77 | 24.611111 | 0.724242 | 0.10158 | 0 | 0 | 1 | 0 | 0.189394 | 0.058081 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec4e5fa4d1849a441dee901738f4a4985d58a3c1 | 989 | py | Python | setup.py | fengsp/lookup | c10bb571809d4ead095ce17f822a30f8fde4eaef | [
"BSD-3-Clause"
] | 9 | 2015-04-09T21:43:29.000Z | 2022-02-22T19:03:34.000Z | setup.py | 3Demonica/lookup | c10bb571809d4ead095ce17f822a30f8fde4eaef | [
"BSD-3-Clause"
] | 1 | 2015-11-11T16:24:57.000Z | 2015-11-19T06:08:42.000Z | setup.py | 3Demonica/lookup | c10bb571809d4ead095ce17f822a30f8fde4eaef | [
"BSD-3-Clause"
] | 4 | 2015-01-05T11:49:50.000Z | 2022-02-22T19:03:35.000Z | """
lookup
------
Look up words via the command line.
Links
`````
* `documentation <https://github.com/fengsp/lookup>`_
* `development version
<http://github.com/fengsp/lookup/zipball/master#egg=lookup-dev>`_
"""
from setuptools import setup

# Package metadata for the `lookup` command-line dictionary tool.
setup(
    name='lookup',
    version='0.2',
    url='https://github.com/fengsp/lookup',
    license='BSD',
    author='Shipeng Feng',
    author_email='fsp261@gmail.com',
    description='Dictionary via the command line',
    # The module docstring at the top of this file doubles as the long
    # description shown on PyPI, so it is runtime data, not just docs.
    long_description=__doc__,
    py_modules=['lookup'],
    zip_safe=False,
    platforms='any',
    # Installing creates a `lookup` console script that invokes
    # lookup.command_line().
    entry_points={
        'console_scripts': [
            'lookup = lookup:command_line',
        ]
    },
    install_requires=[
        'pyquery',
        'requests'
    ],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ]
)
| 21.042553 | 67 | 0.605662 | 100 | 989 | 5.85 | 0.7 | 0.05641 | 0.076923 | 0.107692 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006667 | 0.241658 | 989 | 46 | 68 | 21.5 | 0.773333 | 0.211325 | 0 | 0 | 0 | 0 | 0.42153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.033333 | 0 | 0.033333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec51ee86ac4ba81ac68fefbc2989c1af1ada01a8 | 2,013 | py | Python | sdk/python/pulumi_aws_native/codestar/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/codestar/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/codestar/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GitHubRepositoryCodeArgs',
'GitHubRepositoryS3Args',
]
@pulumi.input_type
class GitHubRepositoryCodeArgs:
def __init__(__self__, *,
s3: pulumi.Input['GitHubRepositoryS3Args']):
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['GitHubRepositoryS3Args']:
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['GitHubRepositoryS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class GitHubRepositoryS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
object_version: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
if object_version is not None:
pulumi.set(__self__, "object_version", object_version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="objectVersion")
def object_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "object_version")
@object_version.setter
def object_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_version", value)
| 28.352113 | 80 | 0.648286 | 234 | 2,013 | 5.371795 | 0.273504 | 0.122514 | 0.100239 | 0.060461 | 0.302307 | 0.240255 | 0.218775 | 0.11615 | 0 | 0 | 0 | 0.008963 | 0.224044 | 2,013 | 70 | 81 | 28.757143 | 0.795775 | 0.07998 | 0 | 0.211538 | 1 | 0 | 0.108342 | 0.060672 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.096154 | 0.076923 | 0.403846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ec53d18772cf322357771f2b6a79f3b1cfae651a | 1,970 | py | Python | f5/bigip/__init__.py | yuanfm/f5-common-python | 7d6588d56323e7065fec929ff00020081f9d6ade | [
"Apache-2.0"
] | null | null | null | f5/bigip/__init__.py | yuanfm/f5-common-python | 7d6588d56323e7065fec929ff00020081f9d6ade | [
"Apache-2.0"
] | null | null | null | f5/bigip/__init__.py | yuanfm/f5-common-python | 7d6588d56323e7065fec929ff00020081f9d6ade | [
"Apache-2.0"
] | null | null | null | """Classes and functions for configuring BIG-IP"""
# Copyright 2014 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5.bigip.cm import Cm
from f5.bigip.ltm import Ltm
from f5.bigip.net import Net
from f5.bigip.resource import OrganizingCollection
from f5.bigip.sys import Sys
from icontrol.session import iControlRESTSession
LOG = logging.getLogger(__name__)
allowed_lazy_attributes = [Cm, Ltm, Net, Sys]
class BigIP(OrganizingCollection):
"""An interface to a single BIG-IP"""
def __init__(self, hostname, username, password, **kwargs):
timeout = kwargs.pop('timeout', 30)
loglevel = kwargs.pop('loglevel', logging.WARNING)
allowed_lazy_attrs = kwargs.pop('allowed_lazy_attributes',
allowed_lazy_attributes)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
# _meta_data variable values
iCRS = iControlRESTSession(username, password, timeout=timeout,
loglevel=loglevel)
# define _meta_data
self._meta_data = {'allowed_lazy_attributes': allowed_lazy_attrs,
'hostname': hostname,
'uri': 'https://%s/mgmt/tm/' % hostname,
'icr_session': iCRS,
'device_name': None,
'local_ip': None,
'bigip': self}
| 39.4 | 74 | 0.644162 | 236 | 1,970 | 5.254237 | 0.525424 | 0.048387 | 0.044355 | 0.025806 | 0.051613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011127 | 0.270051 | 1,970 | 49 | 75 | 40.204082 | 0.851182 | 0.34264 | 0 | 0 | 0 | 0 | 0.117415 | 0.036249 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.076923 | 0.269231 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
ec572a85699eea732946d1fcc2724486847f42b4 | 1,634 | py | Python | Practicals/gist_multiplication_table-tutorial.py | dev-bhargav/Python3-for-dummies | 8be093ada6de2070d8c14346a0c15d78074b79d7 | [
"MIT"
] | null | null | null | Practicals/gist_multiplication_table-tutorial.py | dev-bhargav/Python3-for-dummies | 8be093ada6de2070d8c14346a0c15d78074b79d7 | [
"MIT"
] | null | null | null | Practicals/gist_multiplication_table-tutorial.py | dev-bhargav/Python3-for-dummies | 8be093ada6de2070d8c14346a0c15d78074b79d7 | [
"MIT"
] | null | null | null | import random as RND
gen_row = lambda: [ RND.randint(10, 99) for c in range(3) ]
# 'data' is a nested list comprised of five rows and three columns
data = [gen_row() for c in range(5)]
# create some column headers
col_headers = ["col{}".format(i) for i in range(1, 4)]
# print the column headers first
print("{:^8} {:^8} {:^8}".format(*col_headers)
# now print the rows
for row in
aligned_row = "{:^8} {:^8} {:^8}".format(*row)
print(aligned_row)
# i have not used placeholders in my print statement, eg, {0}, {1}, as of python 3.3 i believe,
# you can omit them and the sequence is implicit because i have four sets of curly braces and four arguments
# passed to 'format'
'''
{:^8} says (seems more intuitive if i read it backwards:
{} => create one field
8 => it will have a width of 8
^ => center the data within this field (use '<' for left align and '>' for right align)
every token inside the curly braces is either
(i) an index to the sequence passed to 'format', or
(ii) a format specifier,
depending on whether it is to the left or right of the colon,
{index, format specifier}
'''
# more format specifiers:
# to control number formatting (eg, number of places to the right of the decimal to print for floats) eg,
# the statement below says to print the value for that field with two decimal places
"{:^8.2f}".format(v1)
# but how do you know what order to place these format specifiers?
# ie, why not
"{:.2f^8}" # wrong
# the python docs publish a 'general form for a standard format specifier:
# http://docs.python.org/2.6/library/string.html#formatstrings
| 34.765957 | 108 | 0.684211 | 276 | 1,634 | 4.028986 | 0.496377 | 0.007194 | 0.010791 | 0.019784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021622 | 0.207466 | 1,634 | 46 | 109 | 35.521739 | 0.837066 | 0 | 0 | 0 | 0 | 0 | 0.149457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.1 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b4d51076b403049aba5f4d3863e2292c59d0be7 | 12,524 | py | Python | cogs/fun.py | Skullknight011/Cyanmaton | 76da2b6944666bab32630c96108b61b511acb6eb | [
"CC0-1.0"
] | 1 | 2021-04-01T13:42:34.000Z | 2021-04-01T13:42:34.000Z | cogs/fun.py | Skullknight011/Cyanmaton | 76da2b6944666bab32630c96108b61b511acb6eb | [
"CC0-1.0"
] | null | null | null | cogs/fun.py | Skullknight011/Cyanmaton | 76da2b6944666bab32630c96108b61b511acb6eb | [
"CC0-1.0"
] | 1 | 2021-02-19T14:56:16.000Z | 2021-02-19T14:56:16.000Z | import discord
# Standard library
import asyncio
import datetime
import importlib
import json
import os
import random
import sys
import traceback
from math import *

# Third-party
import aiohttp
import numpy
import requests
from discord.ext import commands

# Local
from cogs.lists import _8ballresponses
from cogs.lists import colour
from cogs.lists import cowboy_sayings
from cogs.lists import lenny_faces
from cogs.lists import roasts
class FunCog(commands.Cog):
    """A cog of fun / novelty commands: AI-generated images, random choices,
    and joke replies."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(name="8ball", aliases=['advice', 'badadvice'])
    async def advice(self, ctx, *, question):
        """Answer *question* with a random magic-8-ball response."""
        e = discord.Embed(color=random.randint(0, 16777216))
        e.add_field(name="Your Question:", value=question, inline=False)
        e.add_field(name="The Answer:", value=random.choice(
            _8ballresponses), inline=False)
        await ctx.send(embed=e)

    @commands.command(name='person', aliases=['thispersondoesnotexist', 'personnotexist', 'this_person_does_not_exist', 'this-person-does-not-exist', 'Person'])
    @commands.guild_only()
    async def person(self, ctx):
        """Post an AI-generated face from thispersondoesnotexist.com."""
        # Bug fix: the original body tested the undefined names `message`
        # and `client` (NameError on every invocation); bots cannot trigger
        # commands under the default checks, so no guard is required.
        embed = discord.Embed(
            title='This Person Doesn\'t Exist', url="https://thispersondoesnotexist.com", colour=random.randint(0, 16777216), type="image")
        # The random query-string value defeats Discord's image cache so each
        # call shows a fresh image.
        embed.set_image(
            url=f"https://thispersondoesnotexist.com/image?cum={numpy.random.rand()}")
        await ctx.send(content=None, embed=embed)

    @commands.command(name='cat', aliases=['thiscatdoesnotexist', 'catnotexist', 'this_cat_does_not_exist', 'this-cat-does-not-exist', 'Cat'])
    @commands.guild_only()
    async def cat(self, ctx):
        """Post an AI-generated cat from thiscatdoesnotexist.com."""
        embed = discord.Embed(
            title='This Cat Doesn\'t Exist', url="https://thiscatdoesnotexist.com/", colour=random.randint(0, 16777216), type="image")
        embed.set_image(
            url=f"https://thiscatdoesnotexist.com/?cum={numpy.random.rand()}")
        await ctx.send(content=None, embed=embed)

    @commands.command(name='startup', aliases=['thisstartupdoesnotexist', 'startupnotexist', 'this_startup_does_not_exist', 'this-startup-does-not-exist', 'Startup'])
    @commands.guild_only()
    async def startup(self, ctx):
        """Post an AI-generated startup from thisstartupdoesnotexist.com."""
        embed = discord.Embed(
            title='This Startup Doesn\'t Exist', url="https://thisstartupdoesnotexist.com", colour=random.randint(0, 16777216), type="image")
        embed.set_image(
            url=f"https://thisstartupdoesnotexist.com/?cum={numpy.random.rand()}")
        await ctx.send(content=None, embed=embed)

    @commands.command(name='horse', aliases=['thishorsedoesnotexist', 'horsenotexist', 'this_horse_does_not_exist', 'this-horse-does-not-exist', 'Horse'])
    @commands.guild_only()
    async def horse(self, ctx):
        """Post an AI-generated horse from thishorsedoesnotexist.com."""
        embed = discord.Embed(
            title='This Horse Doesn\'t Exist', url="https://thishorsedoesnotexist.com", colour=random.randint(0, 16777216), type="image")
        embed.set_image(
            url=f"https://thishorsedoesnotexist.com/?cum={numpy.random.rand()}")
        await ctx.send(content=None, embed=embed)

    @commands.command(name='art', aliases=['thisartdoesnotexist', 'artwork', 'this_art_does_not_exist', 'this-art-does-not-exist', 'Art'])
    @commands.guild_only()
    async def art(self, ctx):
        """Post an AI-generated artwork from thisartworkdoesnotexist.com."""
        embed = discord.Embed(
            title='This Art Doesn\'t Exist', url="https://thisartworkdoesnotexist.com", colour=random.randint(0, 16777216), type="image")
        embed.set_image(
            url=f"https://thisartworkdoesnotexist.com/?cum={numpy.random.rand()}")
        await ctx.send(content=None, embed=embed)

    @commands.command(name='cyan')
    @commands.cooldown(rate=60, per=1, type=commands.BucketType.user)
    async def cyan(self, ctx):
        """Post one of two stored cyan images."""
        embed = discord.Embed(
            title='Cyan', colour=0x00FFFF, type="image")
        embed.set_image(
            url=random.choice(['https://media.discordapp.net/attachments/768298303108415509/801645804250464296/Cyan.png', 'https://media.discordapp.net/attachments/768298303108415509/801648526429913118/cyan.png']))
        await ctx.send(content=None, embed=embed)

    @commands.command(name='googlesearch')
    async def googlesearch(self, ctx, *, googlequestion):
        """Reply with a let-me-google-that-for-you link for the question."""
        await ctx.send(f'https://lmgtfy.app/?q={googlequestion}')

    @commands.command(name='hello')
    async def hello(self, ctx):
        """Say hello."""
        await ctx.send('Why hello!')

    @commands.command(name='cyantext', aliases=['ct'])
    async def cyantext(self, ctx, *, text_to_make_cyan):
        """Echo the text inside a yaml code block so it renders in cyan."""
        await ctx.send(f"""```yaml
{text_to_make_cyan}```""")

    @commands.command(name='epicgamer', aliases=['epic_gamer'])
    async def epicgamer(self, ctx):
        """Rate the caller's epic-gamer-ness with a random percentage."""
        how_epic_gamer = random.randrange(0, 101)
        embed = discord.Embed(title="How Epic Gamer Robot", colour=random.randint(0, 16777216),
                              description=f"You are {how_epic_gamer}% epic gamer :video_game:, but I am 101%")
        embed.set_footer(
            text="I see that you have found me.")
        await ctx.send(embed=embed)

    @commands.command(name='howgenius', aliases=['howsmart', 'genius'])
    async def howgenius(self, ctx):
        """Rate the caller's genius level with a random percentage."""
        how_genius = random.randrange(0, 101)
        embed = discord.Embed(title="How genius you are!", colour=random.randint(0, 16777216),
                              description=f"You are {how_genius}% genius :nerd:, but I am still smarter than you!")
        await ctx.send(embed=embed)

    @commands.command(name='howdumb', aliases=['howstupid', 'dumb'])
    async def howdumb(self, ctx):
        """Rate the caller's dumbness with a random percentage."""
        how_dumb = random.randrange(0, 101)
        embed = discord.Embed(title="How dumb you are!", colour=random.randint(0, 16777216),
                              description=f"You are {how_dumb}% dumb.")
        await ctx.send(embed=embed)

    @commands.command(name='howsimp', aliases=['simp'])
    async def howsimp(self, ctx):
        """Rate the caller's simp level with a random percentage."""
        how_simp = random.randrange(0, 101)
        embed = discord.Embed(title="How simp!", colour=random.randint(0, 16777216),
                              description=f"You are {how_simp}% simp.")
        await ctx.send(embed=embed)

    @commands.command(name='trademark', aliases=['tm', 'r'])
    async def trademark(self, ctx, stufftotm):
        """Append a random trademark symbol (TM or (R)) to the given text."""
        # Bug fix: random.choice takes a single sequence; the original passed
        # two positional arguments, which raised TypeError at runtime.
        await ctx.send(random.choice([f'{stufftotm}™', f'{stufftotm}®']))

    # Renamed from a second `cyantext` method that silently shadowed the
    # real `cyantext` command above (duplicate class attribute names mean
    # only the last definition survives).
    @commands.command(name='owo')
    async def owo(self, ctx):
        """OwO."""
        await ctx.send(f'OwO')

    @commands.command(name='lennyface', aliases=['lenny'])
    async def lennyface(self, ctx):
        """Send a random lenny face."""
        await ctx.send(random.choice(lenny_faces))

    @commands.command(name='cool')
    async def cool(self, ctx):
        """Proclaim how cool this bot is."""
        await ctx.send('This bot is soooo cool! Smiley Face :smiley:')

    @commands.command(name='cowboysaying', aliases=['cowboysay', 'cowsaying', 'cowboysays', 'cosa', 'cowboysayings'])
    async def cowboysaying(self, ctx):
        """Send a random cowboy saying."""
        await ctx.send(random.choice(cowboy_sayings))

    @commands.command(name='roast', aliases=['meanthing', 'roasts'])
    async def roast(self, ctx):
        """Send a random roast."""
        await ctx.send(random.choice(roasts))

    @commands.command(name='bot')
    async def bot(self, ctx):
        """Confirm that, yes, this is a bot."""
        await ctx.send('Yes, this is a bot!')

    @commands.command(name='iwant', aliases=['be_passive_aggressive', 'I_want', 'Iwant', 'whatIwant'])
    async def iwant(self, ctx):
        """Reply passive-aggressively about what the caller wants."""
        await ctx.send('If that\'s what you want :upside_down:')

    # Renamed from a second `iwant` method that silently shadowed the
    # `iwant` command above (duplicate class attribute names).
    @commands.command(name='hewants', aliases=['he_passive_aggressive', 'he_wants', 'Hewants', 'whathewants'])
    async def hewants(self, ctx):
        """Reply passive-aggressively about what he wants."""
        await ctx.send('If that\'s what he wants :upside_down:')

    @commands.command(name='userwants', aliases=['passive_aggressive', 'Wants', 'wants'])
    async def userwants(self, ctx, *, member: discord.Member = None):
        """Reply passive-aggressively about what *member* (or the caller) wants."""
        if member is None:
            member = ctx.author
        await ctx.send(f'If that\'s what {member.name} wants :upside_down:')

    @commands.command(name='smile')
    async def smile(self, ctx):
        """Send a smiley."""
        await ctx.send(':smiley:')

    @commands.command(name='coin', aliases=["flip", "coinflip", 'Coin'])
    async def coin(self, ctx):
        """Flip a coin."""
        await ctx.send(random.choice(["Heads!", "Tails!"]))

    @commands.command(name='yesno', aliases=["YesNo", "noyes", 'NoYes', 'yes/no'])
    async def yesno(self, ctx):
        """Answer yes or no at random."""
        await ctx.send(random.choice(["Yes!", "No!"]))

    @commands.command(name='number', aliases=["num"])
    async def number(self, ctx):
        """Send a random number between 0 and 1,000,000,001 inclusive."""
        await ctx.send(f"Your random number is {random.randint(0, 1000000001)}")

    @commands.command(name='dice', aliases=["roll", 'rd'])
    async def dice(self, ctx, dicenum):
        """Roll a die with the given number of faces.

        NOTE(review): matches original behaviour of returning 0..dicenum
        inclusive; confirm whether 1..dicenum was intended.
        """
        upper = int(dicenum) + 1
        await ctx.send(f"You rolled a {random.randrange(0, upper)}")

    @commands.command(name='backwards', aliases=['reverse'])
    async def backwards(self, ctx, *, text):
        """Echo the text reversed."""
        backwardstext = text[::-1]
        await ctx.send(f'{backwardstext}')

    @commands.command(name='say', aliases=['talk'])
    @commands.guild_only()
    async def say(self, ctx, *, whattosay):
        """Repeat the given text, signed with the caller's name."""
        await ctx.send(f'''{whattosay}
**-{ctx.author}**''')

    # Renamed from a second `cat` method: the duplicate attribute name
    # shadowed the image-generator `cat` command above, and two commands
    # could never both register under the name "cat" anyway.
    @commands.command(name='randomcat', hidden=True)
    async def random_cat(self, ctx):
        """Gives you a random cat from thecatapi.com."""
        # assumes the bot attaches an aiohttp session to ctx.session --
        # TODO confirm against the bot's Context subclass
        async with ctx.session.get('https://api.thecatapi.com/v1/images/search') as resp:
            if resp.status != 200:
                return await ctx.send('No cat found :(')
            js = await resp.json()
            await ctx.send(embed=discord.Embed(title='Random Cat').set_image(url=js[0]['url']))

    @commands.command(name="spam")
    @commands.cooldown(1, 160, commands.BucketType.user)
    async def spam(self, ctx, member: discord.Member):
        """DM-spam *member* a caller-chosen number of times (1-20)."""
        # Bug fix: the prompt advertises 1-20 but the original used
        # range(1, 20), silently rejecting 20.
        valid = range(1, 21)
        await ctx.channel.send('how many times to spam this user chose 1-20')

        def check(msg):
            # Guard with isdigit() so non-numeric replies don't raise
            # ValueError inside wait_for (bug in the original check).
            return (msg.author == ctx.author and msg.channel == ctx.channel
                    and msg.content.isdigit() and int(msg.content) in valid)

        msg = await self.bot.wait_for("message", check=check)
        times = int(msg.content)
        while times > 0:
            await member.send("hahahhaa i spam you :D , i spam you hahahah")
            times -= 1
            # asyncio comes from the module imports (it was missing before,
            # making this line a NameError).
            await asyncio.sleep(2)

    @commands.command(name="calculateage")
    async def calculate_age(self, ctx):
        """DM dialog that computes the author's age from a yyyymmdd birthday."""
        # getting the current date (datetime comes from the module imports;
        # it was missing before, making this line a NameError)
        now = datetime.datetime.now()
        current_day = int(now.strftime('%d'))
        current_month = int(now.strftime('%m'))
        current_year = int(now.strftime('%y')) + 2000
        await ctx.author.send('Type your age in this form : yyyymmdd')
        valid = range(19500101, 20211231)

        def check(msg):
            # isdigit() guard: same ValueError bug as in `spam`
            return (msg.author == ctx.author
                    and msg.content.isdigit() and int(msg.content) in valid)

        msg = await self.bot.wait_for("message", check=check)
        age = int(msg.content)
        user_year = int(age / 10000)
        user_month = int((age % 10000) / 100)
        user_day = int(age % 100)
        await ctx.author.send(f"Birthday : {user_day} / {user_month} / {user_year} ")
        await ctx.author.send(f"\nCurent date : {current_day} / {current_month} / {current_year}")
        # If the birth day hasn't been reached yet this month, borrow days
        # from the previous month so the subtraction below stays positive.
        month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        if (user_day > current_day):
            current_month = current_month - 1
            current_day = current_day + month[user_month - 1]
        # Likewise borrow months from the previous year when the birth month
        # hasn't been reached yet this year.
        if (user_month > current_month):
            current_year = current_year - 1
            current_month = current_month + 12
        # remaining differences are the age in days / months / years
        calculated_date = current_day - user_day
        calculated_month = current_month - user_month
        calculated_year = current_year - user_year
        await ctx.author.send(f"you are {calculated_year} years ")
        await ctx.author.send(f" {calculated_month} month(s) and ")
        await ctx.author.send(f" {calculated_date} days old ")
def setup(bot):
    """Standard discord.py extension entry point: register the cog."""
    bot.add_cog(FunCog(bot))
| 43.637631 | 214 | 0.6355 | 1,595 | 12,524 | 4.917868 | 0.196865 | 0.041815 | 0.079934 | 0.02486 | 0.402473 | 0.27639 | 0.235084 | 0.198496 | 0.177588 | 0.145844 | 0 | 0.029837 | 0.221255 | 12,524 | 286 | 215 | 43.79021 | 0.774121 | 0 | 0 | 0.181818 | 0 | 0.038961 | 0.233333 | 0.031343 | 0 | 0 | 0.000663 | 0 | 0 | 0 | null | null | 0.012987 | 0.077922 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b5140207f30a322a8c72e6c8cce57aadcbcce4b | 2,429 | py | Python | keras/lstm_my.py | mobarski/sandbox | 64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c | [
"MIT"
] | null | null | null | keras/lstm_my.py | mobarski/sandbox | 64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c | [
"MIT"
] | null | null | null | keras/lstm_my.py | mobarski/sandbox | 64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c | [
"MIT"
] | null | null | null | text = """
ala ma kota
a kot ma ale
"""
# ------------------------------------------------------------------------------
# TODO as class
chars = list(sorted(set(text))) # stabilne indeksy
len_chars = len(chars)+1
c_to_i = {c:i+1 for i,c in enumerate(chars)}
i_to_c = {i+1:c for i,c in enumerate(chars)}
def text_to_i(text):
return [c_to_i.get(c,0) for c in text]
def text_to_hot(text):
out = [[0]*len_chars for _ in text]
i_list = text_to_i(text)
for n,i in enumerate(i_list):
out[n][i] = 1
return out
# ------------------------------------------------------------------------------
import numpy as np

INPUT = 4
# Sliding windows: INPUT input characters plus one target character each.
window = INPUT + 1
sentences = [text[start:start + window] for start in range(len(text) - window)]
x = np.zeros((len(sentences), INPUT, len_chars), dtype='b')
y = np.zeros((len(sentences), len_chars), dtype='b')
for i, text in enumerate(sentences):
    # First INPUT characters are the input, the last one is the target.
    x[i] = text_to_hot(text[:-1])
    y[i] = text_to_hot(text[-1:])[0]
print(x)
print(y)
# ------------------------------------------------------------------------------
from keras.models import Sequential,load_model
from keras.layers import Dense,LSTM
from keras.optimizers import RMSprop
if 0:
    # Flip to 1 to (re)train from scratch; otherwise reuse saved weights.
    model = Sequential()
    model.add(LSTM(4, input_shape=(INPUT, len_chars)))
    # model.add(LSTM(4, batch_input_shape=(3, INPUT, len_chars), stateful=True))
    model.add(Dense(len_chars, activation='softmax'))
    optimizer = RMSprop(learning_rate=0.1)
    model.compile(optimizer, loss='categorical_crossentropy')
    model.fit(x, y, batch_size=3, epochs=20)
    model.save('lstm_my.h5')
else:
    model = load_model('lstm_my.h5')
# ------------------------------------------------------------------------------
def sample(p_list, t=1.0):
    """Draw an index from probability vector *p_list* at temperature *t*.

    t == 0 means greedy: return the argmax directly.
    """
    if not t:
        return np.argmax(p_list)
    # float64 keeps the normalised probabilities from summing above 1
    # due to rounding (multinomial rejects that).
    scaled = np.exp(np.log(p_list.astype('float64')) / t)
    normalised = scaled / np.sum(scaled)
    draw = np.random.multinomial(1, normalised, 1)
    return np.argmax(draw)
# ------------------------------------------------------------------------------
if 0:
    # Sanity check: greedy (t=0) prediction for one input window.
    px = np.array([text_to_hot('kot')])
    py = model.predict(px)
    pi = sample(py, 0)
    pc = i_to_c[pi]
    print(px)
    print(py)
    print(i_to_c)
    print(pi)
    print(pc)
# ------------------------------------------------------------------------------
# TODO: extract into a generate() function
text = 'ala '
out = text[:]
for _ in range(20):
    # Predict the next character from the current window and sample it.
    x = np.array([text_to_hot(text)])
    py = model.predict(x)[0]
    out += i_to_c[sample(py, 1.0)]
    # The next window is the last INPUT characters generated so far.
    text = out[-INPUT:]
print(out)
| 24.535354 | 80 | 0.553314 | 377 | 2,429 | 3.408488 | 0.281167 | 0.049805 | 0.035019 | 0.040467 | 0.091829 | 0.056031 | 0 | 0 | 0 | 0 | 0 | 0.017257 | 0.117332 | 2,429 | 98 | 81 | 24.785714 | 0.58209 | 0.256896 | 0 | 0.030303 | 0 | 0 | 0.051868 | 0.013385 | 0 | 0 | 0 | 0.010204 | 0 | 1 | 0.045455 | false | 0 | 0.060606 | 0.015152 | 0.151515 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b5b587cb0ce620906a483609ab64dc143ca2768 | 1,169 | py | Python | doc/examples/python_logging.py | ralphm/udplog | 04fa2045f0eb23dfd73be704ac7713384c6860d7 | [
"MIT"
] | null | null | null | doc/examples/python_logging.py | ralphm/udplog | 04fa2045f0eb23dfd73be704ac7713384c6860d7 | [
"MIT"
] | 1 | 2018-02-27T20:09:35.000Z | 2018-02-27T20:09:35.000Z | doc/examples/python_logging.py | ralphm/udplog | 04fa2045f0eb23dfd73be704ac7713384c6860d7 | [
"MIT"
] | 1 | 2016-10-11T12:27:33.000Z | 2016-10-11T12:27:33.000Z | """
Example of using the standard logging facility to send events to udplog.
"""
import logging
import socket
import warnings
from udplog.udplog import UDPLogger, UDPLogHandler
# Get a logger in the idiomatic way.
logger = logging.getLogger(__name__)
# Set up logging to stdout
logging.basicConfig(level=logging.DEBUG)
# Capture warnings, too.
logging.captureWarnings(True)
# Add the UDPLog handler to the root logger.
# defaultFields are merged into every event this process emits over UDP.
udplogger = UDPLogger(defaultFields={
'appname': 'example',
'hostname': socket.gethostname(),
})
# Attaching at the root forwards records from all loggers (including
# captured warnings) under the "python_logging" category.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(UDPLogHandler(udplogger, category="python_logging"))
def main():
    """Emit one log record of each flavour so they show up in udplog.

    Fixes over the original:
    - Python-2 `print a['something']` statement replaced with the
      function-call form, valid on both Python 2 and 3;
    - the bare `except:` is narrowed to KeyError (the only exception the
      demo lookup on an empty dict can raise).
    """
    logger.debug("Starting!")
    logger.info("This is a simple message")
    # %-style lazy formatting: the dict is only interpolated if emitted.
    logger.info("This is a message with %(what)s", {'what': 'variables'})
    extra_logger = logging.LoggerAdapter(logger, {'bonus': 'extra data'})
    extra_logger.info("Bonus ahead!")
    a = {}
    try:
        print(a['something'])  # deliberately raises KeyError
    except KeyError:
        # logger.exception attaches the traceback to the event.
        logger.exception("Oops!")
    warnings.warn("Don't do foo, do bar instead!", stacklevel=2)


main()
| 25.413043 | 73 | 0.664671 | 138 | 1,169 | 5.57971 | 0.565217 | 0.038961 | 0.036364 | 0.041558 | 0.044156 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001091 | 0.215569 | 1,169 | 45 | 74 | 25.977778 | 0.838604 | 0.106929 | 0 | 0 | 0 | 0 | 0.190824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.148148 | null | null | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b60b2d12c5d9dbd477a2f5ad06e24a5440736ae | 2,190 | py | Python | tests/test_gf.py | senk8/crypto-math | 91c7b02a28e91190089b0213065498ce3c6b2e18 | [
"MIT"
] | 1 | 2022-01-01T07:48:29.000Z | 2022-01-01T07:48:29.000Z | tests/test_gf.py | senk8/crypto-math | 91c7b02a28e91190089b0213065498ce3c6b2e18 | [
"MIT"
] | null | null | null | tests/test_gf.py | senk8/crypto-math | 91c7b02a28e91190089b0213065498ce3c6b2e18 | [
"MIT"
] | null | null | null | from crypto_math import GF, field_extension
import pytest
@pytest.fixture
def setup():
    """Build GF(7^4), the degree-4 extension of the prime field GF(7)."""
    base_field = GF(7)
    return field_extension(base_field, 4)
@pytest.mark.parametrize(
    "x,y,expect",
    [
        ([1, 1, 1, 1], [2, 3, 1], [1, 3, 4, 2]),
        ([2, 3, 1], [6, 1, 3, 1], [6, 3, 6, 2]),
        ([1, 1, 1, 1], [6, 1, 3, 1], [2, 4, 2]),
    ],
)
def test_add(setup, x, y, expect):
    """Addition of elements in GF(7^4)."""
    field = setup
    assert field(x) + field(y) == field(expect)
@pytest.mark.parametrize(
    "x,y,expect",
    [
        ([1, 1, 1, 1], [2, 3, 1], [1, 6, 5, 0]),
        ([2, 3, 1], [6, 1, 3, 1], [1, 1, 0, 0]),
        ([1, 1, 1, 1], [6, 1, 3, 1], [2, 0, 5, 0]),
        ([1, 1, 1, 1], [1, 1, 1, 1], [0]),
    ],
)
def test_sub(setup, x, y, expect):
    """Subtraction of elements in GF(7^4), including x - x == 0."""
    field = setup
    assert field(x) - field(y) == field(expect)
@pytest.mark.parametrize(
    "x,y,expect",
    [
        ([1, 1, 0], [3, 2], [3, 5, 2, 0]),
        ([1, 1], [1, 3, 6], [1, 4, 2, 6]),
        ([1, 1, 1, 1], [2, 3, 1], [3, 1, 6, 0]),
        ([2, 3, 1], [6, 1, 3, 1], [4, 4, 2, 4]),
        ([1, 1, 1, 1], [6, 1, 3, 1], [1, 3, 0, 5]),
        ([1, 1, 1, 1], [0], [0]),
    ],
)
def test_mul(setup, x, y, expect):
    """Multiplication of elements in GF(7^4), including x * 0 == 0."""
    field = setup
    assert field(x) * field(y) == field(expect)
@pytest.mark.parametrize(
    "x,y,expect",
    [
        ([1, 1, 1, 1], [2, 3, 1], [2, 6, 4, 6]),
        ([2, 3, 1], [6, 1, 3, 1], [0, 6, 5, 4]),
        ([1, 1, 1, 1], [6, 1, 3, 1], [2, 5, 1, 5]),
    ],
)
def test_division(setup, x, y, expect):
    """Division of elements in GF(7^4)."""
    field = setup
    assert field(x) / field(y) == field(expect)
@pytest.mark.parametrize(
    "x",
    [
        ([1, 1, 1, 1]),
        ([2, 3, 1]),
        ([6, 1, 3, 1]),
    ],
)
def test_inverse(setup, x):
    """x * x^-1 must be the multiplicative identity."""
    field = setup
    element = field(x)
    assert element * element.inverse() == field([1])
@pytest.mark.parametrize(
    "x,e",
    [
        ([1, 1, 1, 1], 3),
        ([2, 3, 1], 3),
        ([6, 1, 3, 1], 3),
        ([6, 1, 3, 1], 0),
        ([6, 1, 3, 1], 1),
    ],
)
def test_pow(setup, x, e):
    """x ** e matches repeated multiplication starting from the identity."""
    field = setup
    element = field(x)
    expected = field.one()
    for _ in range(e):
        expected *= element
    assert element ** e == expected
| 18.559322 | 51 | 0.387671 | 403 | 2,190 | 2.022333 | 0.099256 | 0.120245 | 0.110429 | 0.078528 | 0.622086 | 0.603681 | 0.596319 | 0.500614 | 0.480982 | 0.435583 | 0 | 0.181303 | 0.355251 | 2,190 | 117 | 52 | 18.717949 | 0.395892 | 0 | 0 | 0.347826 | 0 | 0 | 0.020091 | 0 | 0 | 0 | 0 | 0 | 0.065217 | 1 | 0.076087 | false | 0 | 0.021739 | 0 | 0.108696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b612f1385de3aea9ee32b7c61b778ab34bc50ba | 4,495 | py | Python | fgietAdmission/settings/prod.py | rpsingh21/Fgiet-Admission | a6871da939c2ec16e480844254d801c3a486c5c2 | [
"MIT"
] | 5 | 2018-09-28T20:05:14.000Z | 2019-05-31T19:12:48.000Z | fgietAdmission/settings/prod.py | rpsingh21/Fgiet-Admission | a6871da939c2ec16e480844254d801c3a486c5c2 | [
"MIT"
] | 5 | 2019-09-30T18:40:15.000Z | 2020-04-20T20:28:29.000Z | fgietAdmission/settings/prod.py | rpsingh21/Fgiet-Admission | a6871da939c2ec16e480844254d801c3a486c5c2 | [
"MIT"
] | 3 | 2019-09-30T20:54:55.000Z | 2020-04-15T15:39:37.000Z | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'SECRET_KEY', 'j2b_z(*4w+#)t^nz3)0n3da(tcj&3##klo73m76(x7%3z)b%85n!')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = eval(os.environ.get('DEBUG'))
ALLOWED_HOSTS = ['www.fgiet.in', 'fgiet.in']
if DEBUG:
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_beat',
'django_celery_results',
'crispy_forms',
'pagedown',
'widget_tweaks',
'admission',
'account',
'utils'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fgietAdmission.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries':{
'my_filters': 'fgietAdmission.templatetags.my_filters',
}
},
},
]
WSGI_APPLICATION = 'fgietAdmission.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_DB'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('POSTGRES_HOST', 'localhost'),
'PORT': os.environ.get('POSTGRES_PORT', '5432')
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Upload size cap; presumably bytes (~220 KB) — TODO confirm against the
# upload validators that consume it.
MAX_UPLOAD_SIZE = 225280
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "static_cdn")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media_cdn")
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Celery: Redis broker/result backend (REDIS_URL overrides the localhost
# default); JSON-only payloads; beat tasks imported from utils.tasks.
CELERY_BROKER_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379')
CELERY_RESULT_BACKEND = os.environ.get('REDIS_URL', 'redis://localhost:6379')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE
CELERY_IMPORTS = (
"utils.tasks",
)
# SMTP credentials come from the environment; TLS is always on.
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
if not DEBUG:
    # Production-only hardening: force HTTPS (behind a proxy that sets
    # X-Forwarded-Proto), secure cookies, HSTS and frame denial.
    CORS_REPLACE_HTTPS_REFERER = True
    HOST_SCHEME = "https://"
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    SECURE_SSL_REDIRECT = True
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    SECURE_HSTS_SECONDS = 1000000
    SECURE_FRAME_DENY = True
| 27.746914 | 91 | 0.691435 | 515 | 4,495 | 5.817476 | 0.390291 | 0.065087 | 0.052069 | 0.023364 | 0.202603 | 0.175901 | 0.091789 | 0.071762 | 0.056742 | 0 | 0 | 0.015809 | 0.169744 | 4,495 | 161 | 92 | 27.919255 | 0.786977 | 0.124583 | 0 | 0 | 0 | 0.008621 | 0.440561 | 0.298214 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.060345 | 0.017241 | 0 | 0.017241 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6b78c685cbe88fe52be31d9316aa083ac998c4fb | 902 | py | Python | lesson9/napalm_simple.py | mfeindt0705/pynetmf | 02fc092fd42ce5be5e160fa88b65c63d23408a6a | [
"Apache-2.0"
] | null | null | null | lesson9/napalm_simple.py | mfeindt0705/pynetmf | 02fc092fd42ce5be5e160fa88b65c63d23408a6a | [
"Apache-2.0"
] | 7 | 2021-03-18T21:28:13.000Z | 2022-02-10T10:39:10.000Z | lesson9/napalm_simple.py | mfeindt0705/pynetmf | 02fc092fd42ce5be5e160fa88b65c63d23408a6a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from getpass import getpass
from pprint import pprint
from napalm import get_network_driver
# Supress SSL Certificate Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# NAPALM connection parameters; passwords are prompted interactively via
# getpass() when the module loads.
cisco3 = dict(
hostname="cisco3.lasthop.io",
device_type="ios",
username="pyclass",
password=getpass(),
optional_args={},
)
nxos1 = dict(
hostname="nxos1.lasthop.io",
device_type="nxos",
username="pyclass",
password=getpass(),
optional_args={"port": 8443},
)
# device_type is popped out because get_network_driver() takes it
# separately; the remaining keys go straight to the driver constructor.
# Swap the commented lines to target cisco3 instead of nxos1.
# device_type = cisco3.pop("device_type")
device_type = nxos1.pop("device_type")
driver = get_network_driver(device_type)
# device = driver(**cisco3)
device = driver(**nxos1)
print()
print("\n\n>>>Test device open")
device.open()
print()
output = device.get_facts()
pprint(output)
print()
| 20.044444 | 71 | 0.732816 | 110 | 902 | 5.872727 | 0.418182 | 0.108359 | 0.049536 | 0.058824 | 0.130031 | 0.130031 | 0 | 0 | 0 | 0 | 0 | 0.017972 | 0.136364 | 902 | 44 | 72 | 20.5 | 0.811297 | 0.131929 | 0 | 0.233333 | 0 | 0 | 0.118252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.1 | 0.166667 | 0 | 0.166667 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6b7ba61ef023c1db826796f4b0845a69f907a4aa | 535 | py | Python | run_scraper.py | justinslud/scrape-wikipedia-current-events | fa4a261a15e86b2470506ecb72ab0d488240c77d | [
"MIT"
] | null | null | null | run_scraper.py | justinslud/scrape-wikipedia-current-events | fa4a261a15e86b2470506ecb72ab0d488240c77d | [
"MIT"
] | null | null | null | run_scraper.py | justinslud/scrape-wikipedia-current-events | fa4a261a15e86b2470506ecb72ab0d488240c77d | [
"MIT"
] | null | null | null | from wiki_scrape_db import get_headlines
headlines = []
for year in list(range(1995, 2020)):
try:
headlines.append(get_headlines(year))
except Exception as e:
print(e)
headlines.append(get_headlines(1994, start_month='July'))
headlines.append(get_headlines(2020, end_month='November'))
with open('headlines.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['day', 'month', 'year', 'subject', 'event', 'text'])
csvwriter.writerows(list(chain(*headlines)))
| 29.722222 | 76 | 0.695327 | 68 | 535 | 5.352941 | 0.617647 | 0.131868 | 0.148352 | 0.222527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035088 | 0.147664 | 535 | 17 | 77 | 31.470588 | 0.763158 | 0 | 0 | 0 | 0 | 0 | 0.102804 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b7e9434ecd7d1a7626dabe916fb3aaec1e3cb2a | 9,265 | py | Python | yasql/apps/sqlquery/migrations/0001_initial.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 443 | 2018-02-08T02:53:48.000Z | 2020-10-13T10:01:55.000Z | yasql/apps/sqlquery/migrations/0001_initial.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 27 | 2018-03-06T03:50:07.000Z | 2020-08-18T08:09:49.000Z | yasql/apps/sqlquery/migrations/0001_initial.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 148 | 2018-03-15T06:07:25.000Z | 2020-08-17T14:58:45.000Z | # Generated by Django 2.2.16 on 2020-12-15 07:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('sqlorders', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DbQuerySchemas',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('schema', models.CharField(default='', max_length=64, verbose_name='库名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('cid', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlorders.DbConfig', verbose_name='数据库')),
],
options={
'verbose_name': 'DB查询库',
'verbose_name_plural': 'DB查询库',
'db_table': 'yasql_sqlquery_schemas',
'default_permissions': (),
'unique_together': {('cid', 'schema')},
},
),
migrations.CreateModel(
name='DbQueryTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('table', models.CharField(default='', max_length=128, verbose_name='表名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schema', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQuerySchemas', verbose_name='库名')),
],
options={
'verbose_name': 'DB查询表',
'verbose_name_plural': 'DB查询表',
'db_table': 'yasql_sqlquery_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserPrivs',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schemas', models.ManyToManyField(to='sqlquery.DbQuerySchemas', verbose_name='允许访问的库')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': 'DB查询用户权限',
'verbose_name_plural': 'DB查询用户权限',
'db_table': 'yasql_sqlquery_user_privileges',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserDenyTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
('user_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryUserPrivs', verbose_name='权限')),
],
options={
'verbose_name': '禁止用户访问的表',
'verbose_name_plural': '禁止用户访问的表',
'db_table': 'yasql_sqlquery_user_deny_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryUserAllowedTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
('user_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryUserPrivs', verbose_name='权限')),
],
options={
'verbose_name': '允许用户访问的表',
'verbose_name_plural': '允许用户访问的表',
'db_table': 'yasql_sqlquery_user_allowed_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryLog',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键id')),
('username', models.CharField(max_length=64, verbose_name='用户名')),
('host', models.CharField(max_length=256, verbose_name='目标数据库地址')),
('schema', models.CharField(default='', max_length=128, verbose_name='目标数据库')),
('tables', models.CharField(default='', max_length=200, verbose_name='目标表名')),
('query_sql', models.TextField(default='', verbose_name='查询SQL')),
('query_consume_time', models.FloatField(default=0.0, verbose_name='查询耗时,单位s')),
('query_status', models.CharField(default='', max_length=2048, verbose_name='查询是否成功或失败的原因')),
('affected_rows', models.IntegerField(default=0, verbose_name='影响影响行数')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='查询时间')),
],
options={
'verbose_name': 'DB查询日志',
'verbose_name_plural': 'DB查询日志',
'db_table': 'yasql_sqlquery_log',
'default_permissions': (),
'index_together': {('tables',), ('schema',), ('username',)},
},
),
migrations.CreateModel(
name='DbQueryGroupPrivs',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('group', models.CharField(default='', max_length=128, verbose_name='组名')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('schemas', models.ManyToManyField(to='sqlquery.DbQuerySchemas', verbose_name='允许访问的库')),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': 'DB查询组权限',
'verbose_name_plural': 'DB查询组权限',
'db_table': 'yasql_sqlquery_group_privileges',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryGroupDenyTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('group_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryGroupPrivs', verbose_name='权限')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
],
options={
'verbose_name': '禁止组访问的表',
'verbose_name_plural': '禁止组访问的表',
'db_table': 'yasql_sqlquery_group_deny_tables',
'default_permissions': (),
},
),
migrations.CreateModel(
name='DbQueryGroupAllowedTables',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='主键ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('group_privs', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryGroupPrivs', verbose_name='权限')),
('tables', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sqlquery.DbQueryTables', verbose_name='表')),
],
options={
'verbose_name': '允许组访问的表',
'verbose_name_plural': '允许组访问的表',
'db_table': 'yasql_sqlquery_group_allowed_tables',
'default_permissions': (),
},
),
]
| 54.181287 | 172 | 0.582515 | 897 | 9,265 | 5.765886 | 0.152731 | 0.146752 | 0.069026 | 0.082173 | 0.728152 | 0.676141 | 0.628384 | 0.607889 | 0.581787 | 0.581787 | 0 | 0.006828 | 0.272855 | 9,265 | 170 | 173 | 54.5 | 0.760873 | 0.004965 | 0 | 0.539877 | 1 | 0 | 0.215471 | 0.063687 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.018405 | 0 | 0.042945 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6b8f45fdb0fa4f30fcf42cfaca12072123bf631a | 2,124 | py | Python | tests/rules/test_gcloud_cli.py | ronandoolan2/thefuck | 7ca0919e70fe23a551b2388c050aa34925c09d0c | [
"MIT"
] | null | null | null | tests/rules/test_gcloud_cli.py | ronandoolan2/thefuck | 7ca0919e70fe23a551b2388c050aa34925c09d0c | [
"MIT"
] | null | null | null | tests/rules/test_gcloud_cli.py | ronandoolan2/thefuck | 7ca0919e70fe23a551b2388c050aa34925c09d0c | [
"MIT"
] | null | null | null | import pytest
from thefuck.rules.gcloud_cli import match, get_new_command
from thefuck.types import Command
# Canned gcloud CLI error outputs used as fixtures in the tests below.

# No usable suggestion at all — the rule must not match this.
no_suggestions = '''\
ERROR: (gcloud) Command name argument expected.
'''
# Top-level command misspelled; gcloud prints its full group/command list.
misspelled_command = '''\
ERROR: (gcloud) Invalid choice: 'comute'.
Usage: gcloud [optional flags] <group | command>
group may be access-context-manager | ai-platform | alpha | app |
asset | auth | beta | bigtable | builds | components |
composer | compute | config | container | dataflow |
dataproc | datastore | debug | deployment-manager |
dns | domains | endpoints | filestore | firebase |
functions | iam | iot | kms | logging | ml |
ml-engine | organizations | projects | pubsub | redis |
resource-manager | scheduler | services | source |
spanner | sql | tasks | topic
command may be docker | feedback | help | info | init | version
For detailed information on this command and its flags, run:
gcloud --help
'''
# Subcommand misspelled; gcloud offers "Maybe you meant" suggestions which
# the rule turns into replacement commands.
misspelled_subcommand = '''\
ERROR: (gcloud.compute) Invalid choice: 'instance'.
Maybe you meant:
gcloud compute instance-groups
gcloud compute instance-templates
gcloud compute instances
gcloud compute target-instances
To search the help text of gcloud commands, run:
gcloud help -- SEARCH_TERMS
'''
@pytest.mark.parametrize('command', [
    Command('gcloud comute instances list', misspelled_subcommand),
    Command('gcloud compute instance list', misspelled_subcommand)])
def test_match(command):
    """A misspelled gcloud (sub)command error should trigger the rule."""
    assert match(command)
def test_not_match():
    """Errors from other CLIs (no gcloud suggestion) must be ignored."""
    command = Command('aws dynamodb invalid', no_suggestions)
    assert not match(command)
@pytest.mark.parametrize('command, result', [
    (Command('gcloud comute instances list', misspelled_subcommand),
     ['gcloud compute instance-groups']),
    (Command('gcloud compute instance list', misspelled_subcommand),
     ['gcloud compute instance-groups'])])
def test_get_new_command(command, result):
    """The first suggestion from the error output becomes the fix."""
    assert get_new_command(command) == result
| 36 | 80 | 0.660075 | 228 | 2,124 | 6.061404 | 0.5 | 0.08466 | 0.091172 | 0.058611 | 0.227207 | 0.18958 | 0.18958 | 0 | 0 | 0 | 0 | 0 | 0.245763 | 2,124 | 58 | 81 | 36.62069 | 0.862672 | 0 | 0 | 0.065217 | 0 | 0 | 0.687853 | 0.010358 | 0 | 0 | 0 | 0 | 0.065217 | 1 | 0.065217 | false | 0 | 0.065217 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ba44c15532a1eb969c9c42b5ee34f85d42c99fb | 892 | py | Python | changePositionToIndexUpstream.py | oicr-gsi/bam-statistics | 62109bcb6ae09a0425a386dd139e5487ef019c81 | [
"MIT"
] | null | null | null | changePositionToIndexUpstream.py | oicr-gsi/bam-statistics | 62109bcb6ae09a0425a386dd139e5487ef019c81 | [
"MIT"
] | null | null | null | changePositionToIndexUpstream.py | oicr-gsi/bam-statistics | 62109bcb6ae09a0425a386dd139e5487ef019c81 | [
"MIT"
] | null | null | null | #!/usr/bin/python
## my upstream target intervals were of size 1000 each.
import sys
# Read positions from stdin when input is piped; otherwise fall back to a
# filename given as the first command-line argument.
if not sys.stdin.isatty():
    indexFile = sys.stdin
else:
    try:
        input_file = sys.argv[1]
    except IndexError:
        message = 'need filename as first argument if stdin is not full'
        raise IndexError(message)
    else:
        # Fixed: mode 'rU' was removed in Python 3.11; plain 'r' already
        # performs universal-newline decoding.
        indexFile = open(input_file, 'r')
# Emit one index per input position, counting down from 999; a gap between
# consecutive positions restarts the countdown (original header says the
# upstream intervals were of size 1000 — presumably one countdown per
# interval; confirm against the producing pipeline).
# Fixed: Python-2 print statements replaced with print() calls (valid on
# both Python 2 and 3); stray semicolons removed; val is no longer
# re-parsed a second time.
count = 999
start = 0
for line in indexFile:
    if start == 0:
        # First position seen: start the countdown.
        print(count)
        start = int(line)
        count = count - 1
    else:
        val = int(line)
        if start == val - 1:
            # Consecutive position: keep counting down.
            print(count)
            count = count - 1
            start = val
        else:
            # Gap: restart the countdown window.
            print(999)
            count = 998
            start = val
| 21.238095 | 66 | 0.502242 | 104 | 892 | 4.288462 | 0.509615 | 0.06278 | 0.080717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036822 | 0.421525 | 892 | 41 | 67 | 21.756098 | 0.827519 | 0.136771 | 0 | 0.392857 | 0 | 0 | 0.070588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.035714 | null | null | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ba5c75d24987e85c04283050683308be22b8758 | 3,084 | py | Python | resources/python/KemendagriKTP/main.py | freezyoff/kosan-server | 0e6ec0a763075a838557258740f95bf07caf47a7 | [
"MIT"
] | null | null | null | resources/python/KemendagriKTP/main.py | freezyoff/kosan-server | 0e6ec0a763075a838557258740f95bf07caf47a7 | [
"MIT"
] | 4 | 2020-03-02T13:06:23.000Z | 2022-02-27T12:47:45.000Z | resources/python/KemendagriKTP/main.py | freezyoff/kosan-server | 0e6ec0a763075a838557258740f95bf07caf47a7 | [
"MIT"
] | null | null | null | import os
import sys
import glob
import options as setting
import xlsToCsv as csvConverter
import csvParser as parser
import dbProvider as db
def printProgress(prefix="", iteration=1, maxIteration=1, size=30, file=sys.stdout):
    """Render a one-line text progress bar, e.g. ``prefix [###...] 50% 1/2``.

    The bar is redrawn in place via a trailing carriage return; a newline is
    emitted once iteration reaches maxIteration so later output starts fresh.
    """
    iteration = int(iteration)
    size = int(size)
    maxIteration = int(maxIteration)
    # number of filled bar cells (was stored in `format`, shadowing the builtin)
    filled = int(size * iteration / maxIteration)
    percent = 100 * (float(iteration) / float(maxIteration))
    line = "%s [%s%s] %.0f%% %i/%i\r" % (
        prefix, "#" * filled, "." * (size - filled), percent, iteration, maxIteration)
    file.write(line)
    file.flush()
    if iteration and maxIteration and iteration == maxIteration:
        # finish the line (valid in both Python 2 and 3; was `print ""`)
        print("")
if __name__ == "__main__":
    searchExt = "xlsx"
    convertExt = "csv"
    country = "id"

    # Clean csv file in src/csv directory
    for stale in glob.glob(setting.paths["csv"] + "/*"):
        os.remove(stale)

    # 1. convert all xls files to csv
    array = glob.glob(setting.paths["xls"] + "/*." + str(searchExt))
    print("Start Convert .{0} to .{1}".format(searchExt, convertExt))
    print("Found .{0}: {1} file(s)".format(searchExt, len(array)))
    if len(array) <= 0:
        sys.exit()
    iteration = 0
    printProgress("Convert file(s) to ." + convertExt, iteration, len(array))
    for item in array:
        # was `list = item.split("/")` -- shadowed the builtin; basename is
        # also Windows-safe
        basename = os.path.basename(item)
        filename = setting.paths["csv"] + "/" + basename.replace("." + searchExt, "." + convertExt)
        csvConverter.convert(item, filename)
        iteration += 1
        printProgress("Convert file(s) to ." + convertExt, iteration, len(array))

    # 2. create table
    connection = db.connect(
        setting.database["host"],
        setting.database["user"],
        setting.database["pwd"],
        setting.database["database"]
    )
    connection.autocommit = False
    cursor = connection.cursor()
    db.drop(cursor, setting.database["table"])
    db.create(cursor, setting.database["table"])
    connection.commit()

    # 3. insert to table
    # Longest csv filename, used to pad the progress-bar prefix to a fixed width.
    maxFilenameLength = 0
    for item in glob.glob(setting.paths["csv"] + "/*." + convertExt):
        filename = os.path.basename(item).strip()
        maxFilenameLength = max(maxFilenameLength, len(filename))

    def progressCallback(iteration, maxIteration):
        # `filename` is intentionally late-bound to the import loop below.
        printProgress("Import file: %s " % filename, iteration, maxIteration)

    def parseCallback(code, name, line):
        # Parent region code = all dotted segments of `code` except the last.
        split = code.split(".")
        parent = ""
        if len(split) > 1:
            for index in range(len(split) - 1):
                parent += "." + split[index] if len(parent) > 0 else split[index]
        name = name.replace('"', '')
        db.save(cursor, setting.database["table"], country, code, name, parent)

    print("Import data to Database `" + setting.database["database"] + "`.`" + setting.database["table"] + "`")
    files = glob.glob(setting.paths["csv"] + "/*." + convertExt)
    files.sort()
    for item in files:
        filename = os.path.basename(item).strip()
        if maxFilenameLength - len(filename) > 0:
            filename = filename + (" " * (maxFilenameLength - len(filename)))
        parser.parseFile(item, parseCallback, progressCallback)
    connection.commit()
    cursor.close()
    connection.close()
6baa91249a1d6feaecd6484fdec577d31bedced2 | 890 | py | Python | posts/views.py | JySa65/platzi-gram | e991a59a8ebe28574671f39cf8bd552e31799cd8 | [
"MIT"
] | null | null | null | posts/views.py | JySa65/platzi-gram | e991a59a8ebe28574671f39cf8bd552e31799cd8 | [
"MIT"
] | null | null | null | posts/views.py | JySa65/platzi-gram | e991a59a8ebe28574671f39cf8bd552e31799cd8 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import ListView, CreateView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from posts.models import Post
from posts.forms import PostForm
from django.urls import reverse_lazy
# Create your views here.
class PostListView(LoginRequiredMixin, ListView):
    """Paginated feed of posts, newest first; login required."""
    model = Post
    ordering = ('-created_at',)
    paginate_by = 30
class PostDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single post; login required."""
    model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a post owned by the logged-in user and their profile."""
    model = Post
    form_class = PostForm
    success_url = reverse_lazy('posts:list')

    def form_valid(self, form):
        # Attach ownership before the parent saves the form. The previous
        # implementation assigned `_object.save()` (which returns None) to
        # self.object and then saved a second time via super().
        form.instance.user = self.request.user
        form.instance.profile = self.request.user.profile
        return super().form_valid(form)
| 28.709677 | 65 | 0.741573 | 105 | 890 | 6.171429 | 0.495238 | 0.061728 | 0.046296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002732 | 0.177528 | 890 | 30 | 66 | 29.666667 | 0.882514 | 0.025843 | 0 | 0.136364 | 0 | 0 | 0.024277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.818182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6bab10a8f6ed4cb25868e69655d856cbf7ed3d4a | 1,315 | py | Python | board/views.py | mijiFernandes/pa_1 | 10ede10494a632a98ef4e234ef58a73e8ea3c52f | [
"Unlicense"
] | null | null | null | board/views.py | mijiFernandes/pa_1 | 10ede10494a632a98ef4e234ef58a73e8ea3c52f | [
"Unlicense"
] | null | null | null | board/views.py | mijiFernandes/pa_1 | 10ede10494a632a98ef4e234ef58a73e8ea3c52f | [
"Unlicense"
] | null | null | null | from django.views.generic import ListView, DetailView, TemplateView, CreateView, UpdateView, DeleteView
from board.models import Post
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from mysite.views import OwnerOnlyMixin
from django.conf import settings
#--- ListView
class PostLV(ListView):
    """Board index: paginated list of all posts (two per page)."""
    model = Post
    template_name = 'board/post_all.html'
    context_object_name = 'posts'
    paginate_by = 2
#--- DetailView
class PostDV(DetailView):
    """Single-post detail view."""
    model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a Post; the slug is auto-filled and ownership is stamped with
    the logged-in user before saving."""
    model = Post
    fields = ['title', 'slug', 'content']
    initial = {'slug': 'auto-filling-do-not-input'}
    success_url = reverse_lazy('board:index')

    def form_valid(self, form):
        # Record the current user as the owner of the new post.
        form.instance.owner = self.request.user
        return super().form_valid(form)
class PostChangeLV(LoginRequiredMixin, ListView):
    """List only the posts owned by the logged-in user."""
    template_name = 'board/post_change_list.html'

    def get_queryset(self):
        return Post.objects.filter(owner=self.request.user)
class PostUpdateView(OwnerOnlyMixin, UpdateView):
    """Edit an existing post; restricted to its owner."""
    model = Post
    fields = ['title', 'slug', 'content']
    success_url = reverse_lazy('board:index')
class PostDeleteView(OwnerOnlyMixin, DeleteView):
    """Delete a post; only its owner may do so.

    Bug fix: this view previously subclassed DetailView, so it only rendered
    the object and never deleted it. DeleteView (already imported at the top
    of this module, and the class success_url implies deletion) is correct.
    """
    model = Post
    success_url = reverse_lazy('board:index')
| 25.784314 | 103 | 0.727757 | 154 | 1,315 | 6.097403 | 0.467532 | 0.047923 | 0.054313 | 0.067093 | 0.165069 | 0.165069 | 0 | 0 | 0 | 0 | 0 | 0.000912 | 0.16654 | 1,315 | 50 | 104 | 26.3 | 0.855839 | 0.019772 | 0 | 0.3125 | 0 | 0 | 0.112753 | 0.040435 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0.03125 | 0.96875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6baefc213b6e18384bf7694489eee455b60eff69 | 1,763 | py | Python | twilio_notification.py | srisankethu/cowin-service | 12595b3e5c13f1990e11ed9a82723840dc0882f1 | [
"MIT"
] | null | null | null | twilio_notification.py | srisankethu/cowin-service | 12595b3e5c13f1990e11ed9a82723840dc0882f1 | [
"MIT"
] | null | null | null | twilio_notification.py | srisankethu/cowin-service | 12595b3e5c13f1990e11ed9a82723840dc0882f1 | [
"MIT"
] | null | null | null | from twilio.rest import Client
from twilio.twiml.voice_response import Gather, VoiceResponse
import os
class TwilioNotification:
def __init__(self, sid, auth_token):
self.client = Client(sid, auth_token)
def send_call(self, action_url, contacts):
response = VoiceResponse()
gather = Gather(action = action_url, numDigits=6, timeout=10)
gather.say("To book slot ASAP, enter the 6-digit COWIN OTP you just received")
response.say("Book your vaccine slot. Check your messages for the vaccination slotsthat are open.")
response.append(gather)
for contact in contacts:
call = self.client.calls.create(
twiml=response,
from_=os.environ['TWILIO_PHONE_NUMBER'],
to=contact
)
def send_message(self, slots, contacts):
if(slots == []):
return
message = 'Book your vaccination slot at: \n'
for i in range(len(slots)):
message = message + '{number}. {vaccine} vaccine in {hospital} having {dose1_slots} slots for dose 1 and {dose2_slots} slots for dose 2 at {pincode} on {date} \n\n'.format(number = i+1, vaccine=slots[i]['vaccine'], hospital=slots[i]['name'], pincode=slots[i]['pincode'], dose1_slots=slots[i]['available_capacity_dose1'], dose2_slots=slots[i]['available_capacity_dose2'], date=slots[i]['date'])
message = message + "Visit https://selfregistration.cowin.gov.in/ to book your slot ASAP \n"
for contact in contacts:
sms = self.client.messages.create(
body=message,
from_=os.environ['TWILIO_PHONE_NUMBER']
to=contact
) | 41.97619 | 405 | 0.613727 | 215 | 1,763 | 4.916279 | 0.409302 | 0.034059 | 0.022706 | 0.037843 | 0.126774 | 0.073794 | 0.073794 | 0.073794 | 0 | 0 | 0 | 0.010228 | 0.27907 | 1,763 | 42 | 406 | 41.97619 | 0.8214 | 0 | 0 | 0.129032 | 0 | 0.032258 | 0.283447 | 0.027211 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.096774 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bc5c3c8ad41b9e3ab17bceffccd95fe645a515a | 5,037 | py | Python | PhaseI/DataPreparation/monitor_HRRR_forecast_rainfall.py | uva-hydroinformatics-lab/FloodWarningModelProject | b7db75943a6e1dc8dd5799cb069612e3d188911a | [
"MIT"
] | 2 | 2019-05-07T09:01:18.000Z | 2019-05-07T09:01:21.000Z | PhaseI/DataPreparation/monitor_HRRR_forecast_rainfall.py | uva-hydroinformatics-lab/FloodWarningModelProject | b7db75943a6e1dc8dd5799cb069612e3d188911a | [
"MIT"
] | null | null | null | PhaseI/DataPreparation/monitor_HRRR_forecast_rainfall.py | uva-hydroinformatics-lab/FloodWarningModelProject | b7db75943a6e1dc8dd5799cb069612e3d188911a | [
"MIT"
] | 1 | 2018-05-03T17:15:02.000Z | 2018-05-03T17:15:02.000Z | from pydap.client import open_url
from pydap.exceptions import ServerError
import subprocess
import boto.ec2
import datetime as dt
import numpy as np
import csv
import schedule
import time
"""
Global parameters:
-Study area location (LL and UR corners of TUFLOW model bounds)
-Initial and average resolution values for longitude and latitude,
needed for grid point conversion
(source: http://nomads.ncep.noaa.gov:9090/dods/hrrr "info" link)
"""
# HRRR native grid origin (degrees) and mean cell resolution; gridpt() uses
# these to convert lon/lat to grid indices (values from the nomads "info" page).
initLon = -134.09548000000 # modified that to follow the latest values on the website
aResLon = 0.029
initLat = 21.14054700000 # modified that to follow the latest values on the website
aResLat = 0.027
# Study-area bounding box (LL/UR corners of the TUFLOW model bounds) with
# empirical offsets applied so the retrieved subset lines up with the model.
# this values added to the original bounding box made the retrieved data to be
lon_lb = (-77.979315-0.4489797462)
lon_ub = (-76.649286-0.455314383)
lat_lb = (36.321159-0.133)
lat_ub = (37.203955-0.122955)
# Connection to AWS
# NOTE(review): placeholder credentials and a network connection made at
# import time; consider environment variables and a lazy connection.
conn = boto.ec2.connect_to_region("us-east-1", aws_access_key_id="<aws_access_key_id>",
aws_secret_access_key="<aws_secret_access_key>")
def getData(current_dt, delta_T, max_lookback=48):
    """Open the newest available HRRR surface dataset at or before
    current_dt + delta_T hours, stepping back one hour per failed attempt
    (HRRR datasets are posted to the repository inconsistently).

    Returns (dataset, url, date_str, hour_str). Raises RuntimeError once more
    than `max_lookback` hours have been tried, instead of recursing without
    bound as the original did (which would eventually hit RecursionError).
    """
    if delta_T < -max_lookback:
        raise RuntimeError("No HRRR dataset found within %d hours" % max_lookback)
    dtime_fix = current_dt + dt.timedelta(hours=delta_T)
    date = dt.datetime.strftime(dtime_fix, "%Y%m%d")
    fc_hour = dt.datetime.strftime(dtime_fix, "%H")
    hour = str(fc_hour)
    url = 'http://nomads.ncep.noaa.gov:9090/dods/hrrr/hrrr%s/hrrr_sfc_%sz' % (date, hour)
    try:
        dataset = open_url(url)
    except ServerError:
        print("Failed to open : %s" % url)
        return getData(current_dt, delta_T - 1, max_lookback)
    if len(dataset.keys()) > 0:
        return dataset, url, date, hour
    # opened but empty: treat like a failure and look one hour further back
    print("Back up method - Failed to open : %s" % url)
    return getData(current_dt, delta_T - 1, max_lookback)
def gridpt(myVal, initVal, aResVal):
    """Convert a coordinate to a grid index, given the grid origin (initVal)
    and average cell resolution (aResVal). Truncates toward zero."""
    offset = myVal - initVal
    return int(offset / aResVal)
def data_monitor():
with open("forecasts.txt") as f:
ran = f.readlines()
ran = [x.strip() for x in ran]
print ran
# Get newest available HRRR dataset by trying (current datetime - delta time) until
# a dataset is available for that hour. This corrects for inconsistent posting
# of HRRR datasets to repository
utc_datetime = dt.datetime.utcnow()
print "Open a connection to HRRR to retrieve forecast rainfall data.............\n"
# get newest available dataset
dataset, url, date, hour = getData(utc_datetime, delta_T=0)
print ("Retrieving forecast data from: %s " % url)
# Convert time to EST
if int(hour) >= 4: # If hour is greater than 4 simply subtract 4
hour = int(hour) - 4
else: # Otherwise UTC is is the next day, so subtract one from the date also
date = int(date) - 1
hour = int(hour) - 4 + 24
filename = str(date) + "-" + str(hour)+"0000"
var = "apcpsfc"
precip = dataset[var]
print ("Dataset open")
# Convert dimensions to grid points, source: http://nomads.ncdc.noaa.gov/guide/?name=advanced
grid_lon1 = gridpt(lon_lb, initLon, aResLon)
grid_lon2 = gridpt(lon_ub, initLon, aResLon)
grid_lat1 = gridpt(lat_lb, initLat, aResLat)
grid_lat2 = gridpt(lat_ub, initLat, aResLat)
max_precip_value = []
for hr in range(len(precip.time[:])):
while True:
try:
grid = precip[hr, grid_lat1:grid_lat2, grid_lon1:grid_lon2]
max_precip_value.append(np.amax(grid.array[:]))
break
except ServerError:
'There was a server error. Let us try again'
print "Max. Precip Value: ", max(max_precip_value)
if max(max_precip_value) >= 30.0 and filename not in ran:
print max_precip_value
print "Max value", max(max_precip_value)
# In case running the model locally uncomment the following lines to run the batch file
f = open('forecasts.txt', 'w')
f.write(filename + '\n')
f.close()
filepath = 'C:/Users/Morsy/Desktop/floodWarningmodelPrototype/runs/run_workflow.bat ' + filename
p = subprocess.call(filepath, shell=True)
print p
# In case running through the AWS instance uncomment the following lines to start
# the AWS instance that includes the model
# conn.start_instances(instance_ids=['<instance_ids>'])
print "Done running the model at", dt.datetime.now()
else:
print "The model won't run for this hour"
##################################################################################################
# ***************************************** Main Program *****************************************
##################################################################################################
def main():
    """Run data_monitor() once every hour, forever."""
    schedule.every(1).hour.do(data_monitor)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == "__main__":
    main()
6bd13573370ac90140341288aa969c676558720f | 1,421 | py | Python | custom/migrations/0003_auto_20181001_1734.py | rexhepberlajolli/Insurance | 8a179976bfb3d9c476d8fc415c8dd7fc0face434 | [
"MIT"
] | null | null | null | custom/migrations/0003_auto_20181001_1734.py | rexhepberlajolli/Insurance | 8a179976bfb3d9c476d8fc415c8dd7fc0face434 | [
"MIT"
] | 23 | 2018-10-02T20:16:29.000Z | 2022-03-24T03:55:46.000Z | custom/migrations/0003_auto_20181001_1734.py | rexhepberlajolli/Insurance | 8a179976bfb3d9c476d8fc415c8dd7fc0face434 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-10-01 17:34
import django.contrib.postgres.fields
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: tidy RiskField/RiskType Meta options, add a
    unique UUID ``table_name`` to RiskType, and retype RiskField's ``options``
    (text array) and ``type`` (choice field)."""

    dependencies = [
        ('custom', '0002_riskfield_risk_type'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='riskfield',
            options={'ordering': ('id',), 'verbose_name': 'Risk Field', 'verbose_name_plural': 'Risk Fields'},
        ),
        migrations.AlterModelOptions(
            name='risktype',
            options={'ordering': ('-id',), 'verbose_name': 'Risk Type', 'verbose_name_plural': 'Risk Types'},
        ),
        migrations.AddField(
            model_name='risktype',
            name='table_name',
            field=models.UUIDField(default=uuid.uuid4, unique=True),
        ),
        migrations.AlterField(
            model_name='riskfield',
            name='options',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=50), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='riskfield',
            name='type',
            field=models.CharField(choices=[('text', 'Text'), ('select', 'Select'), ('date', 'Date'), ('number', 'Number'), ('currency', 'Currency'), ('option', 'Option'), ('color', 'Color'), ('bool', 'Boolean')], default='text', max_length=20),
        ),
    ]
| 36.435897 | 245 | 0.589022 | 140 | 1,421 | 5.864286 | 0.485714 | 0.053593 | 0.051157 | 0.065773 | 0.180268 | 0.180268 | 0 | 0 | 0 | 0 | 0 | 0.02243 | 0.247009 | 1,421 | 38 | 246 | 37.394737 | 0.74486 | 0.031668 | 0 | 0.34375 | 1 | 0 | 0.225619 | 0.017467 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09375 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bd719864180b59c3d21d158c7f7b73ceb5e6694 | 1,040 | py | Python | tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 52 | 2018-09-13T03:26:23.000Z | 2022-03-25T16:51:37.000Z | tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 1,264 | 2018-06-15T19:50:57.000Z | 2022-03-28T08:19:04.000Z | tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | 27 | 2018-06-18T08:51:59.000Z | 2022-03-16T15:35:34.000Z | from chroma_core.lib.storage_plugin.api import attributes
from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId
from chroma_core.lib.storage_plugin.api.plugin import Plugin
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api import relations
version = 1
class Controller(resources.ScannableResource):
    """A scannable storage controller, globally identified by its address."""

    class Meta:
        identifier = GlobalId("address")

    address = attributes.String()
class Lun(resources.LogicalDrive):
    """A logical drive, identified by its lun_id within the parent scope."""

    class Meta:
        identifier = ScopedId("lun_id")

    lun_id = attributes.String()
class Presentation(resources.Resource):
    """Maps a Lun onto a host path: provides DeviceNode information and
    subscribes to the Lun it presents."""

    lun_id = attributes.String()
    path = attributes.String()
    host_id = attributes.Integer()

    class Meta:
        identifier = ScopedId("lun_id", "host_id")
        relations = [
            relations.Provide(provide_to=resources.DeviceNode, attributes=["host_id", "path"]),
            relations.Subscribe(subscribe_to=Lun, attributes=["lun_id"]),
        ]
class TestPlugin(Plugin):
    """Minimal concrete plugin exposing the resources above."""
    pass
| 26.666667 | 95 | 0.722115 | 122 | 1,040 | 5.991803 | 0.303279 | 0.068399 | 0.095759 | 0.116279 | 0.337893 | 0.337893 | 0.250342 | 0.160055 | 0 | 0 | 0 | 0.00117 | 0.177885 | 1,040 | 38 | 96 | 27.368421 | 0.853801 | 0 | 0 | 0.192308 | 0 | 0 | 0.041346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.038462 | 0.192308 | 0 | 0.653846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6bd88c3ebeb6be89ecc6b48f2f8cb1de2882b211 | 1,865 | py | Python | google_or_tools/knapsack_cp_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | google_or_tools/knapsack_cp_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | google_or_tools/knapsack_cp_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | # Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Knapsack problem in OR-tools CP-SAT Solver.
Simple knapsack problem.
This is a port of my old CP model knapsack_cp.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import knapsack
def main(values, weights, n):
    """Solve a knapsack instance with the CP-SAT solver.

    values/weights: per-item value and weight; n: capacity. Prints the chosen
    items, the objective value, the total weight and solver statistics.
    """
    model = cp.CpModel()

    # problem data
    print("values:", values)
    print("weights:", weights)
    print("n:", n)
    print()

    # decision variables and knapsack constraints (helper from cp_sat_utils)
    x, z, w = knapsack(model, values, weights, n)

    # objective: maximize total value
    model.Maximize(z)

    # solve and report
    solver = cp.CpSolver()
    status = solver.Solve(model)
    if status == cp.OPTIMAL:
        print("x:", [solver.Value(x[i]) for i in range(len(values))])
        print("z:", solver.Value(z))
        print("w:", solver.Value(w))
        print()
    print("NumConflicts:", solver.NumConflicts())
    print("NumBranches:", solver.NumBranches())
    print("WallTime:", solver.WallTime())
# Example instance: item values, item weights, and knapsack capacity.
values = [15, 100, 90, 60, 40, 15, 10, 1, 12, 12, 100]
weights = [2, 20, 20, 30, 40, 30, 60, 10, 21, 12, 2]
n = 102

if __name__ == "__main__":
    main(values, weights, n)
| 24.539474 | 74 | 0.688472 | 278 | 1,865 | 4.553957 | 0.485612 | 0.047393 | 0.033175 | 0.045814 | 0.050553 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035691 | 0.18874 | 1,865 | 75 | 75 | 24.866667 | 0.801058 | 0.478284 | 0 | 0.074074 | 0 | 0 | 0.069817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.185185 | 0.444444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
6bdcc333baa696834a6f60e9431421020f6f9298 | 762 | py | Python | L1TriggerConfig/L1ScalesProducers/python/L1CaloScalesConfig_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | L1TriggerConfig/L1ScalesProducers/python/L1CaloScalesConfig_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | L1TriggerConfig/L1ScalesProducers/python/L1CaloScalesConfig_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from L1TriggerConfig.L1ScalesProducers.l1CaloScales_cfi import *
def _emptyScaleSource(record):
    # All four scale records share the same trivial EmptyESSource setup:
    # IOV expressed in runs, valid from run 1.
    return cms.ESSource("EmptyESSource",
        recordName = cms.string(record),
        iovIsRunNotTime = cms.bool(True),
        firstValid = cms.vuint32(1)
    )

emrcdsrc = _emptyScaleSource('L1EmEtScaleRcd')
jetrcdsrc = _emptyScaleSource('L1JetEtScaleRcd')
htmrcdsrc = _emptyScaleSource('L1HtMissScaleRcd')
hfrrcdsrc = _emptyScaleSource('L1HfRingEtScaleRcd')
| 26.275862 | 64 | 0.723097 | 76 | 762 | 7.236842 | 0.381579 | 0.08 | 0.174545 | 0.247273 | 0.654545 | 0.654545 | 0.341818 | 0.341818 | 0 | 0 | 0 | 0.029321 | 0.149606 | 762 | 28 | 65 | 27.214286 | 0.819444 | 0 | 0 | 0.363636 | 0 | 0 | 0.151316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6be11fbead11fcb81f2ef968873ca42c8b19679c | 3,127 | py | Python | auth/application/user_service.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
] | null | null | null | auth/application/user_service.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
] | null | null | null | auth/application/user_service.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
] | null | null | null | import uuid
from auth.application.exception import InvalidCredentials, UserNotActivated
from auth.domain.user import User
from auth.infrastructure.password import Password
from auth.infrastructure.token import Token
from auth.infrastructure.user_adapter import UserAdapter
class UserService:
    """Application-service facade for account management: sign-up/sign-in,
    activation, sessions and password resets. Persistence, hashing, tokens
    and e-mail are delegated to the infrastructure adapters."""

    def __init__(self):
        self.__user_adapter = UserAdapter()
        self.__password = Password
        self.__token = Token()

    def fetch_by_id(self, id):
        """Return the user identified by `id`."""
        return self.__user_adapter.fetch_by_id(id)

    def sign_up(self, full_name, email, password):
        """Register a new user with a hashed password and a created event."""
        user = User(
            id=uuid.uuid4(),
            full_name=full_name,
            email=email,
            password=self.__password.hash_password(password),
        )
        user.add_user_created_event()
        return self.__user_adapter.create(user)

    def sign_in(self, email, password):
        """Authenticate and open a session.

        Raises InvalidCredentials for a wrong password and UserNotActivated
        when the account exists but has not been activated yet.
        """
        user = self.__user_adapter.fetch_by_email(email=email)
        valid = self.__password.validate_password(
            password=password, hashed_password=user.password)
        if not valid:
            raise InvalidCredentials('Invalid credentials')
        if not user.is_active:
            raise UserNotActivated(f'User with email {email} not activated!')
        return self.__user_adapter.create_session(user=user)

    def create_activation(self, user_id):
        """Attach a fresh activation to the user and persist it."""
        user = self.__user_adapter.fetch_by_id(id=user_id)
        user.create_activation()
        return self.__user_adapter.update(user)

    def activate(self, code):
        """Activate the account matching `code` and persist the change."""
        user = self.__user_adapter.fetch_by_activation_code(code=code)
        return self.__user_adapter.update(user.activate(code=code))

    def refresh_session(self, refresh_token):
        """Exchange a refresh token for a new session."""
        return self.__user_adapter.refresh_session(refresh_token=refresh_token)

    def sign_out(self, access_token):
        """Validate `access_token` and delete the session it belongs to."""
        decoded = self.__token.validate_token(access_token)
        self.__user_adapter.delete_session(session_id=decoded['session_id'])

    def create_reset_password_token(self, email):
        """Issue a reset-password token for the account with `email`."""
        user = self.__user_adapter.fetch_by_email(email=email)
        return self.__user_adapter.update(user.create_reset_password_token())

    def reset_password(self, new_password, reset_password_token):
        """Set a new (hashed) password for the token's owner."""
        user = self.__user_adapter.fetch_by_reset_password_token(reset_password_token)
        updated = user.reset_password(
            new_password=self.__password.hash_password(new_password),
            reset_password_token=reset_password_token,
        )
        return self.__user_adapter.update(updated)

    def send_activation_email(self, user_id, activation_code):
        """E-mail the activation code to the user."""
        user = self.__user_adapter.fetch_by_id(user_id)
        self.__user_adapter.send_activation_email(user=user, activation_code=activation_code)

    def send_reset_password_email(self, user_id, reset_password_token):
        """E-mail the reset-password token to the user."""
        user = self.__user_adapter.fetch_by_id(user_id)
        self.__user_adapter.send_reset_password_email(
            user=user,
            reset_password_token=reset_password_token,
        )
| 35.134831 | 99 | 0.721458 | 391 | 3,127 | 5.317136 | 0.158568 | 0.084656 | 0.137085 | 0.080808 | 0.358826 | 0.320346 | 0.210678 | 0.147186 | 0.147186 | 0.107744 | 0 | 0.0004 | 0.201151 | 3,127 | 88 | 100 | 35.534091 | 0.831865 | 0 | 0 | 0.126984 | 0 | 0 | 0.021426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0.285714 | 0.095238 | 0.031746 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6bea0b523f00b0e10c188dff6116fb32a31bf903 | 1,585 | py | Python | copy.py | tzuryby/copyip | 31f32bbf7615308f2f80b4632157e4bfb207c693 | [
"Apache-2.0"
] | null | null | null | copy.py | tzuryby/copyip | 31f32bbf7615308f2f80b4632157e4bfb207c693 | [
"Apache-2.0"
] | null | null | null | copy.py | tzuryby/copyip | 31f32bbf7615308f2f80b4632157e4bfb207c693 | [
"Apache-2.0"
] | null | null | null |
import sys
import os
import urllib2
from bs4 import BeautifulSoup as bsoup
print sys.argv
if len(sys.argv) < 3:
print "Usage example: python copy.py Netflix /path/to/target-repo-directory"
sys.exit(1)
orgurl = sys.argv[1]
targetdir = sys.argv[2]
orgurl = "https://www.github.com/%s?page=1" % orgurl
print "clonning all repos from", orgurl
# move to targetdir
print "target dir", targetdir
os.chdir(targetdir)
# read page
def read_page(url):
print "reading", url
response = urllib2.urlopen(url)
return bsoup(response.read(), "html.parser")
def grab_last_page(page_content):
    """Return the number of the last pagination page as an int.

    GitHub's paginator renders Previous/Next plus numbered links; the
    second-to-last anchor holds the highest page number. With three or fewer
    anchors there is only a single page.

    Bug fix: the single-page case used to return the *string* "1", which made
    `last_page_number + 1` downstream raise a TypeError.
    """
    pages = page_content.find("div", class_="paginate-container").find_all("a")
    if len(pages) > 3:
        return int(pages[-2].text)
    return 1
def extract_repo_links(page_content):
    # Yield the href of every repository anchor on the listing page.
    anchors = page_content.find_all("a", itemprop="name codeRepository")
    return (anchor.attrs["href"] for anchor in anchors)
def clone_repo(link):
    """git-clone the repository at `link` (e.g. "/org/name") into the CWD."""
    import subprocess
    repo_full_url = "https://github.com%s.git" % link
    # Security fix: pass argv as a list with no shell, so a crafted link
    # scraped from the page cannot inject shell commands the way the previous
    # os.system("git clone %s" % url) could.
    subprocess.call(["git", "clone", repo_full_url])
# get the last page number from "div.paginate-container" of 1st page
page_content = read_page(orgurl + "1")
# int() tolerates grab_last_page historically returning the string "1"
last_page_number = int(grab_last_page(page_content))
print (last_page_number)

# first page
for link in extract_repo_links(page_content):
    clone_repo(link)

# remaining pages. Bug fix: the guard used to read `if grab_last_page > 0:`,
# comparing the *function object* against 0 (always true in Python 2), so the
# single-page case crashed inside xrange.
if last_page_number > 1:
    for pagenum in xrange(2, last_page_number + 1):
        page_content = read_page(orgurl + str(pagenum))
        for link in extract_repo_links(page_content):
            clone_repo(link)
| 25.15873 | 102 | 0.702208 | 243 | 1,585 | 4.403292 | 0.366255 | 0.102804 | 0.052336 | 0.056075 | 0.243925 | 0.128972 | 0.091589 | 0.091589 | 0.091589 | 0.091589 | 0 | 0.012279 | 0.177918 | 1,585 | 62 | 103 | 25.564516 | 0.808903 | 0.104101 | 0 | 0.102564 | 0 | 0 | 0.166431 | 0.021246 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.102564 | null | null | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bebaf90bd15c0134f852db0621fb4a1fed9268b | 264 | py | Python | colorizer/__main__.py | danbradham/colorizer | 0071f7fdfa3d88e024103e585c8c194877176c8f | [
"MIT"
] | 2 | 2020-03-26T14:37:42.000Z | 2020-06-20T23:04:52.000Z | colorizer/__main__.py | danbradham/colorizer | 0071f7fdfa3d88e024103e585c8c194877176c8f | [
"MIT"
] | null | null | null | colorizer/__main__.py | danbradham/colorizer | 0071f7fdfa3d88e024103e585c8c194877176c8f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Standard library imports
import sys
# Third party imports
from Qt import QtWidgets
# Local imports
from .ui import Dialog
if __name__ == '__main__':
    # Launch the Qt application and run the main dialog's event loop.
    app = QtWidgets.QApplication(sys.argv)
    dialog = Dialog()
    sys.exit(dialog.exec_())
| 15.529412 | 42 | 0.674242 | 35 | 264 | 4.828571 | 0.714286 | 0.130178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004762 | 0.204545 | 264 | 16 | 43 | 16.5 | 0.8 | 0.30303 | 0 | 0 | 0 | 0 | 0.044693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6bf44da783424910fd5398cf483e2be80ee83e20 | 3,799 | py | Python | afgraph/evolve/evolution_tree.py | TNonet/afgraph | e5b0fc57b3301d1560cba8b131a5371a5f4f96b5 | [
"MIT"
] | null | null | null | afgraph/evolve/evolution_tree.py | TNonet/afgraph | e5b0fc57b3301d1560cba8b131a5371a5f4f96b5 | [
"MIT"
] | null | null | null | afgraph/evolve/evolution_tree.py | TNonet/afgraph | e5b0fc57b3301d1560cba8b131a5371a5f4f96b5 | [
"MIT"
] | null | null | null | from ..function.node import *
from ..function.tree import *
import numpy as np
def generate_tree(name='test'):
    """Randomly grow a FunctionTree by breadth-first insertion.

    Node kinds: 'branch' (multi-child ops like Max/Sum), 'infertile'
    (leaves like Constant/Input), 'channel' (single-child unary ops).
    The three selection probabilities are adjusted as the tree grows to
    steer generation toward leaves.
    """
    p_branch = .2
    p_infertile = .1
    p_channel = 1 - p_branch - p_infertile
    decay = .25
    branch_nodes = [Max, Sum, Mean, Min, Product, Median]
    infertile_nodes = [Constant, Input, Uniform, Normal]
    channel_node = [Square, Reciprocal, Sqrt, Exp, Sin, Cos, ArcTan, Abs, Sign]
    max_node_count = 10
    max_num_branches = 3
    max_size_branch = 3
    branch_disribution = np.random.randint
    tree = FunctionTree(name=name)
    stack = [tree.Output.name]
    c_node = tree.Input.name
    branch_count = 0
    nodes_count = 0
    while len(stack) > 0:
        # Each stack entry is the name of a parent still owed a child.
        p_node = stack.pop(0)
        if branch_count > max_num_branches:
            # Stop creating branches: split p_branch between the other two.
            p_infertile += p_branch/2
            p_channel += p_branch/2
            p_branch = 0
            print(p_infertile, p_channel, p_branch)
        elif nodes_count > max_node_count - branch_count*(max_size_branch/2 -1):
            # Decay channel probability toward leaves as the tree fills up.
            # NOTE(review): repeated adjustment may make the p-vector drift
            # from summing to 1, which np.random.choice rejects — confirm.
            decay_amount = p_channel*decay
            p_channel -= decay_amount
            p_infertile += decay_amount
        elif nodes_count > 2* max_node_count:
            # NOTE(review): unreachable in practice — the previous elif's
            # threshold (<= max_node_count) always fires first.
            p_channel = 0
            p_infertile = 1
        new_node_type = np.random.choice(['branch', 'infertile', 'channel'], p=[p_branch, p_infertile, p_channel])
        if new_node_type == 'branch':
            new_node = np.random.choice(branch_nodes)
            # Branches get a random fan-out of 2..max_size_branch children.
            num_nodes = branch_disribution(2, max_size_branch+1)
            branch_count += 1
        elif new_node_type == 'infertile':
            new_node = np.random.choice(infertile_nodes)
            num_nodes = 1
        else:
            new_node = np.random.choice(channel_node)
            num_nodes = 1
        new_node = new_node(name=str(nodes_count))
        print('Node to be added: ', new_node.latex, new_node.name)
        try:
            if new_node_type == 'infertile':
                # Leaves attach to the parent only (no child edge).
                tree.insert(new_node, parent_item=p_node)
            else:
                tree.insert(new_node, parent_item=p_node, child_item=c_node)
            print('Tree Map')
            print(tree.tree_map)
            nodes_count += num_nodes
            if new_node_type != 'infertile':
                # Re-queue this node once per pending child slot.
                for _ in range(num_nodes):
                    stack.append(new_node.name)
        except:
            # NOTE(review): bare except treats ANY error as "insert rejected"
            # and retries the parent — this can also mask real bugs.
            stack.insert(0, p_node)
    return tree
class FunctionGenerator:
    """Generator of FunctionTrees. (Placeholder — not yet implemented.)

    # Arguments:
        base_cohert_name: A String, used as the base of naming new Trees
            Ex: A = FunctionGenerator(base_cohert_name = 'test')
                next(A) --> FunctionTree Object with name 'test_1'
                next(A) --> FunctionTree Object with name 'test_2'
                ...
        max_nodes: An Integer, maximum number of nodes in a graph
        max_branches: An Integer, maximum number of branch type function
            nodes in a graph
        node_type_probability: A dictionary, representing a discrete node distribution
            Ex: {'channel': 1/3, 'infertile': 1/3, 'branch': 1/3}
        node_probability: A numpy distribution,
        channel_nodes: A dictionary with FunctionNodes, used as the key,
            and "probability" of selecting that node when a channel node is needed
        infertile_nodes: A dictionary with FunctionNodes, used as the key,
            and "probability" of selecting that node when a infertile node is needed
        branch_nodes: A dictionary with FunctionNodes, used as the key,
            and "probability" of selecting that node when a branch node is needed
    # Returns (When called)
        FunctionTree,
    Logic:
        Breadth first creation!
        Determine number of nodes
        Determine how many branch and infertile nodes
    """
pass | 31.658333 | 114 | 0.614635 | 491 | 3,799 | 4.533605 | 0.260692 | 0.04717 | 0.024708 | 0.012129 | 0.241689 | 0.172058 | 0.172058 | 0.140611 | 0.11186 | 0.11186 | 0 | 0.013258 | 0.30508 | 3,799 | 120 | 115 | 31.658333 | 0.829924 | 0.334562 | 0 | 0.063492 | 1 | 0 | 0.035109 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0.015873 | 0.047619 | 0 | 0.095238 | 0.063492 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bf7fad60bd6530f2448ecde08c56c91a702e06b | 665 | py | Python | prob_046.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | 1 | 2017-02-13T19:00:59.000Z | 2017-02-13T19:00:59.000Z | prob_046.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | null | null | null | prob_046.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | null | null | null | def checkPrime(x):
if x==1:
return False
elif x==2:
return True
elif x%2==0:
return False
else:
n=3
while n<x:
if x%n==0:
return False
else:
n+=2
return True
def checksq(n, i):
    """Return True when (n - i) / 2 is a perfect square.

    Used for Goldbach's other conjecture: n = prime + 2 * k**2.
    """
    half = (n - i) / 2.0
    root = int(half ** 0.5)
    return root * root == half
# Project Euler 46 (Goldbach's other conjecture): find the smallest odd
# composite that cannot be written as prime + twice a square.
# NOTE: Python 2 script (`print n` statement at the end).
n = 9
found = False
while not found:
    if not checkPrime(n):
        # Try every possible prime i with n - i = 2*k^2.
        i = 2
        while i <= n - 2:
            if checkPrime(i) and checksq(n, i):
                break
            else:
                i += 1
        # Loop ran off the end without a decomposition -> counterexample.
        if i == n - 1:
            found = True
    if not found:
        n += 2
print n
| 19 | 58 | 0.389474 | 94 | 665 | 2.755319 | 0.265957 | 0.03861 | 0.034749 | 0.123552 | 0.131274 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059172 | 0.491729 | 665 | 34 | 59 | 19.558824 | 0.707101 | 0 | 0 | 0.3125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bf98c9ac4bb1a4708a2740a2a12d4e6b1ccfeea | 377 | py | Python | Solutions/6kyu/6kyu_ascii_cipher.py | citrok25/Codewars-1 | dc641c5079e2e8b5955eb027fd15427e5bdb2e26 | [
"MIT"
] | 46 | 2017-08-24T09:27:57.000Z | 2022-02-25T02:24:33.000Z | Solutions/6kyu/6kyu_ascii_cipher.py | abbhishek971/Codewars | 9e761811db724da1e8aae44594df42b4ee879a16 | [
"MIT"
] | null | null | null | Solutions/6kyu/6kyu_ascii_cipher.py | abbhishek971/Codewars | 9e761811db724da1e8aae44594df42b4ee879a16 | [
"MIT"
] | 35 | 2017-08-01T22:09:48.000Z | 2022-02-18T17:21:37.000Z | def ascii_cipher(message, key):
pfactor = max(
i for i in range(2, abs(key)+1)
if is_prime(i) and key%i==0
)*(-1 if key<0 else 1)
return ''.join(chr((ord(c)+pfactor)%128) for c in message)
def is_prime(n):
    """Return True if n is a prime number (n < 2 is never prime)."""
    if n == 2:
        return True
    if n < 2:
        return False
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 1
    return True
| 31.416667 | 71 | 0.511936 | 69 | 377 | 2.753623 | 0.478261 | 0.042105 | 0.063158 | 0.115789 | 0.126316 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063745 | 0.334218 | 377 | 11 | 72 | 34.272727 | 0.693227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6bff94374d1c3418f2f616b2dab661a86acc6ce0 | 19,067 | py | Python | bindings/ast.py | idobatter/PythonJS | 0161dd5aba6caeaf5b06e74cc8524efd04a36143 | [
"BSD-3-Clause"
] | 1 | 2015-11-06T02:36:29.000Z | 2015-11-06T02:36:29.000Z | bindings/ast.py | idobatter/PythonJS | 0161dd5aba6caeaf5b06e74cc8524efd04a36143 | [
"BSD-3-Clause"
] | null | null | null | bindings/ast.py | idobatter/PythonJS | 0161dd5aba6caeaf5b06e74cc8524efd04a36143 | [
"BSD-3-Clause"
] | null | null | null | # Brython AST to Python AST Bridge
# by Brett Hartshorn - copyright 2013
# License: "New BSD"
def brython_tokenize(src):
    # Parse `src` with Brython's tokenizer.  `JS(...)` evaluates raw
    # JavaScript (PythonJS/Brython dialect), so this only works inside
    # that runtime, not under CPython.
    module = 'test'  # NOTE(review): module name is hard-coded — confirm intended
    return JS('__BRYTHON__.$tokenize(src, module)')
# Pending decorator AST nodes collected while walking `decorator` contexts;
# consumed by FunctionDef / ClassDef via pop_decorators().
_decorators = []
def push_decorator(ctx):
    # Remember a decorator node until the decorated def/class is built.
    _decorators.append( ctx )
def pop_decorators():
    # Return a copy of all pending decorators and clear the stack.
    arr = list( _decorators )
    _decorators.length = 0 ## javascript style -- valid only under PythonJS; CPython lists have no .length
    return arr
class Pass:
    """AST placeholder for a `pass` statement; carries no state."""

    def __init__(self, ctx, node):
        pass
class Not:
    """Unary `not`.  `value` holds the negated operand node (non-standard
    compared to ast.Not, which is a bare operator marker)."""

    def __init__(self, ctx=None, node=None):
        # A bare Not() (value=None) is used as a pure operator marker.
        self.value = to_ast_node(ctx.tree[0]) if ctx else None
class List:
    """List literal: `elts` holds one translated AST node per element."""

    def __init__(self, ctx, node):
        self.elts = [to_ast_node(item) for item in ctx.tree]
class comprehension:
    # Mirrors ast.comprehension: one `for target in iter` clause.
    def __init__(self, ctx):
        if ctx.type != 'comp_for': raise TypeError
        target = ctx.tree[0]
        iter = ctx.tree[1]
        if target.type != 'target_list': raise TypeError
        if iter.type != 'comp_iterable': raise TypeError
        self.target = to_ast_node( target.tree[0] ) ## TODO support mutiple targets
        self.iter = to_ast_node( iter.tree[0] )
        # `if` filter clauses are not translated yet.
        self.ifs = []
class ListComp:
    """List comprehension: `elt` is the output expression, `generators`
    the comprehension clauses; `_vars` keeps Brython's collected names."""
    def __init__(self, ctx, node):
        self.elt = to_ast_node(ctx.expression[0]) ## TODO support mutiple
        self.generators = []
        self._vars = ctx.vars ## brython catches all names
        for c in ctx.tree:
            if c.type == 'comprehension':
                # Only single-clause comprehensions are supported.
                if len(c.tree) == 1:
                    self.generators.append( comprehension(c.tree[0]) )
                else:
                    raise TypeError
            else:
                raise TypeError
class Tuple:
    """Tuple literal: `elts` holds one translated AST node per element."""

    def __init__(self, ctx, node):
        self.elts = [to_ast_node(member) for member in ctx.tree]
class Dict:
    """Dict literal.  `ctx.items` is a flat [key, value, key, value, ...]
    sequence; split it into parallel `keys` / `values` node lists.
    (A while loop is kept — range() with a step is noted as broken in the
    targeted PythonJS runtime.)"""

    def __init__(self, ctx, node):
        self.keys = []
        self.values = []
        idx = 0
        total = len(ctx.items)
        while idx < total:
            self.keys.append(to_ast_node(ctx.items[idx]))
            self.values.append(to_ast_node(ctx.items[idx + 1]))
            idx += 2
class Subscript:
    """Subscription `value[...]`: one tree item -> Index, two -> Slice
    (lower, upper), three -> Slice(lower, upper, step)."""
    def __init__(self, ctx, node):
        self.value = to_ast_node(ctx.value)
        if len(ctx.tree) == 1:
            self.slice = Index(value=to_ast_node(ctx.tree[0]))
        elif len(ctx.tree) == 2:
            self.slice = Slice(
                lower=to_ast_node(ctx.tree[0]),
                upper=to_ast_node(ctx.tree[1])
            )
        elif len(ctx.tree) == 3:
            self.slice = Slice(
                lower=to_ast_node(ctx.tree[0]),
                upper=to_ast_node(ctx.tree[1]),
                step=to_ast_node(ctx.tree[2])
            )
        else:
            raise TypeError
        #self.ctx = 'Load', 'Store', 'Del'
class Index:
    """Plain subscription: wraps the single index expression node."""

    def __init__(self, value=None):
        self.value = value
class Slice:
    """Slice subscription with optional lower / upper / step nodes."""

    def __init__(self, lower=None, upper=None, step=None):
        self.lower, self.upper, self.step = lower, upper, step
class Assign:
    """Assignment statement: `targets` (one or more) and a single `value`."""
    def _collect_targets(self, ctx):
        # Recursively flatten Brython's nested `assign` contexts into the
        # flat `targets` list (names, attributes, subscripts, tuples).
        if ctx.type == 'expr' and ctx.name == 'id':
            a = ctx.tree[0]
            if a.type == 'id':
                self.targets.append( Name(ctx.tree[0]) )
            elif a.type == 'attribute' and a.func == 'getattr': #and a.value.type == 'id':
                self.targets.append( Attribute(a,None) )
            elif a.type == 'sub' and a.func == 'getitem':
                self.targets.append( to_ast_node(a) )
            else:
                print('_collect_targets ERROR!')
                print(ctx)
                raise TypeError
        elif ctx.type == 'assign':
            # Chained assignment (a = b = v): both sides may hold targets.
            self._collect_targets( ctx.tree[0] )
            self._collect_targets( ctx.tree[1] )
        elif ctx.type == 'list_or_tuple':
            self.targets.append( to_ast_node(ctx) )
        else:
            print('_collect_targets ERROR')
            print( ctx )
            raise TypeError
    def __init__(self, ctx, node):
        self.targets = []
        self._collect_targets( ctx.tree[0] )
        self.value = to_ast_node( ctx.tree[1] ) ## should be an: expr.name==operand
class AugAssign:
    """Augmented assignment (`x += v`): `target`, raw operator string in
    `op` (from Brython's augm_assign), and `value`."""
    #_previous = None ## DEPRECATED
    def __init__(self, ctx, node):
        #AugAssign._previous = self
        ctx.name = '' ## need to set name to nothing so that to_ast_node will not recurse back here
        self.target = to_ast_node(ctx)
        self.op = ctx.augm_assign['op']
        self.value = to_ast_node( ctx.tree[1] )
class Num:
    """Numeric literal; `n` is the parsed value (must not be None)."""

    def __init__(self, ctx, node):
        parsed = ctx.value
        if parsed is None:
            raise TypeError
        self.n = parsed
class Str:
    """String literal; `s` holds the raw string from ctx.tree."""

    def __init__(self, ctx, node):
        if len(ctx.tree) != 1:
            raise TypeError
        self.s = ctx.tree[0]
class Name:
    """Identifier reference; `id` is the raw name string.  Accepts either
    an `id`-type ctx or an explicit `name` keyword."""
    def __init__(self, ctx=None, name=None):
        if name:
            self.id = name
        elif ctx.type == 'id':
            self.id = ctx.value
        else:
            print ctx
            raise TypeError
# Stateless marker classes mirroring ast's operator / comparison / bool-op
# node types; matched by type name in NodeVisitor (visit_Add, visit_Eq, ...).
class Add:
    pass
class Sub:
    pass
class Div:
    pass
class FloorDiv:
    pass
class Mult:
    pass
class Mod:
    pass
class Pow:
    pass
class LShift:
    pass
class RShift:
    pass
class BitOr:
    pass
class BitXor:
    pass
class BitAnd:
    pass
class Eq:
    pass
class NotEq:
    pass
class Lt:
    pass
class LtE:
    pass
class Gt:
    pass
class GtE:
    pass
class In:
    pass
class NotIn:
    pass
class Is:
    pass
class IsNot:
    pass
class And:
    pass
class Or:
    pass
# Maps Brython's operator token to the corresponding marker class above.
_operators = {
    '+' : Add,
    '-' : Sub,
    '/' : Div,
    '//': FloorDiv,
    '*' : Mult,
    '%' : Mod,
    '**': Pow,
    '<<': LShift,
    '>>': RShift,
    '|' : BitOr,
    '^' : BitXor,
    '&' : BitAnd,
    '==': Eq,
    '!=': NotEq,
    '<' : Lt,
    '<=': LtE,
    '>' : Gt,
    '>=': GtE,
    'in': In,
    'not_in' : NotIn,
    'is': Is,
    'is_not': IsNot,
    'and' : And,
    'or' : Or,
}
# Unary operator markers (used by UnaryOp below).
class USub:
    pass
class UAdd:
    pass
class Invert:
    pass
class UnaryOp:
    """Unary operation built directly from an abstract_expr.

    `op` is set to a marker instance for '-', '+', '~' or 'not'; for any
    other token the attribute is simply never assigned.
    """

    def __init__(self, op=None, operand=None):
        self.operand = operand
        if op == 'not':
            self.op = Not()
        elif op == '~':
            self.op = Invert()
        elif op == '+':
            self.op = UAdd()
        elif op == '-':
            self.op = USub()
class BinOp:
    """Binary operation: `left` / `right` operand nodes plus an operator
    marker instance looked up from _operators by Brython's token."""
    def __init__(self, ctx, node):
        print 'BinOp', ctx
        if len(ctx.tree) != 2:
            raise TypeError
        self.left = to_ast_node( ctx.tree[0] )
        self.right = to_ast_node( ctx.tree[1] )
        if ctx.op in _operators:
            klass = _operators[ctx.op]
            self.op = klass()
        else:
            print('ERROR: unknown operator type')
            print(ctx)
            raise TypeError
class _arguments:
    """Function signature arguments (mirrors Python-2 ast.arguments:
    `args` are Name nodes, vararg/kwarg are raw name strings)."""
    def __init__(self, ctx):
        self.args = [] ## in Python2 these are Name nodes, in Py3 they are "arg" objects with: `arg`=raw-string and `annotation`=astnode
        self.vararg = None # string
        self.kwarg = None # string
        self.defaults = []
        self.kw_defaults = []
        if ctx.type != 'func_args':
            print('_arguments class expects ctx.type of func_args')
            raise TypeError
        for c in ctx.tree:
            if c.type == 'func_arg_id':
                self.args.append( Name(name=c.name) )
                # A nested tree entry is this argument's default value.
                if len(c.tree):
                    self.defaults.append( to_ast_node(c.tree[0]) )
            elif c.type == 'func_star_arg' and c.op=='*':
                self.vararg = c.name
            elif c.type == 'func_star_arg' and c.op=='**':
                self.kwarg = c.name
            else:
                raise TypeError
class FunctionDef:
    """Function definition: name, _arguments, body statements, and any
    decorators previously collected via push_decorator."""
    def __init__(self, ctx, node):
        self.name = ctx.name ## raw string
        self.args = _arguments( ctx.tree[0] )
        self.body = []
        self.decorator_list = pop_decorators()
        self.returns = None ## python3 returns annotation
        print 'FunctionDef::', ctx
        for child in node.children:
            child_ctx = child.get_ctx()
            if child_ctx:
                anode = to_ast_node( child_ctx, node=child )
                if anode: ## ctx of type: 'single_kw' and token elif/else do not return an ast node
                    self.body.append( anode )
class _lambda_arguments:
def __init__(self, ctx):
self.args = []
self.vararg = None # string
self.kwarg = None # string
self.defaults = []
self.kw_defaults = []
for c in ctx.tree:
if c.type != 'call_arg': raise TypeError
name = c.vars[0]
self.args.append( Name(name=name) )
class Lambda:
    """Lambda expression: argument list, a single body expression, plus
    Brython's locals/vars bookkeeping kept on private attributes."""
    def __init__(self, ctx, node):
        self.args = _lambda_arguments( ctx.args[0] )
        self.body = to_ast_node( ctx.tree[0] )
        self._locals = ctx.locals
        self._vars = ctx.vars
class Return:
    """`return` statement; `value` is None for a bare `return`."""

    def __init__(self, ctx, node):
        expr = ctx.tree[0]
        # Brython encodes a bare `return` as an empty abstract_expr.
        if expr.type == 'abstract_expr' and len(expr.tree) == 0:
            self.value = None
        else:
            self.value = to_ast_node(expr)
class _keyword:
def __init__(self, arg, value):
self.arg = arg ## raw string
self.value = value ## astnode
class Call:
    """Function call: `func`, positional `args`, and `keywords`
    (starargs / kwargs are not translated and stay None)."""
    def __init__(self, ctx, node):
        self.func = to_ast_node( ctx.func )
        self.args = []
        self.keywords = []
        self.starargs = None
        self.kwargs = None
        for c in ctx.tree:
            if c.type == 'call_arg':
                sub = c.tree[0]
                if sub.type == 'kwarg':
                    # name=value argument
                    k = _keyword(
                        sub.tree[0].value,
                        to_ast_node(sub.tree[1])
                    )
                    self.keywords.append( k )
                else:
                    self.args.append( to_ast_node(c.tree[0]) )
            else:
                raise TypeError
class Expr:
    """Expression statement: wraps a single value node."""

    def __init__(self, ctx, node):
        first = ctx.tree[0]
        self.value = to_ast_node(first)
class ClassDef:
    """Class definition: name, base classes (unpacked from a tuple expr),
    body statements, and pending decorators."""
    def __init__(self, ctx, node):
        self.name = ctx.name
        self.bases = []
        self.body = []
        self.decorator_list = pop_decorators()
        if len(ctx.tree) == 1:
            e = ctx.tree[0]
            if e.type == 'expr' and e.name == 'tuple':
                t = e.tree[0]
                for b in t.tree:
                    self.bases.append( Name(b.tree[0]) )
            else:
                raise TypeError
        for child in node.children:
            if child.get_ctx():
                anode = to_ast_node( child.get_ctx() )
                if anode:
                    self.body.append( anode )
class Attribute:
    """Attribute access `value.attr`; `_func` keeps Brython's
    getattr/setattr tag."""

    def __init__(self, ctx, node):
        self.attr = ctx.name
        self._func = ctx.func ## brython-extra: getattr/setattr
        self.value = to_ast_node(ctx.value)
class IfExp:
    '''
    if/elif/else could be translated to javascript switch/case more easily if we track elif statements,
    but the python standard simply treats elif statements as nested if statements in .orelse.
    In the future we can bend this rule when PythonJS becomes fully self-hosted.
    '''
    # Last `if` built; to_ast_node appends elif/else bodies to its orelse.
    _previous = None
    def __init__(self, ctx, node):
        if ctx.token == 'if': ## can also be "elif" and "else"
            IfExp._previous = self
        self.test = to_ast_node( ctx.tree[0] )
        self.body = []
        self.orelse = []
        for child in node.children:
            anode = to_ast_node(child.get_ctx())
            if anode:
                self.body.append( anode )
class For:
    """`for target in iter:` loop.

    `target` comes from the single entry of Brython's target_list, or —
    for multi-target loops like `for a, b in ...` — the targets are packed
    into a Tuple node.  `body` collects the loop's child statements.
    """
    def __init__(self, ctx, node):
        targets = ctx.tree[0]
        if targets.type != 'target_list':
            raise TypeError
        if len(targets.tree) == 1:
            self.target = to_ast_node( targets.tree[0] )
        else:
            # Pack multiple loop targets into an ast.Tuple equivalent.
            # BUGFIX: Tuple.__init__ takes (ctx, node); the old call
            # `Tuple(targets)` raised TypeError for every multi-target loop.
            self.target = Tuple(targets, None)
        self.iter = to_ast_node( ctx.tree[1] )
        self.body = []
        for child in node.children:
            anode = to_ast_node(child.get_ctx())
            if anode:
                self.body.append( anode )
class While:
    """`while test:` loop: translate the condition, then each child
    statement node into `body`."""

    def __init__(self, ctx, node):
        self.test = to_ast_node(ctx.tree[0])
        self.body = []
        for child in node.children:
            translated = to_ast_node(child.get_ctx())
            if translated:
                self.body.append(translated)
class alias:
    """Import alias: `name` as `asname` (`asname` is None when the import
    is not aliased)."""

    def __init__(self, name=None, asname=None):
        self.name, self.asname = name, asname
class Import:
    """`import a, b as c`: one alias entry per imported module."""

    def __init__(self, ctx, node):
        self.names = [alias(name=item.name, asname=item.alias) for item in ctx.tree]
class ImportFrom:
    """`from module import names`; `level` 0 means an absolute import."""

    def __init__(self, ctx, node):
        self.module = ctx.module
        self.level = 0
        self.names = [alias(name=imported) for imported in ctx.names]
class TryExcept:
    """`try/except`: handlers attach themselves via the class-level _stack
    (ExceptHandler appends itself to the innermost open TryExcept)."""
    _stack = []
    def __init__(self, ctx, node):
        TryExcept._stack.append( self )
        self.body = []
        self.handlers = []
        self.orelse = []
        for child in node.children:
            self.body.append( to_ast_node(child.get_ctx()) )
class ExceptHandler:
    """One `except` clause; registers itself on the enclosing TryExcept."""
    def __init__(self, ctx, node):
        TryExcept._stack[-1].handlers.append(self)
        self.type = None
        self.name = None
        self.body = []
        if len(ctx.tree):
            self.type = to_ast_node(ctx.tree[0])
        for child in node.children:
            self.body.append( to_ast_node(child.get_ctx()) )
        #TryExcept._stack.pop()  # NOTE(review): the stack is never popped — nested try blocks may mis-attach handlers; confirm
class Assert:
    """`assert test` statement; messages (`assert x, msg`) are not
    translated, so `msg` is always None."""

    def __init__(self, ctx, node):
        self.msg = None
        self.test = to_ast_node(ctx.tree[0])
class Raise:
    """`raise` statement, carrying both the Python 3 (exc / cause) and
    Python 2 (type / inst / tback) attribute spellings."""

    def __init__(self, ctx, node):
        # Py3-style fields.
        self.exc = to_ast_node(ctx.tree[0])
        self.cause = to_ast_node(ctx.tree[1]) if len(ctx.tree) > 1 else None
        # Py2-style aliases mirror the Py3 attributes.
        self.type = self.exc
        self.inst = self.cause
        self.tback = None
# Dispatch table: Brython ctx.type -> AST class, used by to_ast_node for
# the straightforward one-to-one translations.
__MAP = {
    'def' : FunctionDef,
    'lambda' : Lambda,
    'assign' : Assign,
    'return' : Return,
    'expr' : Expr,
    'call' : Call,
    'int' : Num,
    'str' : Str,
    'id' : Name,
    'class' : ClassDef,
    'op' : BinOp,
    'attribute' : Attribute,
    'pass' : Pass,
    'for' : For,
    'not' : Not,
    'sub' : Subscript,
    'import' : Import,
    'from' : ImportFrom,
    'try' : TryExcept, ## note: there is also TryFinally
    'assert' : Assert,
    'raise' : Raise,
}
def to_ast_node( ctx, node=None ):
    """Translate one Brython parse context (plus its statement-level tree
    node, when available) into the matching AST class above.

    Returns None for contexts that only mutate translator state
    (decorators, elif/else chaining, except handlers).
    NOTE: uses Python-2 print statements (PythonJS/Brython dialect).
    """
    print 'to-ast-node', ctx
    if ctx.type == 'node':
        # Statement wrapper: unwrap and carry the node along for children.
        print 'NODE::', ctx.node
        return to_ast_node( ctx.tree[0], node=ctx.node )
    elif ctx.type == 'assign' and ctx.tree[0].type == 'id' and ctx.tree[0].value == '$temp':
        print('DEPRECATED')
        raise TypeError
        return AugAssign(ctx, node)
    elif ctx.type == 'expr' and ctx.name == 'augm_assign':
        return AugAssign(ctx, node)
    elif ctx.type == 'except':
        ExceptHandler(ctx, node) ## do not return, inserts self into TryExcept node
    elif ctx.type in __MAP:
        return __MAP[ ctx.type ]( ctx, node )
    elif ctx.type == 'list_or_tuple':
        if ctx.real == 'list':
            return List(ctx, node)
        elif ctx.real == 'tuple':
            return Tuple(ctx, node)
        elif ctx.real == 'list_comp':
            return ListComp(ctx, node)
        else:
            raise TypeError
    elif ctx.type == 'dict_or_set':
        # NOTE(review): set literals fall through and return None — confirm.
        if ctx.real == 'dict':
            return Dict(ctx, node)
    elif ctx.type == 'decorator':
        push_decorator( to_ast_node(ctx.tree[0]) )
    elif ctx.type == 'condition' and ctx.token == 'while':
        return While( ctx, node )
    elif ctx.type == 'condition' and ctx.token == 'if':
        return IfExp( ctx, node )
    elif ctx.type == 'condition' and ctx.token == 'elif':
        # An elif becomes a nested IfExp appended to the previous orelse.
        a = IfExp( ctx, node )
        IfExp._previous.orelse.append( a )
        IfExp._previous = a
    elif ctx.type == 'single_kw':
        if ctx.token == 'else' or ctx.token == 'elif':
            #if ctx.token == 'else': ## TODO fix: "if/elif: if"
            orelse = IfExp._previous.orelse
            for child in node.children:
                walk_nodes( child, orelse )
        else:
            print 'unknown token for single_kw'
            print ctx
            raise TypeError
    elif ctx.type == 'node_js':
        print(ctx.tree[0])
        ## special brython inline javascript ##
        #if len(ctx.tree) == 1 and '__iadd__' in ctx.tree[0]:
        #    AugAssign._previous.op = '+'
        #elif len(ctx.tree) == 1 and '__isub__' in ctx.tree[0]:
        #    AugAssign._previous.op = '-'
        #elif len(ctx.tree) == 1 and ctx.tree[0].startswith("if($temp.$fast_augm"):
        #    print(ctx.tree[0])
        #    c = ctx.tree[0].split('"')
        #    if len(c) == 3:
        #        AugAssign._previous.target = Name( name=c[1] )
        #    else:
        #        print(c)
        #        raise TypeError
        if len(ctx.tree) == 1 and ctx.tree[0] == 'else': ## DEPRECATED
            raise TypeError
        else:
            print '--------special node_js error-------'
            print(ctx)
            raise TypeError
    elif ctx.type == 'abstract_expr':
        # Brython wraps unary negation as a call to __neg__ via getattr.
        if len(ctx.tree)==1 and ctx.tree[0].type=='expr' and ctx.tree[0].name=='call' and len(ctx.tree[0].tree)==1:
            call = ctx.tree[0].tree[0]
            assert call.type=='call'
            func = call.func
            if func.type=='attribute' and func.func=='getattr':
                if func.name=='__neg__':
                    return UnaryOp(op='-', operand=to_ast_node(func.value))
                else:
                    raise TypeError
            else:
                print '---------abstract_expr error----------'
                print ctx
                raise TypeError
        elif ctx.parent.type=='sub' and len(ctx.tree)==0:
            ## this is a null part of the slice: "a[1:]"
            return None
        else:
            print '---------abstract_expr error----------'
            print ctx
            raise TypeError
    else:
        print '---------error----------'
        print node
        print ctx
        raise TypeError
def walk_nodes( node, module ):
    """Walk Brython's node tree depth-first, appending translated AST
    nodes to `module` (a plain list acting as the module body)."""
    print 'node.type:', node.type
    if node.type == 'expression':
        if node.get_ctx():
            anode = to_ast_node( node.get_ctx(), node=node )
            if anode: ## decorators do not return
                module.append( anode )
    elif node.get_ctx():
        anode = to_ast_node( node.get_ctx(), node=node )
        if anode:
            module.append( anode )
        #else:
        #    for child in node.children:
        #        walk_nodes( child, module )
    else:
        # Structural node without a ctx: recurse into children.
        for child in node.children:
            walk_nodes( child, module )
def parse(source):
    """Tokenize `source` with Brython and translate the resulting node
    tree into a flat list of AST-like nodes (the module body)."""
    tree = brython_tokenize(source)
    module = []
    walk_nodes(tree, module)
    return module
class NodeVisitor:
    """Pretty-printer that walks the translated nodes and reconstructs
    Python-ish source text; statements are printed, expressions return
    strings.  Dispatch is by node class name ('visit_' + type name)."""
    def __init__(self, module):
        #print('module:')
        #print(module)
        for node in module:
            self.visit( node )
    def visit(self, node):
        # Dispatch to visit_<ClassName>; raises AttributeError when a node
        # type has no handler (no generic_visit fallback).
        if node is None:
            print('ERROR: trying to visit None')
            raise TypeError
        #print('visit.name', node)
        f = getattr(
            self,
            'visit_'+type(node).__name__,
        )
        return f( node )
    def visit_Lambda(self, node):
        args = []
        for a in node.args.args:
            args.append( self.visit(a) )
        if node.args.vararg:
            args.append( '*'+node.args.vararg )
        if node.args.kwarg:
            args.append( '**'+node.args.kwarg )
        args = ','.join( args )
        body = self.visit(node.body)
        return 'lambda %s: %s' %(args, body)
    def visit_ListComp(self, node):
        # Only the first generator clause is rendered.
        gen = node.generators[0]
        return '[' + self.visit(node.elt) + ' for ' + self.visit(gen.target) + ' in ' + self.visit(gen.iter) + ']'
    def visit_Import(self, node):
        a = [ alias.name for alias in node.names ]
        print 'import', ','.join(a)
    def visit_ImportFrom(self, node):
        a = [ alias.name for alias in node.names ]
        print 'from', node.module, 'import', ','.join(a)
    def visit_TryExcept(self, node):
        print 'try:'
        for n in node.body:
            a = self.visit(n)
            if a: print '	', a
        for h in node.handlers:
            if h.type:
                print 'except ', self.visit(h.type), ':'
            else:
                print 'except:'
            for n in h.body:
                a = self.visit(n)
                if a: print '	', a
    def visit_Assert(self, node):
        print 'assert', self.visit(node.test)
    def visit_Raise(self, node):
        print 'raise', self.visit(node.type)
    def visit_Expr(self, node):
        return self.visit(node.value)
    def visit_Str(self, node):
        return node.s
    def visit_Num(self, node):
        return node.n
    def visit_Name(self, node):
        return node.id
    def visit_Pass(self, node):
        return 'pass'
    def visit_Not(self, node):
        ## note: node.value is non-standard for the `Not` node
        if node.value:
            return ' not ' + self.visit(node.value)
        else:
            return ' not '
    def visit_IsNot(self, node):
        return ' is not '
    def visit_Eq(self, node):
        return '=='
    def visit_NotEq(self, node):
        return '!='
    def visit_In(self, node):
        return ' in '
    def visit_Is(self, node):
        return ' is '
    def visit_Pow(self, node):
        return '**'
    def visit_Mult(self, node):
        return '*'
    def visit_UAdd(self, node):
        return '+'
    def visit_USub(self, node):
        return '-'
    def visit_Add(self, node):
        return '+'
    def visit_Sub(self, node):
        return '-'
    def visit_FloorDiv(self, node):
        return '//'
    def visit_Div(self, node):
        return '/'
    def visit_Mod(self, node):
        return '%'
    def visit_LShift(self, node):
        return '<<'
    def visit_RShift(self, node):
        return '>>'
    def visit_BitXor(self, node):
        return '^'
    def visit_BitOr(self, node):
        return '|'
    def visit_BitAnd(self, node):
        return '&'
    def visit_Lt(self, node):
        return '<'
    def visit_Gt(self, node):
        return '>'
    def visit_GtE(self, node):
        return '>='
    def visit_LtE(self, node):
        return '<='
    def visit_And(self, node):
        return ' and '
    def visit_Or(self, node):
        return ' or '
def visit_NotIn(self, node):
return ' not in ' | 22.671819 | 131 | 0.639744 | 2,891 | 19,067 | 4.068834 | 0.104116 | 0.041656 | 0.040551 | 0.038086 | 0.409759 | 0.308085 | 0.24424 | 0.192298 | 0.17436 | 0.133979 | 0 | 0.00687 | 0.206063 | 19,067 | 841 | 132 | 22.671819 | 0.770181 | 0.096135 | 0 | 0.334808 | 0 | 0 | 0.067669 | 0.005609 | 0 | 0 | 0 | 0.003567 | 0.007375 | 0 | null | null | 0.047198 | 0.011799 | null | null | 0.053097 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d4010288139e89f92e7e1a1c715375c19d4b15d5 | 2,779 | py | Python | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | [
"MIT"
] | null | null | null | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | [
"MIT"
] | null | null | null | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | [
"MIT"
] | null | null | null | from spira.core.transformable import Transformable
from spira.core.parameters.initializer import ParameterInitializer
from spira.core.parameters.initializer import MetaInitializer
from spira.core.parameters.descriptor import FunctionParameter
from spira.yevon.process.gdsii_layer import LayerParameter
from spira.core.parameters.variables import *
from spira.yevon.process import get_rule_deck
# Process rule deck (layer/process defaults) shared across spira modules.
RDD = get_rule_deck()
class MetaElement(MetaInitializer):
    """Metaclass that routes constructor arguments through the class's
    ``__map_parameters__`` before instantiation, and records the mapped
    kwargs on the created instance's ``__keywords__``."""
    def __call__(cls, *params, **keyword_params):
        kwargs = cls.__map_parameters__(*params, **keyword_params)
        # `cls` is rebound to the created instance here.
        cls = super().__call__(**kwargs)
        cls.__keywords__ = kwargs
        return cls
class __Element__(Transformable, ParameterInitializer, metaclass=MetaElement):
    """ Base class for all transformable elements. """
    def get_node_id(self):
        # Fall back to the string representation when no explicit id is set.
        if self.__id__:
            return self.__id__
        else:
            return self.__str__()
    def set_node_id(self, value):
        self.__id__ = value
    # node_id proxies __id__ through the getter/setter above.
    node_id = FunctionParameter(get_node_id, set_node_id)
    location_name = StringParameter(default='')
    def __init__(self, transformation=None, **kwargs):
        super().__init__(transformation=transformation, **kwargs)
    def __add__(self, other):
        # element + list -> ElementList; element + element -> ElementList.
        # NOTE(review): the bare name `spira` is not among this module's
        # visible imports — confirm spira.ElementList resolves at runtime.
        if isinstance(other, list):
            l = spira.ElementList([self])
            l.extend(other)
            return l
        elif isinstance(other, __Element__):
            return spira.ElementList([self, other])
        else:
            raise TypeError("Wrong type of argument for addition in __Element__: " + str(type(other)))
    def __radd__(self, other):
        # Mirror of __add__ for list + element.
        if isinstance(other, list):
            l = spira.ElementList(other)
            l.append(self)
            return l
        elif isinstance(other, __Element__):
            return spira.ElementList([other, self])
        else:
            raise TypeError("Wrong type of argument for addition in __Element__: " + str(type(other)))
    def flatten(self):
        # Leaf elements flatten to themselves.
        return [self]
    def dependencies(self):
        return None
class __LayerElement__(__Element__):
    """ Base class for elements bound to a process layer. """
    layer = LayerParameter()
    def __init__(self, layer=0, transformation=None, **kwargs):
        super().__init__(layer=layer, transformation=transformation, **kwargs)
    def __eq__(self, other):
        # Equal when layer keys match and the transformed shapes coincide.
        # NOTE(review): relies on subclasses defining `self.shape` — it is
        # not set in this class; confirm against concrete subclasses.
        if other == None:
            return False
        if not isinstance(other, __LayerElement__):
            return False
        if other.layer.key != self.layer.key:
            return False
        if self.shape.transform_copy(self.transformation) != other.shape.transform_copy(other.transformation):
            return False
        return True
    def __ne__(self,other):
        return not self.__eq__(other)
| 30.877778 | 110 | 0.652033 | 299 | 2,779 | 5.658863 | 0.284281 | 0.037234 | 0.038416 | 0.054374 | 0.291962 | 0.252955 | 0.205674 | 0.205674 | 0.205674 | 0.085106 | 0 | 0.000481 | 0.251889 | 2,779 | 89 | 111 | 31.224719 | 0.813372 | 0.015113 | 0 | 0.234375 | 0 | 0 | 0.038334 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171875 | false | 0 | 0.109375 | 0.046875 | 0.609375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.